aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorJohn W. Linville <linville@tuxdriver.com>2011-11-21 16:00:56 -0500
committerJohn W. Linville <linville@tuxdriver.com>2011-11-21 16:00:56 -0500
commit4713e962c5d98901890c446843ae4e0a9d37b7b3 (patch)
treebe1c98cde57c1a01c2f84ab67878922ef65e58b1 /drivers
parente3bea1c8751d297c197949db01aa1e7adbc1104d (diff)
parent9b5e2f463ac6f53789bd5ce43d2bb4b4c01e8607 (diff)
Merge branch 'for-linville' of git://github.com/sgruszka/iwlegacy
Diffstat (limited to 'drivers')
-rw-r--r--drivers/net/wireless/Makefile2
-rw-r--r--drivers/net/wireless/iwlegacy/3945-debug.c505
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c3977
-rw-r--r--drivers/net/wireless/iwlegacy/3945-rs.c995
-rw-r--r--drivers/net/wireless/iwlegacy/3945.c2751
-rw-r--r--drivers/net/wireless/iwlegacy/3945.h626
-rw-r--r--drivers/net/wireless/iwlegacy/4965-calib.c (renamed from drivers/net/wireless/iwlegacy/iwl-4965-calib.c)613
-rw-r--r--drivers/net/wireless/iwlegacy/4965-debug.c746
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c6536
-rw-r--r--drivers/net/wireless/iwlegacy/4965-rs.c2860
-rw-r--r--drivers/net/wireless/iwlegacy/4965.c2421
-rw-r--r--drivers/net/wireless/iwlegacy/4965.h1309
-rw-r--r--drivers/net/wireless/iwlegacy/Kconfig43
-rw-r--r--drivers/net/wireless/iwlegacy/Makefile24
-rw-r--r--drivers/net/wireless/iwlegacy/commands.h (renamed from drivers/net/wireless/iwlegacy/iwl-commands.h)1134
-rw-r--r--drivers/net/wireless/iwlegacy/common.c5707
-rw-r--r--drivers/net/wireless/iwlegacy/common.h3424
-rw-r--r--drivers/net/wireless/iwlegacy/csr.h (renamed from drivers/net/wireless/iwlegacy/iwl-csr.h)93
-rw-r--r--drivers/net/wireless/iwlegacy/debug.c1410
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c523
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h60
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-fh.h187
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-hw.h291
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-led.c63
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-led.h32
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-rs.c996
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945.c2741
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945.h308
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-calib.h75
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c774
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h59
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c154
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-hw.h811
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-led.c73
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-led.h33
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-lib.c1194
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-rs.c2871
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-rx.c215
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-sta.c721
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-tx.c1378
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-ucode.c166
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.c2183
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.h282
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.c2661
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.h636
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-debug.h198
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-debugfs.c1313
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-dev.h1364
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-devtrace.c42
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-devtrace.h210
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-eeprom.c553
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-eeprom.h344
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-fh.h513
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-hcmd.c271
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-helpers.h196
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-io.h545
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-led.c205
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-led.h56
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-legacy-rs.h456
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-power.c165
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-power.h55
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-rx.c281
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-scan.c549
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-spectrum.h4
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-sta.c540
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-sta.h148
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-tx.c658
-rw-r--r--drivers/net/wireless/iwlegacy/iwl3945-base.c4016
-rw-r--r--drivers/net/wireless/iwlegacy/iwl4965-base.c3281
-rw-r--r--drivers/net/wireless/iwlegacy/prph.h (renamed from drivers/net/wireless/iwlegacy/iwl-prph.h)133
70 files changed, 34525 insertions, 36234 deletions
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index c1c0678b1fb6..98db76196b59 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -42,7 +42,7 @@ obj-$(CONFIG_ADM8211) += adm8211.o
42obj-$(CONFIG_MWL8K) += mwl8k.o 42obj-$(CONFIG_MWL8K) += mwl8k.o
43 43
44obj-$(CONFIG_IWLWIFI) += iwlwifi/ 44obj-$(CONFIG_IWLWIFI) += iwlwifi/
45obj-$(CONFIG_IWLWIFI_LEGACY) += iwlegacy/ 45obj-$(CONFIG_IWLEGACY) += iwlegacy/
46obj-$(CONFIG_RT2X00) += rt2x00/ 46obj-$(CONFIG_RT2X00) += rt2x00/
47 47
48obj-$(CONFIG_P54_COMMON) += p54/ 48obj-$(CONFIG_P54_COMMON) += p54/
diff --git a/drivers/net/wireless/iwlegacy/3945-debug.c b/drivers/net/wireless/iwlegacy/3945-debug.c
new file mode 100644
index 000000000000..5e1a19fd354d
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945-debug.c
@@ -0,0 +1,505 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include "common.h"
30#include "3945.h"
31
32static int
33il3945_stats_flag(struct il_priv *il, char *buf, int bufsz)
34{
35 int p = 0;
36
37 p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
38 le32_to_cpu(il->_3945.stats.flag));
39 if (le32_to_cpu(il->_3945.stats.flag) & UCODE_STATS_CLEAR_MSK)
40 p += scnprintf(buf + p, bufsz - p,
41 "\tStatistics have been cleared\n");
42 p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
43 (le32_to_cpu(il->_3945.stats.flag) &
44 UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" : "5.2 GHz");
45 p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
46 (le32_to_cpu(il->_3945.stats.flag) &
47 UCODE_STATS_NARROW_BAND_MSK) ? "enabled" : "disabled");
48 return p;
49}
50
51ssize_t
52il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
53 size_t count, loff_t *ppos)
54{
55 struct il_priv *il = file->private_data;
56 int pos = 0;
57 char *buf;
58 int bufsz =
59 sizeof(struct iwl39_stats_rx_phy) * 40 +
60 sizeof(struct iwl39_stats_rx_non_phy) * 40 + 400;
61 ssize_t ret;
62 struct iwl39_stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
63 struct iwl39_stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
64 struct iwl39_stats_rx_non_phy *general, *accum_general;
65 struct iwl39_stats_rx_non_phy *delta_general, *max_general;
66
67 if (!il_is_alive(il))
68 return -EAGAIN;
69
70 buf = kzalloc(bufsz, GFP_KERNEL);
71 if (!buf) {
72 IL_ERR("Can not allocate Buffer\n");
73 return -ENOMEM;
74 }
75
76 /*
77 * The statistic information display here is based on
78 * the last stats notification from uCode
79 * might not reflect the current uCode activity
80 */
81 ofdm = &il->_3945.stats.rx.ofdm;
82 cck = &il->_3945.stats.rx.cck;
83 general = &il->_3945.stats.rx.general;
84 accum_ofdm = &il->_3945.accum_stats.rx.ofdm;
85 accum_cck = &il->_3945.accum_stats.rx.cck;
86 accum_general = &il->_3945.accum_stats.rx.general;
87 delta_ofdm = &il->_3945.delta_stats.rx.ofdm;
88 delta_cck = &il->_3945.delta_stats.rx.cck;
89 delta_general = &il->_3945.delta_stats.rx.general;
90 max_ofdm = &il->_3945.max_delta.rx.ofdm;
91 max_cck = &il->_3945.max_delta.rx.cck;
92 max_general = &il->_3945.max_delta.rx.general;
93
94 pos += il3945_stats_flag(il, buf, bufsz);
95 pos +=
96 scnprintf(buf + pos, bufsz - pos,
97 "%-32s current"
98 "acumulative delta max\n",
99 "Statistics_Rx - OFDM:");
100 pos +=
101 scnprintf(buf + pos, bufsz - pos,
102 " %-30s %10u %10u %10u %10u\n", "ina_cnt:",
103 le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt,
104 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
105 pos +=
106 scnprintf(buf + pos, bufsz - pos,
107 " %-30s %10u %10u %10u %10u\n", "fina_cnt:",
108 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
109 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
110 pos +=
111 scnprintf(buf + pos, bufsz - pos,
112 " %-30s %10u %10u %10u %10u\n", "plcp_err:",
113 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
114 delta_ofdm->plcp_err, max_ofdm->plcp_err);
115 pos +=
116 scnprintf(buf + pos, bufsz - pos,
117 " %-30s %10u %10u %10u %10u\n", "crc32_err:",
118 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
119 delta_ofdm->crc32_err, max_ofdm->crc32_err);
120 pos +=
121 scnprintf(buf + pos, bufsz - pos,
122 " %-30s %10u %10u %10u %10u\n", "overrun_err:",
123 le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err,
124 delta_ofdm->overrun_err, max_ofdm->overrun_err);
125 pos +=
126 scnprintf(buf + pos, bufsz - pos,
127 " %-30s %10u %10u %10u %10u\n", "early_overrun_err:",
128 le32_to_cpu(ofdm->early_overrun_err),
129 accum_ofdm->early_overrun_err,
130 delta_ofdm->early_overrun_err,
131 max_ofdm->early_overrun_err);
132 pos +=
133 scnprintf(buf + pos, bufsz - pos,
134 " %-30s %10u %10u %10u %10u\n", "crc32_good:",
135 le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good,
136 delta_ofdm->crc32_good, max_ofdm->crc32_good);
137 pos +=
138 scnprintf(buf + pos, bufsz - pos,
139 " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
140 le32_to_cpu(ofdm->false_alarm_cnt),
141 accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt,
142 max_ofdm->false_alarm_cnt);
143 pos +=
144 scnprintf(buf + pos, bufsz - pos,
145 " %-30s %10u %10u %10u %10u\n", "fina_sync_err_cnt:",
146 le32_to_cpu(ofdm->fina_sync_err_cnt),
147 accum_ofdm->fina_sync_err_cnt,
148 delta_ofdm->fina_sync_err_cnt,
149 max_ofdm->fina_sync_err_cnt);
150 pos +=
151 scnprintf(buf + pos, bufsz - pos,
152 " %-30s %10u %10u %10u %10u\n", "sfd_timeout:",
153 le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout,
154 delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout);
155 pos +=
156 scnprintf(buf + pos, bufsz - pos,
157 " %-30s %10u %10u %10u %10u\n", "fina_timeout:",
158 le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout,
159 delta_ofdm->fina_timeout, max_ofdm->fina_timeout);
160 pos +=
161 scnprintf(buf + pos, bufsz - pos,
162 " %-30s %10u %10u %10u %10u\n", "unresponded_rts:",
163 le32_to_cpu(ofdm->unresponded_rts),
164 accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts,
165 max_ofdm->unresponded_rts);
166 pos +=
167 scnprintf(buf + pos, bufsz - pos,
168 " %-30s %10u %10u %10u %10u\n",
169 "rxe_frame_lmt_ovrun:",
170 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
171 accum_ofdm->rxe_frame_limit_overrun,
172 delta_ofdm->rxe_frame_limit_overrun,
173 max_ofdm->rxe_frame_limit_overrun);
174 pos +=
175 scnprintf(buf + pos, bufsz - pos,
176 " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:",
177 le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt,
178 delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt);
179 pos +=
180 scnprintf(buf + pos, bufsz - pos,
181 " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:",
182 le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt,
183 delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
184
185 pos +=
186 scnprintf(buf + pos, bufsz - pos,
187 "%-32s current"
188 "acumulative delta max\n",
189 "Statistics_Rx - CCK:");
190 pos +=
191 scnprintf(buf + pos, bufsz - pos,
192 " %-30s %10u %10u %10u %10u\n", "ina_cnt:",
193 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
194 delta_cck->ina_cnt, max_cck->ina_cnt);
195 pos +=
196 scnprintf(buf + pos, bufsz - pos,
197 " %-30s %10u %10u %10u %10u\n", "fina_cnt:",
198 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
199 delta_cck->fina_cnt, max_cck->fina_cnt);
200 pos +=
201 scnprintf(buf + pos, bufsz - pos,
202 " %-30s %10u %10u %10u %10u\n", "plcp_err:",
203 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
204 delta_cck->plcp_err, max_cck->plcp_err);
205 pos +=
206 scnprintf(buf + pos, bufsz - pos,
207 " %-30s %10u %10u %10u %10u\n", "crc32_err:",
208 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
209 delta_cck->crc32_err, max_cck->crc32_err);
210 pos +=
211 scnprintf(buf + pos, bufsz - pos,
212 " %-30s %10u %10u %10u %10u\n", "overrun_err:",
213 le32_to_cpu(cck->overrun_err), accum_cck->overrun_err,
214 delta_cck->overrun_err, max_cck->overrun_err);
215 pos +=
216 scnprintf(buf + pos, bufsz - pos,
217 " %-30s %10u %10u %10u %10u\n", "early_overrun_err:",
218 le32_to_cpu(cck->early_overrun_err),
219 accum_cck->early_overrun_err,
220 delta_cck->early_overrun_err, max_cck->early_overrun_err);
221 pos +=
222 scnprintf(buf + pos, bufsz - pos,
223 " %-30s %10u %10u %10u %10u\n", "crc32_good:",
224 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
225 delta_cck->crc32_good, max_cck->crc32_good);
226 pos +=
227 scnprintf(buf + pos, bufsz - pos,
228 " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
229 le32_to_cpu(cck->false_alarm_cnt),
230 accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt,
231 max_cck->false_alarm_cnt);
232 pos +=
233 scnprintf(buf + pos, bufsz - pos,
234 " %-30s %10u %10u %10u %10u\n", "fina_sync_err_cnt:",
235 le32_to_cpu(cck->fina_sync_err_cnt),
236 accum_cck->fina_sync_err_cnt,
237 delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt);
238 pos +=
239 scnprintf(buf + pos, bufsz - pos,
240 " %-30s %10u %10u %10u %10u\n", "sfd_timeout:",
241 le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout,
242 delta_cck->sfd_timeout, max_cck->sfd_timeout);
243 pos +=
244 scnprintf(buf + pos, bufsz - pos,
245 " %-30s %10u %10u %10u %10u\n", "fina_timeout:",
246 le32_to_cpu(cck->fina_timeout), accum_cck->fina_timeout,
247 delta_cck->fina_timeout, max_cck->fina_timeout);
248 pos +=
249 scnprintf(buf + pos, bufsz - pos,
250 " %-30s %10u %10u %10u %10u\n", "unresponded_rts:",
251 le32_to_cpu(cck->unresponded_rts),
252 accum_cck->unresponded_rts, delta_cck->unresponded_rts,
253 max_cck->unresponded_rts);
254 pos +=
255 scnprintf(buf + pos, bufsz - pos,
256 " %-30s %10u %10u %10u %10u\n",
257 "rxe_frame_lmt_ovrun:",
258 le32_to_cpu(cck->rxe_frame_limit_overrun),
259 accum_cck->rxe_frame_limit_overrun,
260 delta_cck->rxe_frame_limit_overrun,
261 max_cck->rxe_frame_limit_overrun);
262 pos +=
263 scnprintf(buf + pos, bufsz - pos,
264 " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:",
265 le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt,
266 delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt);
267 pos +=
268 scnprintf(buf + pos, bufsz - pos,
269 " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:",
270 le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt,
271 delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt);
272
273 pos +=
274 scnprintf(buf + pos, bufsz - pos,
275 "%-32s current"
276 "acumulative delta max\n",
277 "Statistics_Rx - GENERAL:");
278 pos +=
279 scnprintf(buf + pos, bufsz - pos,
280 " %-30s %10u %10u %10u %10u\n", "bogus_cts:",
281 le32_to_cpu(general->bogus_cts), accum_general->bogus_cts,
282 delta_general->bogus_cts, max_general->bogus_cts);
283 pos +=
284 scnprintf(buf + pos, bufsz - pos,
285 " %-30s %10u %10u %10u %10u\n", "bogus_ack:",
286 le32_to_cpu(general->bogus_ack), accum_general->bogus_ack,
287 delta_general->bogus_ack, max_general->bogus_ack);
288 pos +=
289 scnprintf(buf + pos, bufsz - pos,
290 " %-30s %10u %10u %10u %10u\n", "non_bssid_frames:",
291 le32_to_cpu(general->non_bssid_frames),
292 accum_general->non_bssid_frames,
293 delta_general->non_bssid_frames,
294 max_general->non_bssid_frames);
295 pos +=
296 scnprintf(buf + pos, bufsz - pos,
297 " %-30s %10u %10u %10u %10u\n", "filtered_frames:",
298 le32_to_cpu(general->filtered_frames),
299 accum_general->filtered_frames,
300 delta_general->filtered_frames,
301 max_general->filtered_frames);
302 pos +=
303 scnprintf(buf + pos, bufsz - pos,
304 " %-30s %10u %10u %10u %10u\n",
305 "non_channel_beacons:",
306 le32_to_cpu(general->non_channel_beacons),
307 accum_general->non_channel_beacons,
308 delta_general->non_channel_beacons,
309 max_general->non_channel_beacons);
310
311 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
312 kfree(buf);
313 return ret;
314}
315
316ssize_t
317il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
318 size_t count, loff_t *ppos)
319{
320 struct il_priv *il = file->private_data;
321 int pos = 0;
322 char *buf;
323 int bufsz = (sizeof(struct iwl39_stats_tx) * 48) + 250;
324 ssize_t ret;
325 struct iwl39_stats_tx *tx, *accum_tx, *delta_tx, *max_tx;
326
327 if (!il_is_alive(il))
328 return -EAGAIN;
329
330 buf = kzalloc(bufsz, GFP_KERNEL);
331 if (!buf) {
332 IL_ERR("Can not allocate Buffer\n");
333 return -ENOMEM;
334 }
335
336 /*
337 * The statistic information display here is based on
338 * the last stats notification from uCode
339 * might not reflect the current uCode activity
340 */
341 tx = &il->_3945.stats.tx;
342 accum_tx = &il->_3945.accum_stats.tx;
343 delta_tx = &il->_3945.delta_stats.tx;
344 max_tx = &il->_3945.max_delta.tx;
345 pos += il3945_stats_flag(il, buf, bufsz);
346 pos +=
347 scnprintf(buf + pos, bufsz - pos,
348 "%-32s current"
349 "acumulative delta max\n",
350 "Statistics_Tx:");
351 pos +=
352 scnprintf(buf + pos, bufsz - pos,
353 " %-30s %10u %10u %10u %10u\n", "preamble:",
354 le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt,
355 delta_tx->preamble_cnt, max_tx->preamble_cnt);
356 pos +=
357 scnprintf(buf + pos, bufsz - pos,
358 " %-30s %10u %10u %10u %10u\n", "rx_detected_cnt:",
359 le32_to_cpu(tx->rx_detected_cnt),
360 accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt,
361 max_tx->rx_detected_cnt);
362 pos +=
363 scnprintf(buf + pos, bufsz - pos,
364 " %-30s %10u %10u %10u %10u\n", "bt_prio_defer_cnt:",
365 le32_to_cpu(tx->bt_prio_defer_cnt),
366 accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt,
367 max_tx->bt_prio_defer_cnt);
368 pos +=
369 scnprintf(buf + pos, bufsz - pos,
370 " %-30s %10u %10u %10u %10u\n", "bt_prio_kill_cnt:",
371 le32_to_cpu(tx->bt_prio_kill_cnt),
372 accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt,
373 max_tx->bt_prio_kill_cnt);
374 pos +=
375 scnprintf(buf + pos, bufsz - pos,
376 " %-30s %10u %10u %10u %10u\n", "few_bytes_cnt:",
377 le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt,
378 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
379 pos +=
380 scnprintf(buf + pos, bufsz - pos,
381 " %-30s %10u %10u %10u %10u\n", "cts_timeout:",
382 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
383 delta_tx->cts_timeout, max_tx->cts_timeout);
384 pos +=
385 scnprintf(buf + pos, bufsz - pos,
386 " %-30s %10u %10u %10u %10u\n", "ack_timeout:",
387 le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout,
388 delta_tx->ack_timeout, max_tx->ack_timeout);
389 pos +=
390 scnprintf(buf + pos, bufsz - pos,
391 " %-30s %10u %10u %10u %10u\n", "expected_ack_cnt:",
392 le32_to_cpu(tx->expected_ack_cnt),
393 accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt,
394 max_tx->expected_ack_cnt);
395 pos +=
396 scnprintf(buf + pos, bufsz - pos,
397 " %-30s %10u %10u %10u %10u\n", "actual_ack_cnt:",
398 le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt,
399 delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt);
400
401 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
402 kfree(buf);
403 return ret;
404}
405
406ssize_t
407il3945_ucode_general_stats_read(struct file *file, char __user *user_buf,
408 size_t count, loff_t *ppos)
409{
410 struct il_priv *il = file->private_data;
411 int pos = 0;
412 char *buf;
413 int bufsz = sizeof(struct iwl39_stats_general) * 10 + 300;
414 ssize_t ret;
415 struct iwl39_stats_general *general, *accum_general;
416 struct iwl39_stats_general *delta_general, *max_general;
417 struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
418 struct iwl39_stats_div *div, *accum_div, *delta_div, *max_div;
419
420 if (!il_is_alive(il))
421 return -EAGAIN;
422
423 buf = kzalloc(bufsz, GFP_KERNEL);
424 if (!buf) {
425 IL_ERR("Can not allocate Buffer\n");
426 return -ENOMEM;
427 }
428
429 /*
430 * The statistic information display here is based on
431 * the last stats notification from uCode
432 * might not reflect the current uCode activity
433 */
434 general = &il->_3945.stats.general;
435 dbg = &il->_3945.stats.general.dbg;
436 div = &il->_3945.stats.general.div;
437 accum_general = &il->_3945.accum_stats.general;
438 delta_general = &il->_3945.delta_stats.general;
439 max_general = &il->_3945.max_delta.general;
440 accum_dbg = &il->_3945.accum_stats.general.dbg;
441 delta_dbg = &il->_3945.delta_stats.general.dbg;
442 max_dbg = &il->_3945.max_delta.general.dbg;
443 accum_div = &il->_3945.accum_stats.general.div;
444 delta_div = &il->_3945.delta_stats.general.div;
445 max_div = &il->_3945.max_delta.general.div;
446 pos += il3945_stats_flag(il, buf, bufsz);
447 pos +=
448 scnprintf(buf + pos, bufsz - pos,
449 "%-32s current"
450 "acumulative delta max\n",
451 "Statistics_General:");
452 pos +=
453 scnprintf(buf + pos, bufsz - pos,
454 " %-30s %10u %10u %10u %10u\n", "burst_check:",
455 le32_to_cpu(dbg->burst_check), accum_dbg->burst_check,
456 delta_dbg->burst_check, max_dbg->burst_check);
457 pos +=
458 scnprintf(buf + pos, bufsz - pos,
459 " %-30s %10u %10u %10u %10u\n", "burst_count:",
460 le32_to_cpu(dbg->burst_count), accum_dbg->burst_count,
461 delta_dbg->burst_count, max_dbg->burst_count);
462 pos +=
463 scnprintf(buf + pos, bufsz - pos,
464 " %-30s %10u %10u %10u %10u\n", "sleep_time:",
465 le32_to_cpu(general->sleep_time),
466 accum_general->sleep_time, delta_general->sleep_time,
467 max_general->sleep_time);
468 pos +=
469 scnprintf(buf + pos, bufsz - pos,
470 " %-30s %10u %10u %10u %10u\n", "slots_out:",
471 le32_to_cpu(general->slots_out), accum_general->slots_out,
472 delta_general->slots_out, max_general->slots_out);
473 pos +=
474 scnprintf(buf + pos, bufsz - pos,
475 " %-30s %10u %10u %10u %10u\n", "slots_idle:",
476 le32_to_cpu(general->slots_idle),
477 accum_general->slots_idle, delta_general->slots_idle,
478 max_general->slots_idle);
479 pos +=
480 scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
481 le32_to_cpu(general->ttl_timestamp));
482 pos +=
483 scnprintf(buf + pos, bufsz - pos,
484 " %-30s %10u %10u %10u %10u\n", "tx_on_a:",
485 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
486 delta_div->tx_on_a, max_div->tx_on_a);
487 pos +=
488 scnprintf(buf + pos, bufsz - pos,
489 " %-30s %10u %10u %10u %10u\n", "tx_on_b:",
490 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
491 delta_div->tx_on_b, max_div->tx_on_b);
492 pos +=
493 scnprintf(buf + pos, bufsz - pos,
494 " %-30s %10u %10u %10u %10u\n", "exec_time:",
495 le32_to_cpu(div->exec_time), accum_div->exec_time,
496 delta_div->exec_time, max_div->exec_time);
497 pos +=
498 scnprintf(buf + pos, bufsz - pos,
499 " %-30s %10u %10u %10u %10u\n", "probe_time:",
500 le32_to_cpu(div->probe_time), accum_div->probe_time,
501 delta_div->probe_time, max_div->probe_time);
502 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
503 kfree(buf);
504 return ret;
505}
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
new file mode 100644
index 000000000000..daef6b58f6cc
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -0,0 +1,3977 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/pci-aspm.h>
37#include <linux/slab.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/sched.h>
41#include <linux/skbuff.h>
42#include <linux/netdevice.h>
43#include <linux/firmware.h>
44#include <linux/etherdevice.h>
45#include <linux/if_arp.h>
46
47#include <net/ieee80211_radiotap.h>
48#include <net/mac80211.h>
49
50#include <asm/div64.h>
51
52#define DRV_NAME "iwl3945"
53
54#include "commands.h"
55#include "common.h"
56#include "3945.h"
57#include "iwl-spectrum.h"
58
59/*
60 * module name, copyright, version, etc.
61 */
62
63#define DRV_DESCRIPTION \
64"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
65
66#ifdef CONFIG_IWLEGACY_DEBUG
67#define VD "d"
68#else
69#define VD
70#endif
71
72/*
73 * add "s" to indicate spectrum measurement included.
74 * we add it here to be consistent with previous releases in which
75 * this was configurable.
76 */
77#define DRV_VERSION IWLWIFI_VERSION VD "s"
78#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
79#define DRV_AUTHOR "<ilw@linux.intel.com>"
80
81MODULE_DESCRIPTION(DRV_DESCRIPTION);
82MODULE_VERSION(DRV_VERSION);
83MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
84MODULE_LICENSE("GPL");
85
86 /* module parameters */
87struct il_mod_params il3945_mod_params = {
88 .sw_crypto = 1,
89 .restart_fw = 1,
90 .disable_hw_scan = 1,
91 /* the rest are 0 by default */
92};
93
94/**
95 * il3945_get_antenna_flags - Get antenna flags for RXON command
96 * @il: eeprom and antenna fields are used to determine antenna flags
97 *
98 * il->eeprom39 is used to determine if antenna AUX/MAIN are reversed
99 * il3945_mod_params.antenna specifies the antenna diversity mode:
100 *
101 * IL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
102 * IL_ANTENNA_MAIN - Force MAIN antenna
103 * IL_ANTENNA_AUX - Force AUX antenna
104 */
105__le32
106il3945_get_antenna_flags(const struct il_priv *il)
107{
108 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
109
110 switch (il3945_mod_params.antenna) {
111 case IL_ANTENNA_DIVERSITY:
112 return 0;
113
114 case IL_ANTENNA_MAIN:
115 if (eeprom->antenna_switch_type)
116 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
117 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
118
119 case IL_ANTENNA_AUX:
120 if (eeprom->antenna_switch_type)
121 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
122 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
123 }
124
125 /* bad antenna selector value */
126 IL_ERR("Bad antenna selector value (0x%x)\n",
127 il3945_mod_params.antenna);
128
129 return 0; /* "diversity" is default if error */
130}
131
132static int
133il3945_set_ccmp_dynamic_key_info(struct il_priv *il,
134 struct ieee80211_key_conf *keyconf, u8 sta_id)
135{
136 unsigned long flags;
137 __le16 key_flags = 0;
138 int ret;
139
140 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
141 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
142
143 if (sta_id == il->ctx.bcast_sta_id)
144 key_flags |= STA_KEY_MULTICAST_MSK;
145
146 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
147 keyconf->hw_key_idx = keyconf->keyidx;
148 key_flags &= ~STA_KEY_FLG_INVALID;
149
150 spin_lock_irqsave(&il->sta_lock, flags);
151 il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
152 il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
153 memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);
154
155 memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);
156
157 if ((il->stations[sta_id].sta.key.
158 key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
159 il->stations[sta_id].sta.key.key_offset =
160 il_get_free_ucode_key_idx(il);
161 /* else, we are overriding an existing key => no need to allocated room
162 * in uCode. */
163
164 WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
165 "no space for a new key");
166
167 il->stations[sta_id].sta.key.key_flags = key_flags;
168 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
169 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
170
171 D_INFO("hwcrypto: modify ucode station key info\n");
172
173 ret = il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
174
175 spin_unlock_irqrestore(&il->sta_lock, flags);
176
177 return ret;
178}
179
180static int
181il3945_set_tkip_dynamic_key_info(struct il_priv *il,
182 struct ieee80211_key_conf *keyconf, u8 sta_id)
183{
184 return -EOPNOTSUPP;
185}
186
187static int
188il3945_set_wep_dynamic_key_info(struct il_priv *il,
189 struct ieee80211_key_conf *keyconf, u8 sta_id)
190{
191 return -EOPNOTSUPP;
192}
193
194static int
195il3945_clear_sta_key_info(struct il_priv *il, u8 sta_id)
196{
197 unsigned long flags;
198 struct il_addsta_cmd sta_cmd;
199
200 spin_lock_irqsave(&il->sta_lock, flags);
201 memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
202 memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
203 il->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
204 il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
205 il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
206 memcpy(&sta_cmd, &il->stations[sta_id].sta,
207 sizeof(struct il_addsta_cmd));
208 spin_unlock_irqrestore(&il->sta_lock, flags);
209
210 D_INFO("hwcrypto: clear ucode station key info\n");
211 return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
212}
213
214static int
215il3945_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
216 u8 sta_id)
217{
218 int ret = 0;
219
220 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
221
222 switch (keyconf->cipher) {
223 case WLAN_CIPHER_SUITE_CCMP:
224 ret = il3945_set_ccmp_dynamic_key_info(il, keyconf, sta_id);
225 break;
226 case WLAN_CIPHER_SUITE_TKIP:
227 ret = il3945_set_tkip_dynamic_key_info(il, keyconf, sta_id);
228 break;
229 case WLAN_CIPHER_SUITE_WEP40:
230 case WLAN_CIPHER_SUITE_WEP104:
231 ret = il3945_set_wep_dynamic_key_info(il, keyconf, sta_id);
232 break;
233 default:
234 IL_ERR("Unknown alg: %s alg=%x\n", __func__, keyconf->cipher);
235 ret = -EINVAL;
236 }
237
238 D_WEP("Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
239 keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);
240
241 return ret;
242}
243
/* Static (default) WEP keys cannot be removed from 3945 hardware. */
static int
il3945_remove_static_key(struct il_priv *il)
{
	return -EOPNOTSUPP;
}
251
252static int
253il3945_set_static_key(struct il_priv *il, struct ieee80211_key_conf *key)
254{
255 if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
256 key->cipher == WLAN_CIPHER_SUITE_WEP104)
257 return -EOPNOTSUPP;
258
259 IL_ERR("Static key invalid: cipher %x\n", key->cipher);
260 return -EINVAL;
261}
262
263static void
264il3945_clear_free_frames(struct il_priv *il)
265{
266 struct list_head *element;
267
268 D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
269
270 while (!list_empty(&il->free_frames)) {
271 element = il->free_frames.next;
272 list_del(element);
273 kfree(list_entry(element, struct il3945_frame, list));
274 il->frames_count--;
275 }
276
277 if (il->frames_count) {
278 IL_WARN("%d frames still in use. Did we lose one?\n",
279 il->frames_count);
280 il->frames_count = 0;
281 }
282}
283
284static struct il3945_frame *
285il3945_get_free_frame(struct il_priv *il)
286{
287 struct il3945_frame *frame;
288 struct list_head *element;
289 if (list_empty(&il->free_frames)) {
290 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
291 if (!frame) {
292 IL_ERR("Could not allocate frame!\n");
293 return NULL;
294 }
295
296 il->frames_count++;
297 return frame;
298 }
299
300 element = il->free_frames.next;
301 list_del(element);
302 return list_entry(element, struct il3945_frame, list);
303}
304
static void
il3945_free_frame(struct il_priv *il, struct il3945_frame *frame)
{
	/* Scrub the frame and return it to the pre-allocated pool */
	memset(frame, 0, sizeof(*frame));
	list_add(&frame->list, &il->free_frames);
}
311
312unsigned int
313il3945_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
314 int left)
315{
316
317 if (!il_is_associated(il) || !il->beacon_skb)
318 return 0;
319
320 if (il->beacon_skb->len > left)
321 return 0;
322
323 memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
324
325 return il->beacon_skb->len;
326}
327
328static int
329il3945_send_beacon_cmd(struct il_priv *il)
330{
331 struct il3945_frame *frame;
332 unsigned int frame_size;
333 int rc;
334 u8 rate;
335
336 frame = il3945_get_free_frame(il);
337
338 if (!frame) {
339 IL_ERR("Could not obtain free frame buffer for beacon "
340 "command.\n");
341 return -ENOMEM;
342 }
343
344 rate = il_get_lowest_plcp(il, &il->ctx);
345
346 frame_size = il3945_hw_get_beacon_cmd(il, frame, rate);
347
348 rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);
349
350 il3945_free_frame(il, frame);
351
352 return rc;
353}
354
static void
il3945_unset_hw_params(struct il_priv *il)
{
	/* Release the 3945 shared DMA area, if it was ever allocated */
	if (il->_3945.shared_virt)
		dma_free_coherent(&il->pci_dev->dev,
				  sizeof(struct il3945_shared),
				  il->_3945.shared_virt, il->_3945.shared_phys);
}
363
/*
 * Fill in the security fields (sec_ctl and key material) of the Tx
 * command from the station's stored key info so the uCode encrypts the
 * frame in hardware.  Only called when info->control.hw_key is set.
 */
static void
il3945_build_tx_cmd_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
			     struct il_device_cmd *cmd,
			     struct sk_buff *skb_frag, int sta_id)
{
	struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
	struct il_hw_key *keyinfo = &il->stations[sta_id].keyinfo;

	tx_cmd->sec_ctl = 0;

	switch (keyinfo->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
		D_TX("tx_cmd with AES hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		/* TKIP is not hardware-offloaded on 3945; nothing to set */
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		/* WEP key index is encoded into sec_ctl; key material is
		 * copied past the 3-byte IV area of the key field */
		tx_cmd->sec_ctl |=
		    TX_CMD_SEC_WEP | (info->control.hw_key->
				      hw_key_idx & TX_CMD_SEC_MSK) <<
		    TX_CMD_SEC_SHIFT;

		memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);

		D_TX("Configuring packet for WEP encryption " "with key %d\n",
		     info->control.hw_key->hw_key_idx);
		break;

	default:
		IL_ERR("Unknown encode cipher %x\n", keyinfo->cipher);
		break;
	}
}
404
405/*
406 * handle build C_TX command notification.
407 */
408static void
409il3945_build_tx_cmd_basic(struct il_priv *il, struct il_device_cmd *cmd,
410 struct ieee80211_tx_info *info,
411 struct ieee80211_hdr *hdr, u8 std_id)
412{
413 struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
414 __le32 tx_flags = tx_cmd->tx_flags;
415 __le16 fc = hdr->frame_control;
416
417 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
418 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
419 tx_flags |= TX_CMD_FLG_ACK_MSK;
420 if (ieee80211_is_mgmt(fc))
421 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
422 if (ieee80211_is_probe_resp(fc) &&
423 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
424 tx_flags |= TX_CMD_FLG_TSF_MSK;
425 } else {
426 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
427 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
428 }
429
430 tx_cmd->sta_id = std_id;
431 if (ieee80211_has_morefrags(fc))
432 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
433
434 if (ieee80211_is_data_qos(fc)) {
435 u8 *qc = ieee80211_get_qos_ctl(hdr);
436 tx_cmd->tid_tspec = qc[0] & 0xf;
437 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
438 } else {
439 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
440 }
441
442 il_tx_cmd_protection(il, info, fc, &tx_flags);
443
444 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
445 if (ieee80211_is_mgmt(fc)) {
446 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
447 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
448 else
449 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
450 } else {
451 tx_cmd->timeout.pm_frame_timeout = 0;
452 }
453
454 tx_cmd->driver_txop = 0;
455 tx_cmd->tx_flags = tx_flags;
456 tx_cmd->next_frame_len = 0;
457}
458
459/*
460 * start C_TX command process
461 */
462static int
463il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
464{
465 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
466 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
467 struct il3945_tx_cmd *tx_cmd;
468 struct il_tx_queue *txq = NULL;
469 struct il_queue *q = NULL;
470 struct il_device_cmd *out_cmd;
471 struct il_cmd_meta *out_meta;
472 dma_addr_t phys_addr;
473 dma_addr_t txcmd_phys;
474 int txq_id = skb_get_queue_mapping(skb);
475 u16 len, idx, hdr_len;
476 u8 id;
477 u8 unicast;
478 u8 sta_id;
479 u8 tid = 0;
480 __le16 fc;
481 u8 wait_write_ptr = 0;
482 unsigned long flags;
483
484 spin_lock_irqsave(&il->lock, flags);
485 if (il_is_rfkill(il)) {
486 D_DROP("Dropping - RF KILL\n");
487 goto drop_unlock;
488 }
489
490 if ((ieee80211_get_tx_rate(il->hw, info)->hw_value & 0xFF) ==
491 IL_INVALID_RATE) {
492 IL_ERR("ERROR: No TX rate available.\n");
493 goto drop_unlock;
494 }
495
496 unicast = !is_multicast_ether_addr(hdr->addr1);
497 id = 0;
498
499 fc = hdr->frame_control;
500
501#ifdef CONFIG_IWLEGACY_DEBUG
502 if (ieee80211_is_auth(fc))
503 D_TX("Sending AUTH frame\n");
504 else if (ieee80211_is_assoc_req(fc))
505 D_TX("Sending ASSOC frame\n");
506 else if (ieee80211_is_reassoc_req(fc))
507 D_TX("Sending REASSOC frame\n");
508#endif
509
510 spin_unlock_irqrestore(&il->lock, flags);
511
512 hdr_len = ieee80211_hdrlen(fc);
513
514 /* Find idx into station table for destination station */
515 sta_id = il_sta_id_or_broadcast(il, &il->ctx, info->control.sta);
516 if (sta_id == IL_INVALID_STATION) {
517 D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
518 goto drop;
519 }
520
521 D_RATE("station Id %d\n", sta_id);
522
523 if (ieee80211_is_data_qos(fc)) {
524 u8 *qc = ieee80211_get_qos_ctl(hdr);
525 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
526 if (unlikely(tid >= MAX_TID_COUNT))
527 goto drop;
528 }
529
530 /* Descriptor for chosen Tx queue */
531 txq = &il->txq[txq_id];
532 q = &txq->q;
533
534 if ((il_queue_space(q) < q->high_mark))
535 goto drop;
536
537 spin_lock_irqsave(&il->lock, flags);
538
539 idx = il_get_cmd_idx(q, q->write_ptr, 0);
540
541 /* Set up driver data for this TFD */
542 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct il_tx_info));
543 txq->txb[q->write_ptr].skb = skb;
544 txq->txb[q->write_ptr].ctx = &il->ctx;
545
546 /* Init first empty entry in queue's array of Tx/cmd buffers */
547 out_cmd = txq->cmd[idx];
548 out_meta = &txq->meta[idx];
549 tx_cmd = (struct il3945_tx_cmd *)out_cmd->cmd.payload;
550 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
551 memset(tx_cmd, 0, sizeof(*tx_cmd));
552
553 /*
554 * Set up the Tx-command (not MAC!) header.
555 * Store the chosen Tx queue and TFD idx within the sequence field;
556 * after Tx, uCode's Tx response will return this value so driver can
557 * locate the frame within the tx queue and do post-tx processing.
558 */
559 out_cmd->hdr.cmd = C_TX;
560 out_cmd->hdr.sequence =
561 cpu_to_le16((u16)
562 (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
563
564 /* Copy MAC header from skb into command buffer */
565 memcpy(tx_cmd->hdr, hdr, hdr_len);
566
567 if (info->control.hw_key)
568 il3945_build_tx_cmd_hwcrypto(il, info, out_cmd, skb, sta_id);
569
570 /* TODO need this for burst mode later on */
571 il3945_build_tx_cmd_basic(il, out_cmd, info, hdr, sta_id);
572
573 /* set is_hcca to 0; it probably will never be implemented */
574 il3945_hw_build_tx_cmd_rate(il, out_cmd, info, hdr, sta_id, 0);
575
576 /* Total # bytes to be transmitted */
577 len = (u16) skb->len;
578 tx_cmd->len = cpu_to_le16(len);
579
580 il_dbg_log_tx_data_frame(il, len, hdr);
581 il_update_stats(il, true, fc, len);
582 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
583 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
584
585 if (!ieee80211_has_morefrags(hdr->frame_control)) {
586 txq->need_update = 1;
587 } else {
588 wait_write_ptr = 1;
589 txq->need_update = 0;
590 }
591
592 D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
593 D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
594 il_print_hex_dump(il, IL_DL_TX, tx_cmd, sizeof(*tx_cmd));
595 il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr,
596 ieee80211_hdrlen(fc));
597
598 /*
599 * Use the first empty entry in this queue's command buffer array
600 * to contain the Tx command and MAC header concatenated together
601 * (payload data will be in another buffer).
602 * Size of this varies, due to varying MAC header length.
603 * If end is not dword aligned, we'll have 2 extra bytes at the end
604 * of the MAC header (device reads on dword boundaries).
605 * We'll tell device about this padding later.
606 */
607 len =
608 sizeof(struct il3945_tx_cmd) + sizeof(struct il_cmd_header) +
609 hdr_len;
610 len = (len + 3) & ~3;
611
612 /* Physical address of this Tx command's header (not MAC header!),
613 * within command buffer array. */
614 txcmd_phys =
615 pci_map_single(il->pci_dev, &out_cmd->hdr, len, PCI_DMA_TODEVICE);
616 /* we do not map meta data ... so we can safely access address to
617 * provide to unmap command*/
618 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
619 dma_unmap_len_set(out_meta, len, len);
620
621 /* Add buffer containing Tx command and MAC(!) header to TFD's
622 * first entry */
623 il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1,
624 0);
625
626 /* Set up TFD's 2nd entry to point directly to remainder of skb,
627 * if any (802.11 null frames have no payload). */
628 len = skb->len - hdr_len;
629 if (len) {
630 phys_addr =
631 pci_map_single(il->pci_dev, skb->data + hdr_len, len,
632 PCI_DMA_TODEVICE);
633 il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr,
634 len, 0, U32_PAD(len));
635 }
636
637 /* Tell device the write idx *just past* this latest filled TFD */
638 q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
639 il_txq_update_write_ptr(il, txq);
640 spin_unlock_irqrestore(&il->lock, flags);
641
642 if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
643 if (wait_write_ptr) {
644 spin_lock_irqsave(&il->lock, flags);
645 txq->need_update = 1;
646 il_txq_update_write_ptr(il, txq);
647 spin_unlock_irqrestore(&il->lock, flags);
648 }
649
650 il_stop_queue(il, txq);
651 }
652
653 return 0;
654
655drop_unlock:
656 spin_unlock_irqrestore(&il->lock, flags);
657drop:
658 return -1;
659}
660
/*
 * Build and synchronously send a spectrum-measurement command for a
 * single channel, then parse the uCode's response.  When associated,
 * the requested start time is converted from TSF microseconds to a
 * beacon-relative time.  Returns 0 when the measurement was accepted,
 * -EAGAIN when the uCode refused it, or a send/command error.
 */
static int
il3945_get_measurement(struct il_priv *il,
		       struct ieee80211_measurement_params *params, u8 type)
{
	struct il_spectrum_cmd spectrum;
	struct il_rx_pkt *pkt;
	struct il_host_cmd cmd = {
		.id = C_SPECTRUM_MEASUREMENT,
		.data = (void *)&spectrum,
		.flags = CMD_WANT_SKB,
	};
	u32 add_time = le64_to_cpu(params->start_time);
	int rc;
	int spectrum_resp_status;
	int duration = le16_to_cpu(params->duration);
	struct il_rxon_context *ctx = &il->ctx;

	/* Convert absolute TSF start time to beacons-from-last-TSF */
	if (il_is_associated(il))
		add_time =
		    il_usecs_to_beacons(il,
					le64_to_cpu(params->start_time) -
					il->_3945.last_tsf,
					le16_to_cpu(ctx->timing.
						    beacon_interval));

	memset(&spectrum, 0, sizeof(spectrum));

	spectrum.channel_count = cpu_to_le16(1);
	spectrum.flags =
	    RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
	spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
	cmd.len = sizeof(spectrum);
	spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));

	if (il_is_associated(il))
		spectrum.start_time =
		    il_add_beacon_time(il, il->_3945.last_beacon_time, add_time,
				       le16_to_cpu(ctx->timing.
						   beacon_interval));
	else
		spectrum.start_time = 0;

	spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
	spectrum.channels[0].channel = params->channel;
	spectrum.channels[0].type = type;
	if (ctx->active.flags & RXON_FLG_BAND_24G_MSK)
		spectrum.flags |=
		    RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
		    RXON_FLG_TGG_PROTECT_MSK;

	rc = il_send_cmd_sync(il, &cmd);
	if (rc)
		return rc;

	pkt = (struct il_rx_pkt *)cmd.reply_page;
	/* NOTE(review): on IL_CMD_FAILED_MSK rc is set to -EIO but the
	 * response is still parsed below, and the status switch can
	 * overwrite rc -- confirm this is intended */
	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from N_RX_ON_ASSOC command\n");
		rc = -EIO;
	}

	spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
	switch (spectrum_resp_status) {
	case 0:		/* Command will be handled */
		if (pkt->u.spectrum.id != 0xff) {
			/* A previous measurement was superseded */
			D_INFO("Replaced existing measurement: %d\n",
			       pkt->u.spectrum.id);
			il->measurement_status &= ~MEASUREMENT_READY;
		}
		il->measurement_status |= MEASUREMENT_ACTIVE;
		rc = 0;
		break;

	case 1:		/* Command will not be handled */
		rc = -EAGAIN;
		break;
	}

	il_free_pages(il, cmd.reply_page);

	return rc;
}
742
743static void
744il3945_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
745{
746 struct il_rx_pkt *pkt = rxb_addr(rxb);
747 struct il_alive_resp *palive;
748 struct delayed_work *pwork;
749
750 palive = &pkt->u.alive_frame;
751
752 D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n",
753 palive->is_valid, palive->ver_type, palive->ver_subtype);
754
755 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
756 D_INFO("Initialization Alive received.\n");
757 memcpy(&il->card_alive_init, &pkt->u.alive_frame,
758 sizeof(struct il_alive_resp));
759 pwork = &il->init_alive_start;
760 } else {
761 D_INFO("Runtime Alive received.\n");
762 memcpy(&il->card_alive, &pkt->u.alive_frame,
763 sizeof(struct il_alive_resp));
764 pwork = &il->alive_start;
765 il3945_disable_events(il);
766 }
767
768 /* We delay the ALIVE response by 5ms to
769 * give the HW RF Kill time to activate... */
770 if (palive->is_valid == UCODE_VALID_OK)
771 queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
772 else
773 IL_WARN("uCode did not respond OK.\n");
774}
775
static void
il3945_hdl_add_sta(struct il_priv *il, struct il_rx_buf *rxb)
{
/* pkt is only referenced by the D_RX debug macro, which compiles away
 * (without evaluating its arguments) when CONFIG_IWLEGACY_DEBUG is off,
 * hence the conditional declaration */
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
#endif

	D_RX("Received C_ADD_STA: 0x%02X\n", pkt->u.status);
}
785
/* Handle the uCode beacon-status notification: record whether we are
 * currently the IBSS beacon manager. */
static void
il3945_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il3945_beacon_notif *beacon = &(pkt->u.beacon_status);
#ifdef CONFIG_IWLEGACY_DEBUG
	u8 rate = beacon->beacon_notify_hdr.rate;

	D_RX("beacon status %x retries %d iss %d " "tsf %d %d rate %d\n",
	     le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
	     beacon->beacon_notify_hdr.failure_frame,
	     le32_to_cpu(beacon->ibss_mgr_status),
	     le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
#endif

	il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);

}
804
/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void
il3945_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	/* snapshot taken before updating bits, to detect a transition */
	unsigned long status = il->status;

	IL_WARN("Card state received: HW:%s SW:%s\n",
		(flags & HW_CARD_DISABLED) ? "Kill" : "On",
		(flags & SW_CARD_DISABLED) ? "Kill" : "On");

	/* Block further host commands while the kill state changes */
	_il_wr(il, CSR_UCODE_DRV_GP1_SET, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	if (flags & HW_CARD_DISABLED)
		set_bit(S_RF_KILL_HW, &il->status);
	else
		clear_bit(S_RF_KILL_HW, &il->status);

	il_scan_cancel(il);

	/* Only notify rfkill on an actual HW kill transition; otherwise
	 * wake anyone waiting on a blocked command */
	if ((test_bit(S_RF_KILL_HW, &status) !=
	     test_bit(S_RF_KILL_HW, &il->status)))
		wiphy_rfkill_set_hw_state(il->hw->wiphy,
					  test_bit(S_RF_KILL_HW, &il->status));
	else
		wake_up(&il->wait_command_queue);
}
834
835/**
836 * il3945_setup_handlers - Initialize Rx handler callbacks
837 *
838 * Setup the RX handlers for each of the reply types sent from the uCode
839 * to the host.
840 *
841 * This function chains into the hardware specific files for them to setup
842 * any hardware specific handlers as well.
843 */
844static void
845il3945_setup_handlers(struct il_priv *il)
846{
847 il->handlers[N_ALIVE] = il3945_hdl_alive;
848 il->handlers[C_ADD_STA] = il3945_hdl_add_sta;
849 il->handlers[N_ERROR] = il_hdl_error;
850 il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
851 il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
852 il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
853 il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
854 il->handlers[N_BEACON] = il3945_hdl_beacon;
855
856 /*
857 * The same handler is used for both the REPLY to a discrete
858 * stats request from the host as well as for the periodic
859 * stats notifications (after received beacons) from the uCode.
860 */
861 il->handlers[C_STATS] = il3945_hdl_c_stats;
862 il->handlers[N_STATS] = il3945_hdl_stats;
863
864 il_setup_rx_scan_handlers(il);
865 il->handlers[N_CARD_STATE] = il3945_hdl_card_state;
866
867 /* Set up hardware specific Rx handlers */
868 il3945_hw_handler_setup(il);
869}
870
871/************************** RX-FUNCTIONS ****************************/
872/*
873 * Rx theory of operation
874 *
875 * The host allocates 32 DMA target addresses and passes the host address
876 * to the firmware at register IL_RFDS_TBL_LOWER + N * RFD_SIZE where N is
877 * 0 to 31
878 *
879 * Rx Queue Indexes
880 * The host/firmware share two idx registers for managing the Rx buffers.
881 *
882 * The READ idx maps to the first position that the firmware may be writing
883 * to -- the driver can read up to (but not including) this position and get
884 * good data.
885 * The READ idx is managed by the firmware once the card is enabled.
886 *
887 * The WRITE idx maps to the last position the driver has read from -- the
888 * position preceding WRITE is the last slot the firmware can place a packet.
889 *
890 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
891 * WRITE = READ.
892 *
893 * During initialization, the host sets up the READ queue position to the first
894 * IDX position, and WRITE to the last (READ - 1 wrapped)
895 *
896 * When the firmware places a packet in a buffer, it will advance the READ idx
897 * and fire the RX interrupt. The driver can then query the READ idx and
898 * process as many packets as possible, moving the WRITE idx forward as it
899 * resets the Rx queue buffers with new memory.
900 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in il->rxq->rx_free. When
 *   il->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish il->rxq->rx_free.
 * + In il3945_rx_replenish (scheduled) if 'processed' != 'read' then the
 *   il->rxq is replenished and the READ IDX is updated (updating the
 *   'processed' and 'read' driver idxes as well)
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the il->rxq. The driver 'processed' idx is updated.
 * + The Host/Firmware il->rxq is replenished at tasklet time from the rx_free
 *   list. If there are no allocated buffers in il->rxq->rx_free, the READ
 *   IDX is not incremented and il->status(RX_STALLED) is set. If there
 *   were enough free buffers and RX_STALLED is set it is cleared.
914 *
915 *
916 * Driver sequence:
917 *
918 * il3945_rx_replenish() Replenishes rx_free list from rx_used, and calls
919 * il3945_rx_queue_restock
920 * il3945_rx_queue_restock() Moves available buffers from rx_free into Rx
921 * queue, updates firmware pointers, and updates
922 * the WRITE idx. If insufficient rx_free buffers
923 * are available, schedules il3945_rx_replenish
924 *
925 * -- enable interrupts --
926 * ISR - il3945_rx() Detach il_rx_bufs from pool up to the
927 * READ IDX, detaching the SKB from the pool.
928 * Moves the packet buffer from queue to rx_used.
929 * Calls il3945_rx_queue_restock to refill any empty
930 * slots.
931 * ...
932 *
933 */
934
935/**
936 * il3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
937 */
938static inline __le32
939il3945_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
940{
941 return cpu_to_le32((u32) dma_addr);
942}
943
944/**
945 * il3945_rx_queue_restock - refill RX queue from pre-allocated pool
946 *
947 * If there are slots in the RX queue that need to be restocked,
948 * and we have free pre-allocated buffers, fill the ranks as much
949 * as we can, pulling from rx_free.
950 *
951 * This moves the 'write' idx forward to catch up with 'processed', and
952 * also updates the memory address in the firmware to reference the new
953 * target buffer.
954 */
955static void
956il3945_rx_queue_restock(struct il_priv *il)
957{
958 struct il_rx_queue *rxq = &il->rxq;
959 struct list_head *element;
960 struct il_rx_buf *rxb;
961 unsigned long flags;
962 int write;
963
964 spin_lock_irqsave(&rxq->lock, flags);
965 write = rxq->write & ~0x7;
966 while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
967 /* Get next free Rx buffer, remove from free list */
968 element = rxq->rx_free.next;
969 rxb = list_entry(element, struct il_rx_buf, list);
970 list_del(element);
971
972 /* Point to Rx buffer via next RBD in circular buffer */
973 rxq->bd[rxq->write] =
974 il3945_dma_addr2rbd_ptr(il, rxb->page_dma);
975 rxq->queue[rxq->write] = rxb;
976 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
977 rxq->free_count--;
978 }
979 spin_unlock_irqrestore(&rxq->lock, flags);
980 /* If the pre-allocated buffer pool is dropping low, schedule to
981 * refill it */
982 if (rxq->free_count <= RX_LOW_WATERMARK)
983 queue_work(il->workqueue, &il->rx_replenish);
984
985 /* If we've added more space for the firmware to place data, tell it.
986 * Increment device's write pointer in multiples of 8. */
987 if (rxq->write_actual != (rxq->write & ~0x7) ||
988 abs(rxq->write - rxq->read) > 7) {
989 spin_lock_irqsave(&rxq->lock, flags);
990 rxq->need_update = 1;
991 spin_unlock_irqrestore(&rxq->lock, flags);
992 il_rx_queue_update_write_ptr(il, rxq);
993 }
994}
995
996/**
997 * il3945_rx_replenish - Move all used packet from rx_used to rx_free
998 *
999 * When moving to rx_free an SKB is allocated for the slot.
1000 *
1001 * Also restock the Rx queue via il3945_rx_queue_restock.
1002 * This is called as a scheduled work item (except for during initialization)
1003 */
1004static void
1005il3945_rx_allocate(struct il_priv *il, gfp_t priority)
1006{
1007 struct il_rx_queue *rxq = &il->rxq;
1008 struct list_head *element;
1009 struct il_rx_buf *rxb;
1010 struct page *page;
1011 unsigned long flags;
1012 gfp_t gfp_mask = priority;
1013
1014 while (1) {
1015 spin_lock_irqsave(&rxq->lock, flags);
1016
1017 if (list_empty(&rxq->rx_used)) {
1018 spin_unlock_irqrestore(&rxq->lock, flags);
1019 return;
1020 }
1021 spin_unlock_irqrestore(&rxq->lock, flags);
1022
1023 if (rxq->free_count > RX_LOW_WATERMARK)
1024 gfp_mask |= __GFP_NOWARN;
1025
1026 if (il->hw_params.rx_page_order > 0)
1027 gfp_mask |= __GFP_COMP;
1028
1029 /* Alloc a new receive buffer */
1030 page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
1031 if (!page) {
1032 if (net_ratelimit())
1033 D_INFO("Failed to allocate SKB buffer.\n");
1034 if (rxq->free_count <= RX_LOW_WATERMARK &&
1035 net_ratelimit())
1036 IL_ERR("Failed to allocate SKB buffer with %0x."
1037 "Only %u free buffers remaining.\n",
1038 priority, rxq->free_count);
1039 /* We don't reschedule replenish work here -- we will
1040 * call the restock method and if it still needs
1041 * more buffers it will schedule replenish */
1042 break;
1043 }
1044
1045 spin_lock_irqsave(&rxq->lock, flags);
1046 if (list_empty(&rxq->rx_used)) {
1047 spin_unlock_irqrestore(&rxq->lock, flags);
1048 __free_pages(page, il->hw_params.rx_page_order);
1049 return;
1050 }
1051 element = rxq->rx_used.next;
1052 rxb = list_entry(element, struct il_rx_buf, list);
1053 list_del(element);
1054 spin_unlock_irqrestore(&rxq->lock, flags);
1055
1056 rxb->page = page;
1057 /* Get physical address of RB/SKB */
1058 rxb->page_dma =
1059 pci_map_page(il->pci_dev, page, 0,
1060 PAGE_SIZE << il->hw_params.rx_page_order,
1061 PCI_DMA_FROMDEVICE);
1062
1063 spin_lock_irqsave(&rxq->lock, flags);
1064
1065 list_add_tail(&rxb->list, &rxq->rx_free);
1066 rxq->free_count++;
1067 il->alloc_rxb_page++;
1068
1069 spin_unlock_irqrestore(&rxq->lock, flags);
1070 }
1071}
1072
/* Return every pool buffer to rx_used (unmapping and freeing any pages
 * still attached) and reset the queue indexes so the queue is in the
 * "all processed, nothing restocked" state. */
void
il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
{
	unsigned long flags;
	int i;
	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__il_free_pages(il, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}
1102
/* Work-item entry point (process context): allocate fresh Rx buffers
 * with GFP_KERNEL, then restock the hardware queue under il->lock. */
void
il3945_rx_replenish(void *data)
{
	struct il_priv *il = data;
	unsigned long flags;

	il3945_rx_allocate(il, GFP_KERNEL);

	spin_lock_irqsave(&il->lock, flags);
	il3945_rx_queue_restock(il);
	spin_unlock_irqrestore(&il->lock, flags);
}
1115
/* Atomic-context variant of il3945_rx_replenish: GFP_ATOMIC allocation
 * and no il->lock around the restock (caller context differs). */
static void
il3945_rx_replenish_now(struct il_priv *il)
{
	il3945_rx_allocate(il, GFP_ATOMIC);

	il3945_rx_queue_restock(il);
}
1123
/* Assumes that the 'page' field of the buffers in 'pool' is kept accurate.
 * If a page has been detached, the pool entry must have page set to NULL.
 * This free routine walks the list of POOL entries and if page is
 * non-NULL it is unmapped and freed; finally the RBD ring and the
 * rb_stts status area are released.
 */
static void
il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
{
	int i;
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
				       PAGE_SIZE << il->hw_params.rx_page_order,
				       PCI_DMA_FROMDEVICE);
			__il_free_pages(il, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
	}

	/* RBD ring is 4 bytes (one __le32 pointer) per queue entry */
	dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->bd_dma);
	dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
			  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->bd = NULL;
	rxq->rb_stts = NULL;
}
1150
1151/* Convert linear signal-to-noise ratio into dB */
1152static u8 ratio2dB[100] = {
1153/* 0 1 2 3 4 5 6 7 8 9 */
1154 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
1155 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
1156 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
1157 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
1158 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
1159 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
1160 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
1161 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
1162 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
1163 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
1164};
1165
/* Calculates a relative dB value from a ratio of linear
 * (i.e. not dB) signal levels.
 * Conversion assumes that levels are voltages (20*log), not powers (10*log).
 * Returns 0..60 dB: ratios >= 1000 saturate at 60, ratios below 1 clamp
 * to 0, and 100..999 are scaled down by 10 before the table lookup with
 * 20 dB added back to compensate. */
int
il3945_calc_db_from_ratio(int sig_ratio)
{
	/* 1000:1 or higher just report as 60 dB */
	if (sig_ratio >= 1000)
		return 60;

	/* 100:1 or higher, divide by 10 and use table,
	 * add 20 dB to make up for divide by 10 */
	if (sig_ratio >= 100)
		return 20 + (int)ratio2dB[sig_ratio / 10];

	/* We shouldn't see this */
	if (sig_ratio < 1)
		return 0;

	/* Use table for ratios 1:1 - 99:1 */
	return (int)ratio2dB[sig_ratio];
}
1188
1189/**
1190 * il3945_rx_handle - Main entry function for receiving responses from uCode
1191 *
1192 * Uses the il->handlers callback function array to invoke
1193 * the appropriate handlers, including command responses,
1194 * frame-received notifications, and other notifications.
1195 */
1196static void
1197il3945_rx_handle(struct il_priv *il)
1198{
1199 struct il_rx_buf *rxb;
1200 struct il_rx_pkt *pkt;
1201 struct il_rx_queue *rxq = &il->rxq;
1202 u32 r, i;
1203 int reclaim;
1204 unsigned long flags;
1205 u8 fill_rx = 0;
1206 u32 count = 8;
1207 int total_empty = 0;
1208
1209 /* uCode's read idx (stored in shared DRAM) indicates the last Rx
1210 * buffer that the driver may process (last buffer filled by ucode). */
1211 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
1212 i = rxq->read;
1213
1214 /* calculate total frames need to be restock after handling RX */
1215 total_empty = r - rxq->write_actual;
1216 if (total_empty < 0)
1217 total_empty += RX_QUEUE_SIZE;
1218
1219 if (total_empty > (RX_QUEUE_SIZE / 2))
1220 fill_rx = 1;
1221 /* Rx interrupt, but nothing sent from uCode */
1222 if (i == r)
1223 D_RX("r = %d, i = %d\n", r, i);
1224
1225 while (i != r) {
1226 int len;
1227
1228 rxb = rxq->queue[i];
1229
1230 /* If an RXB doesn't have a Rx queue slot associated with it,
1231 * then a bug has been introduced in the queue refilling
1232 * routines -- catch it here */
1233 BUG_ON(rxb == NULL);
1234
1235 rxq->queue[i] = NULL;
1236
1237 pci_unmap_page(il->pci_dev, rxb->page_dma,
1238 PAGE_SIZE << il->hw_params.rx_page_order,
1239 PCI_DMA_FROMDEVICE);
1240 pkt = rxb_addr(rxb);
1241
1242 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
1243 len += sizeof(u32); /* account for status word */
1244
1245 /* Reclaim a command buffer only if this packet is a response
1246 * to a (driver-originated) command.
1247 * If the packet (e.g. Rx frame) originated from uCode,
1248 * there is no command buffer to reclaim.
1249 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
1250 * but apparently a few don't get set; catch them here. */
1251 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
1252 pkt->hdr.cmd != N_STATS && pkt->hdr.cmd != C_TX;
1253
1254 /* Based on type of command response or notification,
1255 * handle those that need handling via function in
1256 * handlers table. See il3945_setup_handlers() */
1257 if (il->handlers[pkt->hdr.cmd]) {
1258 D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
1259 il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
1260 il->isr_stats.handlers[pkt->hdr.cmd]++;
1261 il->handlers[pkt->hdr.cmd] (il, rxb);
1262 } else {
1263 /* No handling needed */
1264 D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
1265 i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
1266 }
1267
1268 /*
1269 * XXX: After here, we should always check rxb->page
1270 * against NULL before touching it or its virtual
1271 * memory (pkt). Because some handler might have
1272 * already taken or freed the pages.
1273 */
1274
1275 if (reclaim) {
1276 /* Invoke any callbacks, transfer the buffer to caller,
1277 * and fire off the (possibly) blocking il_send_cmd()
1278 * as we reclaim the driver command queue */
1279 if (rxb->page)
1280 il_tx_cmd_complete(il, rxb);
1281 else
1282 IL_WARN("Claim null rxb?\n");
1283 }
1284
1285 /* Reuse the page if possible. For notification packets and
1286 * SKBs that fail to Rx correctly, add them back into the
1287 * rx_free list for reuse later. */
1288 spin_lock_irqsave(&rxq->lock, flags);
1289 if (rxb->page != NULL) {
1290 rxb->page_dma =
1291 pci_map_page(il->pci_dev, rxb->page, 0,
1292 PAGE_SIZE << il->hw_params.
1293 rx_page_order, PCI_DMA_FROMDEVICE);
1294 list_add_tail(&rxb->list, &rxq->rx_free);
1295 rxq->free_count++;
1296 } else
1297 list_add_tail(&rxb->list, &rxq->rx_used);
1298
1299 spin_unlock_irqrestore(&rxq->lock, flags);
1300
1301 i = (i + 1) & RX_QUEUE_MASK;
1302 /* If there are a lot of unused frames,
1303 * restock the Rx queue so ucode won't assert. */
1304 if (fill_rx) {
1305 count++;
1306 if (count >= 8) {
1307 rxq->read = i;
1308 il3945_rx_replenish_now(il);
1309 count = 0;
1310 }
1311 }
1312 }
1313
1314 /* Backtrack one entry */
1315 rxq->read = i;
1316 if (fill_rx)
1317 il3945_rx_replenish_now(il);
1318 else
1319 il3945_rx_queue_restock(il);
1320}
1321
/* call this function to flush any scheduled tasklet
 *
 * Order matters: first wait for any in-flight hard-IRQ handler to
 * finish (it may still schedule the tasklet), then kill the tasklet
 * itself so nothing runs afterwards. */
static inline void
il3945_synchronize_irq(struct il_priv *il)
{
	/* wait to make sure we flush pending tasklet */
	synchronize_irq(il->pci_dev->irq);
	tasklet_kill(&il->irq_tasklet);
}
1330
/* il3945_desc_lookup - map a uCode error-log code to a printable name.
 * Codes 1..6 are defined; everything else (including 0) is "UNKNOWN". */
static const char *
il3945_desc_lookup(int i)
{
	static const char *const desc_names[] = {
		"FAIL",		/* 1 */
		"BAD_PARAM",	/* 2 */
		"BAD_CHECKSUM",	/* 3 */
		"NMI_INTERRUPT",	/* 4 */
		"SYSASSERT",	/* 5 */
		"FATAL_ERROR",	/* 6 */
	};

	if (i < 1 || i > 6)
		return "UNKNOWN";

	return desc_names[i - 1];
}
1351
/* SRAM error-log layout: one 32-bit record count, then fixed-size
 * records of seven 32-bit words each. */
#define ERROR_START_OFFSET (1 * sizeof(u32))
#define ERROR_ELEM_SIZE (7 * sizeof(u32))

/*
 * il3945_dump_nic_error_log - dump the uCode error log from device SRAM.
 *
 * Reads the error-event table at the address reported in the ALIVE
 * response (card_alive.error_event_table_ptr) and prints one line per
 * record.  Per the printed column header, blink1 maps to the asserting
 * PC, ilink2 to the NMI PC and data1 to the source line --
 * NOTE(review): mapping inferred from the header string; confirm
 * against uCode documentation.
 */
void
il3945_dump_nic_error_log(struct il_priv *il)
{
	u32 i;
	u32 desc, time, count, base, data1;
	u32 blink1, blink2, ilink1, ilink2;

	base = le32_to_cpu(il->card_alive.error_event_table_ptr);

	/* Table pointer must fall inside the 3945 data-SRAM window */
	if (!il3945_hw_valid_rtc_data_addr(base)) {
		IL_ERR("Not valid error log pointer 0x%08X\n", base);
		return;
	}

	/* First word of the table is the number of records */
	count = il_read_targ_mem(il, base);

	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
		IL_ERR("Start IWL Error Log Dump:\n");
		IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
	}

	IL_ERR("Desc       Time       asrtPC  blink2 "
	       "ilink1  nmiPC   Line\n");
	for (i = ERROR_START_OFFSET;
	     i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
	     i += ERROR_ELEM_SIZE) {
		/* Seven sequential words per record */
		desc = il_read_targ_mem(il, base + i);
		time = il_read_targ_mem(il, base + i + 1 * sizeof(u32));
		blink1 = il_read_targ_mem(il, base + i + 2 * sizeof(u32));
		blink2 = il_read_targ_mem(il, base + i + 3 * sizeof(u32));
		ilink1 = il_read_targ_mem(il, base + i + 4 * sizeof(u32));
		ilink2 = il_read_targ_mem(il, base + i + 5 * sizeof(u32));
		data1 = il_read_targ_mem(il, base + i + 6 * sizeof(u32));

		IL_ERR("%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
		       il3945_desc_lookup(desc), desc, time, blink1, blink2,
		       ilink1, ilink2, data1);
	}
}
1394
/*
 * il3945_irq_tasklet - bottom-half handler for all 3945 interrupts.
 *
 * Acks the pending CSR_INT and CSR_FH_INT_STATUS bits under il->lock,
 * then services each discovered source in order: HW error, SW error,
 * wakeup, Rx, Tx.  Re-enables interrupts at the end unless the HW-error
 * path deliberately left them disabled for the pending restart.
 */
static void
il3945_irq_tasklet(struct il_priv *il)
{
	u32 inta, handled = 0;
	u32 inta_fh;
	unsigned long flags;
#ifdef CONFIG_IWLEGACY_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&il->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
	inta = _il_rd(il, CSR_INT);
	_il_wr(il, CSR_INT, inta);

	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
	 * Any new interrupts that happen after this, either while we're
	 * in this tasklet, or later, will show up in next ISR/tasklet. */
	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
	_il_wr(il, CSR_FH_INT_STATUS, inta_fh);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & IL_DL_ISR) {
		/* just for debug */
		inta_mask = _il_rd(il, CSR_INT_MASK);
		D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
		      inta_mask, inta_fh);
	}
#endif

	spin_unlock_irqrestore(&il->lock, flags);

	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
	 * atomic, make sure that inta covers all the interrupts that
	 * we've discovered, even if FH interrupt came in just after
	 * reading CSR_INT. */
	if (inta_fh & CSR39_FH_INT_RX_MASK)
		inta |= CSR_INT_BIT_FH_RX;
	if (inta_fh & CSR39_FH_INT_TX_MASK)
		inta |= CSR_INT_BIT_FH_TX;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IL_ERR("Hardware error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		il_disable_interrupts(il);

		il->isr_stats.hw++;
		il_irq_handle_error(il);

		handled |= CSR_INT_BIT_HW_ERR;

		/* Interrupts intentionally stay disabled here; the error
		 * handler schedules a restart that re-initializes them. */
		return;
	}
#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			D_ISR("Scheduler finished to transmit "
			      "the frame/frames.\n");
			il->isr_stats.sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			D_ISR("Alive interrupt\n");
			il->isr_stats.alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IL_ERR("Microcode SW error detected. " "Restarting 0x%X.\n",
		       inta);
		il->isr_stats.sw++;
		il_irq_handle_error(il);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		D_ISR("Wakeup interrupt\n");
		/* Resync write pointers the device may have missed while
		 * asleep: the Rx queue and all six 3945 Tx queues. */
		il_rx_queue_update_write_ptr(il, &il->rxq);
		il_txq_update_write_ptr(il, &il->txq[0]);
		il_txq_update_write_ptr(il, &il->txq[1]);
		il_txq_update_write_ptr(il, &il->txq[2]);
		il_txq_update_write_ptr(il, &il->txq[3]);
		il_txq_update_write_ptr(il, &il->txq[4]);
		il_txq_update_write_ptr(il, &il->txq[5]);

		il->isr_stats.wakeup++;
		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
		il3945_rx_handle(il);
		il->isr_stats.rx++;
		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
	}

	if (inta & CSR_INT_BIT_FH_TX) {
		D_ISR("Tx interrupt\n");
		il->isr_stats.tx++;

		/* Ack the FH Tx source and release the service-channel
		 * credit -- NOTE(review): (1 << 6) is presumably the FH
		 * Tx status bit; confirm against CSR39_FH_INT_TX_MASK. */
		_il_wr(il, CSR_FH_INT_STATUS, (1 << 6));
		il_wr(il, FH39_TCSR_CREDIT(FH39_SRVC_CHNL), 0x0);
		handled |= CSR_INT_BIT_FH_TX;
	}

	if (inta & ~handled) {
		IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
		il->isr_stats.unhandled++;
	}

	if (inta & ~il->inta_mask) {
		IL_WARN("Disabled INTA bits 0x%08x were pending\n",
			inta & ~il->inta_mask);
		IL_WARN("   with inta_fh = 0x%08x\n", inta_fh);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(S_INT_ENABLED, &il->status))
		il_enable_interrupts(il);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		inta = _il_rd(il, CSR_INT);
		inta_mask = _il_rd(il, CSR_INT_MASK);
		inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
		D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
		      "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
	}
#endif
}
1540
/*
 * il3945_get_channels_for_scan - fill a scan-channel array for one band.
 *
 * Walks il->scan_request->channels, skips channels outside @band or
 * invalid per the driver channel table, and writes one
 * il3945_scan_channel entry per usable channel (dwell times, scan type,
 * probe bits, default txpower).  @scan_ch must have room for
 * scan_request->n_channels entries.  Returns the number of entries
 * written.
 */
static int
il3945_get_channels_for_scan(struct il_priv *il, enum ieee80211_band band,
			     u8 is_active, u8 n_probes,
			     struct il3945_scan_channel *scan_ch,
			     struct ieee80211_vif *vif)
{
	struct ieee80211_channel *chan;
	const struct ieee80211_supported_band *sband;
	const struct il_channel_info *ch_info;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added, i;

	sband = il_get_hw_mode(il, band);
	if (!sband)
		return 0;

	active_dwell = il_get_active_dwell_time(il, band, n_probes);
	passive_dwell = il_get_passive_dwell_time(il, band, vif);

	/* Keep passive strictly longer than active */
	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
		chan = il->scan_request->channels[i];

		if (chan->band != band)
			continue;

		scan_ch->channel = chan->hw_value;

		ch_info = il_get_channel_info(il, band, scan_ch->channel);
		if (!il_is_channel_valid(ch_info)) {
			D_SCAN("Channel %d is INVALID for this band.\n",
			       scan_ch->channel);
			continue;
		}

		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
		/* If passive , set up for auto-switch
		 *  and use long active_dwell time.
		 */
		if (!is_active || il_is_channel_passive(ch_info) ||
		    (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
			scan_ch->type = 0;	/* passive */
			/* uCode API v1 quirk: passive channels carry the
			 * passive dwell (minus 1) in the active field */
			if (IL_UCODE_API(il->ucode_ver) == 1)
				scan_ch->active_dwell =
				    cpu_to_le16(passive_dwell - 1);
		} else {
			scan_ch->type = 1;	/* active */
		}

		/* Set direct probe bits. These may be used both for active
		 * scan channels (probes gets sent right away),
		 * or for passive channels (probes get sent only after
		 * hearing clear Rx packet).*/
		if (IL_UCODE_API(il->ucode_ver) >= 2) {
			if (n_probes)
				scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes);
		} else {
			/* uCode v1 does not allow setting direct probe bits on
			 * passive channel. */
			if ((scan_ch->type & 1) && n_probes)
				scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes);
		}

		/* Set txpower levels to defaults */
		scan_ch->tpc.dsp_atten = 110;
		/* scan_pwr_info->tpc.dsp_atten; */

		/*scan_pwr_info->tpc.tx_gain; */
		if (band == IEEE80211_BAND_5GHZ)
			scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else {
			scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
			/* NOTE: if we were doing 6Mb OFDM for scans we'd use
			 * power level:
			 * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
			 */
		}

		D_SCAN("Scanning %d [%s %d]\n", scan_ch->channel,
		       (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
		       (scan_ch->type & 1) ? active_dwell : passive_dwell);

		/* Advance to the next output slot */
		scan_ch++;
		added++;
	}

	D_SCAN("total channels to scan %d\n", added);
	return added;
}
1634
1635static void
1636il3945_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
1637{
1638 int i;
1639
1640 for (i = 0; i < RATE_COUNT_LEGACY; i++) {
1641 rates[i].bitrate = il3945_rates[i].ieee * 5;
1642 rates[i].hw_value = i; /* Rate scaling will work on idxes */
1643 rates[i].hw_value_short = i;
1644 rates[i].flags = 0;
1645 if (i > IL39_LAST_OFDM_RATE || i < IL_FIRST_OFDM_RATE) {
1646 /*
1647 * If CCK != 1M then set short preamble rate flag.
1648 */
1649 rates[i].flags |=
1650 (il3945_rates[i].plcp ==
1651 10) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
1652 }
1653 }
1654}
1655
1656/******************************************************************************
1657 *
1658 * uCode download functions
1659 *
1660 ******************************************************************************/
1661
/* Free all six DMA-coherent firmware image buffers allocated by
 * il3945_read_ucode() (runtime inst/data, data backup, init inst/data,
 * bootstrap). */
static void
il3945_dealloc_ucode_pci(struct il_priv *il)
{
	il_free_fw_desc(il->pci_dev, &il->ucode_code);
	il_free_fw_desc(il->pci_dev, &il->ucode_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
	il_free_fw_desc(il->pci_dev, &il->ucode_init);
	il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_boot);
}
1672
1673/**
1674 * il3945_verify_inst_full - verify runtime uCode image in card vs. host,
1675 * looking at all data.
1676 */
1677static int
1678il3945_verify_inst_full(struct il_priv *il, __le32 * image, u32 len)
1679{
1680 u32 val;
1681 u32 save_len = len;
1682 int rc = 0;
1683 u32 errcnt;
1684
1685 D_INFO("ucode inst image size is %u\n", len);
1686
1687 il_wr(il, HBUS_TARG_MEM_RADDR, IL39_RTC_INST_LOWER_BOUND);
1688
1689 errcnt = 0;
1690 for (; len > 0; len -= sizeof(u32), image++) {
1691 /* read data comes through single port, auto-incr addr */
1692 /* NOTE: Use the debugless read so we don't flood kernel log
1693 * if IL_DL_IO is set */
1694 val = _il_rd(il, HBUS_TARG_MEM_RDAT);
1695 if (val != le32_to_cpu(*image)) {
1696 IL_ERR("uCode INST section is invalid at "
1697 "offset 0x%x, is 0x%x, s/b 0x%x\n",
1698 save_len - len, val, le32_to_cpu(*image));
1699 rc = -EIO;
1700 errcnt++;
1701 if (errcnt >= 20)
1702 break;
1703 }
1704 }
1705
1706 if (!errcnt)
1707 D_INFO("ucode image in INSTRUCTION memory is good\n");
1708
1709 return rc;
1710}
1711
1712/**
1713 * il3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
1714 * using sample data 100 bytes apart. If these sample points are good,
1715 * it's a pretty good bet that everything between them is good, too.
1716 */
1717static int
1718il3945_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len)
1719{
1720 u32 val;
1721 int rc = 0;
1722 u32 errcnt = 0;
1723 u32 i;
1724
1725 D_INFO("ucode inst image size is %u\n", len);
1726
1727 for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) {
1728 /* read data comes through single port, auto-incr addr */
1729 /* NOTE: Use the debugless read so we don't flood kernel log
1730 * if IL_DL_IO is set */
1731 il_wr(il, HBUS_TARG_MEM_RADDR, i + IL39_RTC_INST_LOWER_BOUND);
1732 val = _il_rd(il, HBUS_TARG_MEM_RDAT);
1733 if (val != le32_to_cpu(*image)) {
1734#if 0 /* Enable this if you want to see details */
1735 IL_ERR("uCode INST section is invalid at "
1736 "offset 0x%x, is 0x%x, s/b 0x%x\n", i, val,
1737 *image);
1738#endif
1739 rc = -EIO;
1740 errcnt++;
1741 if (errcnt >= 3)
1742 break;
1743 }
1744 }
1745
1746 return rc;
1747}
1748
1749/**
1750 * il3945_verify_ucode - determine which instruction image is in SRAM,
1751 * and verify its contents
1752 */
1753static int
1754il3945_verify_ucode(struct il_priv *il)
1755{
1756 __le32 *image;
1757 u32 len;
1758 int rc = 0;
1759
1760 /* Try bootstrap */
1761 image = (__le32 *) il->ucode_boot.v_addr;
1762 len = il->ucode_boot.len;
1763 rc = il3945_verify_inst_sparse(il, image, len);
1764 if (rc == 0) {
1765 D_INFO("Bootstrap uCode is good in inst SRAM\n");
1766 return 0;
1767 }
1768
1769 /* Try initialize */
1770 image = (__le32 *) il->ucode_init.v_addr;
1771 len = il->ucode_init.len;
1772 rc = il3945_verify_inst_sparse(il, image, len);
1773 if (rc == 0) {
1774 D_INFO("Initialize uCode is good in inst SRAM\n");
1775 return 0;
1776 }
1777
1778 /* Try runtime/protocol */
1779 image = (__le32 *) il->ucode_code.v_addr;
1780 len = il->ucode_code.len;
1781 rc = il3945_verify_inst_sparse(il, image, len);
1782 if (rc == 0) {
1783 D_INFO("Runtime uCode is good in inst SRAM\n");
1784 return 0;
1785 }
1786
1787 IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
1788
1789 /* Since nothing seems to match, show first several data entries in
1790 * instruction SRAM, so maybe visual inspection will give a clue.
1791 * Selection of bootstrap image (vs. other images) is arbitrary. */
1792 image = (__le32 *) il->ucode_boot.v_addr;
1793 len = il->ucode_boot.len;
1794 rc = il3945_verify_inst_full(il, image, len);
1795
1796 return rc;
1797}
1798
/* il3945_nic_start - release the device from reset so the on-board
 * processor can begin executing (bootstrap load follows). */
static void
il3945_nic_start(struct il_priv *il)
{
	/* Remove all resets to allow NIC to operate */
	_il_wr(il, CSR_RESET, 0);
}
1805
/* Generate a trivial accessor for one size field of the v1 uCode file
 * header; 3945 firmware only ever uses the v1 layout. */
#define IL3945_UCODE_GET(item) \
static u32 il3945_ucode_get_##item(const struct il_ucode_header *ucode)\
{ \
	return le32_to_cpu(ucode->v1.item); \
}
1811
/* The v1 file header is a fixed 24 bytes; api_ver is accepted but
 * ignored since 3945 firmware only uses the v1 header layout (the
 * parameter keeps the signature parallel with the 4965 accessors --
 * NOTE(review): presumed; confirm against iwl-4965 code). */
static u32
il3945_ucode_get_header_size(u32 api_ver)
{
	return 24;
}
1817
/* Return a byte pointer to the image payload that immediately follows
 * the v1 header. */
static u8 *
il3945_ucode_get_data(const struct il_ucode_header *ucode)
{
	return (u8 *) ucode->v1.data;
}
1823
/* Instantiate il3945_ucode_get_{inst,data,init,init_data,boot}_size() */
IL3945_UCODE_GET(inst_size);
IL3945_UCODE_GET(data_size);
IL3945_UCODE_GET(init_size);
IL3945_UCODE_GET(init_data_size);
IL3945_UCODE_GET(boot_size);
1829
1830/**
1831 * il3945_read_ucode - Read uCode images from disk file.
1832 *
1833 * Copy into buffers for card to fetch via bus-mastering
1834 */
1835static int
1836il3945_read_ucode(struct il_priv *il)
1837{
1838 const struct il_ucode_header *ucode;
1839 int ret = -EINVAL, idx;
1840 const struct firmware *ucode_raw;
1841 /* firmware file name contains uCode/driver compatibility version */
1842 const char *name_pre = il->cfg->fw_name_pre;
1843 const unsigned int api_max = il->cfg->ucode_api_max;
1844 const unsigned int api_min = il->cfg->ucode_api_min;
1845 char buf[25];
1846 u8 *src;
1847 size_t len;
1848 u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
1849
1850 /* Ask kernel firmware_class module to get the boot firmware off disk.
1851 * request_firmware() is synchronous, file is in memory on return. */
1852 for (idx = api_max; idx >= api_min; idx--) {
1853 sprintf(buf, "%s%u%s", name_pre, idx, ".ucode");
1854 ret = request_firmware(&ucode_raw, buf, &il->pci_dev->dev);
1855 if (ret < 0) {
1856 IL_ERR("%s firmware file req failed: %d\n", buf, ret);
1857 if (ret == -ENOENT)
1858 continue;
1859 else
1860 goto error;
1861 } else {
1862 if (idx < api_max)
1863 IL_ERR("Loaded firmware %s, "
1864 "which is deprecated. "
1865 " Please use API v%u instead.\n", buf,
1866 api_max);
1867 D_INFO("Got firmware '%s' file "
1868 "(%zd bytes) from disk\n", buf, ucode_raw->size);
1869 break;
1870 }
1871 }
1872
1873 if (ret < 0)
1874 goto error;
1875
1876 /* Make sure that we got at least our header! */
1877 if (ucode_raw->size < il3945_ucode_get_header_size(1)) {
1878 IL_ERR("File size way too small!\n");
1879 ret = -EINVAL;
1880 goto err_release;
1881 }
1882
1883 /* Data from ucode file: header followed by uCode images */
1884 ucode = (struct il_ucode_header *)ucode_raw->data;
1885
1886 il->ucode_ver = le32_to_cpu(ucode->ver);
1887 api_ver = IL_UCODE_API(il->ucode_ver);
1888 inst_size = il3945_ucode_get_inst_size(ucode);
1889 data_size = il3945_ucode_get_data_size(ucode);
1890 init_size = il3945_ucode_get_init_size(ucode);
1891 init_data_size = il3945_ucode_get_init_data_size(ucode);
1892 boot_size = il3945_ucode_get_boot_size(ucode);
1893 src = il3945_ucode_get_data(ucode);
1894
1895 /* api_ver should match the api version forming part of the
1896 * firmware filename ... but we don't check for that and only rely
1897 * on the API version read from firmware header from here on forward */
1898
1899 if (api_ver < api_min || api_ver > api_max) {
1900 IL_ERR("Driver unable to support your firmware API. "
1901 "Driver supports v%u, firmware is v%u.\n", api_max,
1902 api_ver);
1903 il->ucode_ver = 0;
1904 ret = -EINVAL;
1905 goto err_release;
1906 }
1907 if (api_ver != api_max)
1908 IL_ERR("Firmware has old API version. Expected %u, "
1909 "got %u. New firmware can be obtained "
1910 "from http://www.intellinuxwireless.org.\n", api_max,
1911 api_ver);
1912
1913 IL_INFO("loaded firmware version %u.%u.%u.%u\n",
1914 IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
1915 IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));
1916
1917 snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
1918 "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
1919 IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
1920 IL_UCODE_SERIAL(il->ucode_ver));
1921
1922 D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
1923 D_INFO("f/w package hdr runtime inst size = %u\n", inst_size);
1924 D_INFO("f/w package hdr runtime data size = %u\n", data_size);
1925 D_INFO("f/w package hdr init inst size = %u\n", init_size);
1926 D_INFO("f/w package hdr init data size = %u\n", init_data_size);
1927 D_INFO("f/w package hdr boot inst size = %u\n", boot_size);
1928
1929 /* Verify size of file vs. image size info in file's header */
1930 if (ucode_raw->size !=
1931 il3945_ucode_get_header_size(api_ver) + inst_size + data_size +
1932 init_size + init_data_size + boot_size) {
1933
1934 D_INFO("uCode file size %zd does not match expected size\n",
1935 ucode_raw->size);
1936 ret = -EINVAL;
1937 goto err_release;
1938 }
1939
1940 /* Verify that uCode images will fit in card's SRAM */
1941 if (inst_size > IL39_MAX_INST_SIZE) {
1942 D_INFO("uCode instr len %d too large to fit in\n", inst_size);
1943 ret = -EINVAL;
1944 goto err_release;
1945 }
1946
1947 if (data_size > IL39_MAX_DATA_SIZE) {
1948 D_INFO("uCode data len %d too large to fit in\n", data_size);
1949 ret = -EINVAL;
1950 goto err_release;
1951 }
1952 if (init_size > IL39_MAX_INST_SIZE) {
1953 D_INFO("uCode init instr len %d too large to fit in\n",
1954 init_size);
1955 ret = -EINVAL;
1956 goto err_release;
1957 }
1958 if (init_data_size > IL39_MAX_DATA_SIZE) {
1959 D_INFO("uCode init data len %d too large to fit in\n",
1960 init_data_size);
1961 ret = -EINVAL;
1962 goto err_release;
1963 }
1964 if (boot_size > IL39_MAX_BSM_SIZE) {
1965 D_INFO("uCode boot instr len %d too large to fit in\n",
1966 boot_size);
1967 ret = -EINVAL;
1968 goto err_release;
1969 }
1970
1971 /* Allocate ucode buffers for card's bus-master loading ... */
1972
1973 /* Runtime instructions and 2 copies of data:
1974 * 1) unmodified from disk
1975 * 2) backup cache for save/restore during power-downs */
1976 il->ucode_code.len = inst_size;
1977 il_alloc_fw_desc(il->pci_dev, &il->ucode_code);
1978
1979 il->ucode_data.len = data_size;
1980 il_alloc_fw_desc(il->pci_dev, &il->ucode_data);
1981
1982 il->ucode_data_backup.len = data_size;
1983 il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);
1984
1985 if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
1986 !il->ucode_data_backup.v_addr)
1987 goto err_pci_alloc;
1988
1989 /* Initialization instructions and data */
1990 if (init_size && init_data_size) {
1991 il->ucode_init.len = init_size;
1992 il_alloc_fw_desc(il->pci_dev, &il->ucode_init);
1993
1994 il->ucode_init_data.len = init_data_size;
1995 il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);
1996
1997 if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
1998 goto err_pci_alloc;
1999 }
2000
2001 /* Bootstrap (instructions only, no data) */
2002 if (boot_size) {
2003 il->ucode_boot.len = boot_size;
2004 il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);
2005
2006 if (!il->ucode_boot.v_addr)
2007 goto err_pci_alloc;
2008 }
2009
2010 /* Copy images into buffers for card's bus-master reads ... */
2011
2012 /* Runtime instructions (first block of data in file) */
2013 len = inst_size;
2014 D_INFO("Copying (but not loading) uCode instr len %zd\n", len);
2015 memcpy(il->ucode_code.v_addr, src, len);
2016 src += len;
2017
2018 D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
2019 il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);
2020
2021 /* Runtime data (2nd block)
2022 * NOTE: Copy into backup buffer will be done in il3945_up() */
2023 len = data_size;
2024 D_INFO("Copying (but not loading) uCode data len %zd\n", len);
2025 memcpy(il->ucode_data.v_addr, src, len);
2026 memcpy(il->ucode_data_backup.v_addr, src, len);
2027 src += len;
2028
2029 /* Initialization instructions (3rd block) */
2030 if (init_size) {
2031 len = init_size;
2032 D_INFO("Copying (but not loading) init instr len %zd\n", len);
2033 memcpy(il->ucode_init.v_addr, src, len);
2034 src += len;
2035 }
2036
2037 /* Initialization data (4th block) */
2038 if (init_data_size) {
2039 len = init_data_size;
2040 D_INFO("Copying (but not loading) init data len %zd\n", len);
2041 memcpy(il->ucode_init_data.v_addr, src, len);
2042 src += len;
2043 }
2044
2045 /* Bootstrap instructions (5th block) */
2046 len = boot_size;
2047 D_INFO("Copying (but not loading) boot instr len %zd\n", len);
2048 memcpy(il->ucode_boot.v_addr, src, len);
2049
2050 /* We have our copies now, allow OS release its copies */
2051 release_firmware(ucode_raw);
2052 return 0;
2053
2054err_pci_alloc:
2055 IL_ERR("failed to allocate pci memory\n");
2056 ret = -ENOMEM;
2057 il3945_dealloc_ucode_pci(il);
2058
2059err_release:
2060 release_firmware(ucode_raw);
2061
2062error:
2063 return ret;
2064}
2065
2066/**
2067 * il3945_set_ucode_ptrs - Set uCode address location
2068 *
2069 * Tell initialization uCode where to find runtime uCode.
2070 *
2071 * BSM registers initially contain pointers to initialization uCode.
2072 * We need to replace them to load runtime uCode inst and data,
2073 * and to save runtime data when powering down.
2074 */
2075static int
2076il3945_set_ucode_ptrs(struct il_priv *il)
2077{
2078 dma_addr_t pinst;
2079 dma_addr_t pdata;
2080
2081 /* bits 31:0 for 3945 */
2082 pinst = il->ucode_code.p_addr;
2083 pdata = il->ucode_data_backup.p_addr;
2084
2085 /* Tell bootstrap uCode where to find image to load */
2086 il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
2087 il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
2088 il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len);
2089
2090 /* Inst byte count must be last to set up, bit 31 signals uCode
2091 * that all new ptr/size info is in place */
2092 il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG,
2093 il->ucode_code.len | BSM_DRAM_INST_LOAD);
2094
2095 D_INFO("Runtime uCode pointers are set.\n");
2096
2097 return 0;
2098}
2099
2100/**
2101 * il3945_init_alive_start - Called after N_ALIVE notification received
2102 *
2103 * Called after N_ALIVE notification received from "initialize" uCode.
2104 *
2105 * Tell "initialize" uCode to go ahead and load the runtime uCode.
2106 */
2107static void
2108il3945_init_alive_start(struct il_priv *il)
2109{
2110 /* Check alive response for "valid" sign from uCode */
2111 if (il->card_alive_init.is_valid != UCODE_VALID_OK) {
2112 /* We had an error bringing up the hardware, so take it
2113 * all the way back down so we can try again */
2114 D_INFO("Initialize Alive failed.\n");
2115 goto restart;
2116 }
2117
2118 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
2119 * This is a paranoid check, because we would not have gotten the
2120 * "initialize" alive if code weren't properly loaded. */
2121 if (il3945_verify_ucode(il)) {
2122 /* Runtime instruction load was bad;
2123 * take it all the way back down so we can try again */
2124 D_INFO("Bad \"initialize\" uCode load.\n");
2125 goto restart;
2126 }
2127
2128 /* Send pointers to protocol/runtime uCode image ... init code will
2129 * load and launch runtime uCode, which will send us another "Alive"
2130 * notification. */
2131 D_INFO("Initialization Alive received.\n");
2132 if (il3945_set_ucode_ptrs(il)) {
2133 /* Runtime instruction load won't happen;
2134 * take it all the way back down so we can try again */
2135 D_INFO("Couldn't set up uCode pointers.\n");
2136 goto restart;
2137 }
2138 return;
2139
2140restart:
2141 queue_work(il->workqueue, &il->restart);
2142}
2143
2144/**
2145 * il3945_alive_start - called after N_ALIVE notification received
2146 * from protocol/runtime uCode (initialization uCode's
2147 * Alive gets handled by il3945_init_alive_start()).
2148 */
2149static void
2150il3945_alive_start(struct il_priv *il)
2151{
2152 int thermal_spin = 0;
2153 u32 rfkill;
2154 struct il_rxon_context *ctx = &il->ctx;
2155
2156 D_INFO("Runtime Alive received.\n");
2157
2158 if (il->card_alive.is_valid != UCODE_VALID_OK) {
2159 /* We had an error bringing up the hardware, so take it
2160 * all the way back down so we can try again */
2161 D_INFO("Alive failed.\n");
2162 goto restart;
2163 }
2164
2165 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
2166 * This is a paranoid check, because we would not have gotten the
2167 * "runtime" alive if code weren't properly loaded. */
2168 if (il3945_verify_ucode(il)) {
2169 /* Runtime instruction load was bad;
2170 * take it all the way back down so we can try again */
2171 D_INFO("Bad runtime uCode load.\n");
2172 goto restart;
2173 }
2174
2175 rfkill = il_rd_prph(il, APMG_RFKILL_REG);
2176 D_INFO("RFKILL status: 0x%x\n", rfkill);
2177
2178 if (rfkill & 0x1) {
2179 clear_bit(S_RF_KILL_HW, &il->status);
2180 /* if RFKILL is not on, then wait for thermal
2181 * sensor in adapter to kick in */
2182 while (il3945_hw_get_temperature(il) == 0) {
2183 thermal_spin++;
2184 udelay(10);
2185 }
2186
2187 if (thermal_spin)
2188 D_INFO("Thermal calibration took %dus\n",
2189 thermal_spin * 10);
2190 } else
2191 set_bit(S_RF_KILL_HW, &il->status);
2192
2193 /* After the ALIVE response, we can send commands to 3945 uCode */
2194 set_bit(S_ALIVE, &il->status);
2195
2196 /* Enable watchdog to monitor the driver tx queues */
2197 il_setup_watchdog(il);
2198
2199 if (il_is_rfkill(il))
2200 return;
2201
2202 ieee80211_wake_queues(il->hw);
2203
2204 il->active_rate = RATES_MASK_3945;
2205
2206 il_power_update_mode(il, true);
2207
2208 if (il_is_associated(il)) {
2209 struct il3945_rxon_cmd *active_rxon =
2210 (struct il3945_rxon_cmd *)(&ctx->active);
2211
2212 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2213 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2214 } else {
2215 /* Initialize our rx_config data */
2216 il_connection_init_rx_config(il, ctx);
2217 }
2218
2219 /* Configure Bluetooth device coexistence support */
2220 il_send_bt_config(il);
2221
2222 set_bit(S_READY, &il->status);
2223
2224 /* Configure the adapter for unassociated operation */
2225 il3945_commit_rxon(il, ctx);
2226
2227 il3945_reg_txpower_periodic(il);
2228
2229 D_INFO("ALIVE processing complete.\n");
2230 wake_up(&il->wait_command_queue);
2231
2232 return;
2233
2234restart:
2235 queue_work(il->workqueue, &il->restart);
2236}
2237
2238static void il3945_cancel_deferred_work(struct il_priv *il);
2239
/*
 * __il3945_down - take the device all the way down (caller holds il->mutex).
 *
 * Cancels scanning, stops the watchdog, clears station state, disables
 * interrupts, stops Tx/Rx DMA and puts the device into low power.
 * Preserves only the RF-kill (and, past init, GEO/FW-error/exit) status
 * bits across the teardown.
 */
static void
__il3945_down(struct il_priv *il)
{
	unsigned long flags;
	int exit_pending;

	D_INFO(DRV_NAME " is going down\n");

	il_scan_cancel_timeout(il, 200);

	exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);

	/* Stop TX queues watchdog. We need to have S_EXIT_PENDING bit set
	 * to prevent rearm timer */
	del_timer_sync(&il->watchdog);

	/* Station information will now be cleared in device */
	il_clear_ucode_stations(il, NULL);
	il_dealloc_bcast_stations(il);
	il_clear_driver_stations(il);

	/* Unblock any waiting calls */
	wake_up_all(&il->wait_command_queue);

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		clear_bit(S_EXIT_PENDING, &il->status);

	/* stop and reset the on-board processor */
	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);
	il3945_synchronize_irq(il);

	if (il->mac80211_registered)
		ieee80211_stop_queues(il->hw);

	/* If we have not previously called il3945_init() then
	 * clear all bits but the RF Kill bits and return */
	if (!il_is_init(il)) {
		/* Rebuild il->status from scratch, carrying over only the
		 * RF-kill, geo-configured and exit-pending bits. */
		il->status =
		    test_bit(S_RF_KILL_HW, &il->status) << S_RF_KILL_HW |
		    test_bit(S_GEO_CONFIGURED,
			     &il->status) << S_GEO_CONFIGURED |
		    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
		goto exit;
	}

	/* ...otherwise clear out all the status bits but the RF Kill
	 * bit and continue taking the NIC down. */
	il->status &=
	    test_bit(S_RF_KILL_HW, &il->status) << S_RF_KILL_HW |
	    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
	    test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR |
	    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;

	il3945_hw_txq_ctx_stop(il);
	il3945_hw_rxq_stop(il);

	/* Power-down device's busmaster DMA clocks */
	il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(5);

	/* Stop the device, and put it in low power state */
	il_apm_stop(il);

exit:
	memset(&il->card_alive, 0, sizeof(struct il_alive_resp));

	if (il->beacon_skb)
		dev_kfree_skb(il->beacon_skb);
	il->beacon_skb = NULL;

	/* clear out any free frames */
	il3945_clear_free_frames(il);
}
2327
/*
 * il3945_down - locked wrapper around __il3945_down()
 *
 * Deferred work is cancelled only after the mutex is dropped, since
 * some work items take il->mutex themselves and would deadlock.
 */
static void
il3945_down(struct il_priv *il)
{
	mutex_lock(&il->mutex);
	__il3945_down(il);
	mutex_unlock(&il->mutex);

	il3945_cancel_deferred_work(il);
}
2337
2338#define MAX_HW_RESTARTS 5
2339
2340static int
2341il3945_alloc_bcast_station(struct il_priv *il)
2342{
2343 struct il_rxon_context *ctx = &il->ctx;
2344 unsigned long flags;
2345 u8 sta_id;
2346
2347 spin_lock_irqsave(&il->sta_lock, flags);
2348 sta_id = il_prep_station(il, ctx, il_bcast_addr, false, NULL);
2349 if (sta_id == IL_INVALID_STATION) {
2350 IL_ERR("Unable to prepare broadcast station\n");
2351 spin_unlock_irqrestore(&il->sta_lock, flags);
2352
2353 return -EINVAL;
2354 }
2355
2356 il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
2357 il->stations[sta_id].used |= IL_STA_BCAST;
2358 spin_unlock_irqrestore(&il->sta_lock, flags);
2359
2360 return 0;
2361}
2362
/*
 * __il3945_up - bring the NIC up (caller must hold il->mutex)
 *
 * Allocates the broadcast station, checks RF-kill, initializes NIC
 * hardware, then tries up to MAX_HW_RESTARTS times to load the
 * bootstrap uCode and start the card.  Returns 0 on success or a
 * negative errno; on repeated failure the device is taken back down.
 */
static int
__il3945_up(struct il_priv *il)
{
	int rc, i;

	rc = il3945_alloc_bcast_station(il);
	if (rc)
		return rc;

	if (test_bit(S_EXIT_PENDING, &il->status)) {
		IL_WARN("Exit pending; will not bring the NIC up\n");
		return -EIO;
	}

	/* Both the runtime image and its backup copy must be loaded */
	if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
		IL_ERR("ucode not available for device bring up\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(S_RF_KILL_HW, &il->status);
	else {
		set_bit(S_RF_KILL_HW, &il->status);
		IL_WARN("Radio disabled by HW RF Kill switch\n");
		return -ENODEV;
	}

	/* Ack/clear all pending interrupts before hardware init */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);

	rc = il3945_hw_nic_init(il);
	if (rc) {
		IL_ERR("Unable to int nic\n");
		return rc;
	}

	/* make sure rfkill handshake bits are cleared */
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);
	il_enable_interrupts(il);

	/* really make sure rfkill handshake bits are cleared */
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Copy original ucode data image from disk into backup cache.
	 * This will be used to initialize the on-board processor's
	 * data SRAM for a clean start when the runtime program first loads. */
	memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
	       il->ucode_data.len);

	/* We return success when we resume from suspend and rf_kill is on. */
	if (test_bit(S_RF_KILL_HW, &il->status))
		return 0;

	for (i = 0; i < MAX_HW_RESTARTS; i++) {

		/* load bootstrap state machine,
		 * load bootstrap program into processor's memory,
		 * prepare to load the "initialize" uCode */
		rc = il->cfg->ops->lib->load_ucode(il);

		if (rc) {
			IL_ERR("Unable to set up bootstrap uCode: %d\n", rc);
			continue;
		}

		/* start card; "initialize" will load runtime ucode */
		il3945_nic_start(il);

		D_INFO(DRV_NAME " is coming up\n");

		return 0;
	}

	/* All restart attempts failed; tear the device back down.
	 * S_EXIT_PENDING is set temporarily so __il3945_down() takes the
	 * full teardown path, then cleared so a later up can succeed. */
	set_bit(S_EXIT_PENDING, &il->status);
	__il3945_down(il);
	clear_bit(S_EXIT_PENDING, &il->status);

	/* tried to restart and config the device for as long as our
	 * patience could withstand */
	IL_ERR("Unable to initialize device after %d attempts.\n", i);
	return -EIO;
}
2450
2451/*****************************************************************************
2452 *
2453 * Workqueue callbacks
2454 *
2455 *****************************************************************************/
2456
2457static void
2458il3945_bg_init_alive_start(struct work_struct *data)
2459{
2460 struct il_priv *il =
2461 container_of(data, struct il_priv, init_alive_start.work);
2462
2463 mutex_lock(&il->mutex);
2464 if (test_bit(S_EXIT_PENDING, &il->status))
2465 goto out;
2466
2467 il3945_init_alive_start(il);
2468out:
2469 mutex_unlock(&il->mutex);
2470}
2471
2472static void
2473il3945_bg_alive_start(struct work_struct *data)
2474{
2475 struct il_priv *il =
2476 container_of(data, struct il_priv, alive_start.work);
2477
2478 mutex_lock(&il->mutex);
2479 if (test_bit(S_EXIT_PENDING, &il->status))
2480 goto out;
2481
2482 il3945_alive_start(il);
2483out:
2484 mutex_unlock(&il->mutex);
2485}
2486
2487/*
2488 * 3945 cannot interrupt driver when hardware rf kill switch toggles;
2489 * driver must poll CSR_GP_CNTRL_REG register for change. This register
2490 * *is* readable even when device has been SW_RESET into low power mode
2491 * (e.g. during RF KILL).
2492 */
2493static void
2494il3945_rfkill_poll(struct work_struct *data)
2495{
2496 struct il_priv *il =
2497 container_of(data, struct il_priv, _3945.rfkill_poll.work);
2498 bool old_rfkill = test_bit(S_RF_KILL_HW, &il->status);
2499 bool new_rfkill =
2500 !(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
2501
2502 if (new_rfkill != old_rfkill) {
2503 if (new_rfkill)
2504 set_bit(S_RF_KILL_HW, &il->status);
2505 else
2506 clear_bit(S_RF_KILL_HW, &il->status);
2507
2508 wiphy_rfkill_set_hw_state(il->hw->wiphy, new_rfkill);
2509
2510 D_RF_KILL("RF_KILL bit toggled to %s.\n",
2511 new_rfkill ? "disable radio" : "enable radio");
2512 }
2513
2514 /* Keep this running, even if radio now enabled. This will be
2515 * cancelled in mac_start() if system decides to start again */
2516 queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll,
2517 round_jiffies_relative(2 * HZ));
2518
2519}
2520
/*
 * il3945_request_scan - build and send a C_SCAN host command
 *
 * Assembles the (huge) scan command in il->scan_cmd: quiet/suspend
 * timing, direct-probe SSIDs, TX parameters, band flags, probe request
 * template and the per-channel list, then sends it synchronously.
 * Caller must hold il->mutex.  Returns 0 on success or a negative errno.
 */
int
il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
{
	struct il_host_cmd cmd = {
		.id = C_SCAN,
		.len = sizeof(struct il3945_scan_cmd),
		.flags = CMD_SIZE_HUGE,
	};
	struct il3945_scan_cmd *scan;
	u8 n_probes = 0;
	enum ieee80211_band band;
	bool is_active = false;
	int ret;
	u16 len;

	lockdep_assert_held(&il->mutex);

	/* Lazily allocate the scan command buffer; it is reused for the
	 * lifetime of the device. */
	if (!il->scan_cmd) {
		il->scan_cmd =
		    kmalloc(sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE,
			    GFP_KERNEL);
		if (!il->scan_cmd) {
			D_SCAN("Fail to allocate scan memory\n");
			return -ENOMEM;
		}
	}
	scan = il->scan_cmd;
	memset(scan, 0, sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE);

	scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
	scan->quiet_time = IL_ACTIVE_QUIET_TIME;

	/* When associated, limit time off-channel and encode how many
	 * beacons may pass while we are away, so the connection and
	 * power save state survive the scan. */
	if (il_is_associated(il)) {
		u16 interval;
		u32 extra;
		u32 suspend_time = 100;
		u32 scan_suspend_time = 100;

		D_INFO("Scanning while associated...\n");

		interval = vif->bss_conf.beacon_int;

		scan->suspend_time = 0;
		scan->max_out_time = cpu_to_le32(200 * 1024);
		if (!interval)
			interval = suspend_time;
		/*
		 * suspend time format:
		 *  0-19: beacon interval in usec (time before exec.)
		 * 20-23: 0
		 * 24-31: number of beacons (suspend between channels)
		 */

		extra = (suspend_time / interval) << 24;
		scan_suspend_time =
		    0xFF0FFFFF & (extra | ((suspend_time % interval) * 1024));

		scan->suspend_time = cpu_to_le32(scan_suspend_time);
		D_SCAN("suspend_time 0x%X beacon interval %d\n",
		       scan_suspend_time, interval);
	}

	/* Copy requested SSIDs into the direct-scan slots; a non-empty
	 * list makes this an active scan. */
	if (il->scan_request->n_ssids) {
		int i, p = 0;
		D_SCAN("Kicking off active scan\n");
		for (i = 0; i < il->scan_request->n_ssids; i++) {
			/* always does wildcard anyway */
			if (!il->scan_request->ssids[i].ssid_len)
				continue;
			scan->direct_scan[p].id = WLAN_EID_SSID;
			scan->direct_scan[p].len =
			    il->scan_request->ssids[i].ssid_len;
			memcpy(scan->direct_scan[p].ssid,
			       il->scan_request->ssids[i].ssid,
			       il->scan_request->ssids[i].ssid_len);
			n_probes++;
			p++;
		}
		is_active = true;
	} else
		D_SCAN("Kicking off passive scan.\n");

	/* We don't build a direct scan probe request; the uCode will do
	 * that based on the direct_mask added to each channel entry */
	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	scan->tx_cmd.sta_id = il->ctx.bcast_sta_id;
	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	/* flags + rate selection */

	switch (il->scan_band) {
	case IEEE80211_BAND_2GHZ:
		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
		scan->tx_cmd.rate = RATE_1M_PLCP;
		band = IEEE80211_BAND_2GHZ;
		break;
	case IEEE80211_BAND_5GHZ:
		scan->tx_cmd.rate = RATE_6M_PLCP;
		band = IEEE80211_BAND_5GHZ;
		break;
	default:
		IL_WARN("Invalid scan band\n");
		return -EIO;
	}

	/*
	 * If active scaning is requested but a certain channel
	 * is marked passive, we can do active scanning if we
	 * detect transmissions.
	 */
	scan->good_CRC_th =
	    is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_DISABLED;

	/* Append the probe request template (with user IEs) after the
	 * fixed part of the command. */
	len =
	    il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
			      vif->addr, il->scan_request->ie,
			      il->scan_request->ie_len,
			      IL_MAX_SCAN_SIZE - sizeof(*scan));
	scan->tx_cmd.len = cpu_to_le16(len);

	/* select Rx antennas */
	scan->flags |= il3945_get_antenna_flags(il);

	/* Channel entries follow the probe request in the payload */
	scan->channel_count =
	    il3945_get_channels_for_scan(il, band, is_active, n_probes,
					 (void *)&scan->data[len], vif);
	if (scan->channel_count == 0) {
		D_SCAN("channel count %d\n", scan->channel_count);
		return -EIO;
	}

	/* Total command length = header + probe request + channel list */
	cmd.len +=
	    le16_to_cpu(scan->tx_cmd.len) +
	    scan->channel_count * sizeof(struct il3945_scan_channel);
	cmd.data = scan;
	scan->len = cpu_to_le16(cmd.len);

	set_bit(S_SCAN_HW, &il->status);
	ret = il_send_cmd_sync(il, &cmd);
	if (ret)
		clear_bit(S_SCAN_HW, &il->status);
	return ret;
}
2664
2665void
2666il3945_post_scan(struct il_priv *il)
2667{
2668 struct il_rxon_context *ctx = &il->ctx;
2669
2670 /*
2671 * Since setting the RXON may have been deferred while
2672 * performing the scan, fire one off if needed
2673 */
2674 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
2675 il3945_commit_rxon(il, ctx);
2676}
2677
/*
 * il3945_bg_restart - workqueue handler for il->restart
 *
 * On firmware error, drops the interface state and asks mac80211 to
 * restart the hardware; otherwise performs a plain down/up cycle.
 * Note il3945_down() takes il->mutex itself, so it is called outside
 * the locked sections here.
 */
static void
il3945_bg_restart(struct work_struct *data)
{
	struct il_priv *il = container_of(data, struct il_priv, restart);

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
		/* Firmware crashed: detach vif state, then let mac80211
		 * drive the restart via its start() callback. */
		mutex_lock(&il->mutex);
		il->ctx.vif = NULL;
		il->is_open = 0;
		mutex_unlock(&il->mutex);
		il3945_down(il);
		ieee80211_restart_hw(il->hw);
	} else {
		il3945_down(il);

		/* Re-check for shutdown before bringing the NIC back up */
		mutex_lock(&il->mutex);
		if (test_bit(S_EXIT_PENDING, &il->status)) {
			mutex_unlock(&il->mutex);
			return;
		}

		__il3945_up(il);
		mutex_unlock(&il->mutex);
	}
}
2706
2707static void
2708il3945_bg_rx_replenish(struct work_struct *data)
2709{
2710 struct il_priv *il = container_of(data, struct il_priv, rx_replenish);
2711
2712 mutex_lock(&il->mutex);
2713 if (test_bit(S_EXIT_PENDING, &il->status))
2714 goto out;
2715
2716 il3945_rx_replenish(il);
2717out:
2718 mutex_unlock(&il->mutex);
2719}
2720
/*
 * il3945_post_associate - program the device after mac80211 association
 *
 * Commits an unassociated RXON, sends timing, then commits the
 * associated RXON with the negotiated AID/preamble/slot settings, and
 * finally starts rate scaling (STA) or beaconing (IBSS).  The
 * unassoc-commit / timing / assoc-commit ordering is required by the
 * device.
 */
void
il3945_post_associate(struct il_priv *il)
{
	int rc = 0;
	struct ieee80211_conf *conf = NULL;
	struct il_rxon_context *ctx = &il->ctx;

	if (!ctx->vif || !il->is_open)
		return;

	D_ASSOC("Associated as %d to: %pM\n", ctx->vif->bss_conf.aid,
		ctx->active.bssid_addr);

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	/* A scan would conflict with the RXON commands below */
	il_scan_cancel_timeout(il, 200);

	conf = &il->hw->conf;

	/* First commit with ASSOC cleared so the timing command is
	 * accepted by the device */
	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	il3945_commit_rxon(il, ctx);

	rc = il_send_rxon_timing(il, ctx);
	if (rc)
		IL_WARN("C_RXON_TIMING failed - " "Attempting to continue.\n");

	ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;

	ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid);

	D_ASSOC("assoc id %d beacon interval %d\n", ctx->vif->bss_conf.aid,
		ctx->vif->bss_conf.beacon_int);

	/* Mirror the negotiated preamble into the RXON flags */
	if (ctx->vif->bss_conf.use_short_preamble)
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;

	/* Short slot is only meaningful on 2.4 GHz */
	if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
		if (ctx->vif->bss_conf.use_short_slot)
			ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
	}

	il3945_commit_rxon(il, ctx);

	switch (ctx->vif->type) {
	case NL80211_IFTYPE_STATION:
		il3945_rate_scale_init(il->hw, IL_AP_ID);
		break;
	case NL80211_IFTYPE_ADHOC:
		il3945_send_beacon_cmd(il);
		break;
	default:
		IL_ERR("%s Should not be called in %d mode\n", __func__,
		       ctx->vif->type);
		break;
	}
}
2782
2783/*****************************************************************************
2784 *
2785 * mac80211 entry point functions
2786 *
2787 *****************************************************************************/
2788
2789#define UCODE_READY_TIMEOUT (2 * HZ)
2790
/*
 * il3945_mac_start - mac80211 start() callback
 *
 * Reads the uCode image (first open only), brings the NIC up, then
 * waits up to UCODE_READY_TIMEOUT for the uCode's ALIVE notification
 * before declaring the interface open.  Returns 0 on success or a
 * negative errno.
 */
static int
il3945_mac_start(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;
	int ret;

	D_MAC80211("enter\n");

	/* we should be verifying the device is ready to be opened */
	mutex_lock(&il->mutex);

	/* fetch ucode file from disk, alloc and copy to bus-master buffers ...
	 * ucode filename and max sizes are card-specific. */

	if (!il->ucode_code.len) {
		ret = il3945_read_ucode(il);
		if (ret) {
			IL_ERR("Could not read microcode: %d\n", ret);
			mutex_unlock(&il->mutex);
			goto out_release_irq;
		}
	}

	ret = __il3945_up(il);

	mutex_unlock(&il->mutex);

	if (ret)
		goto out_release_irq;

	D_INFO("Start UP work.\n");

	/* Wait for START_ALIVE from ucode. Otherwise callbacks from
	 * mac80211 will not be run successfully. */
	ret = wait_event_timeout(il->wait_command_queue,
				 test_bit(S_READY, &il->status),
				 UCODE_READY_TIMEOUT);
	if (!ret) {
		/* Re-check S_READY: a wakeup may have raced the timeout */
		if (!test_bit(S_READY, &il->status)) {
			IL_ERR("Wait for START_ALIVE timeout after %dms.\n",
			       jiffies_to_msecs(UCODE_READY_TIMEOUT));
			ret = -ETIMEDOUT;
			goto out_release_irq;
		}
	}

	/* ucode is running and will send rfkill notifications,
	 * no need to poll the killswitch state anymore */
	cancel_delayed_work(&il->_3945.rfkill_poll);

	il->is_open = 1;
	D_MAC80211("leave\n");
	return 0;

out_release_irq:
	il->is_open = 0;
	D_MAC80211("leave - failed\n");
	return ret;
}
2850
/*
 * il3945_mac_stop - mac80211 stop() callback
 *
 * Takes the device down, drains the workqueue, and resumes polling the
 * hardware RF-kill switch (the uCode can no longer report it once the
 * card is down).
 */
static void
il3945_mac_stop(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;

	D_MAC80211("enter\n");

	if (!il->is_open) {
		D_MAC80211("leave - skip\n");
		return;
	}

	il->is_open = 0;

	il3945_down(il);

	flush_workqueue(il->workqueue);

	/* start polling the killswitch state again */
	queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll,
			   round_jiffies_relative(2 * HZ));

	D_MAC80211("leave\n");
}
2875
2876static void
2877il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2878{
2879 struct il_priv *il = hw->priv;
2880
2881 D_MAC80211("enter\n");
2882
2883 D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
2884 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
2885
2886 if (il3945_tx_skb(il, skb))
2887 dev_kfree_skb_any(skb);
2888
2889 D_MAC80211("leave\n");
2890}
2891
/*
 * il3945_config_ap - program RXON for AP/IBSS bring-up
 *
 * On first bring-up (not yet associated) performs the device-required
 * sequence: unassoc RXON commit, timing command, preamble/slot setup,
 * then assoc RXON commit.  Always (re)sends the beacon command.
 */
void
il3945_config_ap(struct il_priv *il)
{
	struct il_rxon_context *ctx = &il->ctx;
	struct ieee80211_vif *vif = ctx->vif;
	int rc = 0;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	/* The following should be done only at AP bring up */
	if (!(il_is_associated(il))) {

		/* RXON - unassoc (to set timing command) */
		ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
		il3945_commit_rxon(il, ctx);

		/* RXON Timing */
		rc = il_send_rxon_timing(il, ctx);
		if (rc)
			IL_WARN("C_RXON_TIMING failed - "
				"Attempting to continue.\n");

		ctx->staging.assoc_id = 0;

		/* Mirror the configured preamble into the RXON flags */
		if (vif->bss_conf.use_short_preamble)
			ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;

		/* Short slot applies only on 2.4 GHz */
		if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
			if (vif->bss_conf.use_short_slot)
				ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
			else
				ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
		}
		/* restore RXON assoc */
		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		il3945_commit_rxon(il, ctx);
	}
	il3945_send_beacon_cmd(il);
}
2934
2935static int
2936il3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2937 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
2938 struct ieee80211_key_conf *key)
2939{
2940 struct il_priv *il = hw->priv;
2941 int ret = 0;
2942 u8 sta_id = IL_INVALID_STATION;
2943 u8 static_key;
2944
2945 D_MAC80211("enter\n");
2946
2947 if (il3945_mod_params.sw_crypto) {
2948 D_MAC80211("leave - hwcrypto disabled\n");
2949 return -EOPNOTSUPP;
2950 }
2951
2952 /*
2953 * To support IBSS RSN, don't program group keys in IBSS, the
2954 * hardware will then not attempt to decrypt the frames.
2955 */
2956 if (vif->type == NL80211_IFTYPE_ADHOC &&
2957 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
2958 return -EOPNOTSUPP;
2959
2960 static_key = !il_is_associated(il);
2961
2962 if (!static_key) {
2963 sta_id = il_sta_id_or_broadcast(il, &il->ctx, sta);
2964 if (sta_id == IL_INVALID_STATION)
2965 return -EINVAL;
2966 }
2967
2968 mutex_lock(&il->mutex);
2969 il_scan_cancel_timeout(il, 100);
2970
2971 switch (cmd) {
2972 case SET_KEY:
2973 if (static_key)
2974 ret = il3945_set_static_key(il, key);
2975 else
2976 ret = il3945_set_dynamic_key(il, key, sta_id);
2977 D_MAC80211("enable hwcrypto key\n");
2978 break;
2979 case DISABLE_KEY:
2980 if (static_key)
2981 ret = il3945_remove_static_key(il);
2982 else
2983 ret = il3945_clear_sta_key_info(il, sta_id);
2984 D_MAC80211("disable hwcrypto key\n");
2985 break;
2986 default:
2987 ret = -EINVAL;
2988 }
2989
2990 mutex_unlock(&il->mutex);
2991 D_MAC80211("leave\n");
2992
2993 return ret;
2994}
2995
2996static int
2997il3945_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2998 struct ieee80211_sta *sta)
2999{
3000 struct il_priv *il = hw->priv;
3001 struct il3945_sta_priv *sta_priv = (void *)sta->drv_priv;
3002 int ret;
3003 bool is_ap = vif->type == NL80211_IFTYPE_STATION;
3004 u8 sta_id;
3005
3006 D_INFO("received request to add station %pM\n", sta->addr);
3007 mutex_lock(&il->mutex);
3008 D_INFO("proceeding to add station %pM\n", sta->addr);
3009 sta_priv->common.sta_id = IL_INVALID_STATION;
3010
3011 ret =
3012 il_add_station_common(il, &il->ctx, sta->addr, is_ap, sta, &sta_id);
3013 if (ret) {
3014 IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
3015 /* Should we return success if return code is EEXIST ? */
3016 mutex_unlock(&il->mutex);
3017 return ret;
3018 }
3019
3020 sta_priv->common.sta_id = sta_id;
3021
3022 /* Initialize rate scaling */
3023 D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
3024 il3945_rs_rate_init(il, sta, sta_id);
3025 mutex_unlock(&il->mutex);
3026
3027 return 0;
3028}
3029
3030static void
3031il3945_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
3032 unsigned int *total_flags, u64 multicast)
3033{
3034 struct il_priv *il = hw->priv;
3035 __le32 filter_or = 0, filter_nand = 0;
3036 struct il_rxon_context *ctx = &il->ctx;
3037
3038#define CHK(test, flag) do { \
3039 if (*total_flags & (test)) \
3040 filter_or |= (flag); \
3041 else \
3042 filter_nand |= (flag); \
3043 } while (0)
3044
3045 D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
3046 *total_flags);
3047
3048 CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
3049 CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
3050 CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
3051
3052#undef CHK
3053
3054 mutex_lock(&il->mutex);
3055
3056 ctx->staging.filter_flags &= ~filter_nand;
3057 ctx->staging.filter_flags |= filter_or;
3058
3059 /*
3060 * Not committing directly because hardware can perform a scan,
3061 * but even if hw is ready, committing here breaks for some reason,
3062 * we'll eventually commit the filter flags change anyway.
3063 */
3064
3065 mutex_unlock(&il->mutex);
3066
3067 /*
3068 * Receiving all multicast frames is always enabled by the
3069 * default flags setup in il_connection_init_rx_config()
3070 * since we currently do not support programming multicast
3071 * filters into the device.
3072 */
3073 *total_flags &=
3074 FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
3075 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
3076}
3077
3078/*****************************************************************************
3079 *
3080 * sysfs attributes
3081 *
3082 *****************************************************************************/
3083
3084#ifdef CONFIG_IWLEGACY_DEBUG
3085
3086/*
3087 * The following adds a new attribute to the sysfs representation
3088 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
3089 * used for controlling the debug level.
3090 *
3091 * See the level definitions in iwl for details.
3092 *
3093 * The debug_level being managed using sysfs below is a per device debug
3094 * level that is used instead of the global debug level if it (the per
3095 * device debug level) is set.
3096 */
3097static ssize_t
3098il3945_show_debug_level(struct device *d, struct device_attribute *attr,
3099 char *buf)
3100{
3101 struct il_priv *il = dev_get_drvdata(d);
3102 return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
3103}
3104
3105static ssize_t
3106il3945_store_debug_level(struct device *d, struct device_attribute *attr,
3107 const char *buf, size_t count)
3108{
3109 struct il_priv *il = dev_get_drvdata(d);
3110 unsigned long val;
3111 int ret;
3112
3113 ret = strict_strtoul(buf, 0, &val);
3114 if (ret)
3115 IL_INFO("%s is not in hex or decimal form.\n", buf);
3116 else {
3117 il->debug_level = val;
3118 if (il_alloc_traffic_mem(il))
3119 IL_ERR("Not enough memory to generate traffic log\n");
3120 }
3121 return strnlen(buf, count);
3122}
3123
3124static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il3945_show_debug_level,
3125 il3945_store_debug_level);
3126
3127#endif /* CONFIG_IWLEGACY_DEBUG */
3128
3129static ssize_t
3130il3945_show_temperature(struct device *d, struct device_attribute *attr,
3131 char *buf)
3132{
3133 struct il_priv *il = dev_get_drvdata(d);
3134
3135 if (!il_is_alive(il))
3136 return -EAGAIN;
3137
3138 return sprintf(buf, "%d\n", il3945_hw_get_temperature(il));
3139}
3140
3141static DEVICE_ATTR(temperature, S_IRUGO, il3945_show_temperature, NULL);
3142
3143static ssize_t
3144il3945_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
3145{
3146 struct il_priv *il = dev_get_drvdata(d);
3147 return sprintf(buf, "%d\n", il->tx_power_user_lmt);
3148}
3149
3150static ssize_t
3151il3945_store_tx_power(struct device *d, struct device_attribute *attr,
3152 const char *buf, size_t count)
3153{
3154 struct il_priv *il = dev_get_drvdata(d);
3155 char *p = (char *)buf;
3156 u32 val;
3157
3158 val = simple_strtoul(p, &p, 10);
3159 if (p == buf)
3160 IL_INFO(": %s is not in decimal form.\n", buf);
3161 else
3162 il3945_hw_reg_set_txpower(il, val);
3163
3164 return count;
3165}
3166
3167static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il3945_show_tx_power,
3168 il3945_store_tx_power);
3169
3170static ssize_t
3171il3945_show_flags(struct device *d, struct device_attribute *attr, char *buf)
3172{
3173 struct il_priv *il = dev_get_drvdata(d);
3174 struct il_rxon_context *ctx = &il->ctx;
3175
3176 return sprintf(buf, "0x%04X\n", ctx->active.flags);
3177}
3178
3179static ssize_t
3180il3945_store_flags(struct device *d, struct device_attribute *attr,
3181 const char *buf, size_t count)
3182{
3183 struct il_priv *il = dev_get_drvdata(d);
3184 u32 flags = simple_strtoul(buf, NULL, 0);
3185 struct il_rxon_context *ctx = &il->ctx;
3186
3187 mutex_lock(&il->mutex);
3188 if (le32_to_cpu(ctx->staging.flags) != flags) {
3189 /* Cancel any currently running scans... */
3190 if (il_scan_cancel_timeout(il, 100))
3191 IL_WARN("Could not cancel scan.\n");
3192 else {
3193 D_INFO("Committing rxon.flags = 0x%04X\n", flags);
3194 ctx->staging.flags = cpu_to_le32(flags);
3195 il3945_commit_rxon(il, ctx);
3196 }
3197 }
3198 mutex_unlock(&il->mutex);
3199
3200 return count;
3201}
3202
3203static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, il3945_show_flags,
3204 il3945_store_flags);
3205
3206static ssize_t
3207il3945_show_filter_flags(struct device *d, struct device_attribute *attr,
3208 char *buf)
3209{
3210 struct il_priv *il = dev_get_drvdata(d);
3211 struct il_rxon_context *ctx = &il->ctx;
3212
3213 return sprintf(buf, "0x%04X\n", le32_to_cpu(ctx->active.filter_flags));
3214}
3215
3216static ssize_t
3217il3945_store_filter_flags(struct device *d, struct device_attribute *attr,
3218 const char *buf, size_t count)
3219{
3220 struct il_priv *il = dev_get_drvdata(d);
3221 struct il_rxon_context *ctx = &il->ctx;
3222 u32 filter_flags = simple_strtoul(buf, NULL, 0);
3223
3224 mutex_lock(&il->mutex);
3225 if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
3226 /* Cancel any currently running scans... */
3227 if (il_scan_cancel_timeout(il, 100))
3228 IL_WARN("Could not cancel scan.\n");
3229 else {
3230 D_INFO("Committing rxon.filter_flags = " "0x%04X\n",
3231 filter_flags);
3232 ctx->staging.filter_flags = cpu_to_le32(filter_flags);
3233 il3945_commit_rxon(il, ctx);
3234 }
3235 }
3236 mutex_unlock(&il->mutex);
3237
3238 return count;
3239}
3240
3241static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, il3945_show_filter_flags,
3242 il3945_store_filter_flags);
3243
/*
 * sysfs read: hex-dump the latest spectrum measurement report.
 *
 * Copies the report out under il->lock (clearing the ready flag), then
 * formats it 16 bytes per line until the report or the sysfs page is
 * exhausted.  Returns 0 if no report is pending.
 */
static ssize_t
il3945_show_measurement(struct device *d, struct device_attribute *attr,
			char *buf)
{
	struct il_priv *il = dev_get_drvdata(d);
	struct il_spectrum_notification measure_report;
	u32 size = sizeof(measure_report), len = 0, ofs = 0;
	u8 *data = (u8 *) &measure_report;
	unsigned long flags;

	spin_lock_irqsave(&il->lock, flags);
	if (!(il->measurement_status & MEASUREMENT_READY)) {
		spin_unlock_irqrestore(&il->lock, flags);
		return 0;
	}
	/* snapshot the report and consume it */
	memcpy(&measure_report, &il->measure_report, size);
	il->measurement_status = 0;
	spin_unlock_irqrestore(&il->lock, flags);

	/* one 16-byte hex line per iteration, while page space remains */
	while (size && PAGE_SIZE - len) {
		hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
				   PAGE_SIZE - len, 1);
		len = strlen(buf);
		if (PAGE_SIZE - len)
			buf[len++] = '\n';

		ofs += 16;
		size -= min(size, 16U);
	}

	return len;
}
3276
3277static ssize_t
3278il3945_store_measurement(struct device *d, struct device_attribute *attr,
3279 const char *buf, size_t count)
3280{
3281 struct il_priv *il = dev_get_drvdata(d);
3282 struct il_rxon_context *ctx = &il->ctx;
3283 struct ieee80211_measurement_params params = {
3284 .channel = le16_to_cpu(ctx->active.channel),
3285 .start_time = cpu_to_le64(il->_3945.last_tsf),
3286 .duration = cpu_to_le16(1),
3287 };
3288 u8 type = IL_MEASURE_BASIC;
3289 u8 buffer[32];
3290 u8 channel;
3291
3292 if (count) {
3293 char *p = buffer;
3294 strncpy(buffer, buf, min(sizeof(buffer), count));
3295 channel = simple_strtoul(p, NULL, 0);
3296 if (channel)
3297 params.channel = channel;
3298
3299 p = buffer;
3300 while (*p && *p != ' ')
3301 p++;
3302 if (*p)
3303 type = simple_strtoul(p + 1, NULL, 0);
3304 }
3305
3306 D_INFO("Invoking measurement of type %d on " "channel %d (for '%s')\n",
3307 type, params.channel, buf);
3308 il3945_get_measurement(il, &params, type);
3309
3310 return count;
3311}
3312
3313static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, il3945_show_measurement,
3314 il3945_store_measurement);
3315
3316static ssize_t
3317il3945_store_retry_rate(struct device *d, struct device_attribute *attr,
3318 const char *buf, size_t count)
3319{
3320 struct il_priv *il = dev_get_drvdata(d);
3321
3322 il->retry_rate = simple_strtoul(buf, NULL, 0);
3323 if (il->retry_rate <= 0)
3324 il->retry_rate = 1;
3325
3326 return count;
3327}
3328
3329static ssize_t
3330il3945_show_retry_rate(struct device *d, struct device_attribute *attr,
3331 char *buf)
3332{
3333 struct il_priv *il = dev_get_drvdata(d);
3334 return sprintf(buf, "%d", il->retry_rate);
3335}
3336
3337static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, il3945_show_retry_rate,
3338 il3945_store_retry_rate);
3339
3340static ssize_t
3341il3945_show_channels(struct device *d, struct device_attribute *attr, char *buf)
3342{
3343 /* all this shit doesn't belong into sysfs anyway */
3344 return 0;
3345}
3346
3347static DEVICE_ATTR(channels, S_IRUSR, il3945_show_channels, NULL);
3348
3349static ssize_t
3350il3945_show_antenna(struct device *d, struct device_attribute *attr, char *buf)
3351{
3352 struct il_priv *il = dev_get_drvdata(d);
3353
3354 if (!il_is_alive(il))
3355 return -EAGAIN;
3356
3357 return sprintf(buf, "%d\n", il3945_mod_params.antenna);
3358}
3359
3360static ssize_t
3361il3945_store_antenna(struct device *d, struct device_attribute *attr,
3362 const char *buf, size_t count)
3363{
3364 struct il_priv *il __maybe_unused = dev_get_drvdata(d);
3365 int ant;
3366
3367 if (count == 0)
3368 return 0;
3369
3370 if (sscanf(buf, "%1i", &ant) != 1) {
3371 D_INFO("not in hex or decimal form.\n");
3372 return count;
3373 }
3374
3375 if (ant >= 0 && ant <= 2) {
3376 D_INFO("Setting antenna select to %d.\n", ant);
3377 il3945_mod_params.antenna = (enum il3945_antenna)ant;
3378 } else
3379 D_INFO("Bad antenna select value %d.\n", ant);
3380
3381 return count;
3382}
3383
3384static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, il3945_show_antenna,
3385 il3945_store_antenna);
3386
3387static ssize_t
3388il3945_show_status(struct device *d, struct device_attribute *attr, char *buf)
3389{
3390 struct il_priv *il = dev_get_drvdata(d);
3391 if (!il_is_alive(il))
3392 return -EAGAIN;
3393 return sprintf(buf, "0x%08x\n", (int)il->status);
3394}
3395
3396static DEVICE_ATTR(status, S_IRUGO, il3945_show_status, NULL);
3397
3398static ssize_t
3399il3945_dump_error_log(struct device *d, struct device_attribute *attr,
3400 const char *buf, size_t count)
3401{
3402 struct il_priv *il = dev_get_drvdata(d);
3403 char *p = (char *)buf;
3404
3405 if (p[0] == '1')
3406 il3945_dump_nic_error_log(il);
3407
3408 return strnlen(buf, count);
3409}
3410
3411static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, il3945_dump_error_log);
3412
3413/*****************************************************************************
3414 *
3415 * driver setup and tear down
3416 *
3417 *****************************************************************************/
3418
/*
 * il3945_setup_deferred_work - wire up workqueue items, timers and tasklet
 *
 * Creates the driver's single-threaded workqueue and initializes all
 * deferred-work handlers for this device instance.  Note the return
 * value of create_singlethread_workqueue() is not checked here; a NULL
 * workqueue would only fail later at queue time (pre-existing behavior).
 */
static void
il3945_setup_deferred_work(struct il_priv *il)
{
	il->workqueue = create_singlethread_workqueue(DRV_NAME);

	init_waitqueue_head(&il->wait_command_queue);

	INIT_WORK(&il->restart, il3945_bg_restart);
	INIT_WORK(&il->rx_replenish, il3945_bg_rx_replenish);
	INIT_DELAYED_WORK(&il->init_alive_start, il3945_bg_init_alive_start);
	INIT_DELAYED_WORK(&il->alive_start, il3945_bg_alive_start);
	INIT_DELAYED_WORK(&il->_3945.rfkill_poll, il3945_rfkill_poll);

	/* scan work shared with the common iwlegacy code */
	il_setup_scan_deferred_work(il);

	/* 3945-specific deferred work */
	il3945_hw_setup_deferred_work(il);

	/* watchdog timer is only initialized here; armed elsewhere */
	init_timer(&il->watchdog);
	il->watchdog.data = (unsigned long)il;
	il->watchdog.function = il_bg_watchdog;

	/* IRQ bottom half: cast adapts the handler's struct il_priv *
	 * parameter to the tasklet API's unsigned long */
	tasklet_init(&il->irq_tasklet,
		     (void (*)(unsigned long))il3945_irq_tasklet,
		     (unsigned long)il);
}
3444
/*
 * il3945_cancel_deferred_work - cancel pending deferred work items
 *
 * NOTE(review): init_alive_start is canceled synchronously while
 * alive_start uses the non-sync cancel_delayed_work() — presumably
 * intentional (to avoid waiting on work that may block on locks held
 * by the caller); confirm before changing.
 */
static void
il3945_cancel_deferred_work(struct il_priv *il)
{
	il3945_hw_cancel_deferred_work(il);

	cancel_delayed_work_sync(&il->init_alive_start);
	cancel_delayed_work(&il->alive_start);

	il_cancel_scan_deferred_work(il);
}
3455
/* All sysfs attributes exported by this driver.  Registered in
 * il3945_pci_probe() as a single unnamed group, i.e. the files appear
 * directly in the device directory. */
static struct attribute *il3945_sysfs_entries[] = {
	&dev_attr_antenna.attr,
	&dev_attr_channels.attr,
	&dev_attr_dump_errors.attr,
	&dev_attr_flags.attr,
	&dev_attr_filter_flags.attr,
	&dev_attr_measurement.attr,
	&dev_attr_retry_rate.attr,
	&dev_attr_status.attr,
	&dev_attr_temperature.attr,
	&dev_attr_tx_power.attr,
#ifdef CONFIG_IWLEGACY_DEBUG
	&dev_attr_debug_level.attr,
#endif
	NULL	/* sentinel */
};

static struct attribute_group il3945_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = il3945_sysfs_entries,
};
3477
/*
 * mac80211 callbacks for the 3945.  Deliberately non-const: probe
 * clears .hw_scan at runtime when the disable_hw_scan module parameter
 * is set (see il3945_pci_probe()).
 */
struct ieee80211_ops il3945_hw_ops = {
	.tx = il3945_mac_tx,
	.start = il3945_mac_start,
	.stop = il3945_mac_stop,
	.add_interface = il_mac_add_interface,
	.remove_interface = il_mac_remove_interface,
	.change_interface = il_mac_change_interface,
	.config = il_mac_config,
	.configure_filter = il3945_configure_filter,
	.set_key = il3945_mac_set_key,
	.conf_tx = il_mac_conf_tx,
	.reset_tsf = il_mac_reset_tsf,
	.bss_info_changed = il_mac_bss_info_changed,
	.hw_scan = il_mac_hw_scan,
	.sta_add = il3945_mac_sta_add,
	.sta_remove = il_mac_sta_remove,
	.tx_last_beacon = il_mac_tx_last_beacon,
};
3496
/*
 * il3945_init_drv - software-only driver state initialization
 *
 * Initializes locks, lists and defaults, validates the EEPROM version,
 * then builds the channel map, per-channel tx power table and the
 * geography (band/rate) structures.  Uses goto-based unwind: only the
 * channel map needs freeing on late failure.
 *
 * Returns 0 on success or a negative errno.
 */
static int
il3945_init_drv(struct il_priv *il)
{
	int ret;
	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;

	il->retry_rate = 1;
	il->beacon_skb = NULL;

	spin_lock_init(&il->sta_lock);
	spin_lock_init(&il->hcmd_lock);

	INIT_LIST_HEAD(&il->free_frames);

	mutex_init(&il->mutex);

	il->ieee_channels = NULL;
	il->ieee_rates = NULL;
	il->band = IEEE80211_BAND_2GHZ;

	il->iw_mode = NL80211_IFTYPE_STATION;
	il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;

	/* initialize force reset */
	il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;

	/* EEPROM was already read by probe; only the version is checked here */
	if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
		IL_WARN("Unsupported EEPROM version: 0x%04X\n",
			eeprom->version);
		ret = -EINVAL;
		goto err;
	}
	ret = il_init_channel_map(il);
	if (ret) {
		IL_ERR("initializing regulatory failed: %d\n", ret);
		goto err;
	}

	/* Set up txpower settings in driver for all channels */
	if (il3945_txpower_set_from_eeprom(il)) {
		ret = -EIO;
		goto err_free_channel_map;
	}

	ret = il_init_geos(il);
	if (ret) {
		IL_ERR("initializing geos failed: %d\n", ret);
		goto err_free_channel_map;
	}
	il3945_init_hw_rates(il, il->ieee_rates);

	return 0;

err_free_channel_map:
	il_free_channel_map(il);
err:
	return ret;
}
3555
/* Largest probe request frame we build, in bytes */
#define IL3945_MAX_PROBE_REQUEST 200

/*
 * il3945_setup_mac - describe device capabilities and register with mac80211
 *
 * Fills in the ieee80211_hw/wiphy capability fields (bands, scan
 * limits, queue count, flags) and registers the hw.  Sets
 * il->mac80211_registered on success so teardown knows to unregister.
 *
 * Returns 0 on success or the ieee80211_register_hw() error.
 */
static int
il3945_setup_mac(struct il_priv *il)
{
	int ret;
	struct ieee80211_hw *hw = il->hw;

	hw->rate_control_algorithm = "iwl-3945-rs";
	hw->sta_data_size = sizeof(struct il3945_sta_priv);
	hw->vif_data_size = sizeof(struct il_vif_priv);

	/* Tell mac80211 our characteristics */
	hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SPECTRUM_MGMT;

	hw->wiphy->interface_modes = il->ctx.interface_modes;

	hw->wiphy->flags |=
	    WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS |
	    WIPHY_FLAG_IBSS_RSN;

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
	/* we create the 802.11 header and a zero-length SSID element */
	hw->wiphy->max_scan_ie_len = IL3945_MAX_PROBE_REQUEST - 24 - 2;

	/* Default value; 4 EDCA QOS priorities */
	hw->queues = 4;

	/* Advertise only bands that actually have channels */
	if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
		il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
		    &il->bands[IEEE80211_BAND_2GHZ];

	if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
		il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
		    &il->bands[IEEE80211_BAND_5GHZ];

	il_leds_init(il);

	ret = ieee80211_register_hw(il->hw);
	if (ret) {
		IL_ERR("Failed to register hw (error %d)\n", ret);
		return ret;
	}
	il->mac80211_registered = 1;

	return 0;
}
3603
/*
 * il3945_pci_probe - bring up one 3945 NIC
 *
 * Follows the numbered stage comments below: allocate the mac80211 hw
 * and driver private data, populate the single RXON context, map
 * PCI/MMIO resources, read the EEPROM (MAC address), set hw constants,
 * init software state, hook the interrupt, create sysfs entries and
 * finally register with mac80211.  Errors unwind through the labels at
 * the bottom in reverse order of setup.
 */
static int
il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = 0;
	struct il_priv *il;
	struct ieee80211_hw *hw;
	struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
	struct il3945_eeprom *eeprom;
	unsigned long flags;

	/***********************
	 * 1. Allocating HW data
	 * ********************/

	/* mac80211 allocates memory for this device instance, including
	 * space for this driver's private structure */
	hw = il_alloc_all(cfg);
	if (hw == NULL) {
		pr_err("Can not allocate network device\n");
		err = -ENOMEM;
		goto out;
	}
	il = hw->priv;
	SET_IEEE80211_DEV(hw, &pdev->dev);

	il->cmd_queue = IL39_CMD_QUEUE_NUM;

	/* 3945 has a single RXON context */
	il->ctx.ctxid = 0;

	il->ctx.rxon_cmd = C_RXON;
	il->ctx.rxon_timing_cmd = C_RXON_TIMING;
	il->ctx.rxon_assoc_cmd = C_RXON_ASSOC;
	il->ctx.qos_cmd = C_QOS_PARAM;
	il->ctx.ap_sta_id = IL_AP_ID;
	il->ctx.wep_key_cmd = C_WEPKEY;
	il->ctx.interface_modes =
	    BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
	il->ctx.ibss_devtype = RXON_DEV_TYPE_IBSS;
	il->ctx.station_devtype = RXON_DEV_TYPE_ESS;
	il->ctx.unused_devtype = RXON_DEV_TYPE_ESS;

	/*
	 * Disabling hardware scan means that mac80211 will perform scans
	 * "the hard way", rather than using device's scan.
	 */
	if (il3945_mod_params.disable_hw_scan) {
		D_INFO("Disabling hw_scan\n");
		il3945_hw_ops.hw_scan = NULL;
	}

	D_INFO("*** LOAD DRIVER ***\n");
	il->cfg = cfg;
	il->pci_dev = pdev;
	il->inta_mask = CSR_INI_SET_MASK;

	/* best effort: probe continues without the traffic log */
	if (il_alloc_traffic_mem(il))
		IL_ERR("Not enough memory to generate traffic log\n");

	/***************************
	 * 2. Initializing PCI bus
	 * *************************/
	pci_disable_link_state(pdev,
			       PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_ieee80211_free_hw;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		IL_WARN("No suitable DMA available.\n");
		goto out_pci_disable_device;
	}

	pci_set_drvdata(pdev, il);
	err = pci_request_regions(pdev, DRV_NAME);
	if (err)
		goto out_pci_disable_device;

	/***********************
	 * 3. Read REV Register
	 * ********************/
	il->hw_base = pci_iomap(pdev, 0, 0);
	if (!il->hw_base) {
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	D_INFO("pci_resource_len = 0x%08llx\n",
	       (unsigned long long)pci_resource_len(pdev, 0));
	D_INFO("pci_resource_base = %p\n", il->hw_base);

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, 0x41, 0x00);

	/* these spin locks will be used in apm_ops.init and EEPROM access
	 * we should init now
	 */
	spin_lock_init(&il->reg_lock);
	spin_lock_init(&il->lock);

	/*
	 * stop and reset the on-board processor just in case it is in a
	 * strange state ... like being left stranded by a primary kernel
	 * and this is now the kdump kernel trying to start up
	 */
	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/***********************
	 * 4. Read EEPROM
	 * ********************/

	/* Read the EEPROM */
	err = il_eeprom_init(il);
	if (err) {
		IL_ERR("Unable to init EEPROM\n");
		goto out_iounmap;
	}
	/* MAC Address location in EEPROM same for 3945/4965 */
	eeprom = (struct il3945_eeprom *)il->eeprom;
	D_INFO("MAC address: %pM\n", eeprom->mac_address);
	SET_IEEE80211_PERM_ADDR(il->hw, eeprom->mac_address);

	/***********************
	 * 5. Setup HW Constants
	 * ********************/
	/* Device-specific setup */
	if (il3945_hw_set_hw_params(il)) {
		IL_ERR("failed to set hw settings\n");
		goto out_eeprom_free;
	}

	/***********************
	 * 6. Setup il
	 * ********************/

	err = il3945_init_drv(il);
	if (err) {
		IL_ERR("initializing driver failed\n");
		goto out_unset_hw_params;
	}

	IL_INFO("Detected Intel Wireless WiFi Link %s\n", il->cfg->name);

	/***********************
	 * 7. Setup Services
	 * ********************/

	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);

	pci_enable_msi(il->pci_dev);

	err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
	if (err) {
		IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
		goto out_disable_msi;
	}

	err = sysfs_create_group(&pdev->dev.kobj, &il3945_attribute_group);
	if (err) {
		IL_ERR("failed to create sysfs device attributes\n");
		goto out_release_irq;
	}

	il_set_rxon_channel(il, &il->bands[IEEE80211_BAND_2GHZ].channels[5],
			    &il->ctx);
	il3945_setup_deferred_work(il);
	il3945_setup_handlers(il);
	il_power_initialize(il);

	/*********************************
	 * 8. Setup and Register mac80211
	 * *******************************/

	il_enable_interrupts(il);

	err = il3945_setup_mac(il);
	if (err)
		goto out_remove_sysfs;

	/* debugfs is optional; failure is logged and ignored */
	err = il_dbgfs_register(il, DRV_NAME);
	if (err)
		IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
		       err);

	/* Start monitoring the killswitch */
	queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, 2 * HZ);

	return 0;

out_remove_sysfs:
	/* workqueue was created in il3945_setup_deferred_work() above */
	destroy_workqueue(il->workqueue);
	il->workqueue = NULL;
	sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group);
out_release_irq:
	free_irq(il->pci_dev->irq, il);
out_disable_msi:
	pci_disable_msi(il->pci_dev);
	il_free_geos(il);
	il_free_channel_map(il);
out_unset_hw_params:
	il3945_unset_hw_params(il);
out_eeprom_free:
	il_eeprom_free(il);
out_iounmap:
	pci_iounmap(pdev, il->hw_base);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
out_ieee80211_free_hw:
	il_free_traffic_mem(il);
	ieee80211_free_hw(il->hw);
out:
	return err;
}
3830
/*
 * il3945_pci_remove - tear down one 3945 NIC
 *
 * Reverse of il3945_pci_probe().  Ordering is deliberate: mark exit
 * pending, unregister from mac80211 (which stops the interface), reset
 * the device, silence interrupts, then release queues, workqueue, IRQ,
 * PCI resources and finally the hw structure itself.
 */
static void __devexit
il3945_pci_remove(struct pci_dev *pdev)
{
	struct il_priv *il = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!il)
		return;

	D_INFO("*** UNLOAD DRIVER ***\n");

	il_dbgfs_unregister(il);

	/* tell in-flight work/irq paths we are going away */
	set_bit(S_EXIT_PENDING, &il->status);

	il_leds_exit(il);

	if (il->mac80211_registered) {
		ieee80211_unregister_hw(il->hw);
		il->mac80211_registered = 0;
	} else {
		il3945_down(il);
	}

	/*
	 * Make sure device is reset to low power before unloading driver.
	 * This may be redundant with il_down(), but there are paths to
	 * run il_down() without calling apm_ops.stop(), and there are
	 * paths to avoid running il_down() at all before leaving driver.
	 * This (inexpensive) call *makes sure* device is reset.
	 */
	il_apm_stop(il);

	/* make sure we flush any pending irq or
	 * tasklet for the driver
	 */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);

	il3945_synchronize_irq(il);

	sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group);

	/* stop the rfkill poll started at the end of probe */
	cancel_delayed_work_sync(&il->_3945.rfkill_poll);

	il3945_dealloc_ucode_pci(il);

	if (il->rxq.bd)
		il3945_rx_queue_free(il, &il->rxq);
	il3945_hw_txq_ctx_free(il);

	il3945_unset_hw_params(il);

	/*netif_stop_queue(dev); */
	flush_workqueue(il->workqueue);

	/* ieee80211_unregister_hw calls il3945_mac_stop, which flushes
	 * il->workqueue... so we can't take down the workqueue
	 * until now... */
	destroy_workqueue(il->workqueue);
	il->workqueue = NULL;
	il_free_traffic_mem(il);

	free_irq(pdev->irq, il);
	pci_disable_msi(pdev);

	pci_iounmap(pdev, il->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	il_free_channel_map(il);
	il_free_geos(il);
	kfree(il->scan_cmd);
	if (il->beacon_skb)
		dev_kfree_skb(il->beacon_skb);

	ieee80211_free_hw(il->hw);
}
3911
3912/*****************************************************************************
3913 *
3914 * driver and module entry point
3915 *
3916 *****************************************************************************/
3917
/* PCI glue: probe/remove plus the shared legacy PM ops. */
static struct pci_driver il3945_driver = {
	.name = DRV_NAME,
	.id_table = il3945_hw_card_ids,
	.probe = il3945_pci_probe,
	.remove = __devexit_p(il3945_pci_remove),
	.driver.pm = IL_LEGACY_PM_OPS,
};
3925
3926static int __init
3927il3945_init(void)
3928{
3929
3930 int ret;
3931 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
3932 pr_info(DRV_COPYRIGHT "\n");
3933
3934 ret = il3945_rate_control_register();
3935 if (ret) {
3936 pr_err("Unable to register rate control algorithm: %d\n", ret);
3937 return ret;
3938 }
3939
3940 ret = pci_register_driver(&il3945_driver);
3941 if (ret) {
3942 pr_err("Unable to initialize PCI module\n");
3943 goto error_register;
3944 }
3945
3946 return ret;
3947
3948error_register:
3949 il3945_rate_control_unregister();
3950 return ret;
3951}
3952
/* Module exit: unregister in reverse order of il3945_init(). */
static void __exit
il3945_exit(void)
{
	pci_unregister_driver(&il3945_driver);
	il3945_rate_control_unregister();
}
3959
MODULE_FIRMWARE(IL3945_MODULE_FIRMWARE(IL3945_UCODE_API_MAX));

/* Module parameters.  All are world-readable in sysfs; only the debug
 * mask is additionally writable by the owner at runtime. */
module_param_named(antenna, il3945_mod_params.antenna, int, S_IRUGO);
MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
module_param_named(swcrypto, il3945_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using software crypto (default 1 [software])");
module_param_named(disable_hw_scan, il3945_mod_params.disable_hw_scan, int,
		   S_IRUGO);
MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)");
#ifdef CONFIG_IWLEGACY_DEBUG
module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "debug output mask");
#endif
module_param_named(fw_restart, il3945_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");

/* Order of these two macros is irrelevant to the module loader. */
module_exit(il3945_exit);
module_init(il3945_init);
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
new file mode 100644
index 000000000000..30ad404f8df7
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -0,0 +1,995 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/init.h>
29#include <linux/skbuff.h>
30#include <linux/slab.h>
31#include <net/mac80211.h>
32
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/delay.h>
36
37#include <linux/workqueue.h>
38
39#include "commands.h"
40#include "3945.h"
41
42#define RS_NAME "iwl-3945-rs"
43
/* Expected-throughput tables, indexed by 3945 rate idx (CCK rates
 * first, then OFDM); used by il3945_collect_tx_data() to compute a
 * window's average_tpt.  Zero entries mark rates invalid for that
 * mode.  NOTE(review): units are an internal relative metric —
 * presumably scaled kbps; confirm against the 4965 rs tables. */
static s32 il3945_expected_tpt_g[RATE_COUNT_3945] = {
	7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
};

/* 802.11g with protection (RTS/CTS overhead lowers OFDM throughput) */
static s32 il3945_expected_tpt_g_prot[RATE_COUNT_3945] = {
	7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125
};

/* 802.11a: the CCK rates (first four entries) are invalid */
static s32 il3945_expected_tpt_a[RATE_COUNT_3945] = {
	0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186
};

/* 802.11b: only the four CCK rates are valid */
static s32 il3945_expected_tpt_b[RATE_COUNT_3945] = {
	7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0
};
59
/* Maps a minimum RSSI threshold to the rate idx to start at. */
struct il3945_tpt_entry {
	s8 min_rssi;
	u8 idx;
};

/* Tables run from the highest rate (needing the strongest signal) down
 * to the lowest; il3945_get_rate_idx_by_rssi() returns the first entry
 * whose min_rssi the measured RSSI meets. */
static struct il3945_tpt_entry il3945_tpt_table_a[] = {
	{-60, RATE_54M_IDX},
	{-64, RATE_48M_IDX},
	{-72, RATE_36M_IDX},
	{-80, RATE_24M_IDX},
	{-84, RATE_18M_IDX},
	{-85, RATE_12M_IDX},
	{-87, RATE_9M_IDX},
	{-89, RATE_6M_IDX}
};

static struct il3945_tpt_entry il3945_tpt_table_g[] = {
	{-60, RATE_54M_IDX},
	{-64, RATE_48M_IDX},
	{-68, RATE_36M_IDX},
	{-80, RATE_24M_IDX},
	{-84, RATE_18M_IDX},
	{-85, RATE_12M_IDX},
	{-86, RATE_11M_IDX},
	{-88, RATE_5M_IDX},
	{-90, RATE_2M_IDX},
	{-92, RATE_1M_IDX}
};
88
/* Tuning constants for the rate-scale state machine.  The *_TH values
 * are compared against the 128-scaled success_ratio / average_tpt
 * values computed in il3945_collect_tx_data(). */
#define RATE_MAX_WINDOW 62	/* tx attempts tracked per rate window */
#define RATE_FLUSH (3*HZ)	/* default window flush period */
#define RATE_WIN_FLUSH (HZ/2)	/* age after which a window is stale */
#define IL39_RATE_HIGH_TH 11520
#define IL_SUCCESS_UP_TH 8960
#define IL_SUCCESS_DOWN_TH 10880
#define RATE_MIN_FAILURE_TH 6	/* min failures before tpt is trusted */
#define RATE_MIN_SUCCESS_TH 8	/* min successes before tpt is trusted */
#define RATE_DECREASE_TH 1920
#define RATE_RETRY_TH 15	/* cap on per-frame retry accounting */
99
100static u8
101il3945_get_rate_idx_by_rssi(s32 rssi, enum ieee80211_band band)
102{
103 u32 idx = 0;
104 u32 table_size = 0;
105 struct il3945_tpt_entry *tpt_table = NULL;
106
107 if (rssi < IL_MIN_RSSI_VAL || rssi > IL_MAX_RSSI_VAL)
108 rssi = IL_MIN_RSSI_VAL;
109
110 switch (band) {
111 case IEEE80211_BAND_2GHZ:
112 tpt_table = il3945_tpt_table_g;
113 table_size = ARRAY_SIZE(il3945_tpt_table_g);
114 break;
115
116 case IEEE80211_BAND_5GHZ:
117 tpt_table = il3945_tpt_table_a;
118 table_size = ARRAY_SIZE(il3945_tpt_table_a);
119 break;
120
121 default:
122 BUG();
123 break;
124 }
125
126 while (idx < table_size && rssi < tpt_table[idx].min_rssi)
127 idx++;
128
129 idx = min(idx, (table_size - 1));
130
131 return tpt_table[idx].idx;
132}
133
134static void
135il3945_clear_win(struct il3945_rate_scale_data *win)
136{
137 win->data = 0;
138 win->success_counter = 0;
139 win->success_ratio = -1;
140 win->counter = 0;
141 win->average_tpt = IL_INVALID_VALUE;
142 win->stamp = 0;
143}
144
/**
 * il3945_rate_scale_flush_wins - flush out the rate scale wins
 *
 * Returns the number of wins that have gathered data but were
 * not flushed. If there were any that were not flushed, then
 * reschedule the rate flushing routine.
 *
 * NOTE(review): win[i].counter is read outside rs_sta->lock before the
 * locked check/clear below; a stale non-zero read only costs an extra
 * lock round-trip, so the unlocked pre-check appears intentional.
 */
static int
il3945_rate_scale_flush_wins(struct il3945_rs_sta *rs_sta)
{
	int unflushed = 0;
	int i;
	unsigned long flags;
	struct il_priv *il __maybe_unused = rs_sta->il;

	/*
	 * For each rate, if we have collected data on that rate
	 * and it has been more than RATE_WIN_FLUSH
	 * since we flushed, clear out the gathered stats
	 */
	for (i = 0; i < RATE_COUNT_3945; i++) {
		if (!rs_sta->win[i].counter)
			continue;

		spin_lock_irqsave(&rs_sta->lock, flags);
		if (time_after(jiffies, rs_sta->win[i].stamp + RATE_WIN_FLUSH)) {
			D_RATE("flushing %d samples of rate " "idx %d\n",
			       rs_sta->win[i].counter, i);
			il3945_clear_win(&rs_sta->win[i]);
		} else
			unflushed++;
		spin_unlock_irqrestore(&rs_sta->lock, flags);
	}

	return unflushed;
}
181
#define RATE_FLUSH_MAX 5000	/* msec */
#define RATE_FLUSH_MIN 50	/* msec */
#define IL_AVERAGE_PACKETS 1500

/*
 * il3945_bg_rate_scale_flush - timer callback aging out rate windows
 *
 * Flushes stale per-rate windows and, if any window still holds data,
 * re-arms the timer with a period derived from the recent tx packet
 * rate, clamped to [RATE_FLUSH_MIN, RATE_FLUSH_MAX] msec.
 */
static void
il3945_bg_rate_scale_flush(unsigned long data)
{
	struct il3945_rs_sta *rs_sta = (void *)data;
	struct il_priv *il __maybe_unused = rs_sta->il;
	int unflushed = 0;
	unsigned long flags;
	u32 packet_count, duration, pps;

	D_RATE("enter\n");

	unflushed = il3945_rate_scale_flush_wins(rs_sta);

	spin_lock_irqsave(&rs_sta->lock, flags);

	/* Number of packets Tx'd since last time this timer ran
	 * (the counters involved track transmitted frames) */
	packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1;

	rs_sta->last_tx_packets = rs_sta->tx_packets + 1;

	if (unflushed) {
		duration =
		    jiffies_to_msecs(jiffies - rs_sta->last_partial_flush);

		D_RATE("Tx'd %d packets in %dms\n", packet_count, duration);

		/* Determine packets per second */
		if (duration)
			pps = (packet_count * 1000) / duration;
		else
			pps = 0;

		/* scale the flush period so roughly IL_AVERAGE_PACKETS
		 * are sent between flushes, within the min/max bounds */
		if (pps) {
			duration = (IL_AVERAGE_PACKETS * 1000) / pps;
			if (duration < RATE_FLUSH_MIN)
				duration = RATE_FLUSH_MIN;
			else if (duration > RATE_FLUSH_MAX)
				duration = RATE_FLUSH_MAX;
		} else
			duration = RATE_FLUSH_MAX;

		rs_sta->flush_time = msecs_to_jiffies(duration);

		D_RATE("new flush period: %d msec ave %d\n", duration,
		       packet_count);

		mod_timer(&rs_sta->rate_scale_flush,
			  jiffies + rs_sta->flush_time);

		rs_sta->last_partial_flush = jiffies;
	} else {
		rs_sta->flush_time = RATE_FLUSH;
		rs_sta->flush_pending = 0;
	}
	/* If there weren't any unflushed entries, we don't schedule the timer
	 * to run again */

	rs_sta->last_flush = jiffies;

	spin_unlock_irqrestore(&rs_sta->lock, flags);

	D_RATE("leave\n");
}
249
/**
 * il3945_collect_tx_data - Update the success/failure sliding win
 *
 * We keep a sliding win of the last 64 packets transmitted
 * at this rate. win->data contains the bitmask of successful
 * packets.
 *
 * @win:     per-rate history window to update (caller picks the rate)
 * @success: number of ACKed attempts among @retries
 * @retries: total attempts recorded in this call; must be >= 1
 * @idx:     rate idx, used to look up the expected throughput
 */
static void
il3945_collect_tx_data(struct il3945_rs_sta *rs_sta,
		       struct il3945_rate_scale_data *win, int success,
		       int retries, int idx)
{
	unsigned long flags;
	s32 fail_count;
	struct il_priv *il __maybe_unused = rs_sta->il;

	if (!retries) {
		D_RATE("leave: retries == 0 -- should be at least 1\n");
		return;
	}

	spin_lock_irqsave(&rs_sta->lock, flags);

	/*
	 * Keep track of only the latest 62 tx frame attempts in this rate's
	 * history win; anything older isn't really relevant any more.
	 * If we have filled up the sliding win, drop the oldest attempt;
	 * if the oldest attempt (highest bit in bitmap) shows "success",
	 * subtract "1" from the success counter (this is the main reason
	 * we keep these bitmaps!).
	 * */
	while (retries > 0) {
		if (win->counter >= RATE_MAX_WINDOW) {

			/* remove earliest */
			win->counter = RATE_MAX_WINDOW - 1;

			if (win->data & (1ULL << (RATE_MAX_WINDOW - 1))) {
				win->data &= ~(1ULL << (RATE_MAX_WINDOW - 1));
				win->success_counter--;
			}
		}

		/* Increment frames-attempted counter */
		win->counter++;

		/* Shift bitmap by one frame (throw away oldest history),
		 * OR in "1", and increment "success" if this
		 * frame was successful. */
		win->data <<= 1;
		if (success > 0) {
			win->success_counter++;
			win->data |= 0x1;
			success--;
		}

		retries--;
	}

	/* Calculate current success ratio, avoid divide-by-0!
	 * (ratio is scaled by 128 to keep precision in integer math) */
	if (win->counter > 0)
		win->success_ratio =
		    128 * (100 * win->success_counter) / win->counter;
	else
		win->success_ratio = IL_INVALID_VALUE;

	fail_count = win->counter - win->success_counter;

	/* Calculate average throughput, if we have enough history. */
	if (fail_count >= RATE_MIN_FAILURE_TH ||
	    win->success_counter >= RATE_MIN_SUCCESS_TH)
		win->average_tpt =
		    ((win->success_ratio * rs_sta->expected_tpt[idx] +
		      64) / 128);
	else
		win->average_tpt = IL_INVALID_VALUE;

	/* Tag this win as having been updated */
	win->stamp = jiffies;

	spin_unlock_irqrestore(&rs_sta->lock, flags);

}
333
/*
 * Called after adding a new station to initialize rate scaling
 *
 * Picks the highest supported legacy rate as the starting tx rate,
 * defaults the expected-throughput table to 802.11b, and records the
 * station's supported rates.  For the broadcast station only the
 * IL_STA_UCODE_INPROGRESS flag is cleared.
 */
void
il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
{
	struct ieee80211_hw *hw = il->hw;
	struct ieee80211_conf *conf = &il->hw->conf;
	struct il3945_sta_priv *psta;
	struct il3945_rs_sta *rs_sta;
	struct ieee80211_supported_band *sband;
	int i;

	D_INFO("enter\n");
	if (sta_id == il->ctx.bcast_sta_id)
		goto out;

	psta = (struct il3945_sta_priv *)sta->drv_priv;
	rs_sta = &psta->rs_sta;
	sband = hw->wiphy->bands[conf->channel->band];

	rs_sta->il = il;

	rs_sta->start_rate = RATE_INVALID;

	/* default to just 802.11b */
	rs_sta->expected_tpt = il3945_expected_tpt_b;

	rs_sta->last_partial_flush = jiffies;
	rs_sta->last_flush = jiffies;
	rs_sta->flush_time = RATE_FLUSH;
	rs_sta->last_tx_packets = 0;

	/* timer object itself was set up in il3945_rs_alloc_sta() */
	rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
	rs_sta->rate_scale_flush.function = il3945_bg_rate_scale_flush;

	for (i = 0; i < RATE_COUNT_3945; i++)
		il3945_clear_win(&rs_sta->win[i]);

	/* TODO: what is a good starting rate for STA? About middle? Maybe not
	 * the lowest or the highest rate.. Could consider using RSSI from
	 * previous packets? Need to have IEEE 802.1X auth succeed immediately
	 * after assoc.. */

	/* start at the highest rate the station supports on this band */
	for (i = sband->n_bitrates - 1; i >= 0; i--) {
		if (sta->supp_rates[sband->band] & (1 << i)) {
			rs_sta->last_txrate_idx = i;
			break;
		}
	}

	il->_3945.sta_supp_rates = sta->supp_rates[sband->band];
	/* For 5 GHz band it start at IL_FIRST_OFDM_RATE */
	if (sband->band == IEEE80211_BAND_5GHZ) {
		rs_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
		il->_3945.sta_supp_rates =
		    il->_3945.sta_supp_rates << IL_FIRST_OFDM_RATE;
	}

out:
	il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;

	D_INFO("leave\n");
}
398
/* Per-device rate-control allocation hook: the rate-scale state lives
 * in the device's private area, so just hand that back. */
static void *
il3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
{
	return hw->priv;
}
404
/* The rate-scale API requires a free callback, but the per-device data
 * is owned by the driver, so there is nothing to release here. */
static void
il3945_rs_free(void *il)
{
}
411
/*
 * il3945_rs_alloc_sta - per-station rate-scale state "allocation"
 *
 * No memory is actually allocated: the state is embedded in the
 * station's drv_priv area.  Only the lock and flush timer are
 * initialized here; the remaining fields are filled in later by
 * il3945_rs_rate_init() once the station is known to the uCode.
 */
static void *
il3945_rs_alloc_sta(void *il_priv, struct ieee80211_sta *sta, gfp_t gfp)
{
	struct il3945_rs_sta *rs_sta;
	struct il3945_sta_priv *psta = (void *)sta->drv_priv;
	struct il_priv *il __maybe_unused = il_priv;

	D_RATE("enter\n");

	rs_sta = &psta->rs_sta;

	spin_lock_init(&rs_sta->lock);
	init_timer(&rs_sta->rate_scale_flush);

	D_RATE("leave\n");

	return rs_sta;
}
430
/* Tear down per-station rate-scale state: only the flush timer needs
 * stopping, since storage is part of the station's drv_priv. */
static void
il3945_rs_free_sta(void *il_priv, struct ieee80211_sta *sta, void *il_sta)
{
	struct il3945_rs_sta *rs_sta = il_sta;

	/*
	 * Be careful not to use any members of il3945_rs_sta (like trying
	 * to use il_priv to print out debugging) since it may not be fully
	 * initialized at this point.
	 */
	del_timer_sync(&rs_sta->rate_scale_flush);
}
443
444/**
445 * il3945_rs_tx_status - Update rate control values based on Tx results
446 *
447 * NOTE: Uses il_priv->retry_rate for the # of retries attempted by
448 * the hardware for each rate.
449 */
450static void
451il3945_rs_tx_status(void *il_rate, struct ieee80211_supported_band *sband,
452 struct ieee80211_sta *sta, void *il_sta,
453 struct sk_buff *skb)
454{
455 s8 retries = 0, current_count;
456 int scale_rate_idx, first_idx, last_idx;
457 unsigned long flags;
458 struct il_priv *il = (struct il_priv *)il_rate;
459 struct il3945_rs_sta *rs_sta = il_sta;
460 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
461
462 D_RATE("enter\n");
463
464 retries = info->status.rates[0].count;
465 /* Sanity Check for retries */
466 if (retries > RATE_RETRY_TH)
467 retries = RATE_RETRY_TH;
468
469 first_idx = sband->bitrates[info->status.rates[0].idx].hw_value;
470 if (first_idx < 0 || first_idx >= RATE_COUNT_3945) {
471 D_RATE("leave: Rate out of bounds: %d\n", first_idx);
472 return;
473 }
474
475 if (!il_sta) {
476 D_RATE("leave: No STA il data to update!\n");
477 return;
478 }
479
480 /* Treat uninitialized rate scaling data same as non-existing. */
481 if (!rs_sta->il) {
482 D_RATE("leave: STA il data uninitialized!\n");
483 return;
484 }
485
486 rs_sta->tx_packets++;
487
488 scale_rate_idx = first_idx;
489 last_idx = first_idx;
490
491 /*
492 * Update the win for each rate. We determine which rates
493 * were Tx'd based on the total number of retries vs. the number
494 * of retries configured for each rate -- currently set to the
495 * il value 'retry_rate' vs. rate specific
496 *
497 * On exit from this while loop last_idx indicates the rate
498 * at which the frame was finally transmitted (or failed if no
499 * ACK)
500 */
501 while (retries > 1) {
502 if ((retries - 1) < il->retry_rate) {
503 current_count = (retries - 1);
504 last_idx = scale_rate_idx;
505 } else {
506 current_count = il->retry_rate;
507 last_idx = il3945_rs_next_rate(il, scale_rate_idx);
508 }
509
510 /* Update this rate accounting for as many retries
511 * as was used for it (per current_count) */
512 il3945_collect_tx_data(rs_sta, &rs_sta->win[scale_rate_idx], 0,
513 current_count, scale_rate_idx);
514 D_RATE("Update rate %d for %d retries.\n", scale_rate_idx,
515 current_count);
516
517 retries -= current_count;
518
519 scale_rate_idx = last_idx;
520 }
521
522 /* Update the last idx win with success/failure based on ACK */
523 D_RATE("Update rate %d with %s.\n", last_idx,
524 (info->flags & IEEE80211_TX_STAT_ACK) ? "success" : "failure");
525 il3945_collect_tx_data(rs_sta, &rs_sta->win[last_idx],
526 info->flags & IEEE80211_TX_STAT_ACK, 1,
527 last_idx);
528
529 /* We updated the rate scale win -- if its been more than
530 * flush_time since the last run, schedule the flush
531 * again */
532 spin_lock_irqsave(&rs_sta->lock, flags);
533
534 if (!rs_sta->flush_pending &&
535 time_after(jiffies, rs_sta->last_flush + rs_sta->flush_time)) {
536
537 rs_sta->last_partial_flush = jiffies;
538 rs_sta->flush_pending = 1;
539 mod_timer(&rs_sta->rate_scale_flush,
540 jiffies + rs_sta->flush_time);
541 }
542
543 spin_unlock_irqrestore(&rs_sta->lock, flags);
544
545 D_RATE("leave\n");
546}
547
548static u16
549il3945_get_adjacent_rate(struct il3945_rs_sta *rs_sta, u8 idx, u16 rate_mask,
550 enum ieee80211_band band)
551{
552 u8 high = RATE_INVALID;
553 u8 low = RATE_INVALID;
554 struct il_priv *il __maybe_unused = rs_sta->il;
555
556 /* 802.11A walks to the next literal adjacent rate in
557 * the rate table */
558 if (unlikely(band == IEEE80211_BAND_5GHZ)) {
559 int i;
560 u32 mask;
561
562 /* Find the previous rate that is in the rate mask */
563 i = idx - 1;
564 for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
565 if (rate_mask & mask) {
566 low = i;
567 break;
568 }
569 }
570
571 /* Find the next rate that is in the rate mask */
572 i = idx + 1;
573 for (mask = (1 << i); i < RATE_COUNT_3945; i++, mask <<= 1) {
574 if (rate_mask & mask) {
575 high = i;
576 break;
577 }
578 }
579
580 return (high << 8) | low;
581 }
582
583 low = idx;
584 while (low != RATE_INVALID) {
585 if (rs_sta->tgg)
586 low = il3945_rates[low].prev_rs_tgg;
587 else
588 low = il3945_rates[low].prev_rs;
589 if (low == RATE_INVALID)
590 break;
591 if (rate_mask & (1 << low))
592 break;
593 D_RATE("Skipping masked lower rate: %d\n", low);
594 }
595
596 high = idx;
597 while (high != RATE_INVALID) {
598 if (rs_sta->tgg)
599 high = il3945_rates[high].next_rs_tgg;
600 else
601 high = il3945_rates[high].next_rs;
602 if (high == RATE_INVALID)
603 break;
604 if (rate_mask & (1 << high))
605 break;
606 D_RATE("Skipping masked higher rate: %d\n", high);
607 }
608
609 return (high << 8) | low;
610}
611
612/**
613 * il3945_rs_get_rate - find the rate for the requested packet
614 *
615 * Returns the ieee80211_rate structure allocated by the driver.
616 *
617 * The rate control algorithm has no internal mapping between hw_mode's
618 * rate ordering and the rate ordering used by the rate control algorithm.
619 *
620 * The rate control algorithm uses a single table of rates that goes across
621 * the entire A/B/G spectrum vs. being limited to just one particular
622 * hw_mode.
623 *
624 * As such, we can't convert the idx obtained below into the hw_mode's
625 * rate table and must reference the driver allocated rate table
626 *
627 */
628static void
629il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
630 struct ieee80211_tx_rate_control *txrc)
631{
632 struct ieee80211_supported_band *sband = txrc->sband;
633 struct sk_buff *skb = txrc->skb;
634 u8 low = RATE_INVALID;
635 u8 high = RATE_INVALID;
636 u16 high_low;
637 int idx;
638 struct il3945_rs_sta *rs_sta = il_sta;
639 struct il3945_rate_scale_data *win = NULL;
640 int current_tpt = IL_INVALID_VALUE;
641 int low_tpt = IL_INVALID_VALUE;
642 int high_tpt = IL_INVALID_VALUE;
643 u32 fail_count;
644 s8 scale_action = 0;
645 unsigned long flags;
646 u16 rate_mask;
647 s8 max_rate_idx = -1;
648 struct il_priv *il __maybe_unused = (struct il_priv *)il_r;
649 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
650
651 D_RATE("enter\n");
652
653 /* Treat uninitialized rate scaling data same as non-existing. */
654 if (rs_sta && !rs_sta->il) {
655 D_RATE("Rate scaling information not initialized yet.\n");
656 il_sta = NULL;
657 }
658
659 if (rate_control_send_low(sta, il_sta, txrc))
660 return;
661
662 rate_mask = sta->supp_rates[sband->band];
663
664 /* get user max rate if set */
665 max_rate_idx = txrc->max_rate_idx;
666 if (sband->band == IEEE80211_BAND_5GHZ && max_rate_idx != -1)
667 max_rate_idx += IL_FIRST_OFDM_RATE;
668 if (max_rate_idx < 0 || max_rate_idx >= RATE_COUNT)
669 max_rate_idx = -1;
670
671 idx = min(rs_sta->last_txrate_idx & 0xffff, RATE_COUNT_3945 - 1);
672
673 if (sband->band == IEEE80211_BAND_5GHZ)
674 rate_mask = rate_mask << IL_FIRST_OFDM_RATE;
675
676 spin_lock_irqsave(&rs_sta->lock, flags);
677
678 /* for recent assoc, choose best rate regarding
679 * to rssi value
680 */
681 if (rs_sta->start_rate != RATE_INVALID) {
682 if (rs_sta->start_rate < idx &&
683 (rate_mask & (1 << rs_sta->start_rate)))
684 idx = rs_sta->start_rate;
685 rs_sta->start_rate = RATE_INVALID;
686 }
687
688 /* force user max rate if set by user */
689 if (max_rate_idx != -1 && max_rate_idx < idx) {
690 if (rate_mask & (1 << max_rate_idx))
691 idx = max_rate_idx;
692 }
693
694 win = &(rs_sta->win[idx]);
695
696 fail_count = win->counter - win->success_counter;
697
698 if (fail_count < RATE_MIN_FAILURE_TH &&
699 win->success_counter < RATE_MIN_SUCCESS_TH) {
700 spin_unlock_irqrestore(&rs_sta->lock, flags);
701
702 D_RATE("Invalid average_tpt on rate %d: "
703 "counter: %d, success_counter: %d, "
704 "expected_tpt is %sNULL\n", idx, win->counter,
705 win->success_counter,
706 rs_sta->expected_tpt ? "not " : "");
707
708 /* Can't calculate this yet; not enough history */
709 win->average_tpt = IL_INVALID_VALUE;
710 goto out;
711
712 }
713
714 current_tpt = win->average_tpt;
715
716 high_low =
717 il3945_get_adjacent_rate(rs_sta, idx, rate_mask, sband->band);
718 low = high_low & 0xff;
719 high = (high_low >> 8) & 0xff;
720
721 /* If user set max rate, dont allow higher than user constrain */
722 if (max_rate_idx != -1 && max_rate_idx < high)
723 high = RATE_INVALID;
724
725 /* Collect Measured throughputs of adjacent rates */
726 if (low != RATE_INVALID)
727 low_tpt = rs_sta->win[low].average_tpt;
728
729 if (high != RATE_INVALID)
730 high_tpt = rs_sta->win[high].average_tpt;
731
732 spin_unlock_irqrestore(&rs_sta->lock, flags);
733
734 scale_action = 0;
735
736 /* Low success ratio , need to drop the rate */
737 if (win->success_ratio < RATE_DECREASE_TH || !current_tpt) {
738 D_RATE("decrease rate because of low success_ratio\n");
739 scale_action = -1;
740 /* No throughput measured yet for adjacent rates,
741 * try increase */
742 } else if (low_tpt == IL_INVALID_VALUE && high_tpt == IL_INVALID_VALUE) {
743
744 if (high != RATE_INVALID &&
745 win->success_ratio >= RATE_INCREASE_TH)
746 scale_action = 1;
747 else if (low != RATE_INVALID)
748 scale_action = 0;
749
750 /* Both adjacent throughputs are measured, but neither one has
751 * better throughput; we're using the best rate, don't change
752 * it! */
753 } else if (low_tpt != IL_INVALID_VALUE && high_tpt != IL_INVALID_VALUE
754 && low_tpt < current_tpt && high_tpt < current_tpt) {
755
756 D_RATE("No action -- low [%d] & high [%d] < "
757 "current_tpt [%d]\n", low_tpt, high_tpt, current_tpt);
758 scale_action = 0;
759
760 /* At least one of the rates has better throughput */
761 } else {
762 if (high_tpt != IL_INVALID_VALUE) {
763
764 /* High rate has better throughput, Increase
765 * rate */
766 if (high_tpt > current_tpt &&
767 win->success_ratio >= RATE_INCREASE_TH)
768 scale_action = 1;
769 else {
770 D_RATE("decrease rate because of high tpt\n");
771 scale_action = 0;
772 }
773 } else if (low_tpt != IL_INVALID_VALUE) {
774 if (low_tpt > current_tpt) {
775 D_RATE("decrease rate because of low tpt\n");
776 scale_action = -1;
777 } else if (win->success_ratio >= RATE_INCREASE_TH) {
778 /* Lower rate has better
779 * throughput,decrease rate */
780 scale_action = 1;
781 }
782 }
783 }
784
785 /* Sanity check; asked for decrease, but success rate or throughput
786 * has been good at old rate. Don't change it. */
787 if (scale_action == -1 && low != RATE_INVALID &&
788 (win->success_ratio > RATE_HIGH_TH ||
789 current_tpt > 100 * rs_sta->expected_tpt[low]))
790 scale_action = 0;
791
792 switch (scale_action) {
793 case -1:
794
795 /* Decrese rate */
796 if (low != RATE_INVALID)
797 idx = low;
798 break;
799
800 case 1:
801 /* Increase rate */
802 if (high != RATE_INVALID)
803 idx = high;
804
805 break;
806
807 case 0:
808 default:
809 /* No change */
810 break;
811 }
812
813 D_RATE("Selected %d (action %d) - low %d high %d\n", idx, scale_action,
814 low, high);
815
816out:
817
818 if (sband->band == IEEE80211_BAND_5GHZ) {
819 if (WARN_ON_ONCE(idx < IL_FIRST_OFDM_RATE))
820 idx = IL_FIRST_OFDM_RATE;
821 rs_sta->last_txrate_idx = idx;
822 info->control.rates[0].idx = idx - IL_FIRST_OFDM_RATE;
823 } else {
824 rs_sta->last_txrate_idx = idx;
825 info->control.rates[0].idx = rs_sta->last_txrate_idx;
826 }
827
828 D_RATE("leave: %d\n", idx);
829}
830
831#ifdef CONFIG_MAC80211_DEBUGFS
832static int
833il3945_open_file_generic(struct inode *inode, struct file *file)
834{
835 file->private_data = inode->i_private;
836 return 0;
837}
838
839static ssize_t
840il3945_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf,
841 size_t count, loff_t *ppos)
842{
843 char *buff;
844 int desc = 0;
845 int j;
846 ssize_t ret;
847 struct il3945_rs_sta *lq_sta = file->private_data;
848
849 buff = kmalloc(1024, GFP_KERNEL);
850 if (!buff)
851 return -ENOMEM;
852
853 desc +=
854 sprintf(buff + desc,
855 "tx packets=%d last rate idx=%d\n"
856 "rate=0x%X flush time %d\n", lq_sta->tx_packets,
857 lq_sta->last_txrate_idx, lq_sta->start_rate,
858 jiffies_to_msecs(lq_sta->flush_time));
859 for (j = 0; j < RATE_COUNT_3945; j++) {
860 desc +=
861 sprintf(buff + desc, "counter=%d success=%d %%=%d\n",
862 lq_sta->win[j].counter,
863 lq_sta->win[j].success_counter,
864 lq_sta->win[j].success_ratio);
865 }
866 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
867 kfree(buff);
868 return ret;
869}
870
871static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
872 .read = il3945_sta_dbgfs_stats_table_read,
873 .open = il3945_open_file_generic,
874 .llseek = default_llseek,
875};
876
877static void
878il3945_add_debugfs(void *il, void *il_sta, struct dentry *dir)
879{
880 struct il3945_rs_sta *lq_sta = il_sta;
881
882 lq_sta->rs_sta_dbgfs_stats_table_file =
883 debugfs_create_file("rate_stats_table", 0600, dir, lq_sta,
884 &rs_sta_dbgfs_stats_table_ops);
885
886}
887
888static void
889il3945_remove_debugfs(void *il, void *il_sta)
890{
891 struct il3945_rs_sta *lq_sta = il_sta;
892 debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
893}
894#endif
895
896/*
897 * Initialization of rate scaling information is done by driver after
898 * the station is added. Since mac80211 calls this function before a
899 * station is added we ignore it.
900 */
901static void
902il3945_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
903 struct ieee80211_sta *sta, void *il_sta)
904{
905}
906
907static struct rate_control_ops rs_ops = {
908 .module = NULL,
909 .name = RS_NAME,
910 .tx_status = il3945_rs_tx_status,
911 .get_rate = il3945_rs_get_rate,
912 .rate_init = il3945_rs_rate_init_stub,
913 .alloc = il3945_rs_alloc,
914 .free = il3945_rs_free,
915 .alloc_sta = il3945_rs_alloc_sta,
916 .free_sta = il3945_rs_free_sta,
917#ifdef CONFIG_MAC80211_DEBUGFS
918 .add_sta_debugfs = il3945_add_debugfs,
919 .remove_sta_debugfs = il3945_remove_debugfs,
920#endif
921
922};
923
/*
 * Real per-station rate-scale initialization, called by the driver once
 * the station has been added (see il3945_rs_rate_init_stub).  Picks the
 * expected-throughput table for the current band/protection mode and a
 * starting rate from the last measured RSSI.
 */
void
il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
{
	struct il_priv *il = hw->priv;
	s32 rssi = 0;
	unsigned long flags;
	struct il3945_rs_sta *rs_sta;
	struct ieee80211_sta *sta;
	struct il3945_sta_priv *psta;

	D_RATE("enter\n");

	rcu_read_lock();

	/* Look the station up by the MAC address the driver stored for
	 * this sta_id; bail out if mac80211 no longer knows it. */
	sta =
	    ieee80211_find_sta(il->ctx.vif, il->stations[sta_id].sta.sta.addr);
	if (!sta) {
		D_RATE("Unable to find station to initialize rate scaling.\n");
		rcu_read_unlock();
		return;
	}

	psta = (void *)sta->drv_priv;
	rs_sta = &psta->rs_sta;

	spin_lock_irqsave(&rs_sta->lock, flags);

	/* Select the expected-throughput table; tgg selects the
	 * 11g-protection variant of the rate-walk links as well. */
	rs_sta->tgg = 0;
	switch (il->band) {
	case IEEE80211_BAND_2GHZ:
		/* TODO: this always does G, not a regression */
		if (il->ctx.active.flags & RXON_FLG_TGG_PROTECT_MSK) {
			rs_sta->tgg = 1;
			rs_sta->expected_tpt = il3945_expected_tpt_g_prot;
		} else
			rs_sta->expected_tpt = il3945_expected_tpt_g;
		break;

	case IEEE80211_BAND_5GHZ:
		rs_sta->expected_tpt = il3945_expected_tpt_a;
		break;
	case IEEE80211_NUM_BANDS:
		/* not a real band — reaching here is a driver bug */
		BUG();
		break;
	}

	spin_unlock_irqrestore(&rs_sta->lock, flags);

	/* Fall back to the minimum RSSI if nothing was received yet. */
	rssi = il->_3945.last_rx_rssi;
	if (rssi == 0)
		rssi = IL_MIN_RSSI_VAL;

	D_RATE("Network RSSI: %d\n", rssi);

	rs_sta->start_rate = il3945_get_rate_idx_by_rssi(rssi, il->band);

	D_RATE("leave: rssi %d assign rate idx: " "%d (plcp 0x%x)\n", rssi,
	       rs_sta->start_rate, il3945_rates[rs_sta->start_rate].plcp);
	rcu_read_unlock();
}
984
/* Register the 3945 rate-scaling algorithm (rs_ops) with mac80211. */
int
il3945_rate_control_register(void)
{
	return ieee80211_rate_control_register(&rs_ops);
}
990
/* Unregister the 3945 rate-scaling algorithm from mac80211. */
void
il3945_rate_control_unregister(void)
{
	ieee80211_rate_control_unregister(&rs_ops);
}
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
new file mode 100644
index 000000000000..863664f9ba8b
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -0,0 +1,2751 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/slab.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33#include <linux/delay.h>
34#include <linux/sched.h>
35#include <linux/skbuff.h>
36#include <linux/netdevice.h>
37#include <linux/firmware.h>
38#include <linux/etherdevice.h>
39#include <asm/unaligned.h>
40#include <net/mac80211.h>
41
42#include "common.h"
43#include "3945.h"
44
45/* Send led command */
46static int
47il3945_send_led_cmd(struct il_priv *il, struct il_led_cmd *led_cmd)
48{
49 struct il_host_cmd cmd = {
50 .id = C_LEDS,
51 .len = sizeof(struct il_led_cmd),
52 .data = led_cmd,
53 .flags = CMD_ASYNC,
54 .callback = NULL,
55 };
56
57 return il_send_cmd(il, &cmd);
58}
59
/* LED hook plugged into the shared iwlegacy LED framework. */
const struct il_led_ops il3945_led_ops = {
	.cmd = il3945_send_led_cmd,
};
63
/* Build one il3945_rate_info entry; see the parameter-order comment on
 * the table below. */
#define IL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
	[RATE_##r##M_IDX] = { RATE_##r##M_PLCP, \
			      RATE_##r##M_IEEE, \
			      RATE_##ip##M_IDX, \
			      RATE_##in##M_IDX, \
			      RATE_##rp##M_IDX, \
			      RATE_##rn##M_IDX, \
			      RATE_##pp##M_IDX, \
			      RATE_##np##M_IDX, \
			      RATE_##r##M_IDX_TBL, \
			      RATE_##ip##M_IDX_TBL }

/*
 * Parameter order (matching IL_DECLARE_RATE_INFO above):
 * rate, prev ieee rate, next ieee rate,
 * prev rate-scale rate, next rate-scale rate,
 * prev TGG (11g-protection) rate, next TGG rate
 *
 * If there isn't a valid next or previous rate then INV is used which
 * maps to RATE_INVALID
 *
 */
const struct il3945_rate_info il3945_rates[RATE_COUNT_3945] = {
	IL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2),	/* 1mbps */
	IL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5),	/* 2mbps */
	IL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11),	/*5.5mbps */
	IL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18),	/* 11mbps */
	IL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11),	/* 6mbps */
	IL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11),	/* 9mbps */
	IL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18),	/* 12mbps */
	IL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24),	/* 18mbps */
	IL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36),	/* 24mbps */
	IL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48),	/* 36mbps */
	IL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54),	/* 48mbps */
	IL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),	/* 54mbps */
};
98
99static inline u8
100il3945_get_prev_ieee_rate(u8 rate_idx)
101{
102 u8 rate = il3945_rates[rate_idx].prev_ieee;
103
104 if (rate == RATE_INVALID)
105 rate = rate_idx;
106 return rate;
107}
108
/* 1 = enable the il3945_disable_events() function */
#define IL_EVT_DISABLE (0)
#define IL_EVT_DISABLE_SIZE (1532/32)

/**
 * il3945_disable_events - Disable selected events in uCode event log
 *
 * Disable an event by writing "1"s into "disable"
 * bitmap in SRAM. Bit position corresponds to Event # (id/type).
 * Default values of 0 enable uCode events to be logged.
 * Use for only special debugging. This function is just a placeholder as-is,
 * you'll need to provide the special bits! ...
 * ... and set IL_EVT_DISABLE to 1. */
void
il3945_disable_events(struct il_priv *il)
{
	int i;
	u32 base;		/* SRAM address of event log header */
	u32 disable_ptr;	/* SRAM address of event-disable bitmap array */
	u32 array_size;		/* # of u32 entries in array */
	/* All-zero disable bitmap: placeholder — set bits here to
	 * suppress the corresponding event IDs. */
	static const u32 evt_disable[IL_EVT_DISABLE_SIZE] = {
		0x00000000,	/* 31 - 0 Event id numbers */
		0x00000000,	/* 63 - 32 */
		0x00000000,	/* 95 - 64 */
		0x00000000,	/* 127 - 96 */
		0x00000000,	/* 159 - 128 */
		0x00000000,	/* 191 - 160 */
		0x00000000,	/* 223 - 192 */
		0x00000000,	/* 255 - 224 */
		0x00000000,	/* 287 - 256 */
		0x00000000,	/* 319 - 288 */
		0x00000000,	/* 351 - 320 */
		0x00000000,	/* 383 - 352 */
		0x00000000,	/* 415 - 384 */
		0x00000000,	/* 447 - 416 */
		0x00000000,	/* 479 - 448 */
		0x00000000,	/* 511 - 480 */
		0x00000000,	/* 543 - 512 */
		0x00000000,	/* 575 - 544 */
		0x00000000,	/* 607 - 576 */
		0x00000000,	/* 639 - 608 */
		0x00000000,	/* 671 - 640 */
		0x00000000,	/* 703 - 672 */
		0x00000000,	/* 735 - 704 */
		0x00000000,	/* 767 - 736 */
		0x00000000,	/* 799 - 768 */
		0x00000000,	/* 831 - 800 */
		0x00000000,	/* 863 - 832 */
		0x00000000,	/* 895 - 864 */
		0x00000000,	/* 927 - 896 */
		0x00000000,	/* 959 - 928 */
		0x00000000,	/* 991 - 960 */
		0x00000000,	/* 1023 - 992 */
		0x00000000,	/* 1055 - 1024 */
		0x00000000,	/* 1087 - 1056 */
		0x00000000,	/* 1119 - 1088 */
		0x00000000,	/* 1151 - 1120 */
		0x00000000,	/* 1183 - 1152 */
		0x00000000,	/* 1215 - 1184 */
		0x00000000,	/* 1247 - 1216 */
		0x00000000,	/* 1279 - 1248 */
		0x00000000,	/* 1311 - 1280 */
		0x00000000,	/* 1343 - 1312 */
		0x00000000,	/* 1375 - 1344 */
		0x00000000,	/* 1407 - 1376 */
		0x00000000,	/* 1439 - 1408 */
		0x00000000,	/* 1471 - 1440 */
		0x00000000,	/* 1503 - 1472 */
	};

	/* Event log header address comes from the uCode "alive" response. */
	base = le32_to_cpu(il->card_alive.log_event_table_ptr);
	if (!il3945_hw_valid_rtc_data_addr(base)) {
		IL_ERR("Invalid event log pointer 0x%08X\n", base);
		return;
	}

	/* Words 4 and 5 of the log header hold the disable-bitmap
	 * pointer and its size (in u32s). */
	disable_ptr = il_read_targ_mem(il, base + (4 * sizeof(u32)));
	array_size = il_read_targ_mem(il, base + (5 * sizeof(u32)));

	if (IL_EVT_DISABLE && array_size == IL_EVT_DISABLE_SIZE) {
		D_INFO("Disabling selected uCode log events at 0x%x\n",
		       disable_ptr);
		for (i = 0; i < IL_EVT_DISABLE_SIZE; i++)
			il_write_targ_mem(il, disable_ptr + (i * sizeof(u32)),
					  evt_disable[i]);

	} else {
		D_INFO("Selected uCode log events may be disabled\n");
		D_INFO("  by writing \"1\"s into disable bitmap\n");
		D_INFO("  in SRAM at 0x%x, size %d u32s\n", disable_ptr,
		       array_size);
	}

}
203
204static int
205il3945_hwrate_to_plcp_idx(u8 plcp)
206{
207 int idx;
208
209 for (idx = 0; idx < RATE_COUNT_3945; idx++)
210 if (il3945_rates[idx].plcp == plcp)
211 return idx;
212 return -1;
213}
214
#ifdef CONFIG_IWLEGACY_DEBUG
/* Expand a TX_3945_STATUS_FAIL_* case into its name string. */
#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x

/* Map a Tx response status word to a human-readable failure reason,
 * for debug output only. */
static const char *
il3945_get_tx_fail_reason(u32 status)
{
	switch (status & TX_STATUS_MSK) {
	case TX_3945_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_ENTRY(SHORT_LIMIT);
		TX_STATUS_ENTRY(LONG_LIMIT);
		TX_STATUS_ENTRY(FIFO_UNDERRUN);
		TX_STATUS_ENTRY(MGMNT_ABORT);
		TX_STATUS_ENTRY(NEXT_FRAG);
		TX_STATUS_ENTRY(LIFE_EXPIRE);
		TX_STATUS_ENTRY(DEST_PS);
		TX_STATUS_ENTRY(ABORTED);
		TX_STATUS_ENTRY(BT_RETRY);
		TX_STATUS_ENTRY(STA_INVALID);
		TX_STATUS_ENTRY(FRAG_DROPPED);
		TX_STATUS_ENTRY(TID_DISABLE);
		TX_STATUS_ENTRY(FRAME_FLUSHED);
		TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
		TX_STATUS_ENTRY(TX_LOCKED);
		TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";
}
#else
/* Non-debug build: no reason strings. */
static inline const char *
il3945_get_tx_fail_reason(u32 status)
{
	return "";
}
#endif
251
252/*
253 * get ieee prev rate from rate scale table.
254 * for A and B mode we need to overright prev
255 * value
256 */
257int
258il3945_rs_next_rate(struct il_priv *il, int rate)
259{
260 int next_rate = il3945_get_prev_ieee_rate(rate);
261
262 switch (il->band) {
263 case IEEE80211_BAND_5GHZ:
264 if (rate == RATE_12M_IDX)
265 next_rate = RATE_9M_IDX;
266 else if (rate == RATE_6M_IDX)
267 next_rate = RATE_6M_IDX;
268 break;
269 case IEEE80211_BAND_2GHZ:
270 if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) &&
271 il_is_associated(il)) {
272 if (rate == RATE_11M_IDX)
273 next_rate = RATE_5M_IDX;
274 }
275 break;
276
277 default:
278 break;
279 }
280
281 return next_rate;
282}
283
284/**
285 * il3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
286 *
287 * When FW advances 'R' idx, all entries between old and new 'R' idx
288 * need to be reclaimed. As result, some free space forms. If there is
289 * enough free space (> low mark), wake the stack that feeds us.
290 */
291static void
292il3945_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
293{
294 struct il_tx_queue *txq = &il->txq[txq_id];
295 struct il_queue *q = &txq->q;
296 struct il_tx_info *tx_info;
297
298 BUG_ON(txq_id == IL39_CMD_QUEUE_NUM);
299
300 for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
301 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
302
303 tx_info = &txq->txb[txq->q.read_ptr];
304 ieee80211_tx_status_irqsafe(il->hw, tx_info->skb);
305 tx_info->skb = NULL;
306 il->cfg->ops->lib->txq_free_tfd(il, txq);
307 }
308
309 if (il_queue_space(q) > q->low_mark && txq_id >= 0 &&
310 txq_id != IL39_CMD_QUEUE_NUM && il->mac80211_registered)
311 il_wake_queue(il, txq);
312}
313
314/**
315 * il3945_hdl_tx - Handle Tx response
316 */
317static void
318il3945_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
319{
320 struct il_rx_pkt *pkt = rxb_addr(rxb);
321 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
322 int txq_id = SEQ_TO_QUEUE(sequence);
323 int idx = SEQ_TO_IDX(sequence);
324 struct il_tx_queue *txq = &il->txq[txq_id];
325 struct ieee80211_tx_info *info;
326 struct il3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
327 u32 status = le32_to_cpu(tx_resp->status);
328 int rate_idx;
329 int fail;
330
331 if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
332 IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
333 "is out of range [0-%d] %d %d\n", txq_id, idx,
334 txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
335 return;
336 }
337
338 txq->time_stamp = jiffies;
339 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
340 ieee80211_tx_info_clear_status(info);
341
342 /* Fill the MRR chain with some info about on-chip retransmissions */
343 rate_idx = il3945_hwrate_to_plcp_idx(tx_resp->rate);
344 if (info->band == IEEE80211_BAND_5GHZ)
345 rate_idx -= IL_FIRST_OFDM_RATE;
346
347 fail = tx_resp->failure_frame;
348
349 info->status.rates[0].idx = rate_idx;
350 info->status.rates[0].count = fail + 1; /* add final attempt */
351
352 /* tx_status->rts_retry_count = tx_resp->failure_rts; */
353 info->flags |=
354 ((status & TX_STATUS_MSK) ==
355 TX_STATUS_SUCCESS) ? IEEE80211_TX_STAT_ACK : 0;
356
357 D_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n", txq_id,
358 il3945_get_tx_fail_reason(status), status, tx_resp->rate,
359 tx_resp->failure_frame);
360
361 D_TX_REPLY("Tx queue reclaim %d\n", idx);
362 il3945_tx_queue_reclaim(il, txq_id, idx);
363
364 if (status & TX_ABORT_REQUIRED_MSK)
365 IL_ERR("TODO: Implement Tx ABORT REQUIRED!!!\n");
366}
367
368/*****************************************************************************
369 *
370 * Intel PRO/Wireless 3945ABG/BG Network Connection
371 *
372 * RX handler implementations
373 *
374 *****************************************************************************/
#ifdef CONFIG_IWLEGACY_DEBUGFS
/*
 * Accumulate monotonic uCode statistics across stats notifications.
 * The four stats structures are walked in lock-step as flat u32 arrays;
 * for each counter that increased, the delta is added to the running
 * accumulator and the per-interval maximum is tracked.
 */
static void
il3945_accumulative_stats(struct il_priv *il, __le32 * stats)
{
	int i;
	__le32 *prev_stats;
	u32 *accum_stats;
	u32 *delta, *max_delta;

	prev_stats = (__le32 *) &il->_3945.stats;
	accum_stats = (u32 *) &il->_3945.accum_stats;
	delta = (u32 *) &il->_3945.delta_stats;
	max_delta = (u32 *) &il->_3945.max_delta;

	/* Start at sizeof(__le32): the first word (flags) is skipped. */
	for (i = sizeof(__le32); i < sizeof(struct il3945_notif_stats);
	     i +=
	     sizeof(__le32), stats++, prev_stats++, delta++, max_delta++,
	     accum_stats++) {
		/* Only count increases; a decrease means the uCode reset
		 * the counter, so no delta is recorded. */
		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
			*delta =
			    (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
			*accum_stats += *delta;
			if (*delta > *max_delta)
				*max_delta = *delta;
		}
	}

	/* reset accumulative stats for "no-counter" type stats */
	il->_3945.accum_stats.general.temperature =
	    il->_3945.stats.general.temperature;
	il->_3945.accum_stats.general.ttl_timestamp =
	    il->_3945.stats.general.ttl_timestamp;
}
#endif
409
/*
 * Handle a statistics notification from the uCode: fold it into the
 * debugfs accumulators (if enabled) and cache the raw snapshot.
 */
void
il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);

	D_RX("Statistics notification received (%d vs %d).\n",
	     (int)sizeof(struct il3945_notif_stats),
	     le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);
#ifdef CONFIG_IWLEGACY_DEBUGFS
	il3945_accumulative_stats(il, (__le32 *) &pkt->u.raw);
#endif

	/* Keep the latest raw stats as the baseline for the next delta. */
	memcpy(&il->_3945.stats, pkt->u.raw, sizeof(il->_3945.stats));
}
424
/*
 * Handle the response to a statistics request: if the uCode reports it
 * cleared its counters, zero the debugfs accumulators to match, then
 * process the payload like a regular statistics notification.
 */
void
il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	/* First word of the payload carries the clear-indication flags. */
	__le32 *flag = (__le32 *) &pkt->u.raw;

	if (le32_to_cpu(*flag) & UCODE_STATS_CLEAR_MSK) {
#ifdef CONFIG_IWLEGACY_DEBUGFS
		memset(&il->_3945.accum_stats, 0,
		       sizeof(struct il3945_notif_stats));
		memset(&il->_3945.delta_stats, 0,
		       sizeof(struct il3945_notif_stats));
		memset(&il->_3945.max_delta, 0,
		       sizeof(struct il3945_notif_stats));
#endif
		D_RX("Statistics have been cleared\n");
	}
	il3945_hdl_stats(il, rxb);
}
444
445/******************************************************************************
446 *
447 * Misc. internal state and helper functions
448 *
449 ******************************************************************************/
450
451/* This is necessary only for a number of stats, see the caller. */
452static int
453il3945_is_network_packet(struct il_priv *il, struct ieee80211_hdr *header)
454{
455 /* Filter incoming packets to determine if they are targeted toward
456 * this network, discarding packets coming from ourselves */
457 switch (il->iw_mode) {
458 case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */
459 /* packets to our IBSS update information */
460 return !compare_ether_addr(header->addr3, il->bssid);
461 case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
462 /* packets to our IBSS update information */
463 return !compare_ether_addr(header->addr2, il->bssid);
464 default:
465 return 1;
466 }
467}
468
/*
 * Hand a received frame up to mac80211.  The frame payload stays in the
 * rx page and is attached to a small skb as a page fragment; ownership
 * of the page passes to the skb.
 */
static void
il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
			       struct ieee80211_rx_status *stats)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
	struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
	struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
	u16 len = le16_to_cpu(rx_hdr->len);
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;

	/* Sanity check: the reported frame length plus the rx frame
	 * overhead must fit inside the rx page allocation. */
	if (unlikely
	    (len + IL39_RX_FRAME_SIZE >
	     PAGE_SIZE << il->hw_params.rx_page_order)) {
		D_DROP("Corruption detected!\n");
		return;
	}

	/* We only process data packets if the interface is open */
	if (unlikely(!il->is_open)) {
		D_DROP("Dropping packet while interface is not open.\n");
		return;
	}

	/* Small skb head; the payload is attached below as a page frag. */
	skb = dev_alloc_skb(128);
	if (!skb) {
		IL_ERR("dev_alloc_skb failed\n");
		return;
	}

	if (!il3945_mod_params.sw_crypto)
		il_set_decrypted_flag(il, (struct ieee80211_hdr *)rxb_addr(rxb),
				      le32_to_cpu(rx_end->status), stats);

	skb_add_rx_frag(skb, 0, rxb->page,
			(void *)rx_hdr->payload - (void *)pkt, len);

	il_update_stats(il, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(il->hw, skb);
	/* Page now belongs to the skb; drop our accounting/reference. */
	il->alloc_rxb_page--;
	rxb->page = NULL;
}
515
516#define IL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
517
518static void
519il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
520{
521 struct ieee80211_hdr *header;
522 struct ieee80211_rx_status rx_status;
523 struct il_rx_pkt *pkt = rxb_addr(rxb);
524 struct il3945_rx_frame_stats *rx_stats = IL_RX_STATS(pkt);
525 struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
526 struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
527 u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
528 u16 rx_stats_noise_diff __maybe_unused =
529 le16_to_cpu(rx_stats->noise_diff);
530 u8 network_packet;
531
532 rx_status.flag = 0;
533 rx_status.mactime = le64_to_cpu(rx_end->timestamp);
534 rx_status.band =
535 (rx_hdr->
536 phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ :
537 IEEE80211_BAND_5GHZ;
538 rx_status.freq =
539 ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
540 rx_status.band);
541
542 rx_status.rate_idx = il3945_hwrate_to_plcp_idx(rx_hdr->rate);
543 if (rx_status.band == IEEE80211_BAND_5GHZ)
544 rx_status.rate_idx -= IL_FIRST_OFDM_RATE;
545
546 rx_status.antenna =
547 (le16_to_cpu(rx_hdr->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
548 4;
549
550 /* set the preamble flag if appropriate */
551 if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
552 rx_status.flag |= RX_FLAG_SHORTPRE;
553
554 if ((unlikely(rx_stats->phy_count > 20))) {
555 D_DROP("dsp size out of range [0,20]: %d/n",
556 rx_stats->phy_count);
557 return;
558 }
559
560 if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR) ||
561 !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
562 D_RX("Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
563 return;
564 }
565
566 /* Convert 3945's rssi indicator to dBm */
567 rx_status.signal = rx_stats->rssi - IL39_RSSI_OFFSET;
568
569 D_STATS("Rssi %d sig_avg %d noise_diff %d\n", rx_status.signal,
570 rx_stats_sig_avg, rx_stats_noise_diff);
571
572 header = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
573
574 network_packet = il3945_is_network_packet(il, header);
575
576 D_STATS("[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
577 network_packet ? '*' : ' ', le16_to_cpu(rx_hdr->channel),
578 rx_status.signal, rx_status.signal, rx_status.rate_idx);
579
580 il_dbg_log_rx_data_frame(il, le16_to_cpu(rx_hdr->len), header);
581
582 if (network_packet) {
583 il->_3945.last_beacon_time =
584 le32_to_cpu(rx_end->beacon_timestamp);
585 il->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
586 il->_3945.last_rx_rssi = rx_status.signal;
587 }
588
589 il3945_pass_packet_to_mac80211(il, rxb, &rx_status);
590}
591
592int
593il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
594 dma_addr_t addr, u16 len, u8 reset, u8 pad)
595{
596 int count;
597 struct il_queue *q;
598 struct il3945_tfd *tfd, *tfd_tmp;
599
600 q = &txq->q;
601 tfd_tmp = (struct il3945_tfd *)txq->tfds;
602 tfd = &tfd_tmp[q->write_ptr];
603
604 if (reset)
605 memset(tfd, 0, sizeof(*tfd));
606
607 count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
608
609 if (count >= NUM_TFD_CHUNKS || count < 0) {
610 IL_ERR("Error can not send more than %d chunks\n",
611 NUM_TFD_CHUNKS);
612 return -EINVAL;
613 }
614
615 tfd->tbs[count].addr = cpu_to_le32(addr);
616 tfd->tbs[count].len = cpu_to_le32(len);
617
618 count++;
619
620 tfd->control_flags =
621 cpu_to_le32(TFD_CTL_COUNT_SET(count) | TFD_CTL_PAD_SET(pad));
622
623 return 0;
624}
625
626/**
627 * il3945_hw_txq_free_tfd - Free one TFD, those at idx [txq->q.read_ptr]
628 *
629 * Does NOT advance any idxes
630 */
631void
632il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
633{
634 struct il3945_tfd *tfd_tmp = (struct il3945_tfd *)txq->tfds;
635 int idx = txq->q.read_ptr;
636 struct il3945_tfd *tfd = &tfd_tmp[idx];
637 struct pci_dev *dev = il->pci_dev;
638 int i;
639 int counter;
640
641 /* sanity check */
642 counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
643 if (counter > NUM_TFD_CHUNKS) {
644 IL_ERR("Too many chunks: %i\n", counter);
645 /* @todo issue fatal error, it is quite serious situation */
646 return;
647 }
648
649 /* Unmap tx_cmd */
650 if (counter)
651 pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
652 dma_unmap_len(&txq->meta[idx], len),
653 PCI_DMA_TODEVICE);
654
655 /* unmap chunks if any */
656
657 for (i = 1; i < counter; i++)
658 pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
659 le32_to_cpu(tfd->tbs[i].len),
660 PCI_DMA_TODEVICE);
661
662 /* free SKB */
663 if (txq->txb) {
664 struct sk_buff *skb;
665
666 skb = txq->txb[txq->q.read_ptr].skb;
667
668 /* can be called from irqs-disabled context */
669 if (skb) {
670 dev_kfree_skb_any(skb);
671 txq->txb[txq->q.read_ptr].skb = NULL;
672 }
673 }
674}
675
676/**
677 * il3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD:
678 *
679*/
680void
681il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
682 struct ieee80211_tx_info *info,
683 struct ieee80211_hdr *hdr, int sta_id, int tx_id)
684{
685 u16 hw_value = ieee80211_get_tx_rate(il->hw, info)->hw_value;
686 u16 rate_idx = min(hw_value & 0xffff, RATE_COUNT_3945);
687 u16 rate_mask;
688 int rate;
689 u8 rts_retry_limit;
690 u8 data_retry_limit;
691 __le32 tx_flags;
692 __le16 fc = hdr->frame_control;
693 struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
694
695 rate = il3945_rates[rate_idx].plcp;
696 tx_flags = tx_cmd->tx_flags;
697
698 /* We need to figure out how to get the sta->supp_rates while
699 * in this running context */
700 rate_mask = RATES_MASK_3945;
701
702 /* Set retry limit on DATA packets and Probe Responses */
703 if (ieee80211_is_probe_resp(fc))
704 data_retry_limit = 3;
705 else
706 data_retry_limit = IL_DEFAULT_TX_RETRY;
707 tx_cmd->data_retry_limit = data_retry_limit;
708
709 if (tx_id >= IL39_CMD_QUEUE_NUM)
710 rts_retry_limit = 3;
711 else
712 rts_retry_limit = 7;
713
714 if (data_retry_limit < rts_retry_limit)
715 rts_retry_limit = data_retry_limit;
716 tx_cmd->rts_retry_limit = rts_retry_limit;
717
718 tx_cmd->rate = rate;
719 tx_cmd->tx_flags = tx_flags;
720
721 /* OFDM */
722 tx_cmd->supp_rates[0] =
723 ((rate_mask & IL_OFDM_RATES_MASK) >> IL_FIRST_OFDM_RATE) & 0xFF;
724
725 /* CCK */
726 tx_cmd->supp_rates[1] = (rate_mask & 0xF);
727
728 D_RATE("Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
729 "cck/ofdm mask: 0x%x/0x%x\n", sta_id, tx_cmd->rate,
730 le32_to_cpu(tx_cmd->tx_flags), tx_cmd->supp_rates[1],
731 tx_cmd->supp_rates[0]);
732}
733
734static u8
735il3945_sync_sta(struct il_priv *il, int sta_id, u16 tx_rate)
736{
737 unsigned long flags_spin;
738 struct il_station_entry *station;
739
740 if (sta_id == IL_INVALID_STATION)
741 return IL_INVALID_STATION;
742
743 spin_lock_irqsave(&il->sta_lock, flags_spin);
744 station = &il->stations[sta_id];
745
746 station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
747 station->sta.rate_n_flags = cpu_to_le16(tx_rate);
748 station->sta.mode = STA_CONTROL_MODIFY_MSK;
749 il_send_add_sta(il, &station->sta, CMD_ASYNC);
750 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
751
752 D_RATE("SCALE sync station %d to rate %d\n", sta_id, tx_rate);
753 return sta_id;
754}
755
/* Select V_MAIN as the NIC's power source (the driver always runs from
 * main power; the commented reference below shows the V_AUX variant). */
static void
il3945_set_pwr_vmain(struct il_priv *il)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do

	if (pci_pme_capable(il->pci_dev, PCI_D3cold)) {
		il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
				      APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				      ~APMG_PS_CTRL_MSK_PWR_SRC);

		_il_poll_bit(il, CSR_GPIO_IN,
			     CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
			     CSR_GPIO_IN_BIT_AUX_POWER, 5000);
	}
 */

	il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
			      APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			      ~APMG_PS_CTRL_MSK_PWR_SRC);

	/* wait (up to 5 ms) for the GPIO to report V_MAIN as the source */
	_il_poll_bit(il, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
		     CSR_GPIO_IN_BIT_AUX_POWER, 5000);
}
781
782static int
783il3945_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
784{
785 il_wr(il, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
786 il_wr(il, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma);
787 il_wr(il, FH39_RCSR_WPTR(0), 0);
788 il_wr(il, FH39_RCSR_CONFIG(0),
789 FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
790 FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
791 FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
792 FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 | (RX_QUEUE_SIZE_LOG
793 <<
794 FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE)
795 | FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST | (1 <<
796 FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH)
797 | FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
798
799 /* fake read to flush all prev I/O */
800 il_rd(il, FH39_RSSR_CTRL);
801
802 return 0;
803}
804
805static int
806il3945_tx_reset(struct il_priv *il)
807{
808
809 /* bypass mode */
810 il_wr_prph(il, ALM_SCD_MODE_REG, 0x2);
811
812 /* RA 0 is active */
813 il_wr_prph(il, ALM_SCD_ARASTAT_REG, 0x01);
814
815 /* all 6 fifo are active */
816 il_wr_prph(il, ALM_SCD_TXFACT_REG, 0x3f);
817
818 il_wr_prph(il, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
819 il_wr_prph(il, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
820 il_wr_prph(il, ALM_SCD_TXF4MF_REG, 0x000004);
821 il_wr_prph(il, ALM_SCD_TXF5MF_REG, 0x000005);
822
823 il_wr(il, FH39_TSSR_CBB_BASE, il->_3945.shared_phys);
824
825 il_wr(il, FH39_TSSR_MSG_CONFIG,
826 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
827 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
828 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
829 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON |
830 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON |
831 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
832 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);
833
834 return 0;
835}
836
837/**
838 * il3945_txq_ctx_reset - Reset TX queue context
839 *
840 * Destroys all DMA structures and initialize them again
841 */
842static int
843il3945_txq_ctx_reset(struct il_priv *il)
844{
845 int rc;
846 int txq_id, slots_num;
847
848 il3945_hw_txq_ctx_free(il);
849
850 /* allocate tx queue structure */
851 rc = il_alloc_txq_mem(il);
852 if (rc)
853 return rc;
854
855 /* Tx CMD queue */
856 rc = il3945_tx_reset(il);
857 if (rc)
858 goto error;
859
860 /* Tx queue(s) */
861 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
862 slots_num =
863 (txq_id ==
864 IL39_CMD_QUEUE_NUM) ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
865 rc = il_tx_queue_init(il, &il->txq[txq_id], slots_num, txq_id);
866 if (rc) {
867 IL_ERR("Tx %d queue init failed\n", txq_id);
868 goto error;
869 }
870 }
871
872 return rc;
873
874error:
875 il3945_hw_txq_ctx_free(il);
876 return rc;
877}
878
879/*
880 * Start up 3945's basic functionality after it has been reset
881 * (e.g. after platform boot, or shutdown via il_apm_stop())
882 * NOTE: This does not load uCode nor start the embedded processor
883 */
884static int
885il3945_apm_init(struct il_priv *il)
886{
887 int ret = il_apm_init(il);
888
889 /* Clear APMG (NIC's internal power management) interrupts */
890 il_wr_prph(il, APMG_RTC_INT_MSK_REG, 0x0);
891 il_wr_prph(il, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
892
893 /* Reset radio chip */
894 il_set_bits_prph(il, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
895 udelay(5);
896 il_clear_bits_prph(il, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
897
898 return ret;
899}
900
/* Configure HW-interface CSR bits from the PCI revision ID and EEPROM
 * fields: radio type (MB/MM/RTP), SKU MRC mode, board revision, and
 * silicon (M-type A/B) version. */
static void
il3945_nic_config(struct il_priv *il)
{
	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
	unsigned long flags;
	u8 rev_id = il->pci_dev->revision;

	spin_lock_irqsave(&il->lock, flags);

	/* Determine HW type */
	D_INFO("HW Revision ID = 0x%X\n", rev_id);

	if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
		D_INFO("RTP type\n");
	else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
		D_INFO("3945 RADIO-MB type\n");
		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
			   CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
	} else {
		D_INFO("3945 RADIO-MM type\n");
		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
			   CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
	}

	/* NOTE(review): exact-equality test against sku_cap, not a
	 * bitmask check -- matches the original driver; confirm intended */
	if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
		D_INFO("SKU OP mode is mrc\n");
		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
			   CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
	} else
		D_INFO("SKU OP mode is basic\n");

	/* board revision 0xD0-0xDF sets the board-type bit */
	if ((eeprom->board_revision & 0xF0) == 0xD0) {
		D_INFO("3945ABG revision is 0x%X\n", eeprom->board_revision);
		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
			   CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
	} else {
		D_INFO("3945ABG revision is 0x%X\n", eeprom->board_revision);
		il_clear_bit(il, CSR_HW_IF_CONFIG_REG,
			     CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
	}

	if (eeprom->almgor_m_version <= 1) {
		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
			   CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
		D_INFO("Card M type A version is 0x%X\n",
		       eeprom->almgor_m_version);
	} else {
		D_INFO("Card M type B version is 0x%X\n",
		       eeprom->almgor_m_version);
		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
			   CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
	}
	spin_unlock_irqrestore(&il->lock, flags);

	/* RF-kill capabilities are only logged, not acted on here */
	if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
		D_RF_KILL("SW RF KILL supported in EEPROM.\n");

	if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
		D_RF_KILL("HW RF KILL supported in EEPROM.\n");
}
961
/* Bring the NIC to an initialized state: APM init/config, power source,
 * Rx queue allocation/reset and Rx DMA init, then Tx queue context reset.
 * The sequence is order-dependent.  Returns 0 or a negative errno. */
int
il3945_hw_nic_init(struct il_priv *il)
{
	int rc;
	unsigned long flags;
	struct il_rx_queue *rxq = &il->rxq;

	/* APM init must run under the lock */
	spin_lock_irqsave(&il->lock, flags);
	il->cfg->ops->lib->apm_ops.init(il);
	spin_unlock_irqrestore(&il->lock, flags);

	il3945_set_pwr_vmain(il);

	il->cfg->ops->lib->apm_ops.config(il);

	/* Allocate the RX queue, or reset if it is already allocated */
	if (!rxq->bd) {
		rc = il_rx_queue_alloc(il);
		if (rc) {
			IL_ERR("Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		il3945_rx_queue_reset(il, rxq);

	il3945_rx_replenish(il);

	il3945_rx_init(il, rxq);

	/* Look at using this instead:
	   rxq->need_update = 1;
	   il_rx_queue_update_write_ptr(il, rxq);
	 */

	/* write pointer must be 8-aligned for the hardware */
	il_wr(il, FH39_RCSR_WPTR(0), rxq->write & ~7);

	rc = il3945_txq_ctx_reset(il);
	if (rc)
		return rc;

	set_bit(S_INIT, &il->status);

	return 0;
}
1006
1007/**
1008 * il3945_hw_txq_ctx_free - Free TXQ Context
1009 *
1010 * Destroy all TX DMA queues and structures
1011 */
1012void
1013il3945_hw_txq_ctx_free(struct il_priv *il)
1014{
1015 int txq_id;
1016
1017 /* Tx queues */
1018 if (il->txq)
1019 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
1020 if (txq_id == IL39_CMD_QUEUE_NUM)
1021 il_cmd_queue_free(il);
1022 else
1023 il_tx_queue_free(il, txq_id);
1024
1025 /* free tx queue structure */
1026 il_txq_mem(il);
1027}
1028
1029void
1030il3945_hw_txq_ctx_stop(struct il_priv *il)
1031{
1032 int txq_id;
1033
1034 /* stop SCD */
1035 il_wr_prph(il, ALM_SCD_MODE_REG, 0);
1036 il_wr_prph(il, ALM_SCD_TXFACT_REG, 0);
1037
1038 /* reset TFD queues */
1039 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
1040 il_wr(il, FH39_TCSR_CONFIG(txq_id), 0x0);
1041 il_poll_bit(il, FH39_TSSR_TX_STATUS,
1042 FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
1043 1000);
1044 }
1045
1046 il3945_hw_txq_ctx_free(il);
1047}
1048
1049/**
1050 * il3945_hw_reg_adjust_power_by_temp
1051 * return idx delta into power gain settings table
1052*/
1053static int
1054il3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
1055{
1056 return (new_reading - old_reading) * (-11) / 100;
1057}
1058
1059/**
1060 * il3945_hw_reg_temp_out_of_range - Keep temperature in sane range
1061 */
1062static inline int
1063il3945_hw_reg_temp_out_of_range(int temperature)
1064{
1065 return (temperature < -260 || temperature > 25) ? 1 : 0;
1066}
1067
/* Read the raw temperature value from the CSR_UCODE_DRV_GP2 register
 * (driver units, not "human readable" -- see IL_TEMP_CONVERT use in
 * il3945_hw_reg_txpower_get_temperature()). */
int
il3945_hw_get_temperature(struct il_priv *il)
{
	return _il_rd(il, CSR_UCODE_DRV_GP2);
}
1073
1074/**
1075 * il3945_hw_reg_txpower_get_temperature
1076 * get the current temperature by reading from NIC
1077*/
1078static int
1079il3945_hw_reg_txpower_get_temperature(struct il_priv *il)
1080{
1081 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
1082 int temperature;
1083
1084 temperature = il3945_hw_get_temperature(il);
1085
1086 /* driver's okay range is -260 to +25.
1087 * human readable okay range is 0 to +285 */
1088 D_INFO("Temperature: %d\n", temperature + IL_TEMP_CONVERT);
1089
1090 /* handle insane temp reading */
1091 if (il3945_hw_reg_temp_out_of_range(temperature)) {
1092 IL_ERR("Error bad temperature value %d\n", temperature);
1093
1094 /* if really really hot(?),
1095 * substitute the 3rd band/group's temp measured at factory */
1096 if (il->last_temperature > 100)
1097 temperature = eeprom->groups[2].temperature;
1098 else /* else use most recent "sane" value from driver */
1099 temperature = il->last_temperature;
1100 }
1101
1102 return temperature; /* raw, not "human readable" */
1103}
1104
1105/* Adjust Txpower only if temperature variance is greater than threshold.
1106 *
1107 * Both are lower than older versions' 9 degrees */
1108#define IL_TEMPERATURE_LIMIT_TIMER 6
1109
1110/**
1111 * il3945_is_temp_calib_needed - determines if new calibration is needed
1112 *
1113 * records new temperature in tx_mgr->temperature.
1114 * replaces tx_mgr->last_temperature *only* if calib needed
1115 * (assumes caller will actually do the calibration!). */
1116static int
1117il3945_is_temp_calib_needed(struct il_priv *il)
1118{
1119 int temp_diff;
1120
1121 il->temperature = il3945_hw_reg_txpower_get_temperature(il);
1122 temp_diff = il->temperature - il->last_temperature;
1123
1124 /* get absolute value */
1125 if (temp_diff < 0) {
1126 D_POWER("Getting cooler, delta %d,\n", temp_diff);
1127 temp_diff = -temp_diff;
1128 } else if (temp_diff == 0)
1129 D_POWER("Same temp,\n");
1130 else
1131 D_POWER("Getting warmer, delta %d,\n", temp_diff);
1132
1133 /* if we don't need calibration, *don't* update last_temperature */
1134 if (temp_diff < IL_TEMPERATURE_LIMIT_TIMER) {
1135 D_POWER("Timed thermal calib not needed\n");
1136 return 0;
1137 }
1138
1139 D_POWER("Timed thermal calib needed\n");
1140
1141 /* assume that caller will actually do calib ...
1142 * update the "last temperature" value */
1143 il->last_temperature = il->temperature;
1144 return 1;
1145}
1146
#define IL_MAX_GAIN_ENTRIES 78
#define IL_CCK_FROM_OFDM_POWER_DIFF -5
#define IL_CCK_FROM_OFDM_IDX_DIFF (10)

/* radio and DSP power table, each step is 1/2 dB.
 * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain.
 *
 * Band 0 is 2.4 GHz, band 1 is 5.x GHz.  Within each band, idx 0 is
 * the highest Tx power and idx IL_MAX_GAIN_ENTRIES - 1 the lowest;
 * idxes are clamped into range by il3945_hw_reg_fix_power_idx(). */
static struct il3945_tx_power power_gain_table[2][IL_MAX_GAIN_ENTRIES] = {
	{
	 {251, 127},		/* 2.4 GHz, highest power */
	 {251, 127},
	 {251, 127},
	 {251, 127},
	 {251, 125},
	 {251, 110},
	 {251, 105},
	 {251, 98},
	 {187, 125},
	 {187, 115},
	 {187, 108},
	 {187, 99},
	 {243, 119},
	 {243, 111},
	 {243, 105},
	 {243, 97},
	 {243, 92},
	 {211, 106},
	 {211, 100},
	 {179, 120},
	 {179, 113},
	 {179, 107},
	 {147, 125},
	 {147, 119},
	 {147, 112},
	 {147, 106},
	 {147, 101},
	 {147, 97},
	 {147, 91},
	 {115, 107},
	 {235, 121},
	 {235, 115},
	 {235, 109},
	 {203, 127},
	 {203, 121},
	 {203, 115},
	 {203, 108},
	 {203, 102},
	 {203, 96},
	 {203, 92},
	 {171, 110},
	 {171, 104},
	 {171, 98},
	 {139, 116},
	 {227, 125},
	 {227, 119},
	 {227, 113},
	 {227, 107},
	 {227, 101},
	 {227, 96},
	 {195, 113},
	 {195, 106},
	 {195, 102},
	 {195, 95},
	 {163, 113},
	 {163, 106},
	 {163, 102},
	 {163, 95},
	 {131, 113},
	 {131, 106},
	 {131, 102},
	 {131, 95},
	 {99, 113},
	 {99, 106},
	 {99, 102},
	 {99, 95},
	 {67, 113},
	 {67, 106},
	 {67, 102},
	 {67, 95},
	 {35, 113},
	 {35, 106},
	 {35, 102},
	 {35, 95},
	 {3, 113},
	 {3, 106},
	 {3, 102},
	 {3, 95}		/* 2.4 GHz, lowest power */
	 },
	{
	 {251, 127},		/* 5.x GHz, highest power */
	 {251, 120},
	 {251, 114},
	 {219, 119},
	 {219, 101},
	 {187, 113},
	 {187, 102},
	 {155, 114},
	 {155, 103},
	 {123, 117},
	 {123, 107},
	 {123, 99},
	 {123, 92},
	 {91, 108},
	 {59, 125},
	 {59, 118},
	 {59, 109},
	 {59, 102},
	 {59, 96},
	 {59, 90},
	 {27, 104},
	 {27, 98},
	 {27, 92},
	 {115, 118},
	 {115, 111},
	 {115, 104},
	 {83, 126},
	 {83, 121},
	 {83, 113},
	 {83, 105},
	 {83, 99},
	 {51, 118},
	 {51, 111},
	 {51, 104},
	 {51, 98},
	 {19, 116},
	 {19, 109},
	 {19, 102},
	 {19, 98},
	 {19, 93},
	 {171, 113},
	 {171, 107},
	 {171, 99},
	 {139, 120},
	 {139, 113},
	 {139, 107},
	 {139, 99},
	 {107, 120},
	 {107, 113},
	 {107, 107},
	 {107, 99},
	 {75, 120},
	 {75, 113},
	 {75, 107},
	 {75, 99},
	 {43, 120},
	 {43, 113},
	 {43, 107},
	 {43, 99},
	 {11, 120},
	 {11, 113},
	 {11, 107},
	 {11, 99},
	 {131, 107},
	 {131, 99},
	 {99, 120},
	 {99, 113},
	 {99, 107},
	 {99, 99},
	 {67, 120},
	 {67, 113},
	 {67, 107},
	 {67, 99},
	 {35, 120},
	 {35, 113},
	 {35, 107},
	 {35, 99},
	 {3, 120}		/* 5.x GHz, lowest power */
	 }
};
1315
1316static inline u8
1317il3945_hw_reg_fix_power_idx(int idx)
1318{
1319 if (idx < 0)
1320 return 0;
1321 if (idx >= IL_MAX_GAIN_ENTRIES)
1322 return IL_MAX_GAIN_ENTRIES - 1;
1323 return (u8) idx;
1324}
1325
1326/* Kick off thermal recalibration check every 60 seconds */
1327#define REG_RECALIB_PERIOD (60)
1328
1329/**
1330 * il3945_hw_reg_set_scan_power - Set Tx power for scan probe requests
1331 *
1332 * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
1333 * or 6 Mbit (OFDM) rates.
1334 */
1335static void
1336il3945_hw_reg_set_scan_power(struct il_priv *il, u32 scan_tbl_idx, s32 rate_idx,
1337 const s8 *clip_pwrs,
1338 struct il_channel_info *ch_info, int band_idx)
1339{
1340 struct il3945_scan_power_info *scan_power_info;
1341 s8 power;
1342 u8 power_idx;
1343
1344 scan_power_info = &ch_info->scan_pwr_info[scan_tbl_idx];
1345
1346 /* use this channel group's 6Mbit clipping/saturation pwr,
1347 * but cap at regulatory scan power restriction (set during init
1348 * based on eeprom channel data) for this channel. */
1349 power = min(ch_info->scan_power, clip_pwrs[RATE_6M_IDX_TBL]);
1350
1351 power = min(power, il->tx_power_user_lmt);
1352 scan_power_info->requested_power = power;
1353
1354 /* find difference between new scan *power* and current "normal"
1355 * Tx *power* for 6Mb. Use this difference (x2) to adjust the
1356 * current "normal" temperature-compensated Tx power *idx* for
1357 * this rate (1Mb or 6Mb) to yield new temp-compensated scan power
1358 * *idx*. */
1359 power_idx =
1360 ch_info->power_info[rate_idx].power_table_idx - (power -
1361 ch_info->
1362 power_info
1363 [RATE_6M_IDX_TBL].
1364 requested_power) *
1365 2;
1366
1367 /* store reference idx that we use when adjusting *all* scan
1368 * powers. So we can accommodate user (all channel) or spectrum
1369 * management (single channel) power changes "between" temperature
1370 * feedback compensation procedures.
1371 * don't force fit this reference idx into gain table; it may be a
1372 * negative number. This will help avoid errors when we're at
1373 * the lower bounds (highest gains, for warmest temperatures)
1374 * of the table. */
1375
1376 /* don't exceed table bounds for "real" setting */
1377 power_idx = il3945_hw_reg_fix_power_idx(power_idx);
1378
1379 scan_power_info->power_table_idx = power_idx;
1380 scan_power_info->tpc.tx_gain =
1381 power_gain_table[band_idx][power_idx].tx_gain;
1382 scan_power_info->tpc.dsp_atten =
1383 power_gain_table[band_idx][power_idx].dsp_atten;
1384}
1385
1386/**
1387 * il3945_send_tx_power - fill in Tx Power command with gain settings
1388 *
1389 * Configures power settings for all rates for the current channel,
1390 * using values from channel info struct, and send to NIC
1391 */
1392static int
1393il3945_send_tx_power(struct il_priv *il)
1394{
1395 int rate_idx, i;
1396 const struct il_channel_info *ch_info = NULL;
1397 struct il3945_txpowertable_cmd txpower = {
1398 .channel = il->ctx.active.channel,
1399 };
1400 u16 chan;
1401
1402 if (WARN_ONCE
1403 (test_bit(S_SCAN_HW, &il->status),
1404 "TX Power requested while scanning!\n"))
1405 return -EAGAIN;
1406
1407 chan = le16_to_cpu(il->ctx.active.channel);
1408
1409 txpower.band = (il->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
1410 ch_info = il_get_channel_info(il, il->band, chan);
1411 if (!ch_info) {
1412 IL_ERR("Failed to get channel info for channel %d [%d]\n", chan,
1413 il->band);
1414 return -EINVAL;
1415 }
1416
1417 if (!il_is_channel_valid(ch_info)) {
1418 D_POWER("Not calling TX_PWR_TBL_CMD on " "non-Tx channel.\n");
1419 return 0;
1420 }
1421
1422 /* fill cmd with power settings for all rates for current channel */
1423 /* Fill OFDM rate */
1424 for (rate_idx = IL_FIRST_OFDM_RATE, i = 0;
1425 rate_idx <= IL39_LAST_OFDM_RATE; rate_idx++, i++) {
1426
1427 txpower.power[i].tpc = ch_info->power_info[i].tpc;
1428 txpower.power[i].rate = il3945_rates[rate_idx].plcp;
1429
1430 D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
1431 le16_to_cpu(txpower.channel), txpower.band,
1432 txpower.power[i].tpc.tx_gain,
1433 txpower.power[i].tpc.dsp_atten, txpower.power[i].rate);
1434 }
1435 /* Fill CCK rates */
1436 for (rate_idx = IL_FIRST_CCK_RATE; rate_idx <= IL_LAST_CCK_RATE;
1437 rate_idx++, i++) {
1438 txpower.power[i].tpc = ch_info->power_info[i].tpc;
1439 txpower.power[i].rate = il3945_rates[rate_idx].plcp;
1440
1441 D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
1442 le16_to_cpu(txpower.channel), txpower.band,
1443 txpower.power[i].tpc.tx_gain,
1444 txpower.power[i].tpc.dsp_atten, txpower.power[i].rate);
1445 }
1446
1447 return il_send_cmd_pdu(il, C_TX_PWR_TBL,
1448 sizeof(struct il3945_txpowertable_cmd),
1449 &txpower);
1450
1451}
1452
1453/**
1454 * il3945_hw_reg_set_new_power - Configures power tables at new levels
1455 * @ch_info: Channel to update. Uses power_info.requested_power.
1456 *
1457 * Replace requested_power and base_power_idx ch_info fields for
1458 * one channel.
1459 *
1460 * Called if user or spectrum management changes power preferences.
1461 * Takes into account h/w and modulation limitations (clip power).
1462 *
1463 * This does *not* send anything to NIC, just sets up ch_info for one channel.
1464 *
1465 * NOTE: reg_compensate_for_temperature_dif() *must* be run after this to
1466 * properly fill out the scan powers, and actual h/w gain settings,
1467 * and send changes to NIC
1468 */
1469static int
1470il3945_hw_reg_set_new_power(struct il_priv *il, struct il_channel_info *ch_info)
1471{
1472 struct il3945_channel_power_info *power_info;
1473 int power_changed = 0;
1474 int i;
1475 const s8 *clip_pwrs;
1476 int power;
1477
1478 /* Get this chnlgrp's rate-to-max/clip-powers table */
1479 clip_pwrs = il->_3945.clip_groups[ch_info->group_idx].clip_powers;
1480
1481 /* Get this channel's rate-to-current-power settings table */
1482 power_info = ch_info->power_info;
1483
1484 /* update OFDM Txpower settings */
1485 for (i = RATE_6M_IDX_TBL; i <= RATE_54M_IDX_TBL; i++, ++power_info) {
1486 int delta_idx;
1487
1488 /* limit new power to be no more than h/w capability */
1489 power = min(ch_info->curr_txpow, clip_pwrs[i]);
1490 if (power == power_info->requested_power)
1491 continue;
1492
1493 /* find difference between old and new requested powers,
1494 * update base (non-temp-compensated) power idx */
1495 delta_idx = (power - power_info->requested_power) * 2;
1496 power_info->base_power_idx -= delta_idx;
1497
1498 /* save new requested power value */
1499 power_info->requested_power = power;
1500
1501 power_changed = 1;
1502 }
1503
1504 /* update CCK Txpower settings, based on OFDM 12M setting ...
1505 * ... all CCK power settings for a given channel are the *same*. */
1506 if (power_changed) {
1507 power =
1508 ch_info->power_info[RATE_12M_IDX_TBL].requested_power +
1509 IL_CCK_FROM_OFDM_POWER_DIFF;
1510
1511 /* do all CCK rates' il3945_channel_power_info structures */
1512 for (i = RATE_1M_IDX_TBL; i <= RATE_11M_IDX_TBL; i++) {
1513 power_info->requested_power = power;
1514 power_info->base_power_idx =
1515 ch_info->power_info[RATE_12M_IDX_TBL].
1516 base_power_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
1517 ++power_info;
1518 }
1519 }
1520
1521 return 0;
1522}
1523
1524/**
1525 * il3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
1526 *
1527 * NOTE: Returned power limit may be less (but not more) than requested,
1528 * based strictly on regulatory (eeprom and spectrum mgt) limitations
1529 * (no consideration for h/w clipping limitations).
1530 */
1531static int
1532il3945_hw_reg_get_ch_txpower_limit(struct il_channel_info *ch_info)
1533{
1534 s8 max_power;
1535
1536#if 0
1537 /* if we're using TGd limits, use lower of TGd or EEPROM */
1538 if (ch_info->tgd_data.max_power != 0)
1539 max_power =
1540 min(ch_info->tgd_data.max_power,
1541 ch_info->eeprom.max_power_avg);
1542
1543 /* else just use EEPROM limits */
1544 else
1545#endif
1546 max_power = ch_info->eeprom.max_power_avg;
1547
1548 return min(max_power, ch_info->max_power_avg);
1549}
1550
1551/**
1552 * il3945_hw_reg_comp_txpower_temp - Compensate for temperature
1553 *
1554 * Compensate txpower settings of *all* channels for temperature.
1555 * This only accounts for the difference between current temperature
1556 * and the factory calibration temperatures, and bases the new settings
1557 * on the channel's base_power_idx.
1558 *
1559 * If RxOn is "associated", this sends the new Txpower to NIC!
1560 */
1561static int
1562il3945_hw_reg_comp_txpower_temp(struct il_priv *il)
1563{
1564 struct il_channel_info *ch_info = NULL;
1565 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
1566 int delta_idx;
1567 const s8 *clip_pwrs; /* array of h/w max power levels for each rate */
1568 u8 a_band;
1569 u8 rate_idx;
1570 u8 scan_tbl_idx;
1571 u8 i;
1572 int ref_temp;
1573 int temperature = il->temperature;
1574
1575 if (il->disable_tx_power_cal || test_bit(S_SCANNING, &il->status)) {
1576 /* do not perform tx power calibration */
1577 return 0;
1578 }
1579 /* set up new Tx power info for each and every channel, 2.4 and 5.x */
1580 for (i = 0; i < il->channel_count; i++) {
1581 ch_info = &il->channel_info[i];
1582 a_band = il_is_channel_a_band(ch_info);
1583
1584 /* Get this chnlgrp's factory calibration temperature */
1585 ref_temp = (s16) eeprom->groups[ch_info->group_idx].temperature;
1586
1587 /* get power idx adjustment based on current and factory
1588 * temps */
1589 delta_idx =
1590 il3945_hw_reg_adjust_power_by_temp(temperature, ref_temp);
1591
1592 /* set tx power value for all rates, OFDM and CCK */
1593 for (rate_idx = 0; rate_idx < RATE_COUNT_3945; rate_idx++) {
1594 int power_idx =
1595 ch_info->power_info[rate_idx].base_power_idx;
1596
1597 /* temperature compensate */
1598 power_idx += delta_idx;
1599
1600 /* stay within table range */
1601 power_idx = il3945_hw_reg_fix_power_idx(power_idx);
1602 ch_info->power_info[rate_idx].power_table_idx =
1603 (u8) power_idx;
1604 ch_info->power_info[rate_idx].tpc =
1605 power_gain_table[a_band][power_idx];
1606 }
1607
1608 /* Get this chnlgrp's rate-to-max/clip-powers table */
1609 clip_pwrs =
1610 il->_3945.clip_groups[ch_info->group_idx].clip_powers;
1611
1612 /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
1613 for (scan_tbl_idx = 0; scan_tbl_idx < IL_NUM_SCAN_RATES;
1614 scan_tbl_idx++) {
1615 s32 actual_idx =
1616 (scan_tbl_idx ==
1617 0) ? RATE_1M_IDX_TBL : RATE_6M_IDX_TBL;
1618 il3945_hw_reg_set_scan_power(il, scan_tbl_idx,
1619 actual_idx, clip_pwrs,
1620 ch_info, a_band);
1621 }
1622 }
1623
1624 /* send Txpower command for current channel to ucode */
1625 return il->cfg->ops->lib->send_tx_power(il);
1626}
1627
1628int
1629il3945_hw_reg_set_txpower(struct il_priv *il, s8 power)
1630{
1631 struct il_channel_info *ch_info;
1632 s8 max_power;
1633 u8 a_band;
1634 u8 i;
1635
1636 if (il->tx_power_user_lmt == power) {
1637 D_POWER("Requested Tx power same as current " "limit: %ddBm.\n",
1638 power);
1639 return 0;
1640 }
1641
1642 D_POWER("Setting upper limit clamp to %ddBm.\n", power);
1643 il->tx_power_user_lmt = power;
1644
1645 /* set up new Tx powers for each and every channel, 2.4 and 5.x */
1646
1647 for (i = 0; i < il->channel_count; i++) {
1648 ch_info = &il->channel_info[i];
1649 a_band = il_is_channel_a_band(ch_info);
1650
1651 /* find minimum power of all user and regulatory constraints
1652 * (does not consider h/w clipping limitations) */
1653 max_power = il3945_hw_reg_get_ch_txpower_limit(ch_info);
1654 max_power = min(power, max_power);
1655 if (max_power != ch_info->curr_txpow) {
1656 ch_info->curr_txpow = max_power;
1657
1658 /* this considers the h/w clipping limitations */
1659 il3945_hw_reg_set_new_power(il, ch_info);
1660 }
1661 }
1662
1663 /* update txpower settings for all channels,
1664 * send to NIC if associated. */
1665 il3945_is_temp_calib_needed(il);
1666 il3945_hw_reg_comp_txpower_temp(il);
1667
1668 return 0;
1669}
1670
1671static int
1672il3945_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
1673{
1674 int rc = 0;
1675 struct il_rx_pkt *pkt;
1676 struct il3945_rxon_assoc_cmd rxon_assoc;
1677 struct il_host_cmd cmd = {
1678 .id = C_RXON_ASSOC,
1679 .len = sizeof(rxon_assoc),
1680 .flags = CMD_WANT_SKB,
1681 .data = &rxon_assoc,
1682 };
1683 const struct il_rxon_cmd *rxon1 = &ctx->staging;
1684 const struct il_rxon_cmd *rxon2 = &ctx->active;
1685
1686 if (rxon1->flags == rxon2->flags &&
1687 rxon1->filter_flags == rxon2->filter_flags &&
1688 rxon1->cck_basic_rates == rxon2->cck_basic_rates &&
1689 rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates) {
1690 D_INFO("Using current RXON_ASSOC. Not resending.\n");
1691 return 0;
1692 }
1693
1694 rxon_assoc.flags = ctx->staging.flags;
1695 rxon_assoc.filter_flags = ctx->staging.filter_flags;
1696 rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
1697 rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
1698 rxon_assoc.reserved = 0;
1699
1700 rc = il_send_cmd_sync(il, &cmd);
1701 if (rc)
1702 return rc;
1703
1704 pkt = (struct il_rx_pkt *)cmd.reply_page;
1705 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
1706 IL_ERR("Bad return from C_RXON_ASSOC command\n");
1707 rc = -EIO;
1708 }
1709
1710 il_free_pages(il, cmd.reply_page);
1711
1712 return rc;
1713}
1714
/**
 * il3945_commit_rxon - commit staging_rxon to hardware
 *
 * The RXON command in staging_rxon is committed to the hardware and
 * the active_rxon structure is updated with the new data. This
 * function correctly transitions out of the RXON_ASSOC_MSK state if
 * a HW tune is required based on the RXON structure changes.
 *
 * Returns 0 on success, -EINVAL on invalid config or pending shutdown,
 * -1 if the firmware is not alive, or the error from a failed command.
 */
int
il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
{
	/* cast away the const for active_rxon in this function */
	struct il3945_rxon_cmd *active_rxon = (void *)&ctx->active;
	struct il3945_rxon_cmd *staging_rxon = (void *)&ctx->staging;
	int rc = 0;
	/* true when the staged config asks for the "associated" filter */
	bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);

	if (test_bit(S_EXIT_PENDING, &il->status))
		return -EINVAL;

	if (!il_is_alive(il))
		return -1;

	/* always get timestamp with Rx frame */
	staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;

	/* select antenna */
	staging_rxon->flags &= ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
	staging_rxon->flags |= il3945_get_antenna_flags(il);

	rc = il_check_rxon_cmd(il, ctx);
	if (rc) {
		IL_ERR("Invalid RXON configuration. Not committing.\n");
		return -EINVAL;
	}

	/* If we don't need to send a full RXON, we can use
	 * il3945_rxon_assoc_cmd which is used to reconfigure filter
	 * and other flags for the current radio configuration. */
	if (!il_full_rxon_required(il, &il->ctx)) {
		rc = il_send_rxon_assoc(il, &il->ctx);
		if (rc) {
			IL_ERR("Error setting RXON_ASSOC "
			       "configuration (%d).\n", rc);
			return rc;
		}

		memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
		/*
		 * We do not commit tx power settings while channel changing,
		 * do it now if tx power changed.
		 */
		il_set_tx_power(il, il->tx_power_next, false);
		return 0;
	}

	/* If we are currently associated and the new config requires
	 * an RXON_ASSOC and the new config wants the associated mask enabled,
	 * we must clear the associated from the active configuration
	 * before we apply the new config */
	if (il_is_associated(il) && new_assoc) {
		D_INFO("Toggling associated bit on current RXON\n");
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;

		/*
		 * reserved4 and 5 could have been filled by the iwlcore code.
		 * Let's clear them before pushing to the 3945.
		 */
		active_rxon->reserved4 = 0;
		active_rxon->reserved5 = 0;
		rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd),
				     &il->ctx.active);

		/* If the mask clearing failed then we set
		 * active_rxon back to what it was previously */
		if (rc) {
			active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
			IL_ERR("Error clearing ASSOC_MSK on current "
			       "configuration (%d).\n", rc);
			return rc;
		}
		/* re-sync the station tables after the assoc-bit change */
		il_clear_ucode_stations(il, &il->ctx);
		il_restore_stations(il, &il->ctx);
	}

	D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n"
	       "* channel = %d\n" "* bssid = %pM\n", (new_assoc ? "" : "out"),
	       le16_to_cpu(staging_rxon->channel), staging_rxon->bssid_addr);

	/*
	 * reserved4 and 5 could have been filled by the iwlcore code.
	 * Let's clear them before pushing to the 3945.
	 */
	staging_rxon->reserved4 = 0;
	staging_rxon->reserved5 = 0;

	il_set_rxon_hwcrypto(il, ctx, !il3945_mod_params.sw_crypto);

	/* Apply the new configuration */
	rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd),
			     staging_rxon);
	if (rc) {
		IL_ERR("Error setting new configuration (%d).\n", rc);
		return rc;
	}

	memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));

	/* a full RXON invalidates station entries; re-sync them unless we
	 * are in the middle of (re)associating, which handled this above */
	if (!new_assoc) {
		il_clear_ucode_stations(il, &il->ctx);
		il_restore_stations(il, &il->ctx);
	}

	/* If we issue a new RXON command which required a tune then we must
	 * send a new TXPOWER command or we won't be able to Tx any frames */
	rc = il_set_tx_power(il, il->tx_power_next, true);
	if (rc) {
		IL_ERR("Error setting Tx power (%d).\n", rc);
		return rc;
	}

	/* Init the hardware's rate fallback order based on the band */
	rc = il3945_init_hw_rate_table(il);
	if (rc) {
		IL_ERR("Error setting HW rate table: %02X\n", rc);
		return -EIO;
	}

	return 0;
}
1845
1846/**
1847 * il3945_reg_txpower_periodic - called when time to check our temperature.
1848 *
1849 * -- reset periodic timer
1850 * -- see if temp has changed enough to warrant re-calibration ... if so:
1851 * -- correct coeffs for temp (can reset temp timer)
1852 * -- save this temp as "last",
1853 * -- send new set of gain settings to NIC
1854 * NOTE: This should continue working, even when we're not associated,
1855 * so we can keep our internal table of scan powers current. */
1856void
1857il3945_reg_txpower_periodic(struct il_priv *il)
1858{
1859 /* This will kick in the "brute force"
1860 * il3945_hw_reg_comp_txpower_temp() below */
1861 if (!il3945_is_temp_calib_needed(il))
1862 goto reschedule;
1863
1864 /* Set up a new set of temp-adjusted TxPowers, send to NIC.
1865 * This is based *only* on current temperature,
1866 * ignoring any previous power measurements */
1867 il3945_hw_reg_comp_txpower_temp(il);
1868
1869reschedule:
1870 queue_delayed_work(il->workqueue, &il->_3945.thermal_periodic,
1871 REG_RECALIB_PERIOD * HZ);
1872}
1873
1874static void
1875il3945_bg_reg_txpower_periodic(struct work_struct *work)
1876{
1877 struct il_priv *il = container_of(work, struct il_priv,
1878 _3945.thermal_periodic.work);
1879
1880 if (test_bit(S_EXIT_PENDING, &il->status))
1881 return;
1882
1883 mutex_lock(&il->mutex);
1884 il3945_reg_txpower_periodic(il);
1885 mutex_unlock(&il->mutex);
1886}
1887
1888/**
1889 * il3945_hw_reg_get_ch_grp_idx - find the channel-group idx (0-4) for channel.
1890 *
1891 * This function is used when initializing channel-info structs.
1892 *
1893 * NOTE: These channel groups do *NOT* match the bands above!
1894 * These channel groups are based on factory-tested channels;
1895 * on A-band, EEPROM's "group frequency" entries represent the top
1896 * channel in each group 1-4. Group 5 All B/G channels are in group 0.
1897 */
1898static u16
1899il3945_hw_reg_get_ch_grp_idx(struct il_priv *il,
1900 const struct il_channel_info *ch_info)
1901{
1902 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
1903 struct il3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0];
1904 u8 group;
1905 u16 group_idx = 0; /* based on factory calib frequencies */
1906 u8 grp_channel;
1907
1908 /* Find the group idx for the channel ... don't use idx 1(?) */
1909 if (il_is_channel_a_band(ch_info)) {
1910 for (group = 1; group < 5; group++) {
1911 grp_channel = ch_grp[group].group_channel;
1912 if (ch_info->channel <= grp_channel) {
1913 group_idx = group;
1914 break;
1915 }
1916 }
1917 /* group 4 has a few channels *above* its factory cal freq */
1918 if (group == 5)
1919 group_idx = 4;
1920 } else
1921 group_idx = 0; /* 2.4 GHz, group 0 */
1922
1923 D_POWER("Chnl %d mapped to grp %d\n", ch_info->channel, group_idx);
1924 return group_idx;
1925}
1926
/**
 * il3945_hw_reg_get_matched_power_idx - Interpolate to get nominal idx
 *
 * Interpolate to get nominal (i.e. at factory calibration temperature) idx
 * into radio/DSP gain settings table for requested power.
 *
 * Returns 0 and stores the gain-table index in *new_idx, or -EINVAL when
 * two bracketing samples report the same power (degenerate EEPROM data).
 */
static int
il3945_hw_reg_get_matched_power_idx(struct il_priv *il, s8 requested_power,
				    s32 setting_idx, s32 *new_idx)
{
	const struct il3945_eeprom_txpower_group *chnl_grp = NULL;
	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
	s32 idx0, idx1;
	/* doubled to match the units of samples[].power -- presumably
	 * half-dBm steps in the factory data; TODO confirm */
	s32 power = 2 * requested_power;
	s32 i;
	const struct il3945_eeprom_txpower_sample *samples;
	s32 gains0, gains1;
	s32 res;
	s32 denominator;

	chnl_grp = &eeprom->groups[setting_idx];
	samples = chnl_grp->samples;
	/* exact factory-measured match: no interpolation needed */
	for (i = 0; i < 5; i++) {
		if (power == samples[i].power) {
			*new_idx = samples[i].gain_idx;
			return 0;
		}
	}

	/* Pick the pair of samples to interpolate between. The bracket
	 * selection assumes samples[] is ordered by decreasing power --
	 * NOTE(review): confirm that ordering against the EEPROM layout. */
	if (power > samples[1].power) {
		idx0 = 0;
		idx1 = 1;
	} else if (power > samples[2].power) {
		idx0 = 1;
		idx1 = 2;
	} else if (power > samples[3].power) {
		idx0 = 2;
		idx1 = 3;
	} else {
		idx0 = 3;
		idx1 = 4;
	}

	/* Linear interpolation in fixed point: gain indexes are scaled up
	 * by 2^19, and the (1 << 18) term rounds to nearest when shifting
	 * back down at the end. */
	denominator = (s32) samples[idx1].power - (s32) samples[idx0].power;
	if (denominator == 0)
		return -EINVAL;
	gains0 = (s32) samples[idx0].gain_idx * (1 << 19);
	gains1 = (s32) samples[idx1].gain_idx * (1 << 19);
	res =
	    gains0 + (gains1 - gains0) * ((s32) power -
					  (s32) samples[idx0].power) /
	    denominator + (1 << 18);
	*new_idx = res >> 19;
	return 0;
}
1982
1983static void
1984il3945_hw_reg_init_channel_groups(struct il_priv *il)
1985{
1986 u32 i;
1987 s32 rate_idx;
1988 struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
1989 const struct il3945_eeprom_txpower_group *group;
1990
1991 D_POWER("Initializing factory calib info from EEPROM\n");
1992
1993 for (i = 0; i < IL_NUM_TX_CALIB_GROUPS; i++) {
1994 s8 *clip_pwrs; /* table of power levels for each rate */
1995 s8 satur_pwr; /* saturation power for each chnl group */
1996 group = &eeprom->groups[i];
1997
1998 /* sanity check on factory saturation power value */
1999 if (group->saturation_power < 40) {
2000 IL_WARN("Error: saturation power is %d, "
2001 "less than minimum expected 40\n",
2002 group->saturation_power);
2003 return;
2004 }
2005
2006 /*
2007 * Derive requested power levels for each rate, based on
2008 * hardware capabilities (saturation power for band).
2009 * Basic value is 3dB down from saturation, with further
2010 * power reductions for highest 3 data rates. These
2011 * backoffs provide headroom for high rate modulation
2012 * power peaks, without too much distortion (clipping).
2013 */
2014 /* we'll fill in this array with h/w max power levels */
2015 clip_pwrs = (s8 *) il->_3945.clip_groups[i].clip_powers;
2016
2017 /* divide factory saturation power by 2 to find -3dB level */
2018 satur_pwr = (s8) (group->saturation_power >> 1);
2019
2020 /* fill in channel group's nominal powers for each rate */
2021 for (rate_idx = 0; rate_idx < RATE_COUNT_3945;
2022 rate_idx++, clip_pwrs++) {
2023 switch (rate_idx) {
2024 case RATE_36M_IDX_TBL:
2025 if (i == 0) /* B/G */
2026 *clip_pwrs = satur_pwr;
2027 else /* A */
2028 *clip_pwrs = satur_pwr - 5;
2029 break;
2030 case RATE_48M_IDX_TBL:
2031 if (i == 0)
2032 *clip_pwrs = satur_pwr - 7;
2033 else
2034 *clip_pwrs = satur_pwr - 10;
2035 break;
2036 case RATE_54M_IDX_TBL:
2037 if (i == 0)
2038 *clip_pwrs = satur_pwr - 9;
2039 else
2040 *clip_pwrs = satur_pwr - 12;
2041 break;
2042 default:
2043 *clip_pwrs = satur_pwr;
2044 break;
2045 }
2046 }
2047 }
2048}
2049
/**
 * il3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
 *
 * Second pass (during init) to set up il->channel_info
 *
 * Set up Tx-power settings in our channel info database for each VALID
 * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values
 * and current temperature.
 *
 * Since this is based on current temperature (at init time), these values may
 * not be valid for very long, but it gives us a starting/default point,
 * and allows us to active (i.e. using Tx) scan.
 *
 * This does *not* write values to NIC, just sets up our internal table.
 *
 * Returns 0 on success, or the error from a failed power-index lookup.
 */
int
il3945_txpower_set_from_eeprom(struct il_priv *il)
{
	struct il_channel_info *ch_info = NULL;
	struct il3945_channel_power_info *pwr_info;
	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
	int delta_idx;		/* temperature-drift adjustment, in table steps */
	u8 rate_idx;
	u8 scan_tbl_idx;
	const s8 *clip_pwrs;	/* array of power levels for each rate */
	u8 gain, dsp_atten;
	s8 power;
	u8 pwr_idx, base_pwr_idx, a_band;
	u8 i;
	int temperature;

	/* save temperature reference,
	 * so we can determine next time to calibrate */
	temperature = il3945_hw_reg_txpower_get_temperature(il);
	il->last_temperature = temperature;

	il3945_hw_reg_init_channel_groups(il);

	/* initialize Tx power info for each and every channel, 2.4 and 5.x */
	for (i = 0, ch_info = il->channel_info; i < il->channel_count;
	     i++, ch_info++) {
		a_band = il_is_channel_a_band(ch_info);
		if (!il_is_channel_valid(ch_info))
			continue;

		/* find this channel's channel group (*not* "band") idx */
		ch_info->group_idx = il3945_hw_reg_get_ch_grp_idx(il, ch_info);

		/* Get this chnlgrp's rate->max/clip-powers table */
		clip_pwrs =
		    il->_3945.clip_groups[ch_info->group_idx].clip_powers;

		/* calculate power idx *adjustment* value according to
		 * diff between current temperature and factory temperature */
		delta_idx =
		    il3945_hw_reg_adjust_power_by_temp(temperature,
						       eeprom->groups[ch_info->
								      group_idx].
						       temperature);

		D_POWER("Delta idx for channel %d: %d [%d]\n", ch_info->channel,
			delta_idx, temperature + IL_TEMP_CONVERT);

		/* set tx power value for all OFDM rates */
		for (rate_idx = 0; rate_idx < IL_OFDM_RATES; rate_idx++) {
			s32 uninitialized_var(power_idx);
			int rc;

			/* use channel group's clip-power table,
			 * but don't exceed channel's max power */
			s8 pwr = min(ch_info->max_power_avg,
				     clip_pwrs[rate_idx]);

			pwr_info = &ch_info->power_info[rate_idx];

			/* get base (i.e. at factory-measured temperature)
			 * power table idx for this rate's power */
			rc = il3945_hw_reg_get_matched_power_idx(il, pwr,
								 ch_info->
								 group_idx,
								 &power_idx);
			if (rc) {
				IL_ERR("Invalid power idx\n");
				return rc;
			}
			pwr_info->base_power_idx = (u8) power_idx;

			/* temperature compensate */
			power_idx += delta_idx;

			/* stay within range of gain table */
			power_idx = il3945_hw_reg_fix_power_idx(power_idx);

			/* fill 1 OFDM rate's il3945_channel_power_info struct */
			pwr_info->requested_power = pwr;
			pwr_info->power_table_idx = (u8) power_idx;
			pwr_info->tpc.tx_gain =
			    power_gain_table[a_band][power_idx].tx_gain;
			pwr_info->tpc.dsp_atten =
			    power_gain_table[a_band][power_idx].dsp_atten;
		}

		/* set tx power for CCK rates, based on OFDM 12 Mbit settings */
		pwr_info = &ch_info->power_info[RATE_12M_IDX_TBL];
		power = pwr_info->requested_power + IL_CCK_FROM_OFDM_POWER_DIFF;
		pwr_idx = pwr_info->power_table_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
		base_pwr_idx =
		    pwr_info->base_power_idx + IL_CCK_FROM_OFDM_IDX_DIFF;

		/* stay within table range */
		pwr_idx = il3945_hw_reg_fix_power_idx(pwr_idx);
		gain = power_gain_table[a_band][pwr_idx].tx_gain;
		dsp_atten = power_gain_table[a_band][pwr_idx].dsp_atten;

		/* fill each CCK rate's il3945_channel_power_info structure
		 * NOTE: All CCK-rate Txpwrs are the same for a given chnl!
		 * NOTE: CCK rates start at end of OFDM rates! */
		for (rate_idx = 0; rate_idx < IL_CCK_RATES; rate_idx++) {
			pwr_info =
			    &ch_info->power_info[rate_idx + IL_OFDM_RATES];
			pwr_info->requested_power = power;
			pwr_info->power_table_idx = pwr_idx;
			pwr_info->base_power_idx = base_pwr_idx;
			pwr_info->tpc.tx_gain = gain;
			pwr_info->tpc.dsp_atten = dsp_atten;
		}

		/* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
		for (scan_tbl_idx = 0; scan_tbl_idx < IL_NUM_SCAN_RATES;
		     scan_tbl_idx++) {
			s32 actual_idx =
			    (scan_tbl_idx ==
			     0) ? RATE_1M_IDX_TBL : RATE_6M_IDX_TBL;
			il3945_hw_reg_set_scan_power(il, scan_tbl_idx,
						     actual_idx, clip_pwrs,
						     ch_info, a_band);
		}
	}

	return 0;
}
2191
/* Disable Rx DMA channel 0 and wait (up to 1000 usec) for it to go idle. */
int
il3945_hw_rxq_stop(struct il_priv *il)
{
	int rc;

	/* writing 0 to the channel config register disables the DMA */
	il_wr(il, FH39_RCSR_CONFIG(0), 0);
	rc = il_poll_bit(il, FH39_RSSR_STATUS,
			 FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
	if (rc < 0)
		IL_ERR("Can't stop Rx DMA.\n");

	/* NOTE(review): a poll timeout is only logged, never propagated --
	 * this function always returns 0. Confirm no caller needs the
	 * failure before relying on the return value. */
	return 0;
}
2205
/* Program the 3945 flow-handler registers for one Tx queue and point the
 * device at the queue's descriptor ring in host DRAM. Always returns 0. */
int
il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
{
	int txq_id = txq->q.id;

	struct il3945_shared *shared_data = il->_3945.shared_virt;

	/* publish this queue's DMA base address in the shared area the
	 * device reads */
	shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32) txq->q.dma_addr);

	/* clear circular-buffer counter control/base for this queue */
	il_wr(il, FH39_CBCC_CTRL(txq_id), 0);
	il_wr(il, FH39_CBCC_BASE(txq_id), 0);

	/* enable the DMA channel with the 3945 TFD-based host interface */
	il_wr(il, FH39_TCSR_CONFIG(txq_id),
	      FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
	      FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
	      FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
	      FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
	      FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);

	/* fake read to flush all prev. writes */
	_il_rd(il, FH39_TSSR_CBB_BASE);

	return 0;
}
2230
2231/*
2232 * HCMD utils
2233 */
2234static u16
2235il3945_get_hcmd_size(u8 cmd_id, u16 len)
2236{
2237 switch (cmd_id) {
2238 case C_RXON:
2239 return sizeof(struct il3945_rxon_cmd);
2240 case C_POWER_TBL:
2241 return sizeof(struct il3945_powertable_cmd);
2242 default:
2243 return len;
2244 }
2245}
2246
2247static u16
2248il3945_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 * data)
2249{
2250 struct il3945_addsta_cmd *addsta = (struct il3945_addsta_cmd *)data;
2251 addsta->mode = cmd->mode;
2252 memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
2253 memcpy(&addsta->key, &cmd->key, sizeof(struct il4965_keyinfo));
2254 addsta->station_flags = cmd->station_flags;
2255 addsta->station_flags_msk = cmd->station_flags_msk;
2256 addsta->tid_disable_tx = cpu_to_le16(0);
2257 addsta->rate_n_flags = cmd->rate_n_flags;
2258 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
2259 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
2260 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
2261
2262 return (u16) sizeof(struct il3945_addsta_cmd);
2263}
2264
2265static int
2266il3945_add_bssid_station(struct il_priv *il, const u8 * addr, u8 * sta_id_r)
2267{
2268 struct il_rxon_context *ctx = &il->ctx;
2269 int ret;
2270 u8 sta_id;
2271 unsigned long flags;
2272
2273 if (sta_id_r)
2274 *sta_id_r = IL_INVALID_STATION;
2275
2276 ret = il_add_station_common(il, ctx, addr, 0, NULL, &sta_id);
2277 if (ret) {
2278 IL_ERR("Unable to add station %pM\n", addr);
2279 return ret;
2280 }
2281
2282 if (sta_id_r)
2283 *sta_id_r = sta_id;
2284
2285 spin_lock_irqsave(&il->sta_lock, flags);
2286 il->stations[sta_id].used |= IL_STA_LOCAL;
2287 spin_unlock_irqrestore(&il->sta_lock, flags);
2288
2289 return 0;
2290}
2291
2292static int
2293il3945_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
2294 bool add)
2295{
2296 struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
2297 int ret;
2298
2299 if (add) {
2300 ret =
2301 il3945_add_bssid_station(il, vif->bss_conf.bssid,
2302 &vif_priv->ibss_bssid_sta_id);
2303 if (ret)
2304 return ret;
2305
2306 il3945_sync_sta(il, vif_priv->ibss_bssid_sta_id,
2307 (il->band ==
2308 IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP :
2309 RATE_1M_PLCP);
2310 il3945_rate_scale_init(il->hw, vif_priv->ibss_bssid_sta_id);
2311
2312 return 0;
2313 }
2314
2315 return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
2316 vif->bss_conf.bssid);
2317}
2318
/**
 * il3945_init_hw_rate_table - Initialize the hardware rate fallback table
 *
 * Builds one entry per supported rate (retry count plus the next-lower
 * rate to fall back to), patches the fallback chain for the current band,
 * then sends the table twice: table_id 0 for control frames and
 * table_id 1 for data frames. Returns 0 or a command-send error.
 */
int
il3945_init_hw_rate_table(struct il_priv *il)
{
	int rc, i, idx, prev_idx;
	struct il3945_rate_scaling_cmd rate_cmd = {
		.reserved = {0, 0, 0},
	};
	struct il3945_rate_scaling_info *table = rate_cmd.table;

	/* default chain: each rate falls back to the previous IEEE rate */
	for (i = 0; i < ARRAY_SIZE(il3945_rates); i++) {
		idx = il3945_rates[i].table_rs_idx;

		table[idx].rate_n_flags =
		    il3945_hw_set_rate_n_flags(il3945_rates[i].plcp, 0);
		table[idx].try_cnt = il->retry_rate;
		prev_idx = il3945_get_prev_ieee_rate(i);
		table[idx].next_rate_idx = il3945_rates[prev_idx].table_rs_idx;
	}

	switch (il->band) {
	case IEEE80211_BAND_5GHZ:
		D_RATE("Select A mode rate scale\n");
		/* If one of the following CCK rates is used,
		 * have it fall back to the 6M OFDM rate */
		for (i = RATE_1M_IDX_TBL; i <= RATE_11M_IDX_TBL; i++)
			table[i].next_rate_idx =
			    il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx;

		/* Don't fall back to CCK rates */
		table[RATE_12M_IDX_TBL].next_rate_idx = RATE_9M_IDX_TBL;

		/* Don't drop out of OFDM rates */
		table[RATE_6M_IDX_TBL].next_rate_idx =
		    il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx;
		break;

	case IEEE80211_BAND_2GHZ:
		D_RATE("Select B/G mode rate scale\n");
		/* If an OFDM rate is used, have it fall back to the
		 * 1M CCK rates */

		/* peer supports no OFDM rates: route OFDM fallback into
		 * the CCK rates instead */
		if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) &&
		    il_is_associated(il)) {

			idx = IL_FIRST_CCK_RATE;
			for (i = RATE_6M_IDX_TBL; i <= RATE_54M_IDX_TBL; i++)
				table[i].next_rate_idx =
				    il3945_rates[idx].table_rs_idx;

			idx = RATE_11M_IDX_TBL;
			/* CCK shouldn't fall back to OFDM... */
			table[idx].next_rate_idx = RATE_5M_IDX_TBL;
		}
		break;

	default:
		WARN_ON(1);
		break;
	}

	/* Update the rate scaling for control frame Tx */
	rate_cmd.table_id = 0;
	rc = il_send_cmd_pdu(il, C_RATE_SCALE, sizeof(rate_cmd), &rate_cmd);
	if (rc)
		return rc;

	/* Update the rate scaling for data frame Tx */
	rate_cmd.table_id = 1;
	return il_send_cmd_pdu(il, C_RATE_SCALE, sizeof(rate_cmd), &rate_cmd);
}
2392
2393/* Called when initializing driver */
2394int
2395il3945_hw_set_hw_params(struct il_priv *il)
2396{
2397 memset((void *)&il->hw_params, 0, sizeof(struct il_hw_params));
2398
2399 il->_3945.shared_virt =
2400 dma_alloc_coherent(&il->pci_dev->dev, sizeof(struct il3945_shared),
2401 &il->_3945.shared_phys, GFP_KERNEL);
2402 if (!il->_3945.shared_virt) {
2403 IL_ERR("failed to allocate pci memory\n");
2404 return -ENOMEM;
2405 }
2406
2407 /* Assign number of Usable TX queues */
2408 il->hw_params.max_txq_num = il->cfg->base_params->num_of_queues;
2409
2410 il->hw_params.tfd_size = sizeof(struct il3945_tfd);
2411 il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_3K);
2412 il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
2413 il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
2414 il->hw_params.max_stations = IL3945_STATION_COUNT;
2415 il->ctx.bcast_sta_id = IL3945_BROADCAST_ID;
2416
2417 il->sta_key_max_num = STA_KEY_MAX_NUM;
2418
2419 il->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
2420 il->hw_params.max_beacon_itrvl = IL39_MAX_UCODE_BEACON_INTERVAL;
2421 il->hw_params.beacon_time_tsf_bits = IL3945_EXT_BEACON_TIME_POS;
2422
2423 return 0;
2424}
2425
/* Build a beacon Tx command inside frame->u around the current beacon
 * contents; returns the total command size (header + frame) in bytes. */
unsigned int
il3945_hw_get_beacon_cmd(struct il_priv *il, struct il3945_frame *frame,
			 u8 rate)
{
	struct il3945_tx_beacon_cmd *tx_beacon_cmd;
	unsigned int frame_size;

	tx_beacon_cmd = (struct il3945_tx_beacon_cmd *)&frame->u;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

	/* beacons go out via the broadcast station entry and never expire */
	tx_beacon_cmd->tx.sta_id = il->ctx.bcast_sta_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	/* copy the beacon frame into the space after the command header */
	frame_size =
	    il3945_fill_beacon_frame(il, tx_beacon_cmd->frame,
				     sizeof(frame->u) - sizeof(*tx_beacon_cmd));

	BUG_ON(frame_size > MAX_MPDU_SIZE);
	tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);

	tx_beacon_cmd->tx.rate = rate;
	tx_beacon_cmd->tx.tx_flags =
	    (TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK);

	/* supp_rates[0] == OFDM start at IL_FIRST_OFDM_RATE */
	tx_beacon_cmd->tx.supp_rates[0] =
	    (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;

	tx_beacon_cmd->tx.supp_rates[1] = (IL_CCK_BASIC_RATES_MASK & 0xF);

	return sizeof(struct il3945_tx_beacon_cmd) + frame_size;
}
2458
2459void
2460il3945_hw_handler_setup(struct il_priv *il)
2461{
2462 il->handlers[C_TX] = il3945_hdl_tx;
2463 il->handlers[N_3945_RX] = il3945_hdl_rx;
2464}
2465
/* Register the periodic thermal txpower work item; it is (re)armed by
 * il3945_reg_txpower_periodic() via queue_delayed_work(). */
void
il3945_hw_setup_deferred_work(struct il_priv *il)
{
	INIT_DELAYED_WORK(&il->_3945.thermal_periodic,
			  il3945_bg_reg_txpower_periodic);
}
2472
/* Cancel any pending thermal txpower work. cancel_delayed_work() does not
 * wait for an already-running instance to finish. */
void
il3945_hw_cancel_deferred_work(struct il_priv *il)
{
	cancel_delayed_work(&il->_3945.thermal_periodic);
}
2478
/* check contents of special bootstrap uCode SRAM */
/* Compare every word of BSM SRAM against the bootstrap image in host
 * memory; returns 0 on match, -EIO on the first mismatch. */
static int
il3945_verify_bsm(struct il_priv *il)
{
	__le32 *image = il->ucode_boot.v_addr;
	u32 len = il->ucode_boot.len;
	u32 reg;
	u32 val;

	D_INFO("Begin verify bsm\n");

	/* verify BSM SRAM contents */
	/* NOTE(review): the result of this first read is immediately
	 * overwritten inside the loop and never used -- looks like a
	 * leftover. Confirm the register read has no required side effect
	 * before removing it. */
	val = il_rd_prph(il, BSM_WR_DWCOUNT_REG);
	for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len;
	     reg += sizeof(u32), image++) {
		val = il_rd_prph(il, reg);
		if (val != le32_to_cpu(*image)) {
			IL_ERR("BSM uCode verification failed at "
			       "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
			       BSM_SRAM_LOWER_BOUND, reg - BSM_SRAM_LOWER_BOUND,
			       len, val, le32_to_cpu(*image));
			return -EIO;
		}
	}

	D_INFO("BSM bootstrap uCode image OK\n");

	return 0;
}
2508
2509/******************************************************************************
2510 *
2511 * EEPROM related functions
2512 *
2513 ******************************************************************************/
2514
/*
 * Clear the OWNER_MSK, to establish driver (instead of uCode running on
 * embedded controller) as EEPROM reader; each read is a series of pulses
 * to/from the EEPROM chip, not a single event, so even reads could conflict
 * if they weren't arbitrated by some ownership mechanism. Here, the driver
 * simply claims ownership, which should be safe when this function is called
 * (i.e. before loading uCode!).
 */
static int
il3945_eeprom_acquire_semaphore(struct il_priv *il)
{
	_il_clear_bit(il, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
	return 0;	/* claiming ownership cannot fail on 3945 */
}
2529
/* The 3945 needs no explicit EEPROM release: ownership claimed in
 * il3945_eeprom_acquire_semaphore() is simply kept until uCode load.
 * This stub only exists to satisfy the eeprom_ops interface. */
static void
il3945_eeprom_release_semaphore(struct il_priv *il)
{
	/* intentionally empty -- the bare "return;" previously here was
	 * redundant in a void function */
}
2535
/**
 * il3945_load_bsm - Load bootstrap instructions
 *
 * BSM operation:
 *
 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
 * in special SRAM that does not power down during RFKILL. When powering back
 * up after power-saving sleeps (or during initial uCode load), the BSM loads
 * the bootstrap program into the on-board processor, and starts it.
 *
 * The bootstrap program loads (via DMA) instructions and data for a new
 * program from host DRAM locations indicated by the host driver in the
 * BSM_DRAM_* registers. Once the new program is loaded, it starts
 * automatically.
 *
 * When initializing the NIC, the host driver points the BSM to the
 * "initialize" uCode image. This uCode sets up some internal data, then
 * notifies host via "initialize alive" that it is complete.
 *
 * The host then replaces the BSM_DRAM_* pointer values to point to the
 * normal runtime uCode instructions and a backup uCode data cache buffer
 * (filled initially with starting data values for the on-board processor),
 * then triggers the "initialize" uCode to load and launch the runtime uCode,
 * which begins normal operation.
 *
 * When doing a power-save shutdown, runtime uCode saves data SRAM into
 * the backup data cache in DRAM before SRAM is powered down.
 *
 * When powering back up, the BSM loads the bootstrap program. This reloads
 * the runtime uCode instructions and the backup data cache into SRAM,
 * and re-launches the runtime uCode from where it left off.
 *
 * Returns 0 on success, -EINVAL if the bootstrap image is too large,
 * or -EIO if the BSM write does not complete or verification fails.
 */
static int
il3945_load_bsm(struct il_priv *il)
{
	__le32 *image = il->ucode_boot.v_addr;
	u32 len = il->ucode_boot.len;
	dma_addr_t pinst;
	dma_addr_t pdata;
	u32 inst_len;
	u32 data_len;
	int rc;
	int i;
	u32 done;
	u32 reg_offset;

	D_INFO("Begin load bsm\n");

	/* make sure bootstrap program is no larger than BSM's SRAM size */
	if (len > IL39_MAX_BSM_SIZE)
		return -EINVAL;

	/* Tell bootstrap uCode where to find the "Initialize" uCode
	 * in host DRAM ... host DRAM physical address bits 31:0 for 3945.
	 * NOTE: il3945_initialize_alive_start() will replace these values,
	 * after the "initialize" uCode has run, to point to
	 * runtime/protocol instructions and backup data cache. */
	pinst = il->ucode_init.p_addr;
	pdata = il->ucode_init_data.p_addr;
	inst_len = il->ucode_init.len;
	data_len = il->ucode_init_data.len;

	il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
	il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
	il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
	il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);

	/* Fill BSM memory with bootstrap instructions */
	for (reg_offset = BSM_SRAM_LOWER_BOUND;
	     reg_offset < BSM_SRAM_LOWER_BOUND + len;
	     reg_offset += sizeof(u32), image++)
		_il_wr_prph(il, reg_offset, le32_to_cpu(*image));

	/* read the image back from SRAM to catch write/transfer errors */
	rc = il3945_verify_bsm(il);
	if (rc)
		return rc;

	/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
	il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0);
	il_wr_prph(il, BSM_WR_MEM_DST_REG, IL39_RTC_INST_LOWER_BOUND);
	il_wr_prph(il, BSM_WR_DWCOUNT_REG, len / sizeof(u32));

	/* Load bootstrap code into instruction SRAM now,
	 * to prepare to load "initialize" uCode */
	il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);

	/* Wait for load of bootstrap uCode to finish
	 * (START bit self-clears; poll up to 100 x 10 usec) */
	for (i = 0; i < 100; i++) {
		done = il_rd_prph(il, BSM_WR_CTRL_REG);
		if (!(done & BSM_WR_CTRL_REG_BIT_START))
			break;
		udelay(10);
	}
	if (i < 100)
		D_INFO("BSM write complete, poll %d iterations\n", i);
	else {
		IL_ERR("BSM write did not complete!\n");
		return -EIO;
	}

	/* Enable future boot loads whenever power management unit triggers it
	 * (e.g. when powering back up after power-save shutdown) */
	il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);

	return 0;
}
2642
/* Host-command hooks: 3945-specific RXON handling */
static struct il_hcmd_ops il3945_hcmd = {
	.rxon_assoc = il3945_send_rxon_assoc,
	.commit_rxon = il3945_commit_rxon,
};
2647
/* Library callbacks wiring the shared iwlegacy core to the 3945-specific
 * Tx-queue, firmware-load, EEPROM and power routines. */
static struct il_lib_ops il3945_lib = {
	.txq_attach_buf_to_tfd = il3945_hw_txq_attach_buf_to_tfd,
	.txq_free_tfd = il3945_hw_txq_free_tfd,
	.txq_init = il3945_hw_tx_queue_init,
	.load_ucode = il3945_load_bsm,
	.dump_nic_error_log = il3945_dump_nic_error_log,
	.apm_ops = {
		.init = il3945_apm_init,
		.config = il3945_nic_config,
	},
	.eeprom_ops = {
		.regulatory_bands = {
			EEPROM_REGULATORY_BAND_1_CHANNELS,
			EEPROM_REGULATORY_BAND_2_CHANNELS,
			EEPROM_REGULATORY_BAND_3_CHANNELS,
			EEPROM_REGULATORY_BAND_4_CHANNELS,
			EEPROM_REGULATORY_BAND_5_CHANNELS,
			/* 3945 has no HT40 channels */
			EEPROM_REGULATORY_BAND_NO_HT40,
			EEPROM_REGULATORY_BAND_NO_HT40,
		},
		.acquire_semaphore = il3945_eeprom_acquire_semaphore,
		.release_semaphore = il3945_eeprom_release_semaphore,
	},
	.send_tx_power = il3945_send_tx_power,
	.is_valid_rtc_data_addr = il3945_hw_valid_rtc_data_addr,

#ifdef CONFIG_IWLEGACY_DEBUGFS
	.debugfs_ops = {
		.rx_stats_read = il3945_ucode_rx_stats_read,
		.tx_stats_read = il3945_ucode_tx_stats_read,
		.general_stats_read = il3945_ucode_general_stats_read,
	},
#endif
};
2682
/* mac80211-driven association/AP/IBSS management callbacks */
static const struct il_legacy_ops il3945_legacy_ops = {
	.post_associate = il3945_post_associate,
	.config_ap = il3945_config_ap,
	.manage_ibss_station = il3945_manage_ibss_station,
};
2688
/* Helpers for sizing/serializing host commands and for scan handling */
static struct il_hcmd_utils_ops il3945_hcmd_utils = {
	.get_hcmd_size = il3945_get_hcmd_size,
	.build_addsta_hcmd = il3945_build_addsta_hcmd,
	.request_scan = il3945_request_scan,
	.post_scan = il3945_post_scan,
};
2695
/* Top-level ops bundle referenced from the device configs below */
static const struct il_ops il3945_ops = {
	.lib = &il3945_lib,
	.hcmd = &il3945_hcmd,
	.utils = &il3945_hcmd_utils,
	.led = &il3945_led_ops,
	.legacy = &il3945_legacy_ops,
	.ieee80211_ops = &il3945_hw_ops,
};
2704
/* Hardware parameters shared by both 3945 SKUs */
static struct il_base_params il3945_base_params = {
	.eeprom_size = IL3945_EEPROM_IMG_SIZE,
	.num_of_queues = IL39_NUM_QUEUES,
	.pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
	.set_l0s = false,
	.use_bsm = true,	/* 3945 loads firmware via the BSM */
	.led_compensation = 64,
	.wd_timeout = IL_DEF_WD_TIMEOUT,
};
2714
/* 2.4 GHz-only (BG) SKU */
static struct il_cfg il3945_bg_cfg = {
	.name = "3945BG",
	.fw_name_pre = IL3945_FW_PRE,
	.ucode_api_max = IL3945_UCODE_API_MAX,
	.ucode_api_min = IL3945_UCODE_API_MIN,
	.sku = IL_SKU_G,
	.eeprom_ver = EEPROM_3945_EEPROM_VERSION,
	.ops = &il3945_ops,
	.mod_params = &il3945_mod_params,
	.base_params = &il3945_base_params,
	.led_mode = IL_LED_BLINK,
};
2727
/* Dual-band (ABG) SKU: same as BG plus 5 GHz (IL_SKU_A) support */
static struct il_cfg il3945_abg_cfg = {
	.name = "3945ABG",
	.fw_name_pre = IL3945_FW_PRE,
	.ucode_api_max = IL3945_UCODE_API_MAX,
	.ucode_api_min = IL3945_UCODE_API_MIN,
	.sku = IL_SKU_A | IL_SKU_G,
	.eeprom_ver = EEPROM_3945_EEPROM_VERSION,
	.ops = &il3945_ops,
	.mod_params = &il3945_mod_params,
	.base_params = &il3945_base_params,
	.led_mode = IL_LED_BLINK,
};
2740
/* PCI match table: specific subsystem IDs are BG-only boards; the
 * PCI_ANY_ID wildcards (which must come last, since the PCI core takes
 * the first match) catch the remaining 0x4222/0x4227 boards as ABG. */
DEFINE_PCI_DEVICE_TABLE(il3945_hw_card_ids) = {
	{IL_PCI_DEVICE(0x4222, 0x1005, il3945_bg_cfg)},
	{IL_PCI_DEVICE(0x4222, 0x1034, il3945_bg_cfg)},
	{IL_PCI_DEVICE(0x4222, 0x1044, il3945_bg_cfg)},
	{IL_PCI_DEVICE(0x4227, 0x1014, il3945_bg_cfg)},
	{IL_PCI_DEVICE(0x4222, PCI_ANY_ID, il3945_abg_cfg)},
	{IL_PCI_DEVICE(0x4227, PCI_ANY_ID, il3945_abg_cfg)},
	{0}
};

MODULE_DEVICE_TABLE(pci, il3945_hw_card_ids);
diff --git a/drivers/net/wireless/iwlegacy/3945.h b/drivers/net/wireless/iwlegacy/3945.h
new file mode 100644
index 000000000000..2b2895c544d7
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945.h
@@ -0,0 +1,626 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __il_3945_h__
28#define __il_3945_h__
29
30#include <linux/pci.h> /* for struct pci_device_id */
31#include <linux/kernel.h>
32#include <net/ieee80211_radiotap.h>
33
34/* Hardware specific file defines the PCI IDs table for that hardware module */
35extern const struct pci_device_id il3945_hw_card_ids[];
36
37#include "common.h"
38
39/* Highest firmware API version supported */
40#define IL3945_UCODE_API_MAX 2
41
42/* Lowest firmware API version supported */
43#define IL3945_UCODE_API_MIN 1
44
45#define IL3945_FW_PRE "iwlwifi-3945-"
46#define _IL3945_MODULE_FIRMWARE(api) IL3945_FW_PRE #api ".ucode"
47#define IL3945_MODULE_FIRMWARE(api) _IL3945_MODULE_FIRMWARE(api)
48
49/* Default noise level to report when noise measurement is not available.
50 * This may be because we're:
51 * 1) Not associated (4965, no beacon stats being sent to driver)
52 * 2) Scanning (noise measurement does not apply to associated channel)
53 * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
54 * Use default noise value of -127 ... this is below the range of measurable
55 * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
56 * Also, -127 works better than 0 when averaging frames with/without
57 * noise info (e.g. averaging might be done in app); measured dBm values are
58 * always negative ... using a negative value as the default keeps all
59 * averages within an s8's (used in some apps) range of negative values. */
60#define IL_NOISE_MEAS_NOT_AVAILABLE (-127)
61
62/* Module parameters accessible from iwl-*.c */
63extern struct il_mod_params il3945_mod_params;
64
65struct il3945_rate_scale_data {
66 u64 data;
67 s32 success_counter;
68 s32 success_ratio;
69 s32 counter;
70 s32 average_tpt;
71 unsigned long stamp;
72};
73
74struct il3945_rs_sta {
75 spinlock_t lock;
76 struct il_priv *il;
77 s32 *expected_tpt;
78 unsigned long last_partial_flush;
79 unsigned long last_flush;
80 u32 flush_time;
81 u32 last_tx_packets;
82 u32 tx_packets;
83 u8 tgg;
84 u8 flush_pending;
85 u8 start_rate;
86 struct timer_list rate_scale_flush;
87 struct il3945_rate_scale_data win[RATE_COUNT_3945];
88#ifdef CONFIG_MAC80211_DEBUGFS
89 struct dentry *rs_sta_dbgfs_stats_table_file;
90#endif
91
92 /* used to be in sta_info */
93 int last_txrate_idx;
94};
95
96/*
97 * The common struct MUST be first because it is shared between
98 * 3945 and 4965!
99 */
100struct il3945_sta_priv {
101 struct il_station_priv_common common;
102 struct il3945_rs_sta rs_sta;
103};
104
105enum il3945_antenna {
106 IL_ANTENNA_DIVERSITY,
107 IL_ANTENNA_MAIN,
108 IL_ANTENNA_AUX
109};
110
111/*
112 * RTS threshold here is total size [2347] minus 4 FCS bytes
113 * Per spec:
114 * a value of 0 means RTS on all data/management packets
115 * a value > max MSDU size means no RTS
116 * else RTS for data/management frames where MPDU is larger
117 * than RTS value.
118 */
119#define DEFAULT_RTS_THRESHOLD 2347U
120#define MIN_RTS_THRESHOLD 0U
121#define MAX_RTS_THRESHOLD 2347U
122#define MAX_MSDU_SIZE 2304U
123#define MAX_MPDU_SIZE 2346U
124#define DEFAULT_BEACON_INTERVAL 100U
125#define DEFAULT_SHORT_RETRY_LIMIT 7U
126#define DEFAULT_LONG_RETRY_LIMIT 4U
127
128#define IL_TX_FIFO_AC0 0
129#define IL_TX_FIFO_AC1 1
130#define IL_TX_FIFO_AC2 2
131#define IL_TX_FIFO_AC3 3
132#define IL_TX_FIFO_HCCA_1 5
133#define IL_TX_FIFO_HCCA_2 6
134#define IL_TX_FIFO_NONE 7
135
136#define IEEE80211_DATA_LEN 2304
137#define IEEE80211_4ADDR_LEN 30
138#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
139#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
140
141struct il3945_frame {
142 union {
143 struct ieee80211_hdr frame;
144 struct il3945_tx_beacon_cmd beacon;
145 u8 raw[IEEE80211_FRAME_LEN];
146 u8 cmd[360];
147 } u;
148 struct list_head list;
149};
150
151#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
152#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
153#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
154
155#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
156#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
157#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
158
159#define IL_SUPPORTED_RATES_IE_LEN 8
160
161#define SCAN_INTERVAL 100
162
163#define MAX_TID_COUNT 9
164
165#define IL_INVALID_RATE 0xFF
166#define IL_INVALID_VALUE -1
167
168#define STA_PS_STATUS_WAKE 0
169#define STA_PS_STATUS_SLEEP 1
170
171struct il3945_ibss_seq {
172 u8 mac[ETH_ALEN];
173 u16 seq_num;
174 u16 frag_num;
175 unsigned long packet_time;
176 struct list_head list;
177};
178
179#define IL_RX_HDR(x) ((struct il3945_rx_frame_hdr *)(\
180 x->u.rx_frame.stats.payload + \
181 x->u.rx_frame.stats.phy_count))
182#define IL_RX_END(x) ((struct il3945_rx_frame_end *)(\
183 IL_RX_HDR(x)->payload + \
184 le16_to_cpu(IL_RX_HDR(x)->len)))
185#define IL_RX_STATS(x) (&x->u.rx_frame.stats)
186#define IL_RX_DATA(x) (IL_RX_HDR(x)->payload)
187
188/******************************************************************************
189 *
190 * Functions implemented in iwl3945-base.c which are forward declared here
191 * for use by iwl-*.c
192 *
193 *****************************************************************************/
194extern int il3945_calc_db_from_ratio(int sig_ratio);
195extern void il3945_rx_replenish(void *data);
196extern void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
197extern unsigned int il3945_fill_beacon_frame(struct il_priv *il,
198 struct ieee80211_hdr *hdr,
199 int left);
200extern int il3945_dump_nic_event_log(struct il_priv *il, bool full_log,
201 char **buf, bool display);
202extern void il3945_dump_nic_error_log(struct il_priv *il);
203
204/******************************************************************************
205 *
206 * Functions implemented in iwl-[34]*.c which are forward declared here
207 * for use by iwl3945-base.c
208 *
209 * NOTE: The implementation of these functions are hardware specific
210 * which is why they are in the hardware specific files (vs. iwl-base.c)
211 *
212 * Naming convention --
213 * il3945_ <-- Its part of iwlwifi (should be changed to il3945_)
214 * il3945_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
215 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
216 * il3945_bg_ <-- Called from work queue context
217 * il3945_mac_ <-- mac80211 callback
218 *
219 ****************************************************************************/
220extern void il3945_hw_handler_setup(struct il_priv *il);
221extern void il3945_hw_setup_deferred_work(struct il_priv *il);
222extern void il3945_hw_cancel_deferred_work(struct il_priv *il);
223extern int il3945_hw_rxq_stop(struct il_priv *il);
224extern int il3945_hw_set_hw_params(struct il_priv *il);
225extern int il3945_hw_nic_init(struct il_priv *il);
226extern int il3945_hw_nic_stop_master(struct il_priv *il);
227extern void il3945_hw_txq_ctx_free(struct il_priv *il);
228extern void il3945_hw_txq_ctx_stop(struct il_priv *il);
229extern int il3945_hw_nic_reset(struct il_priv *il);
230extern int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il,
231 struct il_tx_queue *txq,
232 dma_addr_t addr, u16 len, u8 reset,
233 u8 pad);
234extern void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
235extern int il3945_hw_get_temperature(struct il_priv *il);
236extern int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
237extern unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il,
238 struct il3945_frame *frame,
239 u8 rate);
240void il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
241 struct ieee80211_tx_info *info,
242 struct ieee80211_hdr *hdr, int sta_id,
243 int tx_id);
244extern int il3945_hw_reg_send_txpower(struct il_priv *il);
245extern int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power);
246extern void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
247void il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
248extern void il3945_disable_events(struct il_priv *il);
249extern int il4965_get_temperature(const struct il_priv *il);
250extern void il3945_post_associate(struct il_priv *il);
251extern void il3945_config_ap(struct il_priv *il);
252
253extern int il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx);
254
255/**
256 * il3945_hw_find_station - Find station id for a given BSSID
257 * @bssid: MAC address of station ID to find
258 *
259 * NOTE: This should not be hardware specific but the code has
260 * not yet been merged into a single common layer for managing the
261 * station tables.
262 */
263extern u8 il3945_hw_find_station(struct il_priv *il, const u8 * bssid);
264
265extern struct ieee80211_ops il3945_hw_ops;
266
267extern __le32 il3945_get_antenna_flags(const struct il_priv *il);
268extern int il3945_init_hw_rate_table(struct il_priv *il);
269extern void il3945_reg_txpower_periodic(struct il_priv *il);
270extern int il3945_txpower_set_from_eeprom(struct il_priv *il);
271
272extern int il3945_rs_next_rate(struct il_priv *il, int rate);
273
274/* scanning */
275int il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
276void il3945_post_scan(struct il_priv *il);
277
278/* rates */
279extern const struct il3945_rate_info il3945_rates[RATE_COUNT_3945];
280
281/* RSSI to dBm */
282#define IL39_RSSI_OFFSET 95
283
284/*
285 * EEPROM related constants, enums, and structures.
286 */
287#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7)
288
289/*
290 * Mapping of a Tx power level, at factory calibration temperature,
291 * to a radio/DSP gain table idx.
292 * One for each of 5 "sample" power levels in each band.
293 * v_det is measured at the factory, using the 3945's built-in power amplifier
294 * (PA) output voltage detector. This same detector is used during Tx of
295 * long packets in normal operation to provide feedback as to proper output
296 * level.
297 * Data copied from EEPROM.
298 * DO NOT ALTER THIS STRUCTURE!!!
299 */
300struct il3945_eeprom_txpower_sample {
301 u8 gain_idx; /* idx into power (gain) setup table ... */
302 s8 power; /* ... for this pwr level for this chnl group */
303 u16 v_det; /* PA output voltage */
304} __packed;
305
306/*
307 * Mappings of Tx power levels -> nominal radio/DSP gain table idxes.
308 * One for each channel group (a.k.a. "band") (1 for BG, 4 for A).
309 * Tx power setup code interpolates between the 5 "sample" power levels
310 * to determine the nominal setup for a requested power level.
311 * Data copied from EEPROM.
312 * DO NOT ALTER THIS STRUCTURE!!!
313 */
314struct il3945_eeprom_txpower_group {
315 struct il3945_eeprom_txpower_sample samples[5]; /* 5 power levels */
316 s32 a, b, c, d, e; /* coefficients for voltage->power
317 * formula (signed) */
318 s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on
319 * frequency (signed) */
320 s8 saturation_power; /* highest power possible by h/w in this
321 * band */
322 u8 group_channel; /* "representative" channel # in this band */
323 s16 temperature; /* h/w temperature at factory calib this band
324 * (signed) */
325} __packed;
326
327/*
328 * Temperature-based Tx-power compensation data, not band-specific.
329 * These coefficients are use to modify a/b/c/d/e coeffs based on
330 * difference between current temperature and factory calib temperature.
331 * Data copied from EEPROM.
332 */
333struct il3945_eeprom_temperature_corr {
334 u32 Ta;
335 u32 Tb;
336 u32 Tc;
337 u32 Td;
338 u32 Te;
339} __packed;
340
341/*
342 * EEPROM map
343 */
344struct il3945_eeprom {
345 u8 reserved0[16];
346 u16 device_id; /* abs.ofs: 16 */
347 u8 reserved1[2];
348 u16 pmc; /* abs.ofs: 20 */
349 u8 reserved2[20];
350 u8 mac_address[6]; /* abs.ofs: 42 */
351 u8 reserved3[58];
352 u16 board_revision; /* abs.ofs: 106 */
353 u8 reserved4[11];
354 u8 board_pba_number[9]; /* abs.ofs: 119 */
355 u8 reserved5[8];
356 u16 version; /* abs.ofs: 136 */
357 u8 sku_cap; /* abs.ofs: 138 */
358 u8 leds_mode; /* abs.ofs: 139 */
359 u16 oem_mode;
360 u16 wowlan_mode; /* abs.ofs: 142 */
361 u16 leds_time_interval; /* abs.ofs: 144 */
362 u8 leds_off_time; /* abs.ofs: 146 */
363 u8 leds_on_time; /* abs.ofs: 147 */
364 u8 almgor_m_version; /* abs.ofs: 148 */
365 u8 antenna_switch_type; /* abs.ofs: 149 */
366 u8 reserved6[42];
367 u8 sku_id[4]; /* abs.ofs: 192 */
368
369/*
370 * Per-channel regulatory data.
371 *
372 * Each channel that *might* be supported by 3945 has a fixed location
373 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
374 * txpower (MSB).
375 *
376 * Entries immediately below are for 20 MHz channel width.
377 *
378 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
379 */
380 u16 band_1_count; /* abs.ofs: 196 */
381 struct il_eeprom_channel band_1_channels[14]; /* abs.ofs: 198 */
382
383/*
384 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
385 * 5.0 GHz channels 7, 8, 11, 12, 16
386 * (4915-5080MHz) (none of these is ever supported)
387 */
388 u16 band_2_count; /* abs.ofs: 226 */
389 struct il_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */
390
391/*
392 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
393 * (5170-5320MHz)
394 */
395 u16 band_3_count; /* abs.ofs: 254 */
396 struct il_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */
397
398/*
399 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
400 * (5500-5700MHz)
401 */
402 u16 band_4_count; /* abs.ofs: 280 */
403 struct il_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */
404
405/*
406 * 5.7 GHz channels 145, 149, 153, 157, 161, 165
407 * (5725-5825MHz)
408 */
409 u16 band_5_count; /* abs.ofs: 304 */
410 struct il_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */
411
412 u8 reserved9[194];
413
414/*
415 * 3945 Txpower calibration data.
416 */
417#define IL_NUM_TX_CALIB_GROUPS 5
418 struct il3945_eeprom_txpower_group groups[IL_NUM_TX_CALIB_GROUPS];
419/* abs.ofs: 512 */
420 struct il3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */
421 u8 reserved16[172]; /* fill out to full 1024 byte block */
422} __packed;
423
424#define IL3945_EEPROM_IMG_SIZE 1024
425
426/* End of EEPROM */
427
#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40)	/* bit 6 */
#define PCI_CFG_REV_ID_BIT_RTP       (0x80)	/* bit 7 */

/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
#define IL39_NUM_QUEUES    5
#define IL39_CMD_QUEUE_NUM 4

#define IL_DEFAULT_TX_RETRY 15

/*********************************************/

#define RFD_SIZE       4
#define NUM_TFD_CHUNKS 4

/*
 * Pack/unpack the buffer count and pad fields of a TFD control word.
 * The macro arguments are fully parenthesized so expression arguments
 * (e.g. TFD_CTL_PAD_GET(a | b)) expand correctly.
 */
#define TFD_CTL_COUNT_SET(n)   ((n) << 24)
#define TFD_CTL_COUNT_GET(ctl) (((ctl) >> 24) & 7)
#define TFD_CTL_PAD_SET(n)     ((n) << 28)
#define TFD_CTL_PAD_GET(ctl)   ((ctl) >> 28)

/* Sizes and addresses for instruction and data memory (SRAM) in
 * 3945's embedded processor.  Driver access is via HBUS_TARG_MEM_* regs. */
#define IL39_RTC_INST_LOWER_BOUND (0x000000)
#define IL39_RTC_INST_UPPER_BOUND (0x014000)

#define IL39_RTC_DATA_LOWER_BOUND (0x800000)
#define IL39_RTC_DATA_UPPER_BOUND (0x808000)

#define IL39_RTC_INST_SIZE (IL39_RTC_INST_UPPER_BOUND - \
			    IL39_RTC_INST_LOWER_BOUND)
#define IL39_RTC_DATA_SIZE (IL39_RTC_DATA_UPPER_BOUND - \
			    IL39_RTC_DATA_LOWER_BOUND)

#define IL39_MAX_INST_SIZE IL39_RTC_INST_SIZE
#define IL39_MAX_DATA_SIZE IL39_RTC_DATA_SIZE

/* Size of uCode instruction memory in bootstrap state machine */
#define IL39_MAX_BSM_SIZE IL39_RTC_INST_SIZE
465
466static inline int
467il3945_hw_valid_rtc_data_addr(u32 addr)
468{
469 return (addr >= IL39_RTC_DATA_LOWER_BOUND &&
470 addr < IL39_RTC_DATA_UPPER_BOUND);
471}
472
473/* Base physical address of il3945_shared is provided to FH39_TSSR_CBB_BASE
474 * and &il3945_shared.rx_read_ptr[0] is provided to FH39_RCSR_RPTR_ADDR(0) */
475struct il3945_shared {
476 __le32 tx_base_ptr[8];
477} __packed;
478
479static inline u8
480il3945_hw_get_rate(__le16 rate_n_flags)
481{
482 return le16_to_cpu(rate_n_flags) & 0xFF;
483}
484
485static inline u16
486il3945_hw_get_rate_n_flags(__le16 rate_n_flags)
487{
488 return le16_to_cpu(rate_n_flags);
489}
490
491static inline __le16
492il3945_hw_set_rate_n_flags(u8 rate, u16 flags)
493{
494 return cpu_to_le16((u16) rate | flags);
495}
496
/************************************/
/* iwl3945 Flow Handler Definitions */
/************************************/

/**
 * This I/O area is directly read/writable by the driver (e.g. Linux uses
 * writel()).  Addresses are offsets from the device's PCI hardware base
 * address.
 */
#define FH39_MEM_LOWER_BOUND (0x0800)
#define FH39_MEM_UPPER_BOUND (0x1000)

/* Register-table base offsets within the flow-handler I/O area. */
#define FH39_CBCC_TBL (FH39_MEM_LOWER_BOUND + 0x140)
#define FH39_TFDB_TBL (FH39_MEM_LOWER_BOUND + 0x180)
#define FH39_RCSR_TBL (FH39_MEM_LOWER_BOUND + 0x400)
#define FH39_RSSR_TBL (FH39_MEM_LOWER_BOUND + 0x4c0)
#define FH39_TCSR_TBL (FH39_MEM_LOWER_BOUND + 0x500)
#define FH39_TSSR_TBL (FH39_MEM_LOWER_BOUND + 0x680)

/* TFDB (Transmit Frame Buffer Descriptor) */
#define FH39_TFDB(_ch, buf) (FH39_TFDB_TBL + \
			     ((_ch) * 2 + (buf)) * 0x28)
#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch) (FH39_TFDB_TBL + 0x50 * (_ch))

/* CBCC channel is [0,2] */
#define FH39_CBCC(_ch)      (FH39_CBCC_TBL + (_ch) * 0x8)
#define FH39_CBCC_CTRL(_ch) (FH39_CBCC(_ch) + 0x00)
#define FH39_CBCC_BASE(_ch) (FH39_CBCC(_ch) + 0x04)

/* RCSR channel is [0,2] */
#define FH39_RCSR(_ch)           (FH39_RCSR_TBL + (_ch) * 0x40)
#define FH39_RCSR_CONFIG(_ch)    (FH39_RCSR(_ch) + 0x00)
#define FH39_RCSR_RBD_BASE(_ch)  (FH39_RCSR(_ch) + 0x04)
#define FH39_RCSR_WPTR(_ch)      (FH39_RCSR(_ch) + 0x20)
#define FH39_RCSR_RPTR_ADDR(_ch) (FH39_RCSR(_ch) + 0x24)

#define FH39_RSCSR_CHNL0_WPTR (FH39_RCSR_WPTR(0))

/* RSSR */
#define FH39_RSSR_CTRL   (FH39_RSSR_TBL + 0x000)
#define FH39_RSSR_STATUS (FH39_RSSR_TBL + 0x004)

/* TCSR */
#define FH39_TCSR(_ch)           (FH39_TCSR_TBL + (_ch) * 0x20)
#define FH39_TCSR_CONFIG(_ch)    (FH39_TCSR(_ch) + 0x00)
#define FH39_TCSR_CREDIT(_ch)    (FH39_TCSR(_ch) + 0x04)
#define FH39_TCSR_BUFF_STTS(_ch) (FH39_TCSR(_ch) + 0x08)

/* TSSR */
#define FH39_TSSR_CBB_BASE   (FH39_TSSR_TBL + 0x000)
#define FH39_TSSR_MSG_CONFIG (FH39_TSSR_TBL + 0x008)
#define FH39_TSSR_TX_STATUS  (FH39_TSSR_TBL + 0x010)

/* DBM */

#define FH39_SRVC_CHNL (6)

#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20)
#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH  (4)

#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000)

#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000)

#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000)

#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000)

#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000)

#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000)

#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF    (0x00000000)
#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001)

#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000)
#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL  (0x00000008)

#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)

#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)

#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE  (0x00000000)
#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)

#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000)

#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001)

#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000)
#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000)

#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400)

#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100)
#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080)

#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020)
#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH       (0x00000005)

#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch)  (BIT(_ch) << 24)
#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch) (BIT(_ch) << 16)

#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \
	(FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \
	 FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch))

#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
604
605struct il3945_tfd_tb {
606 __le32 addr;
607 __le32 len;
608} __packed;
609
610struct il3945_tfd {
611 __le32 control_flags;
612 struct il3945_tfd_tb tbs[4];
613 u8 __pad[28];
614} __packed;
615
616#ifdef CONFIG_IWLEGACY_DEBUGFS
617ssize_t il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
618 size_t count, loff_t *ppos);
619ssize_t il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
620 size_t count, loff_t *ppos);
621ssize_t il3945_ucode_general_stats_read(struct file *file,
622 char __user *user_buf, size_t count,
623 loff_t *ppos);
624#endif
625
626#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.c b/drivers/net/wireless/iwlegacy/4965-calib.c
index 162d877e6869..d3248e3ef23b 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-calib.c
+++ b/drivers/net/wireless/iwlegacy/4965-calib.c
@@ -63,15 +63,14 @@
63#include <linux/slab.h> 63#include <linux/slab.h>
64#include <net/mac80211.h> 64#include <net/mac80211.h>
65 65
66#include "iwl-dev.h" 66#include "common.h"
67#include "iwl-core.h" 67#include "4965.h"
68#include "iwl-4965-calib.h"
69 68
70/***************************************************************************** 69/*****************************************************************************
71 * INIT calibrations framework 70 * INIT calibrations framework
72 *****************************************************************************/ 71 *****************************************************************************/
73 72
74struct statistics_general_data { 73struct stats_general_data {
75 u32 beacon_silence_rssi_a; 74 u32 beacon_silence_rssi_a;
76 u32 beacon_silence_rssi_b; 75 u32 beacon_silence_rssi_b;
77 u32 beacon_silence_rssi_c; 76 u32 beacon_silence_rssi_c;
@@ -80,14 +79,15 @@ struct statistics_general_data {
80 u32 beacon_energy_c; 79 u32 beacon_energy_c;
81}; 80};
82 81
83void iwl4965_calib_free_results(struct iwl_priv *priv) 82void
83il4965_calib_free_results(struct il_priv *il)
84{ 84{
85 int i; 85 int i;
86 86
87 for (i = 0; i < IWL_CALIB_MAX; i++) { 87 for (i = 0; i < IL_CALIB_MAX; i++) {
88 kfree(priv->calib_results[i].buf); 88 kfree(il->calib_results[i].buf);
89 priv->calib_results[i].buf = NULL; 89 il->calib_results[i].buf = NULL;
90 priv->calib_results[i].buf_len = 0; 90 il->calib_results[i].buf_len = 0;
91 } 91 }
92} 92}
93 93
@@ -103,10 +103,9 @@ void iwl4965_calib_free_results(struct iwl_priv *priv)
103 * enough to receive all of our own network traffic, but not so 103 * enough to receive all of our own network traffic, but not so
104 * high that our DSP gets too busy trying to lock onto non-network 104 * high that our DSP gets too busy trying to lock onto non-network
105 * activity/noise. */ 105 * activity/noise. */
106static int iwl4965_sens_energy_cck(struct iwl_priv *priv, 106static int
107 u32 norm_fa, 107il4965_sens_energy_cck(struct il_priv *il, u32 norm_fa, u32 rx_enable_time,
108 u32 rx_enable_time, 108 struct stats_general_data *rx_info)
109 struct statistics_general_data *rx_info)
110{ 109{
111 u32 max_nrg_cck = 0; 110 u32 max_nrg_cck = 0;
112 int i = 0; 111 int i = 0;
@@ -129,22 +128,22 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
129 u32 false_alarms = norm_fa * 200 * 1024; 128 u32 false_alarms = norm_fa * 200 * 1024;
130 u32 max_false_alarms = MAX_FA_CCK * rx_enable_time; 129 u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
131 u32 min_false_alarms = MIN_FA_CCK * rx_enable_time; 130 u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
132 struct iwl_sensitivity_data *data = NULL; 131 struct il_sensitivity_data *data = NULL;
133 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; 132 const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
134 133
135 data = &(priv->sensitivity_data); 134 data = &(il->sensitivity_data);
136 135
137 data->nrg_auto_corr_silence_diff = 0; 136 data->nrg_auto_corr_silence_diff = 0;
138 137
139 /* Find max silence rssi among all 3 receivers. 138 /* Find max silence rssi among all 3 receivers.
140 * This is background noise, which may include transmissions from other 139 * This is background noise, which may include transmissions from other
141 * networks, measured during silence before our network's beacon */ 140 * networks, measured during silence before our network's beacon */
142 silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a & 141 silence_rssi_a =
143 ALL_BAND_FILTER) >> 8); 142 (u8) ((rx_info->beacon_silence_rssi_a & ALL_BAND_FILTER) >> 8);
144 silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b & 143 silence_rssi_b =
145 ALL_BAND_FILTER) >> 8); 144 (u8) ((rx_info->beacon_silence_rssi_b & ALL_BAND_FILTER) >> 8);
146 silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c & 145 silence_rssi_c =
147 ALL_BAND_FILTER) >> 8); 146 (u8) ((rx_info->beacon_silence_rssi_c & ALL_BAND_FILTER) >> 8);
148 147
149 val = max(silence_rssi_b, silence_rssi_c); 148 val = max(silence_rssi_b, silence_rssi_c);
150 max_silence_rssi = max(silence_rssi_a, (u8) val); 149 max_silence_rssi = max(silence_rssi_a, (u8) val);
@@ -160,9 +159,8 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
160 val = data->nrg_silence_rssi[i]; 159 val = data->nrg_silence_rssi[i];
161 silence_ref = max(silence_ref, val); 160 silence_ref = max(silence_ref, val);
162 } 161 }
163 IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n", 162 D_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n", silence_rssi_a,
164 silence_rssi_a, silence_rssi_b, silence_rssi_c, 163 silence_rssi_b, silence_rssi_c, silence_ref);
165 silence_ref);
166 164
167 /* Find max rx energy (min value!) among all 3 receivers, 165 /* Find max rx energy (min value!) among all 3 receivers,
168 * measured during beacon frame. 166 * measured during beacon frame.
@@ -184,9 +182,9 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
184 max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i])); 182 max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
185 max_nrg_cck += 6; 183 max_nrg_cck += 6;
186 184
187 IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n", 185 D_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
188 rx_info->beacon_energy_a, rx_info->beacon_energy_b, 186 rx_info->beacon_energy_a, rx_info->beacon_energy_b,
189 rx_info->beacon_energy_c, max_nrg_cck - 6); 187 rx_info->beacon_energy_c, max_nrg_cck - 6);
190 188
191 /* Count number of consecutive beacons with fewer-than-desired 189 /* Count number of consecutive beacons with fewer-than-desired
192 * false alarms. */ 190 * false alarms. */
@@ -194,35 +192,34 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
194 data->num_in_cck_no_fa++; 192 data->num_in_cck_no_fa++;
195 else 193 else
196 data->num_in_cck_no_fa = 0; 194 data->num_in_cck_no_fa = 0;
197 IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n", 195 D_CALIB("consecutive bcns with few false alarms = %u\n",
198 data->num_in_cck_no_fa); 196 data->num_in_cck_no_fa);
199 197
200 /* If we got too many false alarms this time, reduce sensitivity */ 198 /* If we got too many false alarms this time, reduce sensitivity */
201 if ((false_alarms > max_false_alarms) && 199 if (false_alarms > max_false_alarms &&
202 (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) { 200 data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK) {
203 IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n", 201 D_CALIB("norm FA %u > max FA %u\n", false_alarms,
204 false_alarms, max_false_alarms); 202 max_false_alarms);
205 IWL_DEBUG_CALIB(priv, "... reducing sensitivity\n"); 203 D_CALIB("... reducing sensitivity\n");
206 data->nrg_curr_state = IWL_FA_TOO_MANY; 204 data->nrg_curr_state = IL_FA_TOO_MANY;
207 /* Store for "fewer than desired" on later beacon */ 205 /* Store for "fewer than desired" on later beacon */
208 data->nrg_silence_ref = silence_ref; 206 data->nrg_silence_ref = silence_ref;
209 207
210 /* increase energy threshold (reduce nrg value) 208 /* increase energy threshold (reduce nrg value)
211 * to decrease sensitivity */ 209 * to decrease sensitivity */
212 data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK; 210 data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK;
213 /* Else if we got fewer than desired, increase sensitivity */ 211 /* Else if we got fewer than desired, increase sensitivity */
214 } else if (false_alarms < min_false_alarms) { 212 } else if (false_alarms < min_false_alarms) {
215 data->nrg_curr_state = IWL_FA_TOO_FEW; 213 data->nrg_curr_state = IL_FA_TOO_FEW;
216 214
217 /* Compare silence level with silence level for most recent 215 /* Compare silence level with silence level for most recent
218 * healthy number or too many false alarms */ 216 * healthy number or too many false alarms */
219 data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref - 217 data->nrg_auto_corr_silence_diff =
220 (s32)silence_ref; 218 (s32) data->nrg_silence_ref - (s32) silence_ref;
221 219
222 IWL_DEBUG_CALIB(priv, 220 D_CALIB("norm FA %u < min FA %u, silence diff %d\n",
223 "norm FA %u < min FA %u, silence diff %d\n", 221 false_alarms, min_false_alarms,
224 false_alarms, min_false_alarms, 222 data->nrg_auto_corr_silence_diff);
225 data->nrg_auto_corr_silence_diff);
226 223
227 /* Increase value to increase sensitivity, but only if: 224 /* Increase value to increase sensitivity, but only if:
228 * 1a) previous beacon did *not* have *too many* false alarms 225 * 1a) previous beacon did *not* have *too many* false alarms
@@ -230,23 +227,22 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
230 * from a previous beacon with too many, or healthy # FAs 227 * from a previous beacon with too many, or healthy # FAs
231 * OR 2) We've seen a lot of beacons (100) with too few 228 * OR 2) We've seen a lot of beacons (100) with too few
232 * false alarms */ 229 * false alarms */
233 if ((data->nrg_prev_state != IWL_FA_TOO_MANY) && 230 if (data->nrg_prev_state != IL_FA_TOO_MANY &&
234 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) || 231 (data->nrg_auto_corr_silence_diff > NRG_DIFF ||
235 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) { 232 data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA)) {
236 233
237 IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n"); 234 D_CALIB("... increasing sensitivity\n");
238 /* Increase nrg value to increase sensitivity */ 235 /* Increase nrg value to increase sensitivity */
239 val = data->nrg_th_cck + NRG_STEP_CCK; 236 val = data->nrg_th_cck + NRG_STEP_CCK;
240 data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val); 237 data->nrg_th_cck = min((u32) ranges->min_nrg_cck, val);
241 } else { 238 } else {
242 IWL_DEBUG_CALIB(priv, 239 D_CALIB("... but not changing sensitivity\n");
243 "... but not changing sensitivity\n");
244 } 240 }
245 241
246 /* Else we got a healthy number of false alarms, keep status quo */ 242 /* Else we got a healthy number of false alarms, keep status quo */
247 } else { 243 } else {
248 IWL_DEBUG_CALIB(priv, " FA in safe zone\n"); 244 D_CALIB(" FA in safe zone\n");
249 data->nrg_curr_state = IWL_FA_GOOD_RANGE; 245 data->nrg_curr_state = IL_FA_GOOD_RANGE;
250 246
251 /* Store for use in "fewer than desired" with later beacon */ 247 /* Store for use in "fewer than desired" with later beacon */
252 data->nrg_silence_ref = silence_ref; 248 data->nrg_silence_ref = silence_ref;
@@ -254,8 +250,8 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
254 /* If previous beacon had too many false alarms, 250 /* If previous beacon had too many false alarms,
255 * give it some extra margin by reducing sensitivity again 251 * give it some extra margin by reducing sensitivity again
256 * (but don't go below measured energy of desired Rx) */ 252 * (but don't go below measured energy of desired Rx) */
257 if (IWL_FA_TOO_MANY == data->nrg_prev_state) { 253 if (IL_FA_TOO_MANY == data->nrg_prev_state) {
258 IWL_DEBUG_CALIB(priv, "... increasing margin\n"); 254 D_CALIB("... increasing margin\n");
259 if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN)) 255 if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN))
260 data->nrg_th_cck -= NRG_MARGIN; 256 data->nrg_th_cck -= NRG_MARGIN;
261 else 257 else
@@ -269,7 +265,7 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
269 * Lower value is higher energy, so we use max()! 265 * Lower value is higher energy, so we use max()!
270 */ 266 */
271 data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck); 267 data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
272 IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck); 268 D_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck);
273 269
274 data->nrg_prev_state = data->nrg_curr_state; 270 data->nrg_prev_state = data->nrg_curr_state;
275 271
@@ -284,190 +280,187 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
284 else { 280 else {
285 val = data->auto_corr_cck + AUTO_CORR_STEP_CCK; 281 val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
286 data->auto_corr_cck = 282 data->auto_corr_cck =
287 min((u32)ranges->auto_corr_max_cck, val); 283 min((u32) ranges->auto_corr_max_cck, val);
288 } 284 }
289 val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK; 285 val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
290 data->auto_corr_cck_mrc = 286 data->auto_corr_cck_mrc =
291 min((u32)ranges->auto_corr_max_cck_mrc, val); 287 min((u32) ranges->auto_corr_max_cck_mrc, val);
292 } else if ((false_alarms < min_false_alarms) && 288 } else if (false_alarms < min_false_alarms &&
293 ((data->nrg_auto_corr_silence_diff > NRG_DIFF) || 289 (data->nrg_auto_corr_silence_diff > NRG_DIFF ||
294 (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) { 290 data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA)) {
295 291
296 /* Decrease auto_corr values to increase sensitivity */ 292 /* Decrease auto_corr values to increase sensitivity */
297 val = data->auto_corr_cck - AUTO_CORR_STEP_CCK; 293 val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
298 data->auto_corr_cck = 294 data->auto_corr_cck = max((u32) ranges->auto_corr_min_cck, val);
299 max((u32)ranges->auto_corr_min_cck, val);
300 val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK; 295 val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
301 data->auto_corr_cck_mrc = 296 data->auto_corr_cck_mrc =
302 max((u32)ranges->auto_corr_min_cck_mrc, val); 297 max((u32) ranges->auto_corr_min_cck_mrc, val);
303 } 298 }
304 299
305 return 0; 300 return 0;
306} 301}
307 302
308 303static int
309static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv, 304il4965_sens_auto_corr_ofdm(struct il_priv *il, u32 norm_fa, u32 rx_enable_time)
310 u32 norm_fa,
311 u32 rx_enable_time)
312{ 305{
313 u32 val; 306 u32 val;
314 u32 false_alarms = norm_fa * 200 * 1024; 307 u32 false_alarms = norm_fa * 200 * 1024;
315 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time; 308 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
316 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time; 309 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
317 struct iwl_sensitivity_data *data = NULL; 310 struct il_sensitivity_data *data = NULL;
318 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; 311 const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
319 312
320 data = &(priv->sensitivity_data); 313 data = &(il->sensitivity_data);
321 314
322 /* If we got too many false alarms this time, reduce sensitivity */ 315 /* If we got too many false alarms this time, reduce sensitivity */
323 if (false_alarms > max_false_alarms) { 316 if (false_alarms > max_false_alarms) {
324 317
325 IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u)\n", 318 D_CALIB("norm FA %u > max FA %u)\n", false_alarms,
326 false_alarms, max_false_alarms); 319 max_false_alarms);
327 320
328 val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM; 321 val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
329 data->auto_corr_ofdm = 322 data->auto_corr_ofdm =
330 min((u32)ranges->auto_corr_max_ofdm, val); 323 min((u32) ranges->auto_corr_max_ofdm, val);
331 324
332 val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM; 325 val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
333 data->auto_corr_ofdm_mrc = 326 data->auto_corr_ofdm_mrc =
334 min((u32)ranges->auto_corr_max_ofdm_mrc, val); 327 min((u32) ranges->auto_corr_max_ofdm_mrc, val);
335 328
336 val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM; 329 val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
337 data->auto_corr_ofdm_x1 = 330 data->auto_corr_ofdm_x1 =
338 min((u32)ranges->auto_corr_max_ofdm_x1, val); 331 min((u32) ranges->auto_corr_max_ofdm_x1, val);
339 332
340 val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM; 333 val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
341 data->auto_corr_ofdm_mrc_x1 = 334 data->auto_corr_ofdm_mrc_x1 =
342 min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val); 335 min((u32) ranges->auto_corr_max_ofdm_mrc_x1, val);
343 } 336 }
344 337
345 /* Else if we got fewer than desired, increase sensitivity */ 338 /* Else if we got fewer than desired, increase sensitivity */
346 else if (false_alarms < min_false_alarms) { 339 else if (false_alarms < min_false_alarms) {
347 340
348 IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n", 341 D_CALIB("norm FA %u < min FA %u\n", false_alarms,
349 false_alarms, min_false_alarms); 342 min_false_alarms);
350 343
351 val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM; 344 val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
352 data->auto_corr_ofdm = 345 data->auto_corr_ofdm =
353 max((u32)ranges->auto_corr_min_ofdm, val); 346 max((u32) ranges->auto_corr_min_ofdm, val);
354 347
355 val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM; 348 val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
356 data->auto_corr_ofdm_mrc = 349 data->auto_corr_ofdm_mrc =
357 max((u32)ranges->auto_corr_min_ofdm_mrc, val); 350 max((u32) ranges->auto_corr_min_ofdm_mrc, val);
358 351
359 val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM; 352 val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
360 data->auto_corr_ofdm_x1 = 353 data->auto_corr_ofdm_x1 =
361 max((u32)ranges->auto_corr_min_ofdm_x1, val); 354 max((u32) ranges->auto_corr_min_ofdm_x1, val);
362 355
363 val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM; 356 val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
364 data->auto_corr_ofdm_mrc_x1 = 357 data->auto_corr_ofdm_mrc_x1 =
365 max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val); 358 max((u32) ranges->auto_corr_min_ofdm_mrc_x1, val);
366 } else { 359 } else {
367 IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n", 360 D_CALIB("min FA %u < norm FA %u < max FA %u OK\n",
368 min_false_alarms, false_alarms, max_false_alarms); 361 min_false_alarms, false_alarms, max_false_alarms);
369 } 362 }
370 return 0; 363 return 0;
371} 364}
372 365
373static void iwl4965_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv, 366static void
374 struct iwl_sensitivity_data *data, 367il4965_prepare_legacy_sensitivity_tbl(struct il_priv *il,
375 __le16 *tbl) 368 struct il_sensitivity_data *data,
369 __le16 *tbl)
376{ 370{
377 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] = 371 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
378 cpu_to_le16((u16)data->auto_corr_ofdm); 372 cpu_to_le16((u16) data->auto_corr_ofdm);
379 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] = 373 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
380 cpu_to_le16((u16)data->auto_corr_ofdm_mrc); 374 cpu_to_le16((u16) data->auto_corr_ofdm_mrc);
381 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] = 375 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
382 cpu_to_le16((u16)data->auto_corr_ofdm_x1); 376 cpu_to_le16((u16) data->auto_corr_ofdm_x1);
383 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] = 377 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
384 cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1); 378 cpu_to_le16((u16) data->auto_corr_ofdm_mrc_x1);
385 379
386 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] = 380 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
387 cpu_to_le16((u16)data->auto_corr_cck); 381 cpu_to_le16((u16) data->auto_corr_cck);
388 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] = 382 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
389 cpu_to_le16((u16)data->auto_corr_cck_mrc); 383 cpu_to_le16((u16) data->auto_corr_cck_mrc);
390 384
391 tbl[HD_MIN_ENERGY_CCK_DET_INDEX] = 385 tbl[HD_MIN_ENERGY_CCK_DET_IDX] = cpu_to_le16((u16) data->nrg_th_cck);
392 cpu_to_le16((u16)data->nrg_th_cck); 386 tbl[HD_MIN_ENERGY_OFDM_DET_IDX] = cpu_to_le16((u16) data->nrg_th_ofdm);
393 tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] = 387
394 cpu_to_le16((u16)data->nrg_th_ofdm); 388 tbl[HD_BARKER_CORR_TH_ADD_MIN_IDX] =
395 389 cpu_to_le16(data->barker_corr_th_min);
396 tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] = 390 tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_IDX] =
397 cpu_to_le16(data->barker_corr_th_min); 391 cpu_to_le16(data->barker_corr_th_min_mrc);
398 tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] = 392 tbl[HD_OFDM_ENERGY_TH_IN_IDX] = cpu_to_le16(data->nrg_th_cca);
399 cpu_to_le16(data->barker_corr_th_min_mrc); 393
400 tbl[HD_OFDM_ENERGY_TH_IN_INDEX] = 394 D_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
401 cpu_to_le16(data->nrg_th_cca); 395 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
402 396 data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
403 IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n", 397 data->nrg_th_ofdm);
404 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc, 398
405 data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1, 399 D_CALIB("cck: ac %u mrc %u thresh %u\n", data->auto_corr_cck,
406 data->nrg_th_ofdm); 400 data->auto_corr_cck_mrc, data->nrg_th_cck);
407
408 IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n",
409 data->auto_corr_cck, data->auto_corr_cck_mrc,
410 data->nrg_th_cck);
411} 401}
412 402
413/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */ 403/* Prepare a C_SENSITIVITY, send to uCode if values have changed */
414static int iwl4965_sensitivity_write(struct iwl_priv *priv) 404static int
405il4965_sensitivity_write(struct il_priv *il)
415{ 406{
416 struct iwl_sensitivity_cmd cmd; 407 struct il_sensitivity_cmd cmd;
417 struct iwl_sensitivity_data *data = NULL; 408 struct il_sensitivity_data *data = NULL;
418 struct iwl_host_cmd cmd_out = { 409 struct il_host_cmd cmd_out = {
419 .id = SENSITIVITY_CMD, 410 .id = C_SENSITIVITY,
420 .len = sizeof(struct iwl_sensitivity_cmd), 411 .len = sizeof(struct il_sensitivity_cmd),
421 .flags = CMD_ASYNC, 412 .flags = CMD_ASYNC,
422 .data = &cmd, 413 .data = &cmd,
423 }; 414 };
424 415
425 data = &(priv->sensitivity_data); 416 data = &(il->sensitivity_data);
426 417
427 memset(&cmd, 0, sizeof(cmd)); 418 memset(&cmd, 0, sizeof(cmd));
428 419
429 iwl4965_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]); 420 il4965_prepare_legacy_sensitivity_tbl(il, data, &cmd.table[0]);
430 421
431 /* Update uCode's "work" table, and copy it to DSP */ 422 /* Update uCode's "work" table, and copy it to DSP */
432 cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE; 423 cmd.control = C_SENSITIVITY_CONTROL_WORK_TBL;
433 424
434 /* Don't send command to uCode if nothing has changed */ 425 /* Don't send command to uCode if nothing has changed */
435 if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]), 426 if (!memcmp
436 sizeof(u16)*HD_TABLE_SIZE)) { 427 (&cmd.table[0], &(il->sensitivity_tbl[0]),
437 IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n"); 428 sizeof(u16) * HD_TBL_SIZE)) {
429 D_CALIB("No change in C_SENSITIVITY\n");
438 return 0; 430 return 0;
439 } 431 }
440 432
441 /* Copy table for comparison next time */ 433 /* Copy table for comparison next time */
442 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]), 434 memcpy(&(il->sensitivity_tbl[0]), &(cmd.table[0]),
443 sizeof(u16)*HD_TABLE_SIZE); 435 sizeof(u16) * HD_TBL_SIZE);
444 436
445 return iwl_legacy_send_cmd(priv, &cmd_out); 437 return il_send_cmd(il, &cmd_out);
446} 438}
447 439
448void iwl4965_init_sensitivity(struct iwl_priv *priv) 440void
441il4965_init_sensitivity(struct il_priv *il)
449{ 442{
450 int ret = 0; 443 int ret = 0;
451 int i; 444 int i;
452 struct iwl_sensitivity_data *data = NULL; 445 struct il_sensitivity_data *data = NULL;
453 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; 446 const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
454 447
455 if (priv->disable_sens_cal) 448 if (il->disable_sens_cal)
456 return; 449 return;
457 450
458 IWL_DEBUG_CALIB(priv, "Start iwl4965_init_sensitivity\n"); 451 D_CALIB("Start il4965_init_sensitivity\n");
459 452
460 /* Clear driver's sensitivity algo data */ 453 /* Clear driver's sensitivity algo data */
461 data = &(priv->sensitivity_data); 454 data = &(il->sensitivity_data);
462 455
463 if (ranges == NULL) 456 if (ranges == NULL)
464 return; 457 return;
465 458
466 memset(data, 0, sizeof(struct iwl_sensitivity_data)); 459 memset(data, 0, sizeof(struct il_sensitivity_data));
467 460
468 data->num_in_cck_no_fa = 0; 461 data->num_in_cck_no_fa = 0;
469 data->nrg_curr_state = IWL_FA_TOO_MANY; 462 data->nrg_curr_state = IL_FA_TOO_MANY;
470 data->nrg_prev_state = IWL_FA_TOO_MANY; 463 data->nrg_prev_state = IL_FA_TOO_MANY;
471 data->nrg_silence_ref = 0; 464 data->nrg_silence_ref = 0;
472 data->nrg_silence_idx = 0; 465 data->nrg_silence_idx = 0;
473 data->nrg_energy_idx = 0; 466 data->nrg_energy_idx = 0;
@@ -478,9 +471,9 @@ void iwl4965_init_sensitivity(struct iwl_priv *priv)
478 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) 471 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
479 data->nrg_silence_rssi[i] = 0; 472 data->nrg_silence_rssi[i] = 0;
480 473
481 data->auto_corr_ofdm = ranges->auto_corr_min_ofdm; 474 data->auto_corr_ofdm = ranges->auto_corr_min_ofdm;
482 data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc; 475 data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
483 data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1; 476 data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1;
484 data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1; 477 data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
485 data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF; 478 data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
486 data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc; 479 data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
@@ -495,11 +488,12 @@ void iwl4965_init_sensitivity(struct iwl_priv *priv)
495 data->last_bad_plcp_cnt_cck = 0; 488 data->last_bad_plcp_cnt_cck = 0;
496 data->last_fa_cnt_cck = 0; 489 data->last_fa_cnt_cck = 0;
497 490
498 ret |= iwl4965_sensitivity_write(priv); 491 ret |= il4965_sensitivity_write(il);
499 IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret); 492 D_CALIB("<<return 0x%X\n", ret);
500} 493}
501 494
502void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp) 495void
496il4965_sensitivity_calibration(struct il_priv *il, void *resp)
503{ 497{
504 u32 rx_enable_time; 498 u32 rx_enable_time;
505 u32 fa_cck; 499 u32 fa_cck;
@@ -508,31 +502,31 @@ void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
508 u32 bad_plcp_ofdm; 502 u32 bad_plcp_ofdm;
509 u32 norm_fa_ofdm; 503 u32 norm_fa_ofdm;
510 u32 norm_fa_cck; 504 u32 norm_fa_cck;
511 struct iwl_sensitivity_data *data = NULL; 505 struct il_sensitivity_data *data = NULL;
512 struct statistics_rx_non_phy *rx_info; 506 struct stats_rx_non_phy *rx_info;
513 struct statistics_rx_phy *ofdm, *cck; 507 struct stats_rx_phy *ofdm, *cck;
514 unsigned long flags; 508 unsigned long flags;
515 struct statistics_general_data statis; 509 struct stats_general_data statis;
516 510
517 if (priv->disable_sens_cal) 511 if (il->disable_sens_cal)
518 return; 512 return;
519 513
520 data = &(priv->sensitivity_data); 514 data = &(il->sensitivity_data);
521 515
522 if (!iwl_legacy_is_any_associated(priv)) { 516 if (!il_is_any_associated(il)) {
523 IWL_DEBUG_CALIB(priv, "<< - not associated\n"); 517 D_CALIB("<< - not associated\n");
524 return; 518 return;
525 } 519 }
526 520
527 spin_lock_irqsave(&priv->lock, flags); 521 spin_lock_irqsave(&il->lock, flags);
528 522
529 rx_info = &(((struct iwl_notif_statistics *)resp)->rx.general); 523 rx_info = &(((struct il_notif_stats *)resp)->rx.general);
530 ofdm = &(((struct iwl_notif_statistics *)resp)->rx.ofdm); 524 ofdm = &(((struct il_notif_stats *)resp)->rx.ofdm);
531 cck = &(((struct iwl_notif_statistics *)resp)->rx.cck); 525 cck = &(((struct il_notif_stats *)resp)->rx.cck);
532 526
533 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { 527 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
534 IWL_DEBUG_CALIB(priv, "<< invalid data.\n"); 528 D_CALIB("<< invalid data.\n");
535 spin_unlock_irqrestore(&priv->lock, flags); 529 spin_unlock_irqrestore(&il->lock, flags);
536 return; 530 return;
537 } 531 }
538 532
@@ -544,30 +538,27 @@ void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
544 bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err); 538 bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err);
545 539
546 statis.beacon_silence_rssi_a = 540 statis.beacon_silence_rssi_a =
547 le32_to_cpu(rx_info->beacon_silence_rssi_a); 541 le32_to_cpu(rx_info->beacon_silence_rssi_a);
548 statis.beacon_silence_rssi_b = 542 statis.beacon_silence_rssi_b =
549 le32_to_cpu(rx_info->beacon_silence_rssi_b); 543 le32_to_cpu(rx_info->beacon_silence_rssi_b);
550 statis.beacon_silence_rssi_c = 544 statis.beacon_silence_rssi_c =
551 le32_to_cpu(rx_info->beacon_silence_rssi_c); 545 le32_to_cpu(rx_info->beacon_silence_rssi_c);
552 statis.beacon_energy_a = 546 statis.beacon_energy_a = le32_to_cpu(rx_info->beacon_energy_a);
553 le32_to_cpu(rx_info->beacon_energy_a); 547 statis.beacon_energy_b = le32_to_cpu(rx_info->beacon_energy_b);
554 statis.beacon_energy_b = 548 statis.beacon_energy_c = le32_to_cpu(rx_info->beacon_energy_c);
555 le32_to_cpu(rx_info->beacon_energy_b);
556 statis.beacon_energy_c =
557 le32_to_cpu(rx_info->beacon_energy_c);
558 549
559 spin_unlock_irqrestore(&priv->lock, flags); 550 spin_unlock_irqrestore(&il->lock, flags);
560 551
561 IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time); 552 D_CALIB("rx_enable_time = %u usecs\n", rx_enable_time);
562 553
563 if (!rx_enable_time) { 554 if (!rx_enable_time) {
564 IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n"); 555 D_CALIB("<< RX Enable Time == 0!\n");
565 return; 556 return;
566 } 557 }
567 558
568 /* These statistics increase monotonically, and do not reset 559 /* These stats increase monotonically, and do not reset
569 * at each beacon. Calculate difference from last value, or just 560 * at each beacon. Calculate difference from last value, or just
570 * use the new statistics value if it has reset or wrapped around. */ 561 * use the new stats value if it has reset or wrapped around. */
571 if (data->last_bad_plcp_cnt_cck > bad_plcp_cck) 562 if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
572 data->last_bad_plcp_cnt_cck = bad_plcp_cck; 563 data->last_bad_plcp_cnt_cck = bad_plcp_cck;
573 else { 564 else {
@@ -600,17 +591,17 @@ void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
600 norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm; 591 norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
601 norm_fa_cck = fa_cck + bad_plcp_cck; 592 norm_fa_cck = fa_cck + bad_plcp_cck;
602 593
603 IWL_DEBUG_CALIB(priv, 594 D_CALIB("cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
604 "cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck, 595 bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
605 bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
606 596
607 iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time); 597 il4965_sens_auto_corr_ofdm(il, norm_fa_ofdm, rx_enable_time);
608 iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis); 598 il4965_sens_energy_cck(il, norm_fa_cck, rx_enable_time, &statis);
609 599
610 iwl4965_sensitivity_write(priv); 600 il4965_sensitivity_write(il);
611} 601}
612 602
613static inline u8 iwl4965_find_first_chain(u8 mask) 603static inline u8
604il4965_find_first_chain(u8 mask)
614{ 605{
615 if (mask & ANT_A) 606 if (mask & ANT_A)
616 return CHAIN_A; 607 return CHAIN_A;
@@ -624,8 +615,8 @@ static inline u8 iwl4965_find_first_chain(u8 mask)
624 * disconnected. 615 * disconnected.
625 */ 616 */
626static void 617static void
627iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig, 618il4965_find_disconn_antenna(struct il_priv *il, u32 * average_sig,
628 struct iwl_chain_noise_data *data) 619 struct il_chain_noise_data *data)
629{ 620{
630 u32 active_chains = 0; 621 u32 active_chains = 0;
631 u32 max_average_sig; 622 u32 max_average_sig;
@@ -634,12 +625,15 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
634 u8 first_chain; 625 u8 first_chain;
635 u16 i = 0; 626 u16 i = 0;
636 627
637 average_sig[0] = data->chain_signal_a / 628 average_sig[0] =
638 priv->cfg->base_params->chain_noise_num_beacons; 629 data->chain_signal_a /
639 average_sig[1] = data->chain_signal_b / 630 il->cfg->base_params->chain_noise_num_beacons;
640 priv->cfg->base_params->chain_noise_num_beacons; 631 average_sig[1] =
641 average_sig[2] = data->chain_signal_c / 632 data->chain_signal_b /
642 priv->cfg->base_params->chain_noise_num_beacons; 633 il->cfg->base_params->chain_noise_num_beacons;
634 average_sig[2] =
635 data->chain_signal_c /
636 il->cfg->base_params->chain_noise_num_beacons;
643 637
644 if (average_sig[0] >= average_sig[1]) { 638 if (average_sig[0] >= average_sig[1]) {
645 max_average_sig = average_sig[0]; 639 max_average_sig = average_sig[0];
@@ -657,10 +651,10 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
657 active_chains = (1 << max_average_sig_antenna_i); 651 active_chains = (1 << max_average_sig_antenna_i);
658 } 652 }
659 653
660 IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n", 654 D_CALIB("average_sig: a %d b %d c %d\n", average_sig[0], average_sig[1],
661 average_sig[0], average_sig[1], average_sig[2]); 655 average_sig[2]);
662 IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n", 656 D_CALIB("max_average_sig = %d, antenna %d\n", max_average_sig,
663 max_average_sig, max_average_sig_antenna_i); 657 max_average_sig_antenna_i);
664 658
665 /* Compare signal strengths for all 3 receivers. */ 659 /* Compare signal strengths for all 3 receivers. */
666 for (i = 0; i < NUM_RX_CHAINS; i++) { 660 for (i = 0; i < NUM_RX_CHAINS; i++) {
@@ -673,9 +667,9 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
673 data->disconn_array[i] = 1; 667 data->disconn_array[i] = 1;
674 else 668 else
675 active_chains |= (1 << i); 669 active_chains |= (1 << i);
676 IWL_DEBUG_CALIB(priv, "i = %d rssiDelta = %d " 670 D_CALIB("i = %d rssiDelta = %d "
677 "disconn_array[i] = %d\n", 671 "disconn_array[i] = %d\n", i, rssi_delta,
678 i, rssi_delta, data->disconn_array[i]); 672 data->disconn_array[i]);
679 } 673 }
680 } 674 }
681 675
@@ -689,119 +683,110 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
689 * To be safe, simply mask out any chains that we know 683 * To be safe, simply mask out any chains that we know
690 * are not on the device. 684 * are not on the device.
691 */ 685 */
692 active_chains &= priv->hw_params.valid_rx_ant; 686 active_chains &= il->hw_params.valid_rx_ant;
693 687
694 num_tx_chains = 0; 688 num_tx_chains = 0;
695 for (i = 0; i < NUM_RX_CHAINS; i++) { 689 for (i = 0; i < NUM_RX_CHAINS; i++) {
696 /* loops on all the bits of 690 /* loops on all the bits of
697 * priv->hw_setting.valid_tx_ant */ 691 * il->hw_setting.valid_tx_ant */
698 u8 ant_msk = (1 << i); 692 u8 ant_msk = (1 << i);
699 if (!(priv->hw_params.valid_tx_ant & ant_msk)) 693 if (!(il->hw_params.valid_tx_ant & ant_msk))
700 continue; 694 continue;
701 695
702 num_tx_chains++; 696 num_tx_chains++;
703 if (data->disconn_array[i] == 0) 697 if (data->disconn_array[i] == 0)
704 /* there is a Tx antenna connected */ 698 /* there is a Tx antenna connected */
705 break; 699 break;
706 if (num_tx_chains == priv->hw_params.tx_chains_num && 700 if (num_tx_chains == il->hw_params.tx_chains_num &&
707 data->disconn_array[i]) { 701 data->disconn_array[i]) {
708 /* 702 /*
709 * If all chains are disconnected 703 * If all chains are disconnected
710 * connect the first valid tx chain 704 * connect the first valid tx chain
711 */ 705 */
712 first_chain = 706 first_chain =
713 iwl4965_find_first_chain(priv->cfg->valid_tx_ant); 707 il4965_find_first_chain(il->cfg->valid_tx_ant);
714 data->disconn_array[first_chain] = 0; 708 data->disconn_array[first_chain] = 0;
715 active_chains |= BIT(first_chain); 709 active_chains |= BIT(first_chain);
716 IWL_DEBUG_CALIB(priv, 710 D_CALIB("All Tx chains are disconnected"
717 "All Tx chains are disconnected W/A - declare %d as connected\n", 711 "- declare %d as connected\n", first_chain);
718 first_chain);
719 break; 712 break;
720 } 713 }
721 } 714 }
722 715
723 if (active_chains != priv->hw_params.valid_rx_ant && 716 if (active_chains != il->hw_params.valid_rx_ant &&
724 active_chains != priv->chain_noise_data.active_chains) 717 active_chains != il->chain_noise_data.active_chains)
725 IWL_DEBUG_CALIB(priv, 718 D_CALIB("Detected that not all antennas are connected! "
726 "Detected that not all antennas are connected! " 719 "Connected: %#x, valid: %#x.\n", active_chains,
727 "Connected: %#x, valid: %#x.\n", 720 il->hw_params.valid_rx_ant);
728 active_chains, priv->hw_params.valid_rx_ant);
729 721
730 /* Save for use within RXON, TX, SCAN commands, etc. */ 722 /* Save for use within RXON, TX, SCAN commands, etc. */
731 data->active_chains = active_chains; 723 data->active_chains = active_chains;
732 IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n", 724 D_CALIB("active_chains (bitwise) = 0x%x\n", active_chains);
733 active_chains);
734} 725}
735 726
736static void iwl4965_gain_computation(struct iwl_priv *priv, 727static void
737 u32 *average_noise, 728il4965_gain_computation(struct il_priv *il, u32 * average_noise,
738 u16 min_average_noise_antenna_i, 729 u16 min_average_noise_antenna_i, u32 min_average_noise,
739 u32 min_average_noise, 730 u8 default_chain)
740 u8 default_chain)
741{ 731{
742 int i, ret; 732 int i, ret;
743 struct iwl_chain_noise_data *data = &priv->chain_noise_data; 733 struct il_chain_noise_data *data = &il->chain_noise_data;
744 734
745 data->delta_gain_code[min_average_noise_antenna_i] = 0; 735 data->delta_gain_code[min_average_noise_antenna_i] = 0;
746 736
747 for (i = default_chain; i < NUM_RX_CHAINS; i++) { 737 for (i = default_chain; i < NUM_RX_CHAINS; i++) {
748 s32 delta_g = 0; 738 s32 delta_g = 0;
749 739
750 if (!(data->disconn_array[i]) && 740 if (!data->disconn_array[i] &&
751 (data->delta_gain_code[i] == 741 data->delta_gain_code[i] ==
752 CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) { 742 CHAIN_NOISE_DELTA_GAIN_INIT_VAL) {
753 delta_g = average_noise[i] - min_average_noise; 743 delta_g = average_noise[i] - min_average_noise;
754 data->delta_gain_code[i] = (u8)((delta_g * 10) / 15); 744 data->delta_gain_code[i] = (u8) ((delta_g * 10) / 15);
755 data->delta_gain_code[i] = 745 data->delta_gain_code[i] =
756 min(data->delta_gain_code[i], 746 min(data->delta_gain_code[i],
757 (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE); 747 (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
758 748
759 data->delta_gain_code[i] = 749 data->delta_gain_code[i] =
760 (data->delta_gain_code[i] | (1 << 2)); 750 (data->delta_gain_code[i] | (1 << 2));
761 } else { 751 } else {
762 data->delta_gain_code[i] = 0; 752 data->delta_gain_code[i] = 0;
763 } 753 }
764 } 754 }
765 IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n", 755 D_CALIB("delta_gain_codes: a %d b %d c %d\n", data->delta_gain_code[0],
766 data->delta_gain_code[0], 756 data->delta_gain_code[1], data->delta_gain_code[2]);
767 data->delta_gain_code[1],
768 data->delta_gain_code[2]);
769 757
770 /* Differential gain gets sent to uCode only once */ 758 /* Differential gain gets sent to uCode only once */
771 if (!data->radio_write) { 759 if (!data->radio_write) {
772 struct iwl_calib_diff_gain_cmd cmd; 760 struct il_calib_diff_gain_cmd cmd;
773 data->radio_write = 1; 761 data->radio_write = 1;
774 762
775 memset(&cmd, 0, sizeof(cmd)); 763 memset(&cmd, 0, sizeof(cmd));
776 cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD; 764 cmd.hdr.op_code = IL_PHY_CALIBRATE_DIFF_GAIN_CMD;
777 cmd.diff_gain_a = data->delta_gain_code[0]; 765 cmd.diff_gain_a = data->delta_gain_code[0];
778 cmd.diff_gain_b = data->delta_gain_code[1]; 766 cmd.diff_gain_b = data->delta_gain_code[1];
779 cmd.diff_gain_c = data->delta_gain_code[2]; 767 cmd.diff_gain_c = data->delta_gain_code[2];
780 ret = iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, 768 ret = il_send_cmd_pdu(il, C_PHY_CALIBRATION, sizeof(cmd), &cmd);
781 sizeof(cmd), &cmd);
782 if (ret) 769 if (ret)
783 IWL_DEBUG_CALIB(priv, "fail sending cmd " 770 D_CALIB("fail sending cmd " "C_PHY_CALIBRATION\n");
784 "REPLY_PHY_CALIBRATION_CMD\n");
785 771
786 /* TODO we might want recalculate 772 /* TODO we might want recalculate
787 * rx_chain in rxon cmd */ 773 * rx_chain in rxon cmd */
788 774
789 /* Mark so we run this algo only once! */ 775 /* Mark so we run this algo only once! */
790 data->state = IWL_CHAIN_NOISE_CALIBRATED; 776 data->state = IL_CHAIN_NOISE_CALIBRATED;
791 } 777 }
792} 778}
793 779
794
795
796/* 780/*
797 * Accumulate 16 beacons of signal and noise statistics for each of 781 * Accumulate 16 beacons of signal and noise stats for each of
798 * 3 receivers/antennas/rx-chains, then figure out: 782 * 3 receivers/antennas/rx-chains, then figure out:
799 * 1) Which antennas are connected. 783 * 1) Which antennas are connected.
800 * 2) Differential rx gain settings to balance the 3 receivers. 784 * 2) Differential rx gain settings to balance the 3 receivers.
801 */ 785 */
802void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp) 786void
787il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp)
803{ 788{
804 struct iwl_chain_noise_data *data = NULL; 789 struct il_chain_noise_data *data = NULL;
805 790
806 u32 chain_noise_a; 791 u32 chain_noise_a;
807 u32 chain_noise_b; 792 u32 chain_noise_b;
@@ -809,8 +794,8 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
809 u32 chain_sig_a; 794 u32 chain_sig_a;
810 u32 chain_sig_b; 795 u32 chain_sig_b;
811 u32 chain_sig_c; 796 u32 chain_sig_c;
812 u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; 797 u32 average_sig[NUM_RX_CHAINS] = { INITIALIZATION_VALUE };
813 u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; 798 u32 average_noise[NUM_RX_CHAINS] = { INITIALIZATION_VALUE };
814 u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE; 799 u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
815 u16 min_average_noise_antenna_i = INITIALIZATION_VALUE; 800 u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
816 u16 i = 0; 801 u16 i = 0;
@@ -819,70 +804,69 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
819 u8 rxon_band24; 804 u8 rxon_band24;
820 u8 stat_band24; 805 u8 stat_band24;
821 unsigned long flags; 806 unsigned long flags;
822 struct statistics_rx_non_phy *rx_info; 807 struct stats_rx_non_phy *rx_info;
823 808
824 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 809 struct il_rxon_context *ctx = &il->ctx;
825 810
826 if (priv->disable_chain_noise_cal) 811 if (il->disable_chain_noise_cal)
827 return; 812 return;
828 813
829 data = &(priv->chain_noise_data); 814 data = &(il->chain_noise_data);
830 815
831 /* 816 /*
832 * Accumulate just the first "chain_noise_num_beacons" after 817 * Accumulate just the first "chain_noise_num_beacons" after
833 * the first association, then we're done forever. 818 * the first association, then we're done forever.
834 */ 819 */
835 if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) { 820 if (data->state != IL_CHAIN_NOISE_ACCUMULATE) {
836 if (data->state == IWL_CHAIN_NOISE_ALIVE) 821 if (data->state == IL_CHAIN_NOISE_ALIVE)
837 IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n"); 822 D_CALIB("Wait for noise calib reset\n");
838 return; 823 return;
839 } 824 }
840 825
841 spin_lock_irqsave(&priv->lock, flags); 826 spin_lock_irqsave(&il->lock, flags);
842 827
843 rx_info = &(((struct iwl_notif_statistics *)stat_resp)-> 828 rx_info = &(((struct il_notif_stats *)stat_resp)->rx.general);
844 rx.general);
845 829
846 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { 830 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
847 IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n"); 831 D_CALIB(" << Interference data unavailable\n");
848 spin_unlock_irqrestore(&priv->lock, flags); 832 spin_unlock_irqrestore(&il->lock, flags);
849 return; 833 return;
850 } 834 }
851 835
852 rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK); 836 rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
853 rxon_chnum = le16_to_cpu(ctx->staging.channel); 837 rxon_chnum = le16_to_cpu(ctx->staging.channel);
854 838
855 stat_band24 = !!(((struct iwl_notif_statistics *) 839 stat_band24 =
856 stat_resp)->flag & 840 !!(((struct il_notif_stats *)stat_resp)->
857 STATISTICS_REPLY_FLG_BAND_24G_MSK); 841 flag & STATS_REPLY_FLG_BAND_24G_MSK);
858 stat_chnum = le32_to_cpu(((struct iwl_notif_statistics *) 842 stat_chnum =
859 stat_resp)->flag) >> 16; 843 le32_to_cpu(((struct il_notif_stats *)stat_resp)->flag) >> 16;
860 844
861 /* Make sure we accumulate data for just the associated channel 845 /* Make sure we accumulate data for just the associated channel
862 * (even if scanning). */ 846 * (even if scanning). */
863 if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) { 847 if (rxon_chnum != stat_chnum || rxon_band24 != stat_band24) {
864 IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n", 848 D_CALIB("Stats not from chan=%d, band24=%d\n", rxon_chnum,
865 rxon_chnum, rxon_band24); 849 rxon_band24);
866 spin_unlock_irqrestore(&priv->lock, flags); 850 spin_unlock_irqrestore(&il->lock, flags);
867 return; 851 return;
868 } 852 }
869 853
870 /* 854 /*
871 * Accumulate beacon statistics values across 855 * Accumulate beacon stats values across
872 * "chain_noise_num_beacons" 856 * "chain_noise_num_beacons"
873 */ 857 */
874 chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) & 858 chain_noise_a =
875 IN_BAND_FILTER; 859 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
876 chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) & 860 chain_noise_b =
877 IN_BAND_FILTER; 861 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
878 chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) & 862 chain_noise_c =
879 IN_BAND_FILTER; 863 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
880 864
881 chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER; 865 chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
882 chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER; 866 chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
883 chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER; 867 chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
884 868
885 spin_unlock_irqrestore(&priv->lock, flags); 869 spin_unlock_irqrestore(&il->lock, flags);
886 870
887 data->beacon_count++; 871 data->beacon_count++;
888 872
@@ -894,34 +878,33 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
894 data->chain_signal_b = (chain_sig_b + data->chain_signal_b); 878 data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
895 data->chain_signal_c = (chain_sig_c + data->chain_signal_c); 879 data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
896 880
897 IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n", 881 D_CALIB("chan=%d, band24=%d, beacon=%d\n", rxon_chnum, rxon_band24,
898 rxon_chnum, rxon_band24, data->beacon_count); 882 data->beacon_count);
899 IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n", 883 D_CALIB("chain_sig: a %d b %d c %d\n", chain_sig_a, chain_sig_b,
900 chain_sig_a, chain_sig_b, chain_sig_c); 884 chain_sig_c);
901 IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n", 885 D_CALIB("chain_noise: a %d b %d c %d\n", chain_noise_a, chain_noise_b,
902 chain_noise_a, chain_noise_b, chain_noise_c); 886 chain_noise_c);
903 887
904 /* If this is the "chain_noise_num_beacons", determine: 888 /* If this is the "chain_noise_num_beacons", determine:
905 * 1) Disconnected antennas (using signal strengths) 889 * 1) Disconnected antennas (using signal strengths)
906 * 2) Differential gain (using silence noise) to balance receivers */ 890 * 2) Differential gain (using silence noise) to balance receivers */
907 if (data->beacon_count != 891 if (data->beacon_count != il->cfg->base_params->chain_noise_num_beacons)
908 priv->cfg->base_params->chain_noise_num_beacons)
909 return; 892 return;
910 893
911 /* Analyze signal for disconnected antenna */ 894 /* Analyze signal for disconnected antenna */
912 iwl4965_find_disconn_antenna(priv, average_sig, data); 895 il4965_find_disconn_antenna(il, average_sig, data);
913 896
914 /* Analyze noise for rx balance */ 897 /* Analyze noise for rx balance */
915 average_noise[0] = data->chain_noise_a / 898 average_noise[0] =
916 priv->cfg->base_params->chain_noise_num_beacons; 899 data->chain_noise_a / il->cfg->base_params->chain_noise_num_beacons;
917 average_noise[1] = data->chain_noise_b / 900 average_noise[1] =
918 priv->cfg->base_params->chain_noise_num_beacons; 901 data->chain_noise_b / il->cfg->base_params->chain_noise_num_beacons;
919 average_noise[2] = data->chain_noise_c / 902 average_noise[2] =
920 priv->cfg->base_params->chain_noise_num_beacons; 903 data->chain_noise_c / il->cfg->base_params->chain_noise_num_beacons;
921 904
922 for (i = 0; i < NUM_RX_CHAINS; i++) { 905 for (i = 0; i < NUM_RX_CHAINS; i++) {
923 if (!(data->disconn_array[i]) && 906 if (!data->disconn_array[i] &&
924 (average_noise[i] <= min_average_noise)) { 907 average_noise[i] <= min_average_noise) {
925 /* This means that chain i is active and has 908 /* This means that chain i is active and has
926 * lower noise values so far: */ 909 * lower noise values so far: */
927 min_average_noise = average_noise[i]; 910 min_average_noise = average_noise[i];
@@ -929,39 +912,37 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
929 } 912 }
930 } 913 }
931 914
932 IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n", 915 D_CALIB("average_noise: a %d b %d c %d\n", average_noise[0],
933 average_noise[0], average_noise[1], 916 average_noise[1], average_noise[2]);
934 average_noise[2]);
935 917
936 IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n", 918 D_CALIB("min_average_noise = %d, antenna %d\n", min_average_noise,
937 min_average_noise, min_average_noise_antenna_i); 919 min_average_noise_antenna_i);
938 920
939 iwl4965_gain_computation(priv, average_noise, 921 il4965_gain_computation(il, average_noise, min_average_noise_antenna_i,
940 min_average_noise_antenna_i, min_average_noise, 922 min_average_noise,
941 iwl4965_find_first_chain(priv->cfg->valid_rx_ant)); 923 il4965_find_first_chain(il->cfg->valid_rx_ant));
942 924
943 /* Some power changes may have been made during the calibration. 925 /* Some power changes may have been made during the calibration.
944 * Update and commit the RXON 926 * Update and commit the RXON
945 */ 927 */
946 if (priv->cfg->ops->lib->update_chain_flags) 928 if (il->cfg->ops->lib->update_chain_flags)
947 priv->cfg->ops->lib->update_chain_flags(priv); 929 il->cfg->ops->lib->update_chain_flags(il);
948 930
949 data->state = IWL_CHAIN_NOISE_DONE; 931 data->state = IL_CHAIN_NOISE_DONE;
950 iwl_legacy_power_update_mode(priv, false); 932 il_power_update_mode(il, false);
951} 933}
952 934
953void iwl4965_reset_run_time_calib(struct iwl_priv *priv) 935void
936il4965_reset_run_time_calib(struct il_priv *il)
954{ 937{
955 int i; 938 int i;
956 memset(&(priv->sensitivity_data), 0, 939 memset(&(il->sensitivity_data), 0, sizeof(struct il_sensitivity_data));
957 sizeof(struct iwl_sensitivity_data)); 940 memset(&(il->chain_noise_data), 0, sizeof(struct il_chain_noise_data));
958 memset(&(priv->chain_noise_data), 0,
959 sizeof(struct iwl_chain_noise_data));
960 for (i = 0; i < NUM_RX_CHAINS; i++) 941 for (i = 0; i < NUM_RX_CHAINS; i++)
961 priv->chain_noise_data.delta_gain_code[i] = 942 il->chain_noise_data.delta_gain_code[i] =
962 CHAIN_NOISE_DELTA_GAIN_INIT_VAL; 943 CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
963 944
964 /* Ask for statistics now, the uCode will send notification 945 /* Ask for stats now, the uCode will send notification
965 * periodically after association */ 946 * periodically after association */
966 iwl_legacy_send_statistics_request(priv, CMD_ASYNC, true); 947 il_send_stats_request(il, CMD_ASYNC, true);
967} 948}
diff --git a/drivers/net/wireless/iwlegacy/4965-debug.c b/drivers/net/wireless/iwlegacy/4965-debug.c
new file mode 100644
index 000000000000..98ec39f56ba3
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965-debug.c
@@ -0,0 +1,746 @@
1/******************************************************************************
2*
3* GPL LICENSE SUMMARY
4*
5* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6*
7* This program is free software; you can redistribute it and/or modify
8* it under the terms of version 2 of the GNU General Public License as
9* published by the Free Software Foundation.
10*
11* This program is distributed in the hope that it will be useful, but
12* WITHOUT ANY WARRANTY; without even the implied warranty of
13* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14* General Public License for more details.
15*
16* You should have received a copy of the GNU General Public License
17* along with this program; if not, write to the Free Software
18* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19* USA
20*
21* The full GNU General Public License is included in this distribution
22* in the file called LICENSE.GPL.
23*
24* Contact Information:
25* Intel Linux Wireless <ilw@linux.intel.com>
26* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27*****************************************************************************/
28#include "common.h"
29#include "4965.h"
30
31static const char *fmt_value = " %-30s %10u\n";
32static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
33static const char *fmt_header =
34 "%-32s current cumulative delta max\n";
35
36static int
37il4965_stats_flag(struct il_priv *il, char *buf, int bufsz)
38{
39 int p = 0;
40 u32 flag;
41
42 flag = le32_to_cpu(il->_4965.stats.flag);
43
44 p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
45 if (flag & UCODE_STATS_CLEAR_MSK)
46 p += scnprintf(buf + p, bufsz - p,
47 "\tStatistics have been cleared\n");
48 p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
49 (flag & UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" :
50 "5.2 GHz");
51 p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
52 (flag & UCODE_STATS_NARROW_BAND_MSK) ? "enabled" :
53 "disabled");
54
55 return p;
56}
57
58ssize_t
59il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
60 size_t count, loff_t *ppos)
61{
62 struct il_priv *il = file->private_data;
63 int pos = 0;
64 char *buf;
65 int bufsz =
66 sizeof(struct stats_rx_phy) * 40 +
67 sizeof(struct stats_rx_non_phy) * 40 +
68 sizeof(struct stats_rx_ht_phy) * 40 + 400;
69 ssize_t ret;
70 struct stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
71 struct stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
72 struct stats_rx_non_phy *general, *accum_general;
73 struct stats_rx_non_phy *delta_general, *max_general;
74 struct stats_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
75
76 if (!il_is_alive(il))
77 return -EAGAIN;
78
79 buf = kzalloc(bufsz, GFP_KERNEL);
80 if (!buf) {
81 IL_ERR("Can not allocate Buffer\n");
82 return -ENOMEM;
83 }
84
85 /*
86 * the statistic information display here is based on
87 * the last stats notification from uCode
88 * might not reflect the current uCode activity
89 */
90 ofdm = &il->_4965.stats.rx.ofdm;
91 cck = &il->_4965.stats.rx.cck;
92 general = &il->_4965.stats.rx.general;
93 ht = &il->_4965.stats.rx.ofdm_ht;
94 accum_ofdm = &il->_4965.accum_stats.rx.ofdm;
95 accum_cck = &il->_4965.accum_stats.rx.cck;
96 accum_general = &il->_4965.accum_stats.rx.general;
97 accum_ht = &il->_4965.accum_stats.rx.ofdm_ht;
98 delta_ofdm = &il->_4965.delta_stats.rx.ofdm;
99 delta_cck = &il->_4965.delta_stats.rx.cck;
100 delta_general = &il->_4965.delta_stats.rx.general;
101 delta_ht = &il->_4965.delta_stats.rx.ofdm_ht;
102 max_ofdm = &il->_4965.max_delta.rx.ofdm;
103 max_cck = &il->_4965.max_delta.rx.cck;
104 max_general = &il->_4965.max_delta.rx.general;
105 max_ht = &il->_4965.max_delta.rx.ofdm_ht;
106
107 pos += il4965_stats_flag(il, buf, bufsz);
108 pos +=
109 scnprintf(buf + pos, bufsz - pos, fmt_header,
110 "Statistics_Rx - OFDM:");
111 pos +=
112 scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:",
113 le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt,
114 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
115 pos +=
116 scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:",
117 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
118 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
119 pos +=
120 scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
121 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
122 delta_ofdm->plcp_err, max_ofdm->plcp_err);
123 pos +=
124 scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
125 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
126 delta_ofdm->crc32_err, max_ofdm->crc32_err);
127 pos +=
128 scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
129 le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err,
130 delta_ofdm->overrun_err, max_ofdm->overrun_err);
131 pos +=
132 scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
133 le32_to_cpu(ofdm->early_overrun_err),
134 accum_ofdm->early_overrun_err,
135 delta_ofdm->early_overrun_err,
136 max_ofdm->early_overrun_err);
137 pos +=
138 scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
139 le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good,
140 delta_ofdm->crc32_good, max_ofdm->crc32_good);
141 pos +=
142 scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:",
143 le32_to_cpu(ofdm->false_alarm_cnt),
144 accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt,
145 max_ofdm->false_alarm_cnt);
146 pos +=
147 scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:",
148 le32_to_cpu(ofdm->fina_sync_err_cnt),
149 accum_ofdm->fina_sync_err_cnt,
150 delta_ofdm->fina_sync_err_cnt,
151 max_ofdm->fina_sync_err_cnt);
152 pos +=
153 scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:",
154 le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout,
155 delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout);
156 pos +=
157 scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:",
158 le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout,
159 delta_ofdm->fina_timeout, max_ofdm->fina_timeout);
160 pos +=
161 scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:",
162 le32_to_cpu(ofdm->unresponded_rts),
163 accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts,
164 max_ofdm->unresponded_rts);
165 pos +=
166 scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:",
167 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
168 accum_ofdm->rxe_frame_limit_overrun,
169 delta_ofdm->rxe_frame_limit_overrun,
170 max_ofdm->rxe_frame_limit_overrun);
171 pos +=
172 scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:",
173 le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt,
174 delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt);
175 pos +=
176 scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:",
177 le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt,
178 delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
179 pos +=
180 scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:",
181 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
182 accum_ofdm->sent_ba_rsp_cnt, delta_ofdm->sent_ba_rsp_cnt,
183 max_ofdm->sent_ba_rsp_cnt);
184 pos +=
185 scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:",
186 le32_to_cpu(ofdm->dsp_self_kill),
187 accum_ofdm->dsp_self_kill, delta_ofdm->dsp_self_kill,
188 max_ofdm->dsp_self_kill);
189 pos +=
190 scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
191 le32_to_cpu(ofdm->mh_format_err),
192 accum_ofdm->mh_format_err, delta_ofdm->mh_format_err,
193 max_ofdm->mh_format_err);
194 pos +=
195 scnprintf(buf + pos, bufsz - pos, fmt_table,
196 "re_acq_main_rssi_sum:",
197 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
198 accum_ofdm->re_acq_main_rssi_sum,
199 delta_ofdm->re_acq_main_rssi_sum,
200 max_ofdm->re_acq_main_rssi_sum);
201
202 pos +=
203 scnprintf(buf + pos, bufsz - pos, fmt_header,
204 "Statistics_Rx - CCK:");
205 pos +=
206 scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:",
207 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
208 delta_cck->ina_cnt, max_cck->ina_cnt);
209 pos +=
210 scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:",
211 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
212 delta_cck->fina_cnt, max_cck->fina_cnt);
213 pos +=
214 scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
215 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
216 delta_cck->plcp_err, max_cck->plcp_err);
217 pos +=
218 scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
219 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
220 delta_cck->crc32_err, max_cck->crc32_err);
221 pos +=
222 scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
223 le32_to_cpu(cck->overrun_err), accum_cck->overrun_err,
224 delta_cck->overrun_err, max_cck->overrun_err);
225 pos +=
226 scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
227 le32_to_cpu(cck->early_overrun_err),
228 accum_cck->early_overrun_err,
229 delta_cck->early_overrun_err, max_cck->early_overrun_err);
230 pos +=
231 scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
232 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
233 delta_cck->crc32_good, max_cck->crc32_good);
234 pos +=
235 scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:",
236 le32_to_cpu(cck->false_alarm_cnt),
237 accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt,
238 max_cck->false_alarm_cnt);
239 pos +=
240 scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:",
241 le32_to_cpu(cck->fina_sync_err_cnt),
242 accum_cck->fina_sync_err_cnt,
243 delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt);
244 pos +=
245 scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:",
246 le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout,
247 delta_cck->sfd_timeout, max_cck->sfd_timeout);
248 pos +=
249 scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:",
250 le32_to_cpu(cck->fina_timeout), accum_cck->fina_timeout,
251 delta_cck->fina_timeout, max_cck->fina_timeout);
252 pos +=
253 scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:",
254 le32_to_cpu(cck->unresponded_rts),
255 accum_cck->unresponded_rts, delta_cck->unresponded_rts,
256 max_cck->unresponded_rts);
257 pos +=
258 scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:",
259 le32_to_cpu(cck->rxe_frame_limit_overrun),
260 accum_cck->rxe_frame_limit_overrun,
261 delta_cck->rxe_frame_limit_overrun,
262 max_cck->rxe_frame_limit_overrun);
263 pos +=
264 scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:",
265 le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt,
266 delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt);
267 pos +=
268 scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:",
269 le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt,
270 delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt);
271 pos +=
272 scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:",
273 le32_to_cpu(cck->sent_ba_rsp_cnt),
274 accum_cck->sent_ba_rsp_cnt, delta_cck->sent_ba_rsp_cnt,
275 max_cck->sent_ba_rsp_cnt);
276 pos +=
277 scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:",
278 le32_to_cpu(cck->dsp_self_kill), accum_cck->dsp_self_kill,
279 delta_cck->dsp_self_kill, max_cck->dsp_self_kill);
280 pos +=
281 scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
282 le32_to_cpu(cck->mh_format_err), accum_cck->mh_format_err,
283 delta_cck->mh_format_err, max_cck->mh_format_err);
284 pos +=
285 scnprintf(buf + pos, bufsz - pos, fmt_table,
286 "re_acq_main_rssi_sum:",
287 le32_to_cpu(cck->re_acq_main_rssi_sum),
288 accum_cck->re_acq_main_rssi_sum,
289 delta_cck->re_acq_main_rssi_sum,
290 max_cck->re_acq_main_rssi_sum);
291
292 pos +=
293 scnprintf(buf + pos, bufsz - pos, fmt_header,
294 "Statistics_Rx - GENERAL:");
295 pos +=
296 scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_cts:",
297 le32_to_cpu(general->bogus_cts), accum_general->bogus_cts,
298 delta_general->bogus_cts, max_general->bogus_cts);
299 pos +=
300 scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_ack:",
301 le32_to_cpu(general->bogus_ack), accum_general->bogus_ack,
302 delta_general->bogus_ack, max_general->bogus_ack);
303 pos +=
304 scnprintf(buf + pos, bufsz - pos, fmt_table, "non_bssid_frames:",
305 le32_to_cpu(general->non_bssid_frames),
306 accum_general->non_bssid_frames,
307 delta_general->non_bssid_frames,
308 max_general->non_bssid_frames);
309 pos +=
310 scnprintf(buf + pos, bufsz - pos, fmt_table, "filtered_frames:",
311 le32_to_cpu(general->filtered_frames),
312 accum_general->filtered_frames,
313 delta_general->filtered_frames,
314 max_general->filtered_frames);
315 pos +=
316 scnprintf(buf + pos, bufsz - pos, fmt_table, "non_channel_beacons:",
317 le32_to_cpu(general->non_channel_beacons),
318 accum_general->non_channel_beacons,
319 delta_general->non_channel_beacons,
320 max_general->non_channel_beacons);
321 pos +=
322 scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_beacons:",
323 le32_to_cpu(general->channel_beacons),
324 accum_general->channel_beacons,
325 delta_general->channel_beacons,
326 max_general->channel_beacons);
327 pos +=
328 scnprintf(buf + pos, bufsz - pos, fmt_table, "num_missed_bcon:",
329 le32_to_cpu(general->num_missed_bcon),
330 accum_general->num_missed_bcon,
331 delta_general->num_missed_bcon,
332 max_general->num_missed_bcon);
333 pos +=
334 scnprintf(buf + pos, bufsz - pos, fmt_table,
335 "adc_rx_saturation_time:",
336 le32_to_cpu(general->adc_rx_saturation_time),
337 accum_general->adc_rx_saturation_time,
338 delta_general->adc_rx_saturation_time,
339 max_general->adc_rx_saturation_time);
340 pos +=
341 scnprintf(buf + pos, bufsz - pos, fmt_table,
342 "ina_detect_search_tm:",
343 le32_to_cpu(general->ina_detection_search_time),
344 accum_general->ina_detection_search_time,
345 delta_general->ina_detection_search_time,
346 max_general->ina_detection_search_time);
347 pos +=
348 scnprintf(buf + pos, bufsz - pos, fmt_table,
349 "beacon_silence_rssi_a:",
350 le32_to_cpu(general->beacon_silence_rssi_a),
351 accum_general->beacon_silence_rssi_a,
352 delta_general->beacon_silence_rssi_a,
353 max_general->beacon_silence_rssi_a);
354 pos +=
355 scnprintf(buf + pos, bufsz - pos, fmt_table,
356 "beacon_silence_rssi_b:",
357 le32_to_cpu(general->beacon_silence_rssi_b),
358 accum_general->beacon_silence_rssi_b,
359 delta_general->beacon_silence_rssi_b,
360 max_general->beacon_silence_rssi_b);
361 pos +=
362 scnprintf(buf + pos, bufsz - pos, fmt_table,
363 "beacon_silence_rssi_c:",
364 le32_to_cpu(general->beacon_silence_rssi_c),
365 accum_general->beacon_silence_rssi_c,
366 delta_general->beacon_silence_rssi_c,
367 max_general->beacon_silence_rssi_c);
368 pos +=
369 scnprintf(buf + pos, bufsz - pos, fmt_table,
370 "interference_data_flag:",
371 le32_to_cpu(general->interference_data_flag),
372 accum_general->interference_data_flag,
373 delta_general->interference_data_flag,
374 max_general->interference_data_flag);
375 pos +=
376 scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_load:",
377 le32_to_cpu(general->channel_load),
378 accum_general->channel_load, delta_general->channel_load,
379 max_general->channel_load);
380 pos +=
381 scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_false_alarms:",
382 le32_to_cpu(general->dsp_false_alarms),
383 accum_general->dsp_false_alarms,
384 delta_general->dsp_false_alarms,
385 max_general->dsp_false_alarms);
386 pos +=
387 scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_a:",
388 le32_to_cpu(general->beacon_rssi_a),
389 accum_general->beacon_rssi_a,
390 delta_general->beacon_rssi_a, max_general->beacon_rssi_a);
391 pos +=
392 scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_b:",
393 le32_to_cpu(general->beacon_rssi_b),
394 accum_general->beacon_rssi_b,
395 delta_general->beacon_rssi_b, max_general->beacon_rssi_b);
396 pos +=
397 scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_c:",
398 le32_to_cpu(general->beacon_rssi_c),
399 accum_general->beacon_rssi_c,
400 delta_general->beacon_rssi_c, max_general->beacon_rssi_c);
401 pos +=
402 scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_a:",
403 le32_to_cpu(general->beacon_energy_a),
404 accum_general->beacon_energy_a,
405 delta_general->beacon_energy_a,
406 max_general->beacon_energy_a);
407 pos +=
408 scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_b:",
409 le32_to_cpu(general->beacon_energy_b),
410 accum_general->beacon_energy_b,
411 delta_general->beacon_energy_b,
412 max_general->beacon_energy_b);
413 pos +=
414 scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_c:",
415 le32_to_cpu(general->beacon_energy_c),
416 accum_general->beacon_energy_c,
417 delta_general->beacon_energy_c,
418 max_general->beacon_energy_c);
419
420 pos +=
421 scnprintf(buf + pos, bufsz - pos, fmt_header,
422 "Statistics_Rx - OFDM_HT:");
423 pos +=
424 scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
425 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
426 delta_ht->plcp_err, max_ht->plcp_err);
427 pos +=
428 scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
429 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
430 delta_ht->overrun_err, max_ht->overrun_err);
431 pos +=
432 scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
433 le32_to_cpu(ht->early_overrun_err),
434 accum_ht->early_overrun_err, delta_ht->early_overrun_err,
435 max_ht->early_overrun_err);
436 pos +=
437 scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
438 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
439 delta_ht->crc32_good, max_ht->crc32_good);
440 pos +=
441 scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
442 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
443 delta_ht->crc32_err, max_ht->crc32_err);
444 pos +=
445 scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
446 le32_to_cpu(ht->mh_format_err), accum_ht->mh_format_err,
447 delta_ht->mh_format_err, max_ht->mh_format_err);
448 pos +=
449 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_crc32_good:",
450 le32_to_cpu(ht->agg_crc32_good), accum_ht->agg_crc32_good,
451 delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
452 pos +=
453 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_mpdu_cnt:",
454 le32_to_cpu(ht->agg_mpdu_cnt), accum_ht->agg_mpdu_cnt,
455 delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
456 pos +=
457 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_cnt:",
458 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
459 delta_ht->agg_cnt, max_ht->agg_cnt);
460 pos +=
461 scnprintf(buf + pos, bufsz - pos, fmt_table, "unsupport_mcs:",
462 le32_to_cpu(ht->unsupport_mcs), accum_ht->unsupport_mcs,
463 delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
464
465 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
466 kfree(buf);
467 return ret;
468}
469
470ssize_t
471il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
472 size_t count, loff_t *ppos)
473{
474 struct il_priv *il = file->private_data;
475 int pos = 0;
476 char *buf;
477 int bufsz = (sizeof(struct stats_tx) * 48) + 250;
478 ssize_t ret;
479 struct stats_tx *tx, *accum_tx, *delta_tx, *max_tx;
480
481 if (!il_is_alive(il))
482 return -EAGAIN;
483
484 buf = kzalloc(bufsz, GFP_KERNEL);
485 if (!buf) {
486 IL_ERR("Can not allocate Buffer\n");
487 return -ENOMEM;
488 }
489
490 /* the statistic information display here is based on
491 * the last stats notification from uCode
492 * might not reflect the current uCode activity
493 */
494 tx = &il->_4965.stats.tx;
495 accum_tx = &il->_4965.accum_stats.tx;
496 delta_tx = &il->_4965.delta_stats.tx;
497 max_tx = &il->_4965.max_delta.tx;
498
499 pos += il4965_stats_flag(il, buf, bufsz);
500 pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Tx:");
501 pos +=
502 scnprintf(buf + pos, bufsz - pos, fmt_table, "preamble:",
503 le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt,
504 delta_tx->preamble_cnt, max_tx->preamble_cnt);
505 pos +=
506 scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_detected_cnt:",
507 le32_to_cpu(tx->rx_detected_cnt),
508 accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt,
509 max_tx->rx_detected_cnt);
510 pos +=
511 scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_defer_cnt:",
512 le32_to_cpu(tx->bt_prio_defer_cnt),
513 accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt,
514 max_tx->bt_prio_defer_cnt);
515 pos +=
516 scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_kill_cnt:",
517 le32_to_cpu(tx->bt_prio_kill_cnt),
518 accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt,
519 max_tx->bt_prio_kill_cnt);
520 pos +=
521 scnprintf(buf + pos, bufsz - pos, fmt_table, "few_bytes_cnt:",
522 le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt,
523 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
524 pos +=
525 scnprintf(buf + pos, bufsz - pos, fmt_table, "cts_timeout:",
526 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
527 delta_tx->cts_timeout, max_tx->cts_timeout);
528 pos +=
529 scnprintf(buf + pos, bufsz - pos, fmt_table, "ack_timeout:",
530 le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout,
531 delta_tx->ack_timeout, max_tx->ack_timeout);
532 pos +=
533 scnprintf(buf + pos, bufsz - pos, fmt_table, "expected_ack_cnt:",
534 le32_to_cpu(tx->expected_ack_cnt),
535 accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt,
536 max_tx->expected_ack_cnt);
537 pos +=
538 scnprintf(buf + pos, bufsz - pos, fmt_table, "actual_ack_cnt:",
539 le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt,
540 delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt);
541 pos +=
542 scnprintf(buf + pos, bufsz - pos, fmt_table, "dump_msdu_cnt:",
543 le32_to_cpu(tx->dump_msdu_cnt), accum_tx->dump_msdu_cnt,
544 delta_tx->dump_msdu_cnt, max_tx->dump_msdu_cnt);
545 pos +=
546 scnprintf(buf + pos, bufsz - pos, fmt_table,
547 "abort_nxt_frame_mismatch:",
548 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
549 accum_tx->burst_abort_next_frame_mismatch_cnt,
550 delta_tx->burst_abort_next_frame_mismatch_cnt,
551 max_tx->burst_abort_next_frame_mismatch_cnt);
552 pos +=
553 scnprintf(buf + pos, bufsz - pos, fmt_table,
554 "abort_missing_nxt_frame:",
555 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
556 accum_tx->burst_abort_missing_next_frame_cnt,
557 delta_tx->burst_abort_missing_next_frame_cnt,
558 max_tx->burst_abort_missing_next_frame_cnt);
559 pos +=
560 scnprintf(buf + pos, bufsz - pos, fmt_table,
561 "cts_timeout_collision:",
562 le32_to_cpu(tx->cts_timeout_collision),
563 accum_tx->cts_timeout_collision,
564 delta_tx->cts_timeout_collision,
565 max_tx->cts_timeout_collision);
566 pos +=
567 scnprintf(buf + pos, bufsz - pos, fmt_table,
568 "ack_ba_timeout_collision:",
569 le32_to_cpu(tx->ack_or_ba_timeout_collision),
570 accum_tx->ack_or_ba_timeout_collision,
571 delta_tx->ack_or_ba_timeout_collision,
572 max_tx->ack_or_ba_timeout_collision);
573 pos +=
574 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg ba_timeout:",
575 le32_to_cpu(tx->agg.ba_timeout), accum_tx->agg.ba_timeout,
576 delta_tx->agg.ba_timeout, max_tx->agg.ba_timeout);
577 pos +=
578 scnprintf(buf + pos, bufsz - pos, fmt_table,
579 "agg ba_resched_frames:",
580 le32_to_cpu(tx->agg.ba_reschedule_frames),
581 accum_tx->agg.ba_reschedule_frames,
582 delta_tx->agg.ba_reschedule_frames,
583 max_tx->agg.ba_reschedule_frames);
584 pos +=
585 scnprintf(buf + pos, bufsz - pos, fmt_table,
586 "agg scd_query_agg_frame:",
587 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
588 accum_tx->agg.scd_query_agg_frame_cnt,
589 delta_tx->agg.scd_query_agg_frame_cnt,
590 max_tx->agg.scd_query_agg_frame_cnt);
591 pos +=
592 scnprintf(buf + pos, bufsz - pos, fmt_table,
593 "agg scd_query_no_agg:",
594 le32_to_cpu(tx->agg.scd_query_no_agg),
595 accum_tx->agg.scd_query_no_agg,
596 delta_tx->agg.scd_query_no_agg,
597 max_tx->agg.scd_query_no_agg);
598 pos +=
599 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg scd_query_agg:",
600 le32_to_cpu(tx->agg.scd_query_agg),
601 accum_tx->agg.scd_query_agg, delta_tx->agg.scd_query_agg,
602 max_tx->agg.scd_query_agg);
603 pos +=
604 scnprintf(buf + pos, bufsz - pos, fmt_table,
605 "agg scd_query_mismatch:",
606 le32_to_cpu(tx->agg.scd_query_mismatch),
607 accum_tx->agg.scd_query_mismatch,
608 delta_tx->agg.scd_query_mismatch,
609 max_tx->agg.scd_query_mismatch);
610 pos +=
611 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg frame_not_ready:",
612 le32_to_cpu(tx->agg.frame_not_ready),
613 accum_tx->agg.frame_not_ready,
614 delta_tx->agg.frame_not_ready,
615 max_tx->agg.frame_not_ready);
616 pos +=
617 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg underrun:",
618 le32_to_cpu(tx->agg.underrun), accum_tx->agg.underrun,
619 delta_tx->agg.underrun, max_tx->agg.underrun);
620 pos +=
621 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg bt_prio_kill:",
622 le32_to_cpu(tx->agg.bt_prio_kill),
623 accum_tx->agg.bt_prio_kill, delta_tx->agg.bt_prio_kill,
624 max_tx->agg.bt_prio_kill);
625 pos +=
626 scnprintf(buf + pos, bufsz - pos, fmt_table, "agg rx_ba_rsp_cnt:",
627 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
628 accum_tx->agg.rx_ba_rsp_cnt, delta_tx->agg.rx_ba_rsp_cnt,
629 max_tx->agg.rx_ba_rsp_cnt);
630
631 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
632 kfree(buf);
633 return ret;
634}
635
/*
 * il4965_ucode_general_stats_read - debugfs read for uCode "general" stats
 *
 * Formats the current (le32, straight from the last uCode stats
 * notification), accumulated, delta and max-delta values of the
 * general/dbg/div statistics groups into a text table and copies it to
 * userspace with simple_read_from_buffer().
 *
 * Returns the number of bytes copied, -EAGAIN if the uCode is not alive,
 * or -ENOMEM if the temporary buffer cannot be allocated.
 */
ssize_t
il4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct il_priv *il = file->private_data;
	int pos = 0;
	char *buf;
	/* generous worst-case size for the formatted text */
	int bufsz = sizeof(struct stats_general) * 10 + 300;
	ssize_t ret;
	struct stats_general_common *general, *accum_general;
	struct stats_general_common *delta_general, *max_general;
	struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
	struct stats_div *div, *accum_div, *delta_div, *max_div;

	if (!il_is_alive(il))
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IL_ERR("Can not allocate Buffer\n");
		return -ENOMEM;
	}

	/* the statistic information display here is based on
	 * the last stats notification from uCode
	 * might not reflect the current uCode activity
	 */
	general = &il->_4965.stats.general.common;
	dbg = &il->_4965.stats.general.common.dbg;
	div = &il->_4965.stats.general.common.div;
	accum_general = &il->_4965.accum_stats.general.common;
	accum_dbg = &il->_4965.accum_stats.general.common.dbg;
	accum_div = &il->_4965.accum_stats.general.common.div;
	delta_general = &il->_4965.delta_stats.general.common;
	max_general = &il->_4965.max_delta.general.common;
	delta_dbg = &il->_4965.delta_stats.general.common.dbg;
	max_dbg = &il->_4965.max_delta.general.common.dbg;
	delta_div = &il->_4965.delta_stats.general.common.div;
	max_div = &il->_4965.max_delta.general.common.div;

	/* Each fmt_table row prints: current, accumulated, delta, max delta. */
	pos += il4965_stats_flag(il, buf, bufsz);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_header,
		      "Statistics_General:");
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_value, "temperature:",
		      le32_to_cpu(general->temperature));
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_value, "ttl_timestamp:",
		      le32_to_cpu(general->ttl_timestamp));
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_check:",
		      le32_to_cpu(dbg->burst_check), accum_dbg->burst_check,
		      delta_dbg->burst_check, max_dbg->burst_check);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_count:",
		      le32_to_cpu(dbg->burst_count), accum_dbg->burst_count,
		      delta_dbg->burst_count, max_dbg->burst_count);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table,
		      "wait_for_silence_timeout_count:",
		      le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
		      accum_dbg->wait_for_silence_timeout_cnt,
		      delta_dbg->wait_for_silence_timeout_cnt,
		      max_dbg->wait_for_silence_timeout_cnt);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "sleep_time:",
		      le32_to_cpu(general->sleep_time),
		      accum_general->sleep_time, delta_general->sleep_time,
		      max_general->sleep_time);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_out:",
		      le32_to_cpu(general->slots_out), accum_general->slots_out,
		      delta_general->slots_out, max_general->slots_out);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_idle:",
		      le32_to_cpu(general->slots_idle),
		      accum_general->slots_idle, delta_general->slots_idle,
		      max_general->slots_idle);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_a:",
		      le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
		      delta_div->tx_on_a, max_div->tx_on_a);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_b:",
		      le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
		      delta_div->tx_on_b, max_div->tx_on_b);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "exec_time:",
		      le32_to_cpu(div->exec_time), accum_div->exec_time,
		      delta_div->exec_time, max_div->exec_time);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "probe_time:",
		      le32_to_cpu(div->probe_time), accum_div->probe_time,
		      delta_div->probe_time, max_div->probe_time);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_enable_counter:",
		      le32_to_cpu(general->rx_enable_counter),
		      accum_general->rx_enable_counter,
		      delta_general->rx_enable_counter,
		      max_general->rx_enable_counter);
	pos +=
	    scnprintf(buf + pos, bufsz - pos, fmt_table, "num_of_sos_states:",
		      le32_to_cpu(general->num_of_sos_states),
		      accum_general->num_of_sos_states,
		      delta_general->num_of_sos_states,
		      max_general->num_of_sos_states);
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
new file mode 100644
index 000000000000..4aaef4135564
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -0,0 +1,6536 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/pci-aspm.h>
37#include <linux/slab.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/sched.h>
41#include <linux/skbuff.h>
42#include <linux/netdevice.h>
43#include <linux/firmware.h>
44#include <linux/etherdevice.h>
45#include <linux/if_arp.h>
46
47#include <net/mac80211.h>
48
49#include <asm/div64.h>
50
51#define DRV_NAME "iwl4965"
52
53#include "common.h"
54#include "4965.h"
55
56/******************************************************************************
57 *
58 * module boiler plate
59 *
60 ******************************************************************************/
61
62/*
63 * module name, copyright, version, etc.
64 */
65#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
66
67#ifdef CONFIG_IWLEGACY_DEBUG
68#define VD "d"
69#else
70#define VD
71#endif
72
73#define DRV_VERSION IWLWIFI_VERSION VD
74
75MODULE_DESCRIPTION(DRV_DESCRIPTION);
76MODULE_VERSION(DRV_VERSION);
77MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
78MODULE_LICENSE("GPL");
79MODULE_ALIAS("iwl4965");
80
81void
82il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status)
83{
84 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
85 IL_ERR("Tx flush command to flush out all frames\n");
86 if (!test_bit(S_EXIT_PENDING, &il->status))
87 queue_work(il->workqueue, &il->tx_flush);
88 }
89}
90
91/*
92 * EEPROM
93 */
/* Default module parameters for the 4965 driver. */
struct il_mod_params il4965_mod_params = {
	.amsdu_size_8K = 1,	/* use 8K Rx buffers to fit A-MSDU frames */
	.restart_fw = 1,	/* auto-restart firmware after an error */
	/* the rest are 0 by default */
};
99
100void
101il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
102{
103 unsigned long flags;
104 int i;
105 spin_lock_irqsave(&rxq->lock, flags);
106 INIT_LIST_HEAD(&rxq->rx_free);
107 INIT_LIST_HEAD(&rxq->rx_used);
108 /* Fill the rx_used queue with _all_ of the Rx buffers */
109 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
110 /* In the reset function, these buffers may have been allocated
111 * to an SKB, so we need to unmap and free potential storage */
112 if (rxq->pool[i].page != NULL) {
113 pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
114 PAGE_SIZE << il->hw_params.rx_page_order,
115 PCI_DMA_FROMDEVICE);
116 __il_free_pages(il, rxq->pool[i].page);
117 rxq->pool[i].page = NULL;
118 }
119 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
120 }
121
122 for (i = 0; i < RX_QUEUE_SIZE; i++)
123 rxq->queue[i] = NULL;
124
125 /* Set us so that we have processed and used all buffers, but have
126 * not restocked the Rx queue with fresh buffers */
127 rxq->read = rxq->write = 0;
128 rxq->write_actual = 0;
129 rxq->free_count = 0;
130 spin_unlock_irqrestore(&rxq->lock, flags);
131}
132
/*
 * il4965_rx_init - program the 4965 Rx DMA channel registers
 *
 * Stops Rx DMA, tells the device where the RBD circular buffer and the
 * Rx status area live in DRAM, then re-enables DMA with the configured
 * receive-buffer size.  Always returns 0.
 */
int
il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG;	/* 256 RBDs */
	u32 rb_timeout = 0;	/* no interrupt-coalescing RB timeout */

	/* 8K buffers are needed when A-MSDU aggregates are enabled */
	if (il->cfg->mod_params->amsdu_size_8K)
		rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write idx */
	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_BASE_REG, (u32) (rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	il_wr(il, FH49_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG,
	      FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	      FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	      FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
	      rb_size |
	      (rb_timeout << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	      (rfdnlog << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_TIMEOUT_DEF);

	return 0;
}
176
/* Select V_MAIN as the device power source via the APMG power-control
 * register. */
static void
il4965_set_pwr_vmain(struct il_priv *il)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

 if (pci_pme_capable(il->pci_dev, PCI_D3cold))
	il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
			      APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
			      ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
			      APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			      ~APMG_PS_CTRL_MSK_PWR_SRC);
}
194
/*
 * il4965_hw_nic_init - one-time/restart NIC hardware initialization
 *
 * Runs APM init/config, selects V_MAIN power, then allocates (first
 * call) or resets (subsequent calls) the Rx queue and all Tx/command
 * queues.  Sets S_INIT on success.
 *
 * Returns 0 on success or a negative errno on allocation failure.
 * NOTE(review): if the Tx queue allocation fails, the Rx queue set up
 * above is not torn down here -- presumably the caller's error path
 * handles that; confirm.
 */
int
il4965_hw_nic_init(struct il_priv *il)
{
	unsigned long flags;
	struct il_rx_queue *rxq = &il->rxq;
	int ret;

	/* nic_init */
	spin_lock_irqsave(&il->lock, flags);
	il->cfg->ops->lib->apm_ops.init(il);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&il->lock, flags);

	il4965_set_pwr_vmain(il);

	il->cfg->ops->lib->apm_ops.config(il);

	/* Allocate the RX queue, or reset if it is already allocated */
	if (!rxq->bd) {
		ret = il_rx_queue_alloc(il);
		if (ret) {
			IL_ERR("Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		il4965_rx_queue_reset(il, rxq);

	il4965_rx_replenish(il);

	il4965_rx_init(il, rxq);

	spin_lock_irqsave(&il->lock, flags);

	rxq->need_update = 1;
	il_rx_queue_update_write_ptr(il, rxq);

	spin_unlock_irqrestore(&il->lock, flags);

	/* Allocate or reset and init all Tx and Command queues */
	if (!il->txq) {
		ret = il4965_txq_ctx_alloc(il);
		if (ret)
			return ret;
	} else
		il4965_txq_ctx_reset(il);

	set_bit(S_INIT, &il->status);

	return 0;
}
248
249/**
250 * il4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
251 */
252static inline __le32
253il4965_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
254{
255 return cpu_to_le32((u32) (dma_addr >> 8));
256}
257
/**
 * il4965_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' idx forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
void
il4965_rx_queue_restock(struct il_priv *il)
{
	struct il_rx_queue *rxq = &il->rxq;
	struct list_head *element;
	struct il_rx_buf *rxb;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct il_rx_buf, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] =
		    il4965_dma_addr2rbd_ptr(il, rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(il->workqueue, &il->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		/* need_update is set under the lock; the write-pointer update
		 * itself happens after the lock is dropped */
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		il_rx_queue_update_write_ptr(il, rxq);
	}
}
310
/**
 * il4965_rx_allocate - allocate pages for buffers on the rx_used list
 *
 * For each entry on rxq->rx_used, allocate a receive page, DMA-map it
 * and move the buffer to rxq->rx_free.  Called from the replenish paths;
 * @priority selects GFP_KERNEL (process context) or GFP_ATOMIC
 * (interrupt/atomic context).  The lock is dropped around the page
 * allocation, so the rx_used list is re-checked after each alloc.
 */
static void
il4965_rx_allocate(struct il_priv *il, gfp_t priority)
{
	struct il_rx_queue *rxq = &il->rxq;
	struct list_head *element;
	struct il_rx_buf *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		/* stop once nothing is waiting for a page */
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		/* plenty of buffers already: failures are not interesting */
		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (il->hw_params.rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
		if (!page) {
			if (net_ratelimit())
				D_INFO("alloc_pages failed, " "order: %d\n",
				       il->hw_params.rx_page_order);

			if (rxq->free_count <= RX_LOW_WATERMARK &&
			    net_ratelimit())
				IL_ERR("Failed to alloc_pages with %s. "
				       "Only %u free buffers remaining.\n",
				       priority ==
				       GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
				       rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		/* someone may have drained rx_used while we allocated */
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, il->hw_params.rx_page_order);
			return;
		}
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct il_rx_buf, list);
		list_del(element);

		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
		    pci_map_page(il->pci_dev, page, 0,
				 PAGE_SIZE << il->hw_params.rx_page_order,
				 PCI_DMA_FROMDEVICE);
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
		il->alloc_rxb_page++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}
397
398void
399il4965_rx_replenish(struct il_priv *il)
400{
401 unsigned long flags;
402
403 il4965_rx_allocate(il, GFP_KERNEL);
404
405 spin_lock_irqsave(&il->lock, flags);
406 il4965_rx_queue_restock(il);
407 spin_unlock_irqrestore(&il->lock, flags);
408}
409
/* Atomic-context replenish: allocates with GFP_ATOMIC and restocks.
 * NOTE(review): unlike il4965_rx_replenish(), the restock here is not
 * wrapped in il->lock -- presumably the calling (interrupt) path already
 * serializes it; confirm against the callers. */
void
il4965_rx_replenish_now(struct il_priv *il)
{
	il4965_rx_allocate(il, GFP_ATOMIC);

	il4965_rx_queue_restock(il);
}
417
418/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
419 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
420 * This free routine walks the list of POOL entries and if SKB is set to
421 * non NULL it is unmapped and freed
422 */
423void
424il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
425{
426 int i;
427 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
428 if (rxq->pool[i].page != NULL) {
429 pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
430 PAGE_SIZE << il->hw_params.rx_page_order,
431 PCI_DMA_FROMDEVICE);
432 __il_free_pages(il, rxq->pool[i].page);
433 rxq->pool[i].page = NULL;
434 }
435 }
436
437 dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
438 rxq->bd_dma);
439 dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
440 rxq->rb_stts, rxq->rb_stts_dma);
441 rxq->bd = NULL;
442 rxq->rb_stts = NULL;
443}
444
/*
 * il4965_rxq_stop - halt the Rx DMA channel
 *
 * Writes 0 to the channel-0 config register and polls (up to 1000 us)
 * for the channel-idle status bit.  Always returns 0.
 * NOTE(review): the il_poll_bit() result is discarded, so a timeout
 * waiting for idle goes unreported -- matches the original behavior.
 */
int
il4965_rxq_stop(struct il_priv *il)
{

	/* stop Rx DMA */
	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG,
		    FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);

	return 0;
}
456
457int
458il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
459{
460 int idx = 0;
461 int band_offset = 0;
462
463 /* HT rate format: mac80211 wants an MCS number, which is just LSB */
464 if (rate_n_flags & RATE_MCS_HT_MSK) {
465 idx = (rate_n_flags & 0xff);
466 return idx;
467 /* Legacy rate format, search for match in table */
468 } else {
469 if (band == IEEE80211_BAND_5GHZ)
470 band_offset = IL_FIRST_OFDM_RATE;
471 for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++)
472 if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
473 return idx - band_offset;
474 }
475
476 return -1;
477}
478
479static int
480il4965_calc_rssi(struct il_priv *il, struct il_rx_phy_res *rx_resp)
481{
482 /* data from PHY/DSP regarding signal strength, etc.,
483 * contents are always there, not configurable by host. */
484 struct il4965_rx_non_cfg_phy *ncphy =
485 (struct il4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
486 u32 agc =
487 (le16_to_cpu(ncphy->agc_info) & IL49_AGC_DB_MASK) >>
488 IL49_AGC_DB_POS;
489
490 u32 valid_antennae =
491 (le16_to_cpu(rx_resp->phy_flags) & IL49_RX_PHY_FLAGS_ANTENNAE_MASK)
492 >> IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
493 u8 max_rssi = 0;
494 u32 i;
495
496 /* Find max rssi among 3 possible receivers.
497 * These values are measured by the digital signal processor (DSP).
498 * They should stay fairly constant even as the signal strength varies,
499 * if the radio's automatic gain control (AGC) is working right.
500 * AGC value (see below) will provide the "interesting" info. */
501 for (i = 0; i < 3; i++)
502 if (valid_antennae & (1 << i))
503 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
504
505 D_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
506 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
507 max_rssi, agc);
508
509 /* dBm = max_rssi dB - agc dB - constant.
510 * Higher AGC (higher radio gain) means lower signal. */
511 return max_rssi - agc - IL4965_RSSI_OFFSET;
512}
513
/*
 * il4965_translate_rx_status - normalize MPDU decryption status bits
 *
 * N_RX_MPDU frames report decryption status in a different bit layout
 * than N_RX; translate it into the N_RX-style status the common Rx
 * path (il_set_decrypted_flag) expects.
 */
static u32
il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
{
	u32 decrypt_out = 0;

	if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
	    RX_RES_STATUS_STATION_FOUND)
		decrypt_out |=
		    (RX_RES_STATUS_STATION_FOUND |
		     RX_RES_STATUS_NO_STATION_INFO_MISMATCH);

	/* carry the security-type bits through unchanged */
	decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);

	/* packet was not encrypted */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
	    RX_RES_STATUS_SEC_TYPE_NONE)
		return decrypt_out;

	/* packet was encrypted with unknown alg */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
	    RX_RES_STATUS_SEC_TYPE_ERR)
		return decrypt_out;

	/* decryption was not done in HW */
	if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
	    RX_MPDU_RES_STATUS_DEC_DONE_MSK)
		return decrypt_out;

	switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {

	case RX_RES_STATUS_SEC_TYPE_CCMP:
		/* alg is CCM: check MIC only */
		if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
			/* Bad MIC */
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;

		break;

	case RX_RES_STATUS_SEC_TYPE_TKIP:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
			/* Bad TTAK */
			decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
			break;
		}
		/* fall through if TTAK OK: TKIP with good TTAK shares the
		 * ICV check below with WEP/other algorithms */
	default:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
		break;
	}

	D_RX("decrypt_in:0x%x decrypt_out = 0x%x\n", decrypt_in, decrypt_out);

	return decrypt_out;
}
573
/*
 * il4965_pass_packet_to_mac80211 - hand a received frame to mac80211
 *
 * Drops the frame if the interface is closed or if hardware decryption
 * failed (unless software crypto is in use).  On success the Rx page
 * is attached to a fresh skb as a paged fragment; ownership of the page
 * transfers to the skb (rxb->page is cleared and the page accounting
 * counter decremented).
 */
static void
il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
			       u16 len, u32 ampdu_status, struct il_rx_buf *rxb,
			       struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;

	/* We only process data packets if the interface is open */
	if (unlikely(!il->is_open)) {
		D_DROP("Dropping packet while interface is not open.\n");
		return;
	}

	/* In case of HW accelerated crypto and bad decryption, drop */
	if (!il->cfg->mod_params->sw_crypto &&
	    il_set_decrypted_flag(il, hdr, ampdu_status, stats))
		return;

	/* small head-only skb; the frame data stays in the Rx page */
	skb = dev_alloc_skb(128);
	if (!skb) {
		IL_ERR("dev_alloc_skb failed\n");
		return;
	}

	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);

	il_update_stats(il, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(il->hw, skb);
	/* page now belongs to the skb */
	il->alloc_rxb_page--;
	rxb->page = NULL;
}
608
/* Called for N_RX (legacy ABG frames), or
 * N_RX_MPDU (HT high-throughput N frames).
 *
 * Validates the frame (PHY config size, CRC/FIFO status), fills in an
 * ieee80211_rx_status from the PHY result, and passes the frame up to
 * mac80211 via il4965_pass_packet_to_mac80211(). */
void
il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct ieee80211_hdr *header;
	/* NOTE(review): rx_status is stack-allocated and only the fields
	 * assigned below are initialized -- confirm mac80211 consumes no
	 * other fields of this struct in this kernel version. */
	struct ieee80211_rx_status rx_status;
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_rx_phy_res *phy_res;
	__le32 rx_pkt_status;
	struct il_rx_mpdu_res_start *amsdu;
	u32 len;
	u32 ampdu_status;
	u32 rate_n_flags;

	/**
	 * N_RX and N_RX_MPDU are handled differently.
	 * N_RX: physical layer info is in this buffer
	 * N_RX_MPDU: physical layer info was sent in separate
	 * command and cached in il->last_phy_res
	 *
	 * Here we set up local variables depending on which command is
	 * received.
	 */
	if (pkt->hdr.cmd == N_RX) {
		/* layout: phy_res | cfg_phy data | 802.11 header+payload |
		 * 32-bit rx status word */
		phy_res = (struct il_rx_phy_res *)pkt->u.raw;
		header =
		    (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) +
					     phy_res->cfg_phy_cnt);

		len = le16_to_cpu(phy_res->byte_count);
		rx_pkt_status =
		    *(__le32 *) (pkt->u.raw + sizeof(*phy_res) +
				 phy_res->cfg_phy_cnt + len);
		ampdu_status = le32_to_cpu(rx_pkt_status);
	} else {
		/* MPDU: PHY data must have arrived in a prior N_RX_PHY */
		if (!il->_4965.last_phy_res_valid) {
			IL_ERR("MPDU frame without cached PHY data\n");
			return;
		}
		phy_res = &il->_4965.last_phy_res;
		amsdu = (struct il_rx_mpdu_res_start *)pkt->u.raw;
		header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
		len = le16_to_cpu(amsdu->byte_count);
		rx_pkt_status = *(__le32 *) (pkt->u.raw + sizeof(*amsdu) + len);
		ampdu_status =
		    il4965_translate_rx_status(il, le32_to_cpu(rx_pkt_status));
	}

	if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
		D_DROP("dsp size out of range [0,20]: %d/n",
		       phy_res->cfg_phy_cnt);
		return;
	}

	/* drop frames with CRC errors or RXE FIFO overflows */
	if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
	    !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		D_RX("Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status));
		return;
	}

	/* This will be used in several places later */
	rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);

	/* rx_status carries information about the packet to mac80211 */
	rx_status.mactime = le64_to_cpu(phy_res->timestamp);
	rx_status.band =
	    (phy_res->
	     phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ :
	    IEEE80211_BAND_5GHZ;
	rx_status.freq =
	    ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
					   rx_status.band);
	rx_status.rate_idx =
	    il4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
	rx_status.flag = 0;

	/* TSF isn't reliable. In order to allow smooth user experience,
	 * this W/A doesn't propagate it to the mac80211 */
	/*rx_status.flag |= RX_FLAG_MACTIME_MPDU; */

	il->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);

	/* Find max signal strength (dBm) among 3 antenna/receiver chains */
	rx_status.signal = il4965_calc_rssi(il, phy_res);

	il_dbg_log_rx_data_frame(il, len, header);
	D_STATS("Rssi %d, TSF %llu\n", rx_status.signal,
		(unsigned long long)rx_status.mactime);

	/*
	 * "antenna number"
	 *
	 * It seems that the antenna field in the phy flags value
	 * is actually a bit field. This is undefined by radiotap,
	 * it wants an actual antenna number but I always get "7"
	 * for most legacy frames I receive indicating that the
	 * same frame was received on all three RX chains.
	 *
	 * I think this field should be removed in favor of a
	 * new 802.11n radiotap field "RX chains" that is defined
	 * as a bitmask.
	 */
	rx_status.antenna =
	    (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
	    RX_RES_PHY_FLAGS_ANTENNA_POS;

	/* set the preamble flag if appropriate */
	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
		rx_status.flag |= RX_FLAG_SHORTPRE;

	/* Set up the HT phy flags */
	if (rate_n_flags & RATE_MCS_HT_MSK)
		rx_status.flag |= RX_FLAG_HT;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		rx_status.flag |= RX_FLAG_40MHZ;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		rx_status.flag |= RX_FLAG_SHORT_GI;

	il4965_pass_packet_to_mac80211(il, header, len, ampdu_status, rxb,
				       &rx_status);
}
731
732/* Cache phy data (Rx signal strength, etc) for HT frame (N_RX_PHY).
733 * This will be used later in il_hdl_rx() for N_RX_MPDU. */
734void
735il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
736{
737 struct il_rx_pkt *pkt = rxb_addr(rxb);
738 il->_4965.last_phy_res_valid = true;
739 memcpy(&il->_4965.last_phy_res, pkt->u.raw,
740 sizeof(struct il_rx_phy_res));
741}
742
/*
 * il4965_get_channels_for_scan - build the scan-channel table for one band
 *
 * Fills @scan_ch with one entry per requested channel on @band that is
 * valid for this device: channel number, active/passive type, dwell
 * times and default tx power settings.
 *
 * Returns the number of channel entries written (0 if the band is not
 * supported).
 */
static int
il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
			     enum ieee80211_band band, u8 is_active,
			     u8 n_probes, struct il_scan_channel *scan_ch)
{
	struct ieee80211_channel *chan;
	const struct ieee80211_supported_band *sband;
	const struct il_channel_info *ch_info;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added, i;
	u16 channel;

	sband = il_get_hw_mode(il, band);
	if (!sband)
		return 0;

	active_dwell = il_get_active_dwell_time(il, band, n_probes);
	passive_dwell = il_get_passive_dwell_time(il, band, vif);

	/* passive dwell must exceed active dwell */
	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
		chan = il->scan_request->channels[i];

		if (chan->band != band)
			continue;

		channel = chan->hw_value;
		scan_ch->channel = cpu_to_le16(channel);

		ch_info = il_get_channel_info(il, band, channel);
		if (!il_is_channel_valid(ch_info)) {
			D_SCAN("Channel %d is INVALID for this band.\n",
			       channel);
			continue;
		}

		/* passive scan unless an active scan was requested AND the
		 * channel/regulatory rules allow active probing */
		if (!is_active || il_is_channel_passive(ch_info) ||
		    (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
			scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
		else
			scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;

		if (n_probes)
			scan_ch->type |= IL_SCAN_PROBE_MASK(n_probes);

		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);

		/* Set txpower levels to defaults */
		scan_ch->dsp_atten = 110;

		/* NOTE: if we were doing 6Mb OFDM for scans we'd use
		 * power level:
		 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
		 */
		if (band == IEEE80211_BAND_5GHZ)
			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else
			scan_ch->tx_gain = ((1 << 5) | (5 << 3));

		D_SCAN("Scanning ch=%d prob=0x%X [%s %d]\n", channel,
		       le32_to_cpu(scan_ch->type),
		       (scan_ch->
			type & SCAN_CHANNEL_TYPE_ACTIVE) ? "ACTIVE" : "PASSIVE",
		       (scan_ch->
			type & SCAN_CHANNEL_TYPE_ACTIVE) ? active_dwell :
		       passive_dwell);

		scan_ch++;
		added++;
	}

	D_SCAN("total channels to scan %d\n", added);
	return added;
}
821
822static inline u32
823il4965_ant_idx_to_flags(u8 ant_idx)
824{
825 return BIT(ant_idx) << RATE_MCS_ANT_POS;
826}
827
/*
 * il4965_request_scan - build and send the C_SCAN host command
 *
 * Translates the pending mac80211 scan request (il->scan_request) into
 * the uCode scan command: quiet/suspend timing, direct-probe SSIDs, TX
 * rate and antenna, RX chain selection, the probe-request template and
 * the per-channel list.  Caller must hold il->mutex.
 *
 * Returns 0 on success, -ENOMEM if the scan buffer cannot be
 * allocated, or -EIO on an invalid band / empty channel list or a
 * failed command submission.
 */
int
il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
{
	struct il_host_cmd cmd = {
		.id = C_SCAN,
		.len = sizeof(struct il_scan_cmd),
		.flags = CMD_SIZE_HUGE,
	};
	struct il_scan_cmd *scan;
	struct il_rxon_context *ctx = &il->ctx;
	u32 rate_flags = 0;
	u16 cmd_len;
	u16 rx_chain = 0;
	enum ieee80211_band band;
	u8 n_probes = 0;
	u8 rx_ant = il->hw_params.valid_rx_ant;
	u8 rate;
	bool is_active = false;
	int chan_mod;
	u8 active_chains;
	u8 scan_tx_antennas = il->hw_params.valid_tx_ant;
	int ret;

	lockdep_assert_held(&il->mutex);

	ctx = il_rxon_ctx_from_vif(vif);

	/* Lazily allocate the (huge) scan command buffer; it is kept
	 * for reuse across scans. */
	if (!il->scan_cmd) {
		il->scan_cmd =
		    kmalloc(sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE,
			    GFP_KERNEL);
		if (!il->scan_cmd) {
			D_SCAN("fail to allocate memory for scan\n");
			return -ENOMEM;
		}
	}
	scan = il->scan_cmd;
	memset(scan, 0, sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE);

	scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
	scan->quiet_time = IL_ACTIVE_QUIET_TIME;

	/* While associated, bound time off-channel and pack a
	 * beacon-relative suspend time so traffic keeps flowing. */
	if (il_is_any_associated(il)) {
		u16 interval;
		u32 extra;
		u32 suspend_time = 100;
		u32 scan_suspend_time = 100;

		D_INFO("Scanning while associated...\n");
		interval = vif->bss_conf.beacon_int;

		scan->suspend_time = 0;
		scan->max_out_time = cpu_to_le32(200 * 1024);
		if (!interval)
			interval = suspend_time;

		/* Firmware-specific packing: beacon count in the top
		 * bits (<< 22), remainder in TU (* 1024) below —
		 * see the scan command layout in commands.h. */
		extra = (suspend_time / interval) << 22;
		scan_suspend_time =
		    (extra | ((suspend_time % interval) * 1024));
		scan->suspend_time = cpu_to_le32(scan_suspend_time);
		D_SCAN("suspend_time 0x%X beacon interval %d\n",
		       scan_suspend_time, interval);
	}

	/* Copy requested SSIDs into the direct-scan (probe) slots. */
	if (il->scan_request->n_ssids) {
		int i, p = 0;
		D_SCAN("Kicking off active scan\n");
		for (i = 0; i < il->scan_request->n_ssids; i++) {
			/* always does wildcard anyway */
			if (!il->scan_request->ssids[i].ssid_len)
				continue;
			scan->direct_scan[p].id = WLAN_EID_SSID;
			scan->direct_scan[p].len =
			    il->scan_request->ssids[i].ssid_len;
			memcpy(scan->direct_scan[p].ssid,
			       il->scan_request->ssids[i].ssid,
			       il->scan_request->ssids[i].ssid_len);
			n_probes++;
			p++;
		}
		is_active = true;
	} else
		D_SCAN("Start passive scan.\n");

	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	scan->tx_cmd.sta_id = ctx->bcast_sta_id;
	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	/* Pick probe TX rate per band; on 2.4 GHz avoid CCK when the
	 * active RXON is in pure-40MHz channel mode. */
	switch (il->scan_band) {
	case IEEE80211_BAND_2GHZ:
		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
		chan_mod =
		    le32_to_cpu(il->ctx.active.
				flags & RXON_FLG_CHANNEL_MODE_MSK) >>
		    RXON_FLG_CHANNEL_MODE_POS;
		if (chan_mod == CHANNEL_MODE_PURE_40) {
			rate = RATE_6M_PLCP;
		} else {
			rate = RATE_1M_PLCP;
			rate_flags = RATE_MCS_CCK_MSK;
		}
		break;
	case IEEE80211_BAND_5GHZ:
		rate = RATE_6M_PLCP;
		break;
	default:
		IL_WARN("Invalid scan band\n");
		return -EIO;
	}

	/*
	 * If active scanning is requested but a certain channel is
	 * marked passive, we can do active scanning if we detect
	 * transmissions.
	 *
	 * There is an issue with some firmware versions that triggers
	 * a sysassert on a "good CRC threshold" of zero (== disabled),
	 * on a radar channel even though this means that we should NOT
	 * send probes.
	 *
	 * The "good CRC threshold" is the number of frames that we
	 * need to receive during our dwell time on a channel before
	 * sending out probes -- setting this to a huge value will
	 * mean we never reach it, but at the same time work around
	 * the aforementioned issue. Thus use IL_GOOD_CRC_TH_NEVER
	 * here instead of IL_GOOD_CRC_TH_DISABLED.
	 */
	scan->good_CRC_th =
	    is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER;

	band = il->scan_band;

	if (il->cfg->scan_rx_antennas[band])
		rx_ant = il->cfg->scan_rx_antennas[band];

	/* Rotate the TX antenna each scan to even out antenna use. */
	il->scan_tx_ant[band] =
	    il4965_toggle_tx_ant(il, il->scan_tx_ant[band], scan_tx_antennas);
	rate_flags |= il4965_ant_idx_to_flags(il->scan_tx_ant[band]);
	scan->tx_cmd.rate_n_flags =
	    il4965_hw_set_rate_n_flags(rate, rate_flags);

	/* In power save mode use one chain, otherwise use all chains */
	if (test_bit(S_POWER_PMI, &il->status)) {
		/* rx_ant has been set to all valid chains previously */
		active_chains =
		    rx_ant & ((u8) (il->chain_noise_data.active_chains));
		if (!active_chains)
			active_chains = rx_ant;

		D_SCAN("chain_noise_data.active_chains: %u\n",
		       il->chain_noise_data.active_chains);

		rx_ant = il4965_first_antenna(active_chains);
	}

	/* MIMO is not used here, but value is required */
	rx_chain |= il->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
	rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
	scan->rx_chain = cpu_to_le16(rx_chain);

	/* Build the probe-request template (incl. requested IEs) into
	 * the command's variable-length data area. */
	cmd_len =
	    il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
			      vif->addr, il->scan_request->ie,
			      il->scan_request->ie_len,
			      IL_MAX_SCAN_SIZE - sizeof(*scan));
	scan->tx_cmd.len = cpu_to_le16(cmd_len);

	scan->filter_flags |=
	    (RXON_FILTER_ACCEPT_GRP_MSK | RXON_FILTER_BCON_AWARE_MSK);

	/* Channel entries follow immediately after the probe request. */
	scan->channel_count =
	    il4965_get_channels_for_scan(il, vif, band, is_active, n_probes,
					 (void *)&scan->data[cmd_len]);
	if (scan->channel_count == 0) {
		D_SCAN("channel count %d\n", scan->channel_count);
		return -EIO;
	}

	cmd.len +=
	    le16_to_cpu(scan->tx_cmd.len) +
	    scan->channel_count * sizeof(struct il_scan_channel);
	cmd.data = scan;
	scan->len = cpu_to_le16(cmd.len);

	set_bit(S_SCAN_HW, &il->status);

	ret = il_send_cmd_sync(il, &cmd);
	if (ret)
		clear_bit(S_SCAN_HW, &il->status);

	return ret;
}
1022
1023int
1024il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
1025 bool add)
1026{
1027 struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
1028
1029 if (add)
1030 return il4965_add_bssid_station(il, vif_priv->ctx,
1031 vif->bss_conf.bssid,
1032 &vif_priv->ibss_bssid_sta_id);
1033 return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
1034 vif->bss_conf.bssid);
1035}
1036
1037void
1038il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid, int freed)
1039{
1040 lockdep_assert_held(&il->sta_lock);
1041
1042 if (il->stations[sta_id].tid[tid].tfds_in_queue >= freed)
1043 il->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1044 else {
1045 D_TX("free more than tfds_in_queue (%u:%d)\n",
1046 il->stations[sta_id].tid[tid].tfds_in_queue, freed);
1047 il->stations[sta_id].tid[tid].tfds_in_queue = 0;
1048 }
1049}
1050
1051#define IL_TX_QUEUE_MSK 0xfffff
1052
1053static bool
1054il4965_is_single_rx_stream(struct il_priv *il)
1055{
1056 return il->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
1057 il->current_ht_config.single_chain_sufficient;
1058}
1059
1060#define IL_NUM_RX_CHAINS_MULTIPLE 3
1061#define IL_NUM_RX_CHAINS_SINGLE 2
1062#define IL_NUM_IDLE_CHAINS_DUAL 2
1063#define IL_NUM_IDLE_CHAINS_SINGLE 1
1064
1065/*
1066 * Determine how many receiver/antenna chains to use.
1067 *
1068 * More provides better reception via diversity. Fewer saves power
1069 * at the expense of throughput, but only when not in powersave to
1070 * start with.
1071 *
1072 * MIMO (dual stream) requires at least 2, but works better with 3.
1073 * This does not determine *which* chains to use, just how many.
1074 */
1075static int
1076il4965_get_active_rx_chain_count(struct il_priv *il)
1077{
1078 /* # of Rx chains to use when expecting MIMO. */
1079 if (il4965_is_single_rx_stream(il))
1080 return IL_NUM_RX_CHAINS_SINGLE;
1081 else
1082 return IL_NUM_RX_CHAINS_MULTIPLE;
1083}
1084
1085/*
1086 * When we are in power saving mode, unless device support spatial
1087 * multiplexing power save, use the active count for rx chain count.
1088 */
1089static int
1090il4965_get_idle_rx_chain_count(struct il_priv *il, int active_cnt)
1091{
1092 /* # Rx chains when idling, depending on SMPS mode */
1093 switch (il->current_ht_config.smps) {
1094 case IEEE80211_SMPS_STATIC:
1095 case IEEE80211_SMPS_DYNAMIC:
1096 return IL_NUM_IDLE_CHAINS_SINGLE;
1097 case IEEE80211_SMPS_OFF:
1098 return active_cnt;
1099 default:
1100 WARN(1, "invalid SMPS mode %d", il->current_ht_config.smps);
1101 return active_cnt;
1102 }
1103}
1104
1105/* up to 4 chains */
1106static u8
1107il4965_count_chain_bitmap(u32 chain_bitmap)
1108{
1109 u8 res;
1110 res = (chain_bitmap & BIT(0)) >> 0;
1111 res += (chain_bitmap & BIT(1)) >> 1;
1112 res += (chain_bitmap & BIT(2)) >> 2;
1113 res += (chain_bitmap & BIT(3)) >> 3;
1114 return res;
1115}
1116
1117/**
1118 * il4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
1119 *
1120 * Selects how many and which Rx receivers/antennas/chains to use.
1121 * This should not be used for scan command ... it puts data in wrong place.
1122 */
1123void
1124il4965_set_rxon_chain(struct il_priv *il, struct il_rxon_context *ctx)
1125{
1126 bool is_single = il4965_is_single_rx_stream(il);
1127 bool is_cam = !test_bit(S_POWER_PMI, &il->status);
1128 u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
1129 u32 active_chains;
1130 u16 rx_chain;
1131
1132 /* Tell uCode which antennas are actually connected.
1133 * Before first association, we assume all antennas are connected.
1134 * Just after first association, il4965_chain_noise_calibration()
1135 * checks which antennas actually *are* connected. */
1136 if (il->chain_noise_data.active_chains)
1137 active_chains = il->chain_noise_data.active_chains;
1138 else
1139 active_chains = il->hw_params.valid_rx_ant;
1140
1141 rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
1142
1143 /* How many receivers should we use? */
1144 active_rx_cnt = il4965_get_active_rx_chain_count(il);
1145 idle_rx_cnt = il4965_get_idle_rx_chain_count(il, active_rx_cnt);
1146
1147 /* correct rx chain count according hw settings
1148 * and chain noise calibration
1149 */
1150 valid_rx_cnt = il4965_count_chain_bitmap(active_chains);
1151 if (valid_rx_cnt < active_rx_cnt)
1152 active_rx_cnt = valid_rx_cnt;
1153
1154 if (valid_rx_cnt < idle_rx_cnt)
1155 idle_rx_cnt = valid_rx_cnt;
1156
1157 rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
1158 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
1159
1160 ctx->staging.rx_chain = cpu_to_le16(rx_chain);
1161
1162 if (!is_single && active_rx_cnt >= IL_NUM_RX_CHAINS_SINGLE && is_cam)
1163 ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
1164 else
1165 ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
1166
1167 D_ASSOC("rx_chain=0x%X active=%d idle=%d\n", ctx->staging.rx_chain,
1168 active_rx_cnt, idle_rx_cnt);
1169
1170 WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
1171 active_rx_cnt < idle_rx_cnt);
1172}
1173
1174u8
1175il4965_toggle_tx_ant(struct il_priv *il, u8 ant, u8 valid)
1176{
1177 int i;
1178 u8 ind = ant;
1179
1180 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
1181 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
1182 if (valid & BIT(ind))
1183 return ind;
1184 }
1185 return ant;
1186}
1187
/*
 * Map a 4965 flow-handler (FH) register address to its symbolic name
 * for debug output; IL_CMD presumably expands to a stringifying
 * "case X: return "X";" — falls back to "UNKNOWN" for anything else.
 */
static const char *
il4965_get_fh_string(int cmd)
{
	switch (cmd) {
		IL_CMD(FH49_RSCSR_CHNL0_STTS_WPTR_REG);
		IL_CMD(FH49_RSCSR_CHNL0_RBDCB_BASE_REG);
		IL_CMD(FH49_RSCSR_CHNL0_WPTR);
		IL_CMD(FH49_MEM_RCSR_CHNL0_CONFIG_REG);
		IL_CMD(FH49_MEM_RSSR_SHARED_CTRL_REG);
		IL_CMD(FH49_MEM_RSSR_RX_STATUS_REG);
		IL_CMD(FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
		IL_CMD(FH49_TSSR_TX_STATUS_REG);
		IL_CMD(FH49_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
}
1205
/*
 * il4965_dump_fh - dump flow-handler register values
 *
 * With CONFIG_IWLEGACY_DEBUG and @display true: formats all FH
 * registers into a freshly kmalloc'd buffer stored in *@buf (caller
 * frees) and returns the number of bytes written, or -ENOMEM.
 * Otherwise logs each register via IL_ERR and returns 0.
 */
int
il4965_dump_fh(struct il_priv *il, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLEGACY_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	/* Registers worth dumping when diagnosing DMA/RX problems. */
	static const u32 fh_tbl[] = {
		FH49_RSCSR_CHNL0_STTS_WPTR_REG,
		FH49_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH49_RSCSR_CHNL0_WPTR,
		FH49_MEM_RCSR_CHNL0_CONFIG_REG,
		FH49_MEM_RSSR_SHARED_CTRL_REG,
		FH49_MEM_RSSR_RX_STATUS_REG,
		FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH49_TSSR_TX_STATUS_REG,
		FH49_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLEGACY_DEBUG
	if (display) {
		/* 48 bytes per line plus header — matches the
		 * scnprintf formats below. */
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos +=
		    scnprintf(*buf + pos, bufsz - pos, "FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos +=
			    scnprintf(*buf + pos, bufsz - pos,
				      " %34s: 0X%08x\n",
				      il4965_get_fh_string(fh_tbl[i]),
				      il_rd(il, fh_tbl[i]));
		}
		return pos;
	}
#endif
	IL_ERR("FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
		IL_ERR(" %34s: 0X%08x\n", il4965_get_fh_string(fh_tbl[i]),
		       il_rd(il, fh_tbl[i]));
	}
	return 0;
}
1250
1251void
1252il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb)
1253{
1254 struct il_rx_pkt *pkt = rxb_addr(rxb);
1255 struct il_missed_beacon_notif *missed_beacon;
1256
1257 missed_beacon = &pkt->u.missed_beacon;
1258 if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
1259 il->missed_beacon_threshold) {
1260 D_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
1261 le32_to_cpu(missed_beacon->consecutive_missed_beacons),
1262 le32_to_cpu(missed_beacon->total_missed_becons),
1263 le32_to_cpu(missed_beacon->num_recvd_beacons),
1264 le32_to_cpu(missed_beacon->num_expected_beacons));
1265 if (!test_bit(S_SCANNING, &il->status))
1266 il4965_init_sensitivity(il);
1267 }
1268}
1269
1270/* Calculate noise level, based on measurements during network silence just
1271 * before arriving beacon. This measurement can be done only if we know
1272 * exactly when to expect beacons, therefore only when we're associated. */
1273static void
1274il4965_rx_calc_noise(struct il_priv *il)
1275{
1276 struct stats_rx_non_phy *rx_info;
1277 int num_active_rx = 0;
1278 int total_silence = 0;
1279 int bcn_silence_a, bcn_silence_b, bcn_silence_c;
1280 int last_rx_noise;
1281
1282 rx_info = &(il->_4965.stats.rx.general);
1283 bcn_silence_a =
1284 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
1285 bcn_silence_b =
1286 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
1287 bcn_silence_c =
1288 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
1289
1290 if (bcn_silence_a) {
1291 total_silence += bcn_silence_a;
1292 num_active_rx++;
1293 }
1294 if (bcn_silence_b) {
1295 total_silence += bcn_silence_b;
1296 num_active_rx++;
1297 }
1298 if (bcn_silence_c) {
1299 total_silence += bcn_silence_c;
1300 num_active_rx++;
1301 }
1302
1303 /* Average among active antennas */
1304 if (num_active_rx)
1305 last_rx_noise = (total_silence / num_active_rx) - 107;
1306 else
1307 last_rx_noise = IL_NOISE_MEAS_NOT_AVAILABLE;
1308
1309 D_CALIB("inband silence a %u, b %u, c %u, dBm %d\n", bcn_silence_a,
1310 bcn_silence_b, bcn_silence_c, last_rx_noise);
1311}
1312
1313#ifdef CONFIG_IWLEGACY_DEBUGFS
1314/*
1315 * based on the assumption of all stats counter are in DWORD
1316 * FIXME: This function is for debugging, do not deal with
1317 * the case of counters roll-over.
1318 */
static void
il4965_accumulative_stats(struct il_priv *il, __le32 * stats)
{
	int i, size;
	__le32 *prev_stats;
	u32 *accum_stats;
	u32 *delta, *max_delta;
	struct stats_general_common *general, *accum_general;
	struct stats_tx *tx, *accum_tx;

	/* Treat the stats structures as flat arrays of 32-bit words
	 * and walk them in lockstep: new value, previous value,
	 * running accumulator, per-interval delta, max delta. */
	prev_stats = (__le32 *) &il->_4965.stats;
	accum_stats = (u32 *) &il->_4965.accum_stats;
	size = sizeof(struct il_notif_stats);
	general = &il->_4965.stats.general.common;
	accum_general = &il->_4965.accum_stats.general.common;
	tx = &il->_4965.stats.tx;
	accum_tx = &il->_4965.accum_stats.tx;
	delta = (u32 *) &il->_4965.delta_stats;
	max_delta = (u32 *) &il->_4965.max_delta;

	/* Skip the first word (the flag field) and accumulate only
	 * counters that increased; decreases (roll-over) are ignored
	 * per the FIXME above. */
	for (i = sizeof(__le32); i < size;
	     i +=
	     sizeof(__le32), stats++, prev_stats++, delta++, max_delta++,
	     accum_stats++) {
		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
			*delta =
			    (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
			*accum_stats += *delta;
			if (*delta > *max_delta)
				*max_delta = *delta;
		}
	}

	/* reset accumulative stats for "no-counter" type stats */
	accum_general->temperature = general->temperature;
	accum_general->ttl_timestamp = general->ttl_timestamp;
}
1356#endif
1357
1358#define REG_RECALIB_PERIOD (60)
1359
/*
 * Handle a periodic statistics notification (N_STATS): accumulate
 * debugfs counters, cache the new stats, kick noise calculation and
 * run-time calibration, and notify the temperature handler on change.
 */
void
il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
	int change;
	struct il_rx_pkt *pkt = rxb_addr(rxb);

	D_RX("Statistics notification received (%d vs %d).\n",
	     (int)sizeof(struct il_notif_stats),
	     le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);

	/* Detect a temperature or HT40-mode change BEFORE the cached
	 * stats are overwritten below. */
	change =
	    ((il->_4965.stats.general.common.temperature !=
	      pkt->u.stats.general.common.temperature) ||
	     ((il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK) !=
	      (pkt->u.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)));
#ifdef CONFIG_IWLEGACY_DEBUGFS
	il4965_accumulative_stats(il, (__le32 *) &pkt->u.stats);
#endif

	/* TODO: reading some of stats is unneeded */
	memcpy(&il->_4965.stats, &pkt->u.stats, sizeof(il->_4965.stats));

	set_bit(S_STATS, &il->status);

	/* Reschedule the stats timer to occur in
	 * REG_RECALIB_PERIOD seconds to ensure we get a
	 * thermal update even if the uCode doesn't give
	 * us one */
	mod_timer(&il->stats_periodic,
		  jiffies + msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));

	if (unlikely(!test_bit(S_SCANNING, &il->status)) &&
	    (pkt->hdr.cmd == N_STATS)) {
		il4965_rx_calc_noise(il);
		queue_work(il->workqueue, &il->run_time_calib_work);
	}
	if (il->cfg->ops->lib->temp_ops.temperature && change)
		il->cfg->ops->lib->temp_ops.temperature(il);
}
1399
/*
 * Handle the reply to a statistics request: if the uCode indicates it
 * cleared its counters, reset our debugfs accumulators too, then
 * process the payload like a regular statistics notification.
 */
void
il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);

	if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATS_CLEAR_MSK) {
#ifdef CONFIG_IWLEGACY_DEBUGFS
		memset(&il->_4965.accum_stats, 0,
		       sizeof(struct il_notif_stats));
		memset(&il->_4965.delta_stats, 0,
		       sizeof(struct il_notif_stats));
		memset(&il->_4965.max_delta, 0, sizeof(struct il_notif_stats));
#endif
		D_RX("Statistics have been cleared\n");
	}
	il4965_hdl_stats(il, rxb);
}
1417
1418
1419/*
1420 * mac80211 queues, ACs, hardware queues, FIFOs.
1421 *
1422 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
1423 *
 * Mac80211 uses the following numbers, which we get from it
 * by way of skb_get_queue_mapping(skb):
1426 *
1427 * VO 0
1428 * VI 1
1429 * BE 2
1430 * BK 3
1431 *
1432 *
1433 * Regular (not A-MPDU) frames are put into hardware queues corresponding
1434 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
1435 * own queue per aggregation session (RA/TID combination), such queues are
1436 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
1437 * order to map frames to the right queue, we also need an AC->hw queue
1438 * mapping. This is implemented here.
1439 *
1440 * Due to the way hw queues are set up (by the hw specific modules like
1441 * 4965.c), the AC->hw queue mapping is the identity
1442 * mapping.
1443 */
1444
/* TID (user priority) to 802.11 access-category mapping, indexed by
 * TID 0-7; TIDs 8-15 are not supported (see the helpers below). */
static const u8 tid_to_ac[] = {
	IEEE80211_AC_BE,	/* TID 0 */
	IEEE80211_AC_BK,	/* TID 1 */
	IEEE80211_AC_BK,	/* TID 2 */
	IEEE80211_AC_BE,	/* TID 3 */
	IEEE80211_AC_VI,	/* TID 4 */
	IEEE80211_AC_VI,	/* TID 5 */
	IEEE80211_AC_VO,	/* TID 6 */
	IEEE80211_AC_VO		/* TID 7 */
};
1455
1456static inline int
1457il4965_get_ac_from_tid(u16 tid)
1458{
1459 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
1460 return tid_to_ac[tid];
1461
1462 /* no support for TIDs 8-15 yet */
1463 return -EINVAL;
1464}
1465
1466static inline int
1467il4965_get_fifo_from_tid(struct il_rxon_context *ctx, u16 tid)
1468{
1469 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
1470 return ctx->ac_to_fifo[tid_to_ac[tid]];
1471
1472 /* no support for TIDs 8-15 yet */
1473 return -EINVAL;
1474}
1475
1476/*
1477 * handle build C_TX command notification.
1478 */
/*
 * Fill in the basic (rate-independent) fields of a C_TX command from
 * the frame type and mac80211 TX info: ACK/sequence-control flags,
 * station id, TID, protection flags and PM frame timeout.
 */
static void
il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb,
			  struct il_tx_cmd *tx_cmd,
			  struct ieee80211_tx_info *info,
			  struct ieee80211_hdr *hdr, u8 std_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		/* First fragment of a probe response carries the TSF. */
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	/* QoS data frames: uCode manages the sequence number itself,
	 * so clear the SEQ_CTL flag; all other frames keep it set. */
	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	il_tx_cmd_protection(il, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	/* Give (re)association frames slightly more PM timeout than
	 * other management frames; data frames get none. */
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
1532
1533#define RTS_DFAULT_RETRY_LIMIT 60
1534
/*
 * Fill in retry limits and rate_n_flags for a C_TX command.  Data
 * frames delegate rate selection to the uCode station table; all other
 * frames get an explicit legacy rate and a toggled TX antenna.
 */
static void
il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd,
			 struct ieee80211_tx_info *info, __le16 fc)
{
	u32 rate_flags;
	int rate_idx;
	u8 rts_retry_limit;
	u8 data_retry_limit;
	u8 rate_plcp;

	/* Set retry limit on DATA packets and Probe Responses */
	if (ieee80211_is_probe_resp(fc))
		data_retry_limit = 3;
	else
		data_retry_limit = IL4965_DEFAULT_TX_RETRY;
	tx_cmd->data_retry_limit = data_retry_limit;

	/* Set retry limit on RTS packets */
	rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
	if (data_retry_limit < rts_retry_limit)
		rts_retry_limit = data_retry_limit;
	tx_cmd->rts_retry_limit = rts_retry_limit;

	/* DATA packets will use the uCode station table for rate/antenna
	 * selection */
	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_idx = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
		return;
	}

	/**
	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
	 * not really a TX rate. Thus, we use the lowest supported rate for
	 * this band. Also use the lowest supported rate if the stored rate
	 * idx is invalid.
	 */
	/* NOTE(review): the upper-bound check below uses '>' so
	 * rate_idx == RATE_COUNT_LEGACY slips through; this looks like
	 * it should be '>=', though mac80211 rate indices never reach
	 * it in practice — confirm against il_rates[] sizing. */
	rate_idx = info->control.rates[0].idx;
	if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0
	    || rate_idx > RATE_COUNT_LEGACY)
		rate_idx =
		    rate_lowest_index(&il->bands[info->band],
				      info->control.sta);
	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx += IL_FIRST_OFDM_RATE;
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = il_rates[rate_idx].plcp;
	/* Zero out flags for this packet */
	rate_flags = 0;

	/* Set CCK flag as needed */
	if (rate_idx >= IL_FIRST_CCK_RATE && rate_idx <= IL_LAST_CCK_RATE)
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set up antennas */
	il->mgmt_tx_ant =
	    il4965_toggle_tx_ant(il, il->mgmt_tx_ant,
				 il->hw_params.valid_tx_ant);

	rate_flags |= il4965_ant_idx_to_flags(il->mgmt_tx_ant);

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags =
	    il4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
}
1601
/*
 * Program the hardware-crypto fields (sec_ctl and key material) of a
 * C_TX command according to the mac80211-selected cipher for this
 * frame.  Unknown ciphers are logged and left unconfigured.
 */
static void
il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
			     struct il_tx_cmd *tx_cmd, struct sk_buff *skb_frag,
			     int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		D_TX("tx_cmd with AES hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		/* TKIP phase-2 key is derived per packet by mac80211. */
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		D_TX("tx_cmd with tkip hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |=
		    (TX_CMD_SEC_WEP | (keyconf->keyidx & TX_CMD_SEC_MSK) <<
		     TX_CMD_SEC_SHIFT);

		/* WEP key starts at offset 3 in the key field. */
		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		D_TX("Configuring packet for WEP encryption " "with key %d\n",
		     keyconf->keyidx);
		break;

	default:
		IL_ERR("Unknown encode cipher %x\n", keyconf->cipher);
		break;
	}
}
1643
1644/*
1645 * start C_TX command process
1646 */
1647int
1648il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
1649{
1650 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1651 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1652 struct ieee80211_sta *sta = info->control.sta;
1653 struct il_station_priv *sta_priv = NULL;
1654 struct il_tx_queue *txq;
1655 struct il_queue *q;
1656 struct il_device_cmd *out_cmd;
1657 struct il_cmd_meta *out_meta;
1658 struct il_tx_cmd *tx_cmd;
1659 struct il_rxon_context *ctx = &il->ctx;
1660 int txq_id;
1661 dma_addr_t phys_addr;
1662 dma_addr_t txcmd_phys;
1663 dma_addr_t scratch_phys;
1664 u16 len, firstlen, secondlen;
1665 u16 seq_number = 0;
1666 __le16 fc;
1667 u8 hdr_len;
1668 u8 sta_id;
1669 u8 wait_write_ptr = 0;
1670 u8 tid = 0;
1671 u8 *qc = NULL;
1672 unsigned long flags;
1673 bool is_agg = false;
1674
1675 if (info->control.vif)
1676 ctx = il_rxon_ctx_from_vif(info->control.vif);
1677
1678 spin_lock_irqsave(&il->lock, flags);
1679 if (il_is_rfkill(il)) {
1680 D_DROP("Dropping - RF KILL\n");
1681 goto drop_unlock;
1682 }
1683
1684 fc = hdr->frame_control;
1685
1686#ifdef CONFIG_IWLEGACY_DEBUG
1687 if (ieee80211_is_auth(fc))
1688 D_TX("Sending AUTH frame\n");
1689 else if (ieee80211_is_assoc_req(fc))
1690 D_TX("Sending ASSOC frame\n");
1691 else if (ieee80211_is_reassoc_req(fc))
1692 D_TX("Sending REASSOC frame\n");
1693#endif
1694
1695 hdr_len = ieee80211_hdrlen(fc);
1696
1697 /* For management frames use broadcast id to do not break aggregation */
1698 if (!ieee80211_is_data(fc))
1699 sta_id = ctx->bcast_sta_id;
1700 else {
1701 /* Find idx into station table for destination station */
1702 sta_id = il_sta_id_or_broadcast(il, ctx, info->control.sta);
1703
1704 if (sta_id == IL_INVALID_STATION) {
1705 D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
1706 goto drop_unlock;
1707 }
1708 }
1709
1710 D_TX("station Id %d\n", sta_id);
1711
1712 if (sta)
1713 sta_priv = (void *)sta->drv_priv;
1714
1715 if (sta_priv && sta_priv->asleep &&
1716 (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
1717 /*
1718 * This sends an asynchronous command to the device,
1719 * but we can rely on it being processed before the
1720 * next frame is processed -- and the next frame to
1721 * this station is the one that will consume this
1722 * counter.
1723 * For now set the counter to just 1 since we do not
1724 * support uAPSD yet.
1725 */
1726 il4965_sta_modify_sleep_tx_count(il, sta_id, 1);
1727 }
1728
1729 /*
1730 * Send this frame after DTIM -- there's a special queue
1731 * reserved for this for contexts that support AP mode.
1732 */
1733 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
1734 txq_id = ctx->mcast_queue;
1735 /*
1736 * The microcode will clear the more data
1737 * bit in the last frame it transmits.
1738 */
1739 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1740 } else
1741 txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
1742
1743 /* irqs already disabled/saved above when locking il->lock */
1744 spin_lock(&il->sta_lock);
1745
1746 if (ieee80211_is_data_qos(fc)) {
1747 qc = ieee80211_get_qos_ctl(hdr);
1748 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
1749 if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
1750 spin_unlock(&il->sta_lock);
1751 goto drop_unlock;
1752 }
1753 seq_number = il->stations[sta_id].tid[tid].seq_number;
1754 seq_number &= IEEE80211_SCTL_SEQ;
1755 hdr->seq_ctrl =
1756 hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG);
1757 hdr->seq_ctrl |= cpu_to_le16(seq_number);
1758 seq_number += 0x10;
1759 /* aggregation is on for this <sta,tid> */
1760 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
1761 il->stations[sta_id].tid[tid].agg.state == IL_AGG_ON) {
1762 txq_id = il->stations[sta_id].tid[tid].agg.txq_id;
1763 is_agg = true;
1764 }
1765 }
1766
1767 txq = &il->txq[txq_id];
1768 q = &txq->q;
1769
1770 if (unlikely(il_queue_space(q) < q->high_mark)) {
1771 spin_unlock(&il->sta_lock);
1772 goto drop_unlock;
1773 }
1774
1775 if (ieee80211_is_data_qos(fc)) {
1776 il->stations[sta_id].tid[tid].tfds_in_queue++;
1777 if (!ieee80211_has_morefrags(fc))
1778 il->stations[sta_id].tid[tid].seq_number = seq_number;
1779 }
1780
1781 spin_unlock(&il->sta_lock);
1782
1783 /* Set up driver data for this TFD */
1784 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct il_tx_info));
1785 txq->txb[q->write_ptr].skb = skb;
1786 txq->txb[q->write_ptr].ctx = ctx;
1787
1788 /* Set up first empty entry in queue's array of Tx/cmd buffers */
1789 out_cmd = txq->cmd[q->write_ptr];
1790 out_meta = &txq->meta[q->write_ptr];
1791 tx_cmd = &out_cmd->cmd.tx;
1792 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
1793 memset(tx_cmd, 0, sizeof(struct il_tx_cmd));
1794
1795 /*
1796 * Set up the Tx-command (not MAC!) header.
1797 * Store the chosen Tx queue and TFD idx within the sequence field;
1798 * after Tx, uCode's Tx response will return this value so driver can
1799 * locate the frame within the tx queue and do post-tx processing.
1800 */
1801 out_cmd->hdr.cmd = C_TX;
1802 out_cmd->hdr.sequence =
1803 cpu_to_le16((u16)
1804 (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
1805
1806 /* Copy MAC header from skb into command buffer */
1807 memcpy(tx_cmd->hdr, hdr, hdr_len);
1808
1809 /* Total # bytes to be transmitted */
1810 len = (u16) skb->len;
1811 tx_cmd->len = cpu_to_le16(len);
1812
1813 if (info->control.hw_key)
1814 il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id);
1815
1816 /* TODO need this for burst mode later on */
1817 il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);
1818 il_dbg_log_tx_data_frame(il, len, hdr);
1819
1820 il4965_tx_cmd_build_rate(il, tx_cmd, info, fc);
1821
1822 il_update_stats(il, true, fc, len);
1823 /*
1824 * Use the first empty entry in this queue's command buffer array
1825 * to contain the Tx command and MAC header concatenated together
1826 * (payload data will be in another buffer).
1827 * Size of this varies, due to varying MAC header length.
1828 * If end is not dword aligned, we'll have 2 extra bytes at the end
1829 * of the MAC header (device reads on dword boundaries).
1830 * We'll tell device about this padding later.
1831 */
1832 len = sizeof(struct il_tx_cmd) + sizeof(struct il_cmd_header) + hdr_len;
1833 firstlen = (len + 3) & ~3;
1834
1835 /* Tell NIC about any 2-byte padding after MAC header */
1836 if (firstlen != len)
1837 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
1838
1839 /* Physical address of this Tx command's header (not MAC header!),
1840 * within command buffer array. */
1841 txcmd_phys =
1842 pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
1843 PCI_DMA_BIDIRECTIONAL);
1844 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
1845 dma_unmap_len_set(out_meta, len, firstlen);
1846 /* Add buffer containing Tx command and MAC(!) header to TFD's
1847 * first entry */
1848 il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen,
1849 1, 0);
1850
1851 if (!ieee80211_has_morefrags(hdr->frame_control)) {
1852 txq->need_update = 1;
1853 } else {
1854 wait_write_ptr = 1;
1855 txq->need_update = 0;
1856 }
1857
1858 /* Set up TFD's 2nd entry to point directly to remainder of skb,
1859 * if any (802.11 null frames have no payload). */
1860 secondlen = skb->len - hdr_len;
1861 if (secondlen > 0) {
1862 phys_addr =
1863 pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
1864 PCI_DMA_TODEVICE);
1865 il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr,
1866 secondlen, 0, 0);
1867 }
1868
1869 scratch_phys =
1870 txcmd_phys + sizeof(struct il_cmd_header) +
1871 offsetof(struct il_tx_cmd, scratch);
1872
1873 /* take back ownership of DMA buffer to enable update */
1874 pci_dma_sync_single_for_cpu(il->pci_dev, txcmd_phys, firstlen,
1875 PCI_DMA_BIDIRECTIONAL);
1876 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1877 tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys);
1878
1879 D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
1880 D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
1881 il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd, sizeof(*tx_cmd));
1882 il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, hdr_len);
1883
1884 /* Set up entry for this TFD in Tx byte-count array */
1885 if (info->flags & IEEE80211_TX_CTL_AMPDU)
1886 il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq,
1887 le16_to_cpu(tx_cmd->
1888 len));
1889
1890 pci_dma_sync_single_for_device(il->pci_dev, txcmd_phys, firstlen,
1891 PCI_DMA_BIDIRECTIONAL);
1892
1893 /* Tell device the write idx *just past* this latest filled TFD */
1894 q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
1895 il_txq_update_write_ptr(il, txq);
1896 spin_unlock_irqrestore(&il->lock, flags);
1897
1898 /*
1899 * At this point the frame is "transmitted" successfully
1900 * and we will get a TX status notification eventually,
1901 * regardless of the value of ret. "ret" only indicates
1902 * whether or not we should update the write pointer.
1903 */
1904
1905 /*
1906 * Avoid atomic ops if it isn't an associated client.
1907 * Also, if this is a packet for aggregation, don't
1908 * increase the counter because the ucode will stop
1909 * aggregation queues when their respective station
1910 * goes to sleep.
1911 */
1912 if (sta_priv && sta_priv->client && !is_agg)
1913 atomic_inc(&sta_priv->pending_frames);
1914
1915 if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
1916 if (wait_write_ptr) {
1917 spin_lock_irqsave(&il->lock, flags);
1918 txq->need_update = 1;
1919 il_txq_update_write_ptr(il, txq);
1920 spin_unlock_irqrestore(&il->lock, flags);
1921 } else {
1922 il_stop_queue(il, txq);
1923 }
1924 }
1925
1926 return 0;
1927
1928drop_unlock:
1929 spin_unlock_irqrestore(&il->lock, flags);
1930 return -1;
1931}
1932
1933static inline int
1934il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size)
1935{
1936 ptr->addr =
1937 dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma, GFP_KERNEL);
1938 if (!ptr->addr)
1939 return -ENOMEM;
1940 ptr->size = size;
1941 return 0;
1942}
1943
1944static inline void
1945il4965_free_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr)
1946{
1947 if (unlikely(!ptr->addr))
1948 return;
1949
1950 dma_free_coherent(&il->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
1951 memset(ptr, 0, sizeof(*ptr));
1952}
1953
1954/**
1955 * il4965_hw_txq_ctx_free - Free TXQ Context
1956 *
1957 * Destroy all TX DMA queues and structures
1958 */
1959void
1960il4965_hw_txq_ctx_free(struct il_priv *il)
1961{
1962 int txq_id;
1963
1964 /* Tx queues */
1965 if (il->txq) {
1966 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
1967 if (txq_id == il->cmd_queue)
1968 il_cmd_queue_free(il);
1969 else
1970 il_tx_queue_free(il, txq_id);
1971 }
1972 il4965_free_dma_ptr(il, &il->kw);
1973
1974 il4965_free_dma_ptr(il, &il->scd_bc_tbls);
1975
1976 /* free tx queue structure */
1977 il_txq_mem(il);
1978}
1979
1980/**
1981 * il4965_txq_ctx_alloc - allocate TX queue context
1982 * Allocate all Tx DMA structures and initialize them
1983 *
1984 * @param il
1985 * @return error code
1986 */
1987int
1988il4965_txq_ctx_alloc(struct il_priv *il)
1989{
1990 int ret;
1991 int txq_id, slots_num;
1992 unsigned long flags;
1993
1994 /* Free all tx/cmd queues and keep-warm buffer */
1995 il4965_hw_txq_ctx_free(il);
1996
1997 ret =
1998 il4965_alloc_dma_ptr(il, &il->scd_bc_tbls,
1999 il->hw_params.scd_bc_tbls_size);
2000 if (ret) {
2001 IL_ERR("Scheduler BC Table allocation failed\n");
2002 goto error_bc_tbls;
2003 }
2004 /* Alloc keep-warm buffer */
2005 ret = il4965_alloc_dma_ptr(il, &il->kw, IL_KW_SIZE);
2006 if (ret) {
2007 IL_ERR("Keep Warm allocation failed\n");
2008 goto error_kw;
2009 }
2010
2011 /* allocate tx queue structure */
2012 ret = il_alloc_txq_mem(il);
2013 if (ret)
2014 goto error;
2015
2016 spin_lock_irqsave(&il->lock, flags);
2017
2018 /* Turn off all Tx DMA fifos */
2019 il4965_txq_set_sched(il, 0);
2020
2021 /* Tell NIC where to find the "keep warm" buffer */
2022 il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
2023
2024 spin_unlock_irqrestore(&il->lock, flags);
2025
2026 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
2027 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
2028 slots_num =
2029 (txq_id ==
2030 il->cmd_queue) ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
2031 ret = il_tx_queue_init(il, &il->txq[txq_id], slots_num, txq_id);
2032 if (ret) {
2033 IL_ERR("Tx %d queue init failed\n", txq_id);
2034 goto error;
2035 }
2036 }
2037
2038 return ret;
2039
2040error:
2041 il4965_hw_txq_ctx_free(il);
2042 il4965_free_dma_ptr(il, &il->kw);
2043error_kw:
2044 il4965_free_dma_ptr(il, &il->scd_bc_tbls);
2045error_bc_tbls:
2046 return ret;
2047}
2048
2049void
2050il4965_txq_ctx_reset(struct il_priv *il)
2051{
2052 int txq_id, slots_num;
2053 unsigned long flags;
2054
2055 spin_lock_irqsave(&il->lock, flags);
2056
2057 /* Turn off all Tx DMA fifos */
2058 il4965_txq_set_sched(il, 0);
2059
2060 /* Tell NIC where to find the "keep warm" buffer */
2061 il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
2062
2063 spin_unlock_irqrestore(&il->lock, flags);
2064
2065 /* Alloc and init all Tx queues, including the command queue (#4) */
2066 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
2067 slots_num =
2068 txq_id == il->cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
2069 il_tx_queue_reset(il, &il->txq[txq_id], slots_num, txq_id);
2070 }
2071}
2072
2073/**
2074 * il4965_txq_ctx_stop - Stop all Tx DMA channels
2075 */
2076void
2077il4965_txq_ctx_stop(struct il_priv *il)
2078{
2079 int ch, txq_id;
2080 unsigned long flags;
2081
2082 /* Turn off all Tx DMA fifos */
2083 spin_lock_irqsave(&il->lock, flags);
2084
2085 il4965_txq_set_sched(il, 0);
2086
2087 /* Stop each Tx DMA channel, and wait for it to be idle */
2088 for (ch = 0; ch < il->hw_params.dma_chnl_num; ch++) {
2089 il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
2090 if (il_poll_bit
2091 (il, FH49_TSSR_TX_STATUS_REG,
2092 FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000))
2093 IL_ERR("Failing on timeout while stopping"
2094 " DMA channel %d [0x%08x]", ch,
2095 il_rd(il, FH49_TSSR_TX_STATUS_REG));
2096 }
2097 spin_unlock_irqrestore(&il->lock, flags);
2098
2099 if (!il->txq)
2100 return;
2101
2102 /* Unmap DMA from host system and free skb's */
2103 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
2104 if (txq_id == il->cmd_queue)
2105 il_cmd_queue_unmap(il);
2106 else
2107 il_tx_queue_unmap(il, txq_id);
2108}
2109
2110/*
2111 * Find first available (lowest unused) Tx Queue, mark it "active".
2112 * Called only when finding queue for aggregation.
2113 * Should never return anything < 7, because they should already
2114 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
2115 */
2116static int
2117il4965_txq_ctx_activate_free(struct il_priv *il)
2118{
2119 int txq_id;
2120
2121 for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
2122 if (!test_and_set_bit(txq_id, &il->txq_ctx_active_msk))
2123 return txq_id;
2124 return -1;
2125}
2126
2127/**
2128 * il4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
2129 */
2130static void
2131il4965_tx_queue_stop_scheduler(struct il_priv *il, u16 txq_id)
2132{
2133 /* Simply stop the queue, but don't change any configuration;
2134 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
2135 il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
2136 (0 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
2137 (1 << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
2138}
2139
2140/**
2141 * il4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
2142 */
2143static int
2144il4965_tx_queue_set_q2ratid(struct il_priv *il, u16 ra_tid, u16 txq_id)
2145{
2146 u32 tbl_dw_addr;
2147 u32 tbl_dw;
2148 u16 scd_q2ratid;
2149
2150 scd_q2ratid = ra_tid & IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
2151
2152 tbl_dw_addr =
2153 il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
2154
2155 tbl_dw = il_read_targ_mem(il, tbl_dw_addr);
2156
2157 if (txq_id & 0x1)
2158 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
2159 else
2160 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
2161
2162 il_write_targ_mem(il, tbl_dw_addr, tbl_dw);
2163
2164 return 0;
2165}
2166
2167/**
2168 * il4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
2169 *
2170 * NOTE: txq_id must be greater than IL49_FIRST_AMPDU_QUEUE,
2171 * i.e. it must be one of the higher queues used for aggregation
2172 */
2173static int
2174il4965_txq_agg_enable(struct il_priv *il, int txq_id, int tx_fifo, int sta_id,
2175 int tid, u16 ssn_idx)
2176{
2177 unsigned long flags;
2178 u16 ra_tid;
2179 int ret;
2180
2181 if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
2182 (IL49_FIRST_AMPDU_QUEUE +
2183 il->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
2184 IL_WARN("queue number out of range: %d, must be %d to %d\n",
2185 txq_id, IL49_FIRST_AMPDU_QUEUE,
2186 IL49_FIRST_AMPDU_QUEUE +
2187 il->cfg->base_params->num_of_ampdu_queues - 1);
2188 return -EINVAL;
2189 }
2190
2191 ra_tid = BUILD_RAxTID(sta_id, tid);
2192
2193 /* Modify device's station table to Tx this TID */
2194 ret = il4965_sta_tx_modify_enable_tid(il, sta_id, tid);
2195 if (ret)
2196 return ret;
2197
2198 spin_lock_irqsave(&il->lock, flags);
2199
2200 /* Stop this Tx queue before configuring it */
2201 il4965_tx_queue_stop_scheduler(il, txq_id);
2202
2203 /* Map receiver-address / traffic-ID to this queue */
2204 il4965_tx_queue_set_q2ratid(il, ra_tid, txq_id);
2205
2206 /* Set this queue as a chain-building queue */
2207 il_set_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
2208
2209 /* Place first TFD at idx corresponding to start sequence number.
2210 * Assumes that ssn_idx is valid (!= 0xFFF) */
2211 il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
2212 il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
2213 il4965_set_wr_ptrs(il, txq_id, ssn_idx);
2214
2215 /* Set up Tx win size and frame limit for this queue */
2216 il_write_targ_mem(il,
2217 il->scd_base_addr +
2218 IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
2219 (SCD_WIN_SIZE << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS)
2220 & IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
2221
2222 il_write_targ_mem(il,
2223 il->scd_base_addr +
2224 IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
2225 (SCD_FRAME_LIMIT <<
2226 IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
2227 IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
2228
2229 il_set_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
2230
2231 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
2232 il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 1);
2233
2234 spin_unlock_irqrestore(&il->lock, flags);
2235
2236 return 0;
2237}
2238
/**
 * il4965_tx_agg_start - start a Tx aggregation session for <sta, tid>
 *
 * Claims a free AMPDU queue, configures it via il4965_txq_agg_enable(),
 * and either completes the ADDBA handshake immediately (when the HW
 * queue is empty) or defers it until the queue drains
 * (IL_EMPTYING_HW_QUEUE_ADDBA, finished in il4965_txq_check_empty()).
 */
int
il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta, u16 tid, u16 * ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct il_tid_data *tid_data;

	tx_fifo = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid);
	if (unlikely(tx_fifo < 0))
		return tx_fifo;

	D_HT("%s on ra = %pM tid = %d\n", __func__, sta->addr, tid);

	sta_id = il_sta_id(sta);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	/* Only one aggregation session per <sta, tid> at a time */
	if (il->stations[sta_id].tid[tid].agg.state != IL_AGG_OFF) {
		IL_ERR("Start AGG when state is not IL_AGG_OFF !\n");
		return -ENXIO;
	}

	/* Claim the lowest unused queue for this session */
	txq_id = il4965_txq_ctx_activate_free(il);
	if (txq_id == -1) {
		IL_ERR("No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	tid_data = &il->stations[sta_id].tid[tid];
	/* Tell mac80211 the SSN the session will start from */
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	il_set_swq_id(&il->txq[txq_id], il4965_get_ac_from_tid(tid), txq_id);
	spin_unlock_irqrestore(&il->sta_lock, flags);

	ret = il4965_txq_agg_enable(il, txq_id, tx_fifo, sta_id, tid, *ssn);
	if (ret)
		return ret;

	spin_lock_irqsave(&il->sta_lock, flags);
	tid_data = &il->stations[sta_id].tid[tid];
	if (tid_data->tfds_in_queue == 0) {
		/* Nothing pending: ADDBA can complete right away */
		D_HT("HW queue is empty\n");
		tid_data->agg.state = IL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		/* Wait for in-flight frames to drain before completing */
		D_HT("HW queue is NOT empty: %d packets in HW queue\n",
		     tid_data->tfds_in_queue);
		tid_data->agg.state = IL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_irqrestore(&il->sta_lock, flags);
	return ret;
}
2300
2301/**
2302 * txq_id must be greater than IL49_FIRST_AMPDU_QUEUE
2303 * il->lock must be held by the caller
2304 */
2305static int
2306il4965_txq_agg_disable(struct il_priv *il, u16 txq_id, u16 ssn_idx, u8 tx_fifo)
2307{
2308 if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
2309 (IL49_FIRST_AMPDU_QUEUE +
2310 il->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
2311 IL_WARN("queue number out of range: %d, must be %d to %d\n",
2312 txq_id, IL49_FIRST_AMPDU_QUEUE,
2313 IL49_FIRST_AMPDU_QUEUE +
2314 il->cfg->base_params->num_of_ampdu_queues - 1);
2315 return -EINVAL;
2316 }
2317
2318 il4965_tx_queue_stop_scheduler(il, txq_id);
2319
2320 il_clear_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
2321
2322 il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
2323 il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
2324 /* supposes that ssn_idx is valid (!= 0xFFF) */
2325 il4965_set_wr_ptrs(il, txq_id, ssn_idx);
2326
2327 il_clear_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
2328 il_txq_ctx_deactivate(il, txq_id);
2329 il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 0);
2330
2331 return 0;
2332}
2333
/**
 * il4965_tx_agg_stop - tear down a Tx aggregation session for <sta, tid>
 *
 * If the aggregation HW queue still holds frames, only marks the state
 * IL_EMPTYING_HW_QUEUE_DELBA and returns; il4965_txq_check_empty()
 * finishes the teardown once the queue drains. Otherwise disables the
 * queue immediately and notifies mac80211.
 */
int
il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn;
	struct il_tid_data *tid_data;
	int write_ptr, read_ptr;
	unsigned long flags;

	tx_fifo_id = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid);
	if (unlikely(tx_fifo_id < 0))
		return tx_fifo_id;

	sta_id = il_sta_id(sta);

	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&il->sta_lock, flags);

	tid_data = &il->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;

	switch (il->stations[sta_id].tid[tid].agg.state) {
	case IL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		D_HT("AGG stop before setup done\n");
		goto turn_off;
	case IL_AGG_ON:
		break;
	default:
		/* Unexpected state: warn but proceed with teardown anyway */
		IL_WARN("Stopping AGG while state not ON or starting\n");
	}

	write_ptr = il->txq[txq_id].q.write_ptr;
	read_ptr = il->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		/* Defer: il4965_txq_check_empty() completes the DELBA flow */
		D_HT("Stopping a non empty AGG HW QUEUE\n");
		il->stations[sta_id].tid[tid].agg.state =
		    IL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	D_HT("HW queue is empty\n");
turn_off:
	il->stations[sta_id].tid[tid].agg.state = IL_AGG_OFF;

	/* do not restore/save irqs: the irqsave flags taken with sta_lock
	 * are deliberately restored when releasing il->lock below */
	spin_unlock(&il->sta_lock);
	spin_lock(&il->lock);

	/*
	 * the only reason this call can fail is queue number out of range,
	 * which can happen if uCode is reloaded and all the station
	 * information are lost. if it is outside the range, there is no need
	 * to deactivate the uCode queue, just return "success" to allow
	 * mac80211 to clean up it own data.
	 */
	il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo_id);
	spin_unlock_irqrestore(&il->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}
2410
/**
 * il4965_txq_check_empty - finish deferred ADDBA/DELBA flows
 *
 * Called while reclaiming frames; when the aggregation queue for
 * <sta_id, tid> has just drained, completes the pending session start
 * (ADDBA) or teardown (DELBA) that was deferred while frames were
 * still in flight.
 */
int
il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id)
{
	struct il_queue *q = &il->txq[txq_id].q;
	u8 *addr = il->stations[sta_id].sta.sta.addr;
	struct il_tid_data *tid_data = &il->stations[sta_id].tid[tid];
	struct il_rxon_context *ctx;

	ctx = &il->ctx;

	lockdep_assert_held(&il->sta_lock);

	switch (il->stations[sta_id].tid[tid].agg.state) {
	case IL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the */
		/* aggregated HW queue */
		if (txq_id == tid_data->agg.txq_id &&
		    q->read_ptr == q->write_ptr) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = il4965_get_fifo_from_tid(ctx, tid);
			D_HT("HW queue empty: continue DELBA flow\n");
			il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo);
			tid_data->agg.state = IL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
		}
		break;
	case IL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			D_HT("HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
		}
		break;
	}

	return 0;
}
2449
2450static void
2451il4965_non_agg_tx_status(struct il_priv *il, struct il_rxon_context *ctx,
2452 const u8 *addr1)
2453{
2454 struct ieee80211_sta *sta;
2455 struct il_station_priv *sta_priv;
2456
2457 rcu_read_lock();
2458 sta = ieee80211_find_sta(ctx->vif, addr1);
2459 if (sta) {
2460 sta_priv = (void *)sta->drv_priv;
2461 /* avoid atomic ops if this isn't a client */
2462 if (sta_priv->client &&
2463 atomic_dec_return(&sta_priv->pending_frames) == 0)
2464 ieee80211_sta_block_awake(il->hw, sta, false);
2465 }
2466 rcu_read_unlock();
2467}
2468
2469static void
2470il4965_tx_status(struct il_priv *il, struct il_tx_info *tx_info, bool is_agg)
2471{
2472 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
2473
2474 if (!is_agg)
2475 il4965_non_agg_tx_status(il, tx_info->ctx, hdr->addr1);
2476
2477 ieee80211_tx_status_irqsafe(il->hw, tx_info->skb);
2478}
2479
/**
 * il4965_tx_queue_reclaim - reclaim completed TFDs up to (not including) idx
 *
 * Advances the queue's read pointer to @idx, reporting status to
 * mac80211 and freeing each TFD on the way. Returns the number of
 * reclaimed QoS-data frames (used by the caller to update
 * tfds_in_queue accounting).
 */
int
il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
{
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct il_queue *q = &txq->q;
	struct il_tx_info *tx_info;
	int nfreed = 0;
	struct ieee80211_hdr *hdr;

	if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
		IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
		       "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
		       q->write_ptr, q->read_ptr);
		return 0;
	}

	/* Walk the ring from read_ptr up to (and including) idx */
	for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];

		/* continue still advances read_ptr via the loop step */
		if (WARN_ON_ONCE(tx_info->skb == NULL))
			continue;

		hdr = (struct ieee80211_hdr *)tx_info->skb->data;
		/* only QoS-data frames count toward tfds_in_queue */
		if (ieee80211_is_data_qos(hdr->frame_control))
			nfreed++;

		il4965_tx_status(il, tx_info,
				 txq_id >= IL4965_FIRST_AMPDU_QUEUE);
		tx_info->skb = NULL;

		il->cfg->ops->lib->txq_free_tfd(il, txq);
	}
	return nfreed;
}
2516
2517/**
2518 * il4965_tx_status_reply_compressed_ba - Update tx status from block-ack
2519 *
2520 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
2521 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
2522 */
2523static int
2524il4965_tx_status_reply_compressed_ba(struct il_priv *il, struct il_ht_agg *agg,
2525 struct il_compressed_ba_resp *ba_resp)
2526{
2527 int i, sh, ack;
2528 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
2529 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
2530 int successes = 0;
2531 struct ieee80211_tx_info *info;
2532 u64 bitmap, sent_bitmap;
2533
2534 if (unlikely(!agg->wait_for_ba)) {
2535 if (unlikely(ba_resp->bitmap))
2536 IL_ERR("Received BA when not expected\n");
2537 return -EINVAL;
2538 }
2539
2540 /* Mark that the expected block-ack response arrived */
2541 agg->wait_for_ba = 0;
2542 D_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
2543
2544 /* Calculate shift to align block-ack bits with our Tx win bits */
2545 sh = agg->start_idx - SEQ_TO_IDX(seq_ctl >> 4);
2546 if (sh < 0) /* tbw something is wrong with indices */
2547 sh += 0x100;
2548
2549 if (agg->frame_count > (64 - sh)) {
2550 D_TX_REPLY("more frames than bitmap size");
2551 return -1;
2552 }
2553
2554 /* don't use 64-bit values for now */
2555 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
2556
2557 /* check for success or failure according to the
2558 * transmitted bitmap and block-ack bitmap */
2559 sent_bitmap = bitmap & agg->bitmap;
2560
2561 /* For each frame attempted in aggregation,
2562 * update driver's record of tx frame's status. */
2563 i = 0;
2564 while (sent_bitmap) {
2565 ack = sent_bitmap & 1ULL;
2566 successes += ack;
2567 D_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", ack ? "ACK" : "NACK",
2568 i, (agg->start_idx + i) & 0xff, agg->start_idx + i);
2569 sent_bitmap >>= 1;
2570 ++i;
2571 }
2572
2573 D_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);
2574
2575 info = IEEE80211_SKB_CB(il->txq[scd_flow].txb[agg->start_idx].skb);
2576 memset(&info->status, 0, sizeof(info->status));
2577 info->flags |= IEEE80211_TX_STAT_ACK;
2578 info->flags |= IEEE80211_TX_STAT_AMPDU;
2579 info->status.ampdu_ack_len = successes;
2580 info->status.ampdu_len = agg->frame_count;
2581 il4965_hwrate_to_tx_control(il, agg->rate_n_flags, info);
2582
2583 return 0;
2584}
2585
2586/**
2587 * translate ucode response to mac80211 tx status control values
2588 */
2589void
2590il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
2591 struct ieee80211_tx_info *info)
2592{
2593 struct ieee80211_tx_rate *r = &info->control.rates[0];
2594
2595 info->antenna_sel_tx =
2596 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
2597 if (rate_n_flags & RATE_MCS_HT_MSK)
2598 r->flags |= IEEE80211_TX_RC_MCS;
2599 if (rate_n_flags & RATE_MCS_GF_MSK)
2600 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
2601 if (rate_n_flags & RATE_MCS_HT40_MSK)
2602 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
2603 if (rate_n_flags & RATE_MCS_DUP_MSK)
2604 r->flags |= IEEE80211_TX_RC_DUP_DATA;
2605 if (rate_n_flags & RATE_MCS_SGI_MSK)
2606 r->flags |= IEEE80211_TX_RC_SHORT_GI;
2607 r->idx = il4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
2608}
2609
2610/**
2611 * il4965_hdl_compressed_ba - Handler for N_COMPRESSED_BA
2612 *
2613 * Handles block-acknowledge notification from device, which reports success
2614 * of frames sent via aggregation.
2615 */
2616void
2617il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb)
2618{
2619 struct il_rx_pkt *pkt = rxb_addr(rxb);
2620 struct il_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
2621 struct il_tx_queue *txq = NULL;
2622 struct il_ht_agg *agg;
2623 int idx;
2624 int sta_id;
2625 int tid;
2626 unsigned long flags;
2627
2628 /* "flow" corresponds to Tx queue */
2629 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
2630
2631 /* "ssn" is start of block-ack Tx win, corresponds to idx
2632 * (in Tx queue's circular buffer) of first TFD/frame in win */
2633 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
2634
2635 if (scd_flow >= il->hw_params.max_txq_num) {
2636 IL_ERR("BUG_ON scd_flow is bigger than number of queues\n");
2637 return;
2638 }
2639
2640 txq = &il->txq[scd_flow];
2641 sta_id = ba_resp->sta_id;
2642 tid = ba_resp->tid;
2643 agg = &il->stations[sta_id].tid[tid].agg;
2644 if (unlikely(agg->txq_id != scd_flow)) {
2645 /*
2646 * FIXME: this is a uCode bug which need to be addressed,
2647 * log the information and return for now!
2648 * since it is possible happen very often and in order
2649 * not to fill the syslog, don't enable the logging by default
2650 */
2651 D_TX_REPLY("BA scd_flow %d does not match txq_id %d\n",
2652 scd_flow, agg->txq_id);
2653 return;
2654 }
2655
2656 /* Find idx just before block-ack win */
2657 idx = il_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
2658
2659 spin_lock_irqsave(&il->sta_lock, flags);
2660
2661 D_TX_REPLY("N_COMPRESSED_BA [%d] Received from %pM, " "sta_id = %d\n",
2662 agg->wait_for_ba, (u8 *) &ba_resp->sta_addr_lo32,
2663 ba_resp->sta_id);
2664 D_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx," "scd_flow = "
2665 "%d, scd_ssn = %d\n", ba_resp->tid, ba_resp->seq_ctl,
2666 (unsigned long long)le64_to_cpu(ba_resp->bitmap),
2667 ba_resp->scd_flow, ba_resp->scd_ssn);
2668 D_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n", agg->start_idx,
2669 (unsigned long long)agg->bitmap);
2670
2671 /* Update driver's record of ACK vs. not for each frame in win */
2672 il4965_tx_status_reply_compressed_ba(il, agg, ba_resp);
2673
2674 /* Release all TFDs before the SSN, i.e. all TFDs in front of
2675 * block-ack win (we assume that they've been successfully
2676 * transmitted ... if not, it's too late anyway). */
2677 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
2678 /* calculate mac80211 ampdu sw queue to wake */
2679 int freed = il4965_tx_queue_reclaim(il, scd_flow, idx);
2680 il4965_free_tfds_in_queue(il, sta_id, tid, freed);
2681
2682 if (il_queue_space(&txq->q) > txq->q.low_mark &&
2683 il->mac80211_registered &&
2684 agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
2685 il_wake_queue(il, txq);
2686
2687 il4965_txq_check_empty(il, sta_id, tid, scd_flow);
2688 }
2689
2690 spin_unlock_irqrestore(&il->sta_lock, flags);
2691}
2692
2693#ifdef CONFIG_IWLEGACY_DEBUG
/*
 * il4965_get_tx_fail_reason - map a Tx status word to a printable string
 *
 * Only the bits covered by TX_STATUS_MSK are examined; unrecognized
 * values yield "UNKNOWN". The helper macros expand each case into
 * `case TX_STATUS_..._x: return "x";`.
 */
const char *
il4965_get_tx_fail_reason(u32 status)
{
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_POSTPONE(DELAY);
		TX_STATUS_POSTPONE(FEW_BYTES);
		TX_STATUS_POSTPONE(QUIET_PERIOD);
		TX_STATUS_POSTPONE(CALC_TTAK);
		TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
		TX_STATUS_FAIL(SHORT_LIMIT);
		TX_STATUS_FAIL(LONG_LIMIT);
		TX_STATUS_FAIL(FIFO_UNDERRUN);
		TX_STATUS_FAIL(DRAIN_FLOW);
		TX_STATUS_FAIL(RFKILL_FLUSH);
		TX_STATUS_FAIL(LIFE_EXPIRE);
		TX_STATUS_FAIL(DEST_PS);
		TX_STATUS_FAIL(HOST_ABORTED);
		TX_STATUS_FAIL(BT_RETRY);
		TX_STATUS_FAIL(STA_INVALID);
		TX_STATUS_FAIL(FRAG_DROPPED);
		TX_STATUS_FAIL(TID_DISABLE);
		TX_STATUS_FAIL(FIFO_FLUSHED);
		TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
		TX_STATUS_FAIL(PASSIVE_NO_RX);
		TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
2731#endif /* CONFIG_IWLEGACY_DEBUG */
2732
/*
 * il4965_sta_alloc_lq - build a default link-quality command for a station
 *
 * Allocates and fills an il_link_quality_cmd with a rate-scaling table
 * that starts at the band's lowest mandatory rate (6M on 5GHz, 1M
 * otherwise) for every retry slot, plus sensible antenna masks and
 * aggregation parameters. Caller owns (and must free) the returned
 * command; returns NULL on allocation failure.
 */
static struct il_link_quality_cmd *
il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id)
{
	int i, r;
	struct il_link_quality_cmd *link_cmd;
	u32 rate_flags = 0;
	__le32 rate_n_flags;

	link_cmd = kzalloc(sizeof(struct il_link_quality_cmd), GFP_KERNEL);
	if (!link_cmd) {
		IL_ERR("Unable to allocate memory for LQ cmd.\n");
		return NULL;
	}
	/* Set up the rate scaling to start at selected rate, fall back
	 * all the way down to 1M in IEEE order, and then spin on 1M */
	if (il->band == IEEE80211_BAND_5GHZ)
		r = RATE_6M_IDX;
	else
		r = RATE_1M_IDX;

	/* CCK rates need the CCK modulation flag */
	if (r >= IL_FIRST_CCK_RATE && r <= IL_LAST_CCK_RATE)
		rate_flags |= RATE_MCS_CCK_MSK;

	rate_flags |=
	    il4965_first_antenna(il->hw_params.
				 valid_tx_ant) << RATE_MCS_ANT_POS;
	rate_n_flags = il4965_hw_set_rate_n_flags(il_rates[r].plcp, rate_flags);
	/* same starting rate for every retry slot */
	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
		link_cmd->rs_table[i].rate_n_flags = rate_n_flags;

	link_cmd->general_params.single_stream_ant_msk =
	    il4965_first_antenna(il->hw_params.valid_tx_ant);

	link_cmd->general_params.dual_stream_ant_msk =
	    il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params.
							       valid_tx_ant);
	/* fall back to A+B if only one antenna is valid; use the full
	 * valid mask when exactly two antennas are available */
	if (!link_cmd->general_params.dual_stream_ant_msk) {
		link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
	} else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
		link_cmd->general_params.dual_stream_ant_msk =
		    il->hw_params.valid_tx_ant;
	}

	link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
	link_cmd->agg_params.agg_time_limit =
	    cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);

	link_cmd->sta_id = sta_id;

	return link_cmd;
}
2784
/*
 * il4965_add_bssid_station - Add the special IBSS BSSID station
 *
 * Adds @addr to the uCode station table, marks it driver-local, and
 * installs a default link-quality (rate scaling) table for it.  On
 * success *@sta_id_r (if non-NULL) receives the station id.
 *
 * Function sleeps (synchronous host commands).
 */
int
il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx,
			 const u8 *addr, u8 *sta_id_r)
{
	int ret;
	u8 sta_id;
	struct il_link_quality_cmd *link_cmd;
	unsigned long flags;

	if (sta_id_r)
		*sta_id_r = IL_INVALID_STATION;

	ret = il_add_station_common(il, ctx, addr, 0, NULL, &sta_id);
	if (ret) {
		IL_ERR("Unable to add station %pM\n", addr);
		return ret;
	}

	if (sta_id_r)
		*sta_id_r = sta_id;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].used |= IL_STA_LOCAL;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	/* Set up default rate scaling table in device's station table */
	link_cmd = il4965_sta_alloc_lq(il, sta_id);
	if (!link_cmd) {
		IL_ERR("Unable to initialize rate scaling for station %pM.\n",
		       addr);
		/* NOTE(review): station stays in the table without an LQ
		 * entry on this path — appears intentional; confirm. */
		return -ENOMEM;
	}

	ret = il_send_lq_cmd(il, ctx, link_cmd, CMD_SYNC, true);
	if (ret)
		IL_ERR("Link quality command failed (%d)\n", ret);

	/* Record LQ ownership; il->stations[].lq now owns link_cmd */
	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].lq = link_cmd;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	/* LQ send failure is logged but not propagated to the caller */
	return 0;
}
2833
/*
 * il4965_static_wepkey_cmd - sync the static (default) WEP keys to uCode
 *
 * Builds a C_WEPKEY command describing all WEP_KEYS_MAX default-key slots
 * from ctx->wep_keys and sends it synchronously.  Empty slots get
 * WEP_INVALID_OFFSET.  If every slot is empty the command is only sent
 * when @send_if_empty is set (used to clear keys on removal).
 *
 * Sleeps; returns 0 or the il_send_cmd() error.
 */
static int
il4965_static_wepkey_cmd(struct il_priv *il, struct il_rxon_context *ctx,
			 bool send_if_empty)
{
	int i, not_empty = 0;
	/* Command header plus one il_wep_key entry per default-key slot */
	u8 buff[sizeof(struct il_wep_cmd) +
		sizeof(struct il_wep_key) * WEP_KEYS_MAX];
	struct il_wep_cmd *wep_cmd = (struct il_wep_cmd *)buff;
	size_t cmd_size = sizeof(struct il_wep_cmd);
	struct il_host_cmd cmd = {
		.id = ctx->wep_key_cmd,
		.data = wep_cmd,
		.flags = CMD_SYNC,
	};

	might_sleep();

	memset(wep_cmd, 0,
	       cmd_size + (sizeof(struct il_wep_key) * WEP_KEYS_MAX));

	for (i = 0; i < WEP_KEYS_MAX; i++) {
		wep_cmd->key[i].key_idx = i;
		if (ctx->wep_keys[i].key_size) {
			wep_cmd->key[i].key_offset = i;
			not_empty = 1;
		} else {
			wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
		}

		wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
		/* Key material starts at byte 3 of the uCode key field —
		 * same layout as the dynamic WEP path below */
		memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
		       ctx->wep_keys[i].key_size);
	}

	wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
	wep_cmd->num_keys = WEP_KEYS_MAX;

	cmd_size += sizeof(struct il_wep_key) * WEP_KEYS_MAX;

	cmd.len = cmd_size;

	if (not_empty || send_if_empty)
		return il_send_cmd(il, &cmd);
	else
		return 0;
}
2880
/*
 * il4965_restore_default_wep_keys - re-send cached default WEP keys
 *
 * Pushes ctx->wep_keys back to the device (e.g. after a uCode restart);
 * does nothing if no default keys are configured.  Caller holds il->mutex.
 */
int
il4965_restore_default_wep_keys(struct il_priv *il, struct il_rxon_context *ctx)
{
	lockdep_assert_held(&il->mutex);

	return il4965_static_wepkey_cmd(il, ctx, false);
}
2888
2889int
2890il4965_remove_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
2891 struct ieee80211_key_conf *keyconf)
2892{
2893 int ret;
2894
2895 lockdep_assert_held(&il->mutex);
2896
2897 D_WEP("Removing default WEP key: idx=%d\n", keyconf->keyidx);
2898
2899 memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
2900 if (il_is_rfkill(il)) {
2901 D_WEP("Not sending C_WEPKEY command due to RFKILL.\n");
2902 /* but keys in device are clear anyway so return success */
2903 return 0;
2904 }
2905 ret = il4965_static_wepkey_cmd(il, ctx, 1);
2906 D_WEP("Remove default WEP key: idx=%d ret=%d\n", keyconf->keyidx, ret);
2907
2908 return ret;
2909}
2910
/*
 * il4965_set_default_wep_key - install a default (static) WEP key
 *
 * Validates the key length (64- or 104-bit WEP only), caches the key in
 * ctx->wep_keys[keyidx], and sends the full key table to the device.
 * IV generation is left to hardware.  Caller holds il->mutex; sleeps.
 *
 * Returns 0, -EINVAL for a bad key length, or the command error.
 */
int
il4965_set_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
			   struct ieee80211_key_conf *keyconf)
{
	int ret;

	lockdep_assert_held(&il->mutex);

	if (keyconf->keylen != WEP_KEY_LEN_128 &&
	    keyconf->keylen != WEP_KEY_LEN_64) {
		D_WEP("Bad WEP key length %d\n", keyconf->keylen);
		return -EINVAL;
	}

	/* Hardware builds the IV; mac80211 must not generate one */
	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->hw_key_idx = HW_KEY_DEFAULT;
	il->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;

	ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
	memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
	       keyconf->keylen);

	ret = il4965_static_wepkey_cmd(il, ctx, false);
	D_WEP("Set default WEP key: len=%d idx=%d ret=%d\n", keyconf->keylen,
	      keyconf->keyidx, ret);

	return ret;
}
2939
/*
 * il4965_set_wep_dynamic_key_info - program a pairwise (mapped) WEP key
 *
 * Fills the station's key info under sta_lock, allocating a uCode key
 * offset only if the station has no key yet, then sends the resulting
 * ADD_STA command synchronously from a stack copy (so the lock is not
 * held across the command).  Caller holds il->mutex.
 */
static int
il4965_set_wep_dynamic_key_info(struct il_priv *il, struct il_rxon_context *ctx,
				struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	/* Hardware generates the WEP IV */
	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;

	key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (keyconf->keylen == WEP_KEY_LEN_128)
		key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;

	if (sta_id == ctx->bcast_sta_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
	il->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;

	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);

	/* WEP key material lives at offset 3 in the uCode key field */
	memcpy(&il->stations[sta_id].sta.key.key[3], keyconf->key,
	       keyconf->keylen);

	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else, we are overriding an existing key => no need to allocate room
	 * in uCode. */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* Snapshot under the lock; send outside it */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
2993
/*
 * il4965_set_ccmp_dynamic_key_info - program a pairwise CCMP (AES) key
 *
 * Same pattern as the WEP variant: fill station key state under
 * sta_lock, allocate a uCode key offset only for a brand-new key, and
 * send ADD_STA synchronously from a stack copy.  Unlike WEP, mac80211
 * must generate the IV (PN) for CCMP.  Caller holds il->mutex.
 */
static int
il4965_set_ccmp_dynamic_key_info(struct il_priv *il,
				 struct il_rxon_context *ctx,
				 struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (sta_id == ctx->bcast_sta_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	/* Software (mac80211) supplies the CCMP IV/PN */
	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;

	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);

	/* CCMP key starts at offset 0 (no 3-byte shift like WEP) */
	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);

	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else, we are overriding an existing key => no need to allocate room
	 * in uCode. */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3042
/*
 * il4965_set_tkip_dynamic_key_info - stage a pairwise TKIP key
 *
 * Records key state and flags under sta_lock but does NOT send ADD_STA:
 * the per-packet phase-1 key is pushed later via
 * il4965_update_tkip_key().  mac80211 generates both the IV and the
 * Michael MIC.  Always returns 0.
 */
static int
il4965_set_tkip_dynamic_key_info(struct il_priv *il,
				 struct il_rxon_context *ctx,
				 struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	int ret = 0;
	__le16 key_flags = 0;

	key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (sta_id == ctx->bcast_sta_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	/* Only the 16-byte temporal key goes to hardware, not the MIC keys */
	il->stations[sta_id].keyinfo.keylen = 16;

	if ((il->stations[sta_id].sta.key.
	     key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
		il->stations[sta_id].sta.key.key_offset =
		    il_get_free_ucode_key_idx(il);
	/* else, we are overriding an existing key => no need to allocate room
	 * in uCode. */

	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
	     "no space for a new key");

	il->stations[sta_id].sta.key.key_flags = key_flags;

	/* This copy is actually not needed: we get the key with each TX */
	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, 16);

	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, 16);

	spin_unlock_irqrestore(&il->sta_lock, flags);

	return ret;
}
3088
/*
 * il4965_update_tkip_key - push a new TKIP phase-1 (TTAK) key to uCode
 *
 * Called by mac80211 whenever iv32 rolls over.  A running scan is
 * cancelled first because the key update must reach the device promptly;
 * if cancellation fails we briefly fall back to software decryption.
 * The ADD_STA is sent asynchronously while still holding sta_lock.
 */
void
il4965_update_tkip_key(struct il_priv *il, struct il_rxon_context *ctx,
		       struct ieee80211_key_conf *keyconf,
		       struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
{
	u8 sta_id;
	unsigned long flags;
	int i;

	if (il_scan_cancel(il)) {
		/* cancel scan failed, just live w/ bad key and rely
		   briefly on SW decryption */
		return;
	}

	/* NULL sta means the group key -> broadcast station */
	sta_id = il_sta_id_or_broadcast(il, ctx, sta);
	if (sta_id == IL_INVALID_STATION)
		return;

	spin_lock_irqsave(&il->sta_lock, flags);

	il->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;

	/* TTAK is five 16-bit words */
	for (i = 0; i < 5; i++)
		il->stations[sta_id].sta.key.tkip_rx_ttak[i] =
		    cpu_to_le16(phase1key[i]);

	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* CMD_ASYNC: safe to issue under the spinlock */
	il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);

	spin_unlock_irqrestore(&il->sta_lock, flags);

}
3124
/*
 * il4965_remove_dynamic_key - remove a pairwise key from the device
 *
 * Clears the driver's key state for @sta_id, releases the uCode key
 * offset, and sends an ADD_STA marking the key invalid.  Quietly
 * succeeds when the installed key idx no longer matches (key was
 * already replaced), when no key is installed, or under RF-kill (device
 * state is lost anyway).  Caller holds il->mutex; sleeps.
 */
int
il4965_remove_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
			  struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	unsigned long flags;
	u16 key_flags;
	u8 keyidx;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	ctx->key_mapping_keys--;

	spin_lock_irqsave(&il->sta_lock, flags);
	key_flags = le16_to_cpu(il->stations[sta_id].sta.key.key_flags);
	keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;

	D_WEP("Remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id);

	if (keyconf->keyidx != keyidx) {
		/* We need to remove a key with idx different that the one
		 * in the uCode. This means that the key we need to remove has
		 * been replaced by another one with different idx.
		 * Don't do anything and return ok
		 */
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
		IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
			key_flags);
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}

	/* Release the key slot in the driver's uCode-offset bitmap */
	if (!test_and_clear_bit
	    (il->stations[sta_id].sta.key.key_offset, &il->ucode_key_table))
		IL_ERR("idx %d not used in uCode key table.\n",
		       il->stations[sta_id].sta.key.key_offset);
	memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
	memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
	il->stations[sta_id].sta.key.key_flags =
	    STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
	il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	if (il_is_rfkill(il)) {
		D_WEP
		    ("Not sending C_ADD_STA command because RFKILL enabled.\n");
		spin_unlock_irqrestore(&il->sta_lock, flags);
		return 0;
	}
	/* Snapshot under the lock; send outside it */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3185
3186int
3187il4965_set_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
3188 struct ieee80211_key_conf *keyconf, u8 sta_id)
3189{
3190 int ret;
3191
3192 lockdep_assert_held(&il->mutex);
3193
3194 ctx->key_mapping_keys++;
3195 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
3196
3197 switch (keyconf->cipher) {
3198 case WLAN_CIPHER_SUITE_CCMP:
3199 ret =
3200 il4965_set_ccmp_dynamic_key_info(il, ctx, keyconf, sta_id);
3201 break;
3202 case WLAN_CIPHER_SUITE_TKIP:
3203 ret =
3204 il4965_set_tkip_dynamic_key_info(il, ctx, keyconf, sta_id);
3205 break;
3206 case WLAN_CIPHER_SUITE_WEP40:
3207 case WLAN_CIPHER_SUITE_WEP104:
3208 ret = il4965_set_wep_dynamic_key_info(il, ctx, keyconf, sta_id);
3209 break;
3210 default:
3211 IL_ERR("Unknown alg: %s cipher = %x\n", __func__,
3212 keyconf->cipher);
3213 ret = -EINVAL;
3214 }
3215
3216 D_WEP("Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
3217 keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);
3218
3219 return ret;
3220}
3221
/**
 * il4965_alloc_bcast_station - add broadcast station into driver's station table.
 *
 * This adds the broadcast station into the driver's station table
 * and marks it driver active, so that it will be restored to the
 * device at the next best time.
 *
 * Returns 0, -EINVAL if no station slot is free, or -ENOMEM if the
 * link-quality table cannot be allocated (station entry is kept).
 */
int
il4965_alloc_bcast_station(struct il_priv *il, struct il_rxon_context *ctx)
{
	struct il_link_quality_cmd *link_cmd;
	unsigned long flags;
	u8 sta_id;

	spin_lock_irqsave(&il->sta_lock, flags);
	sta_id = il_prep_station(il, ctx, il_bcast_addr, false, NULL);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Unable to prepare broadcast station\n");
		spin_unlock_irqrestore(&il->sta_lock, flags);

		return -EINVAL;
	}

	il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
	il->stations[sta_id].used |= IL_STA_BCAST;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	/* Allocation sleeps, so it happens outside sta_lock */
	link_cmd = il4965_sta_alloc_lq(il, sta_id);
	if (!link_cmd) {
		IL_ERR
		    ("Unable to initialize rate scaling for bcast station.\n");
		return -ENOMEM;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].lq = link_cmd;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return 0;
}
3262
/**
 * il4965_update_bcast_station - update broadcast station's LQ command
 *
 * Only used by iwl4965. Placed here to have all bcast station management
 * code together.
 *
 * Replaces (and frees) any previously installed link-quality table for
 * the broadcast station with a freshly built default one.
 */
static int
il4965_update_bcast_station(struct il_priv *il, struct il_rxon_context *ctx)
{
	unsigned long flags;
	struct il_link_quality_cmd *link_cmd;
	u8 sta_id = ctx->bcast_sta_id;

	link_cmd = il4965_sta_alloc_lq(il, sta_id);
	if (!link_cmd) {
		IL_ERR("Unable to initialize rate scaling for bcast sta.\n");
		return -ENOMEM;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	/* Drop the old table; the NULL case only merits a debug note */
	if (il->stations[sta_id].lq)
		kfree(il->stations[sta_id].lq);
	else
		D_INFO("Bcast sta rate scaling has not been initialized.\n");
	il->stations[sta_id].lq = link_cmd;
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return 0;
}
3292
/* Refresh the broadcast station LQ for the (single) RXON context. */
int
il4965_update_bcast_stations(struct il_priv *il)
{
	return il4965_update_bcast_station(il, &il->ctx);
}
3298
/**
 * il4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
 *
 * Clears the per-TID "disable TX" bit for @sta_id and pushes the change
 * to the device with a synchronous ADD_STA.  Caller holds il->mutex.
 */
int
il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid)
{
	unsigned long flags;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	/* Remove "disable" flag, to enable Tx for this TID */
	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
	il->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	/* Snapshot under the lock; send outside it */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3321
/*
 * il4965_sta_rx_agg_start - tell uCode to accept an RX BA session
 *
 * Programs the immediate block-ack TID and starting sequence number for
 * @sta and sends a synchronous ADD_STA.  Caller holds il->mutex.
 * Returns -ENXIO when the station is unknown.
 */
int
il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta, int tid,
			u16 ssn)
{
	unsigned long flags;
	int sta_id;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	sta_id = il_sta_id(sta);
	if (sta_id == IL_INVALID_STATION)
		return -ENXIO;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].sta.station_flags_msk = 0;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
	il->stations[sta_id].sta.add_immediate_ba_tid = (u8) tid;
	il->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3348
/*
 * il4965_sta_rx_agg_stop - tell uCode to tear down an RX BA session
 *
 * Counterpart of il4965_sta_rx_agg_start(): programs the immediate
 * DELBA TID and sends a synchronous ADD_STA.  Caller holds il->mutex.
 */
int
il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta, int tid)
{
	unsigned long flags;
	int sta_id;
	struct il_addsta_cmd sta_cmd;

	lockdep_assert_held(&il->mutex);

	sta_id = il_sta_id(sta);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].sta.station_flags_msk = 0;
	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
	il->stations[sta_id].sta.remove_immediate_ba_tid = (u8) tid;
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
}
3375
/*
 * il4965_sta_modify_sleep_tx_count - allow @cnt frames to a sleeping STA
 *
 * Marks the station power-saving and sets how many frames uCode may
 * still transmit to it (PS-Poll / U-APSD service period).  Sent
 * asynchronously, so this is safe under the spinlock.
 */
void
il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&il->sta_lock, flags);
	il->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
	il->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
	il->stations[sta_id].sta.sta.modify_mask =
	    STA_MODIFY_SLEEP_TX_COUNT_MSK;
	il->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
	spin_unlock_irqrestore(&il->sta_lock, flags);

}
3392
/*
 * il4965_update_chain_flags - recompute RX chain config, commit if changed
 *
 * Asks the hcmd ops to rebuild the staging RXON rx_chain and commits the
 * RXON only when it actually differs from the active one.
 */
void
il4965_update_chain_flags(struct il_priv *il)
{
	if (il->cfg->ops->hcmd->set_rxon_chain) {
		il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
		if (il->ctx.active.rx_chain != il->ctx.staging.rx_chain)
			il_commit_rxon(il, &il->ctx);
	}
}
3402
3403static void
3404il4965_clear_free_frames(struct il_priv *il)
3405{
3406 struct list_head *element;
3407
3408 D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
3409
3410 while (!list_empty(&il->free_frames)) {
3411 element = il->free_frames.next;
3412 list_del(element);
3413 kfree(list_entry(element, struct il_frame, list));
3414 il->frames_count--;
3415 }
3416
3417 if (il->frames_count) {
3418 IL_WARN("%d frames still in use. Did we lose one?\n",
3419 il->frames_count);
3420 il->frames_count = 0;
3421 }
3422}
3423
3424static struct il_frame *
3425il4965_get_free_frame(struct il_priv *il)
3426{
3427 struct il_frame *frame;
3428 struct list_head *element;
3429 if (list_empty(&il->free_frames)) {
3430 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
3431 if (!frame) {
3432 IL_ERR("Could not allocate frame!\n");
3433 return NULL;
3434 }
3435
3436 il->frames_count++;
3437 return frame;
3438 }
3439
3440 element = il->free_frames.next;
3441 list_del(element);
3442 return list_entry(element, struct il_frame, list);
3443}
3444
/* Scrub a frame and park it on the free list for later reuse. */
static void
il4965_free_frame(struct il_priv *il, struct il_frame *frame)
{
	memset(frame, 0, sizeof(*frame));
	list_add(&frame->list, &il->free_frames);
}
3451
/*
 * il4965_fill_beacon_frame - copy the cached beacon skb into @hdr
 *
 * Returns the number of bytes copied, or 0 when there is no beacon or
 * it does not fit in @left bytes.  Caller holds il->mutex, which
 * protects il->beacon_skb.
 */
static u32
il4965_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
			 int left)
{
	lockdep_assert_held(&il->mutex);

	if (!il->beacon_skb)
		return 0;

	if (il->beacon_skb->len > left)
		return 0;

	memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);

	return il->beacon_skb->len;
}
3468
/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
static void
il4965_set_beacon_tim(struct il_priv *il,
		      struct il_tx_beacon_cmd *tx_beacon_cmd, u8 * beacon,
		      u32 frame_size)
{
	u16 tim_idx;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;

	/*
	 * The idx is relative to frame start but we start looking at the
	 * variable-length part of the beacon.
	 */
	tim_idx = mgmt->u.beacon.variable - beacon;

	/* Parse variable-length elements of beacon to find WLAN_EID_TIM;
	 * each IE is <id, len, data...>, so the stride is len + 2 */
	while ((tim_idx < (frame_size - 2)) &&
	       (beacon[tim_idx] != WLAN_EID_TIM))
		tim_idx += beacon[tim_idx + 1] + 2;

	/* If TIM field was found, set variables */
	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
		tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
		tx_beacon_cmd->tim_size = beacon[tim_idx + 1];
	} else
		IL_WARN("Unable to find TIM Element in beacon\n");
}
3496
/*
 * il4965_hw_get_beacon_cmd - build a complete TX-beacon command in @frame
 *
 * Copies the cached beacon into the frame buffer, fills in the TX
 * command fields (length, station, rate, flags) and locates the TIM
 * element.  Returns the total command size, or 0 on any failure
 * (no beacon context, beacon too large, or empty beacon).
 */
static unsigned int
il4965_hw_get_beacon_cmd(struct il_priv *il, struct il_frame *frame)
{
	struct il_tx_beacon_cmd *tx_beacon_cmd;
	u32 frame_size;
	u32 rate_flags;
	u32 rate;
	/*
	 * We have to set up the TX command, the TX Beacon command, and the
	 * beacon contents.
	 */

	lockdep_assert_held(&il->mutex);

	if (!il->beacon_ctx) {
		IL_ERR("trying to build beacon w/o beacon context!\n");
		return 0;
	}

	/* Initialize memory */
	tx_beacon_cmd = &frame->u.beacon;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

	/* Set up TX beacon contents */
	frame_size =
	    il4965_fill_beacon_frame(il, tx_beacon_cmd->frame,
				     sizeof(frame->u) - sizeof(*tx_beacon_cmd));
	if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
		return 0;
	if (!frame_size)
		return 0;

	/* Set up TX command fields */
	tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
	tx_beacon_cmd->tx.sta_id = il->beacon_ctx->bcast_sta_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	tx_beacon_cmd->tx.tx_flags =
	    TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK |
	    TX_CMD_FLG_STA_RATE_MSK;

	/* Set up TX beacon command fields */
	il4965_set_beacon_tim(il, tx_beacon_cmd, (u8 *) tx_beacon_cmd->frame,
			      frame_size);

	/* Set up packet rate and flags; rotate the mgmt TX antenna so
	 * beacons alternate across valid antennas */
	rate = il_get_lowest_plcp(il, il->beacon_ctx);
	il->mgmt_tx_ant =
	    il4965_toggle_tx_ant(il, il->mgmt_tx_ant,
				 il->hw_params.valid_tx_ant);
	rate_flags = il4965_ant_idx_to_flags(il->mgmt_tx_ant);
	if ((rate >= IL_FIRST_CCK_RATE) && (rate <= IL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;
	tx_beacon_cmd->tx.rate_n_flags =
	    il4965_hw_set_rate_n_flags(rate, rate_flags);

	return sizeof(*tx_beacon_cmd) + frame_size;
}
3554
3555int
3556il4965_send_beacon_cmd(struct il_priv *il)
3557{
3558 struct il_frame *frame;
3559 unsigned int frame_size;
3560 int rc;
3561
3562 frame = il4965_get_free_frame(il);
3563 if (!frame) {
3564 IL_ERR("Could not obtain free frame buffer for beacon "
3565 "command.\n");
3566 return -ENOMEM;
3567 }
3568
3569 frame_size = il4965_hw_get_beacon_cmd(il, frame);
3570 if (!frame_size) {
3571 IL_ERR("Error configuring the beacon command\n");
3572 il4965_free_frame(il, frame);
3573 return -EINVAL;
3574 }
3575
3576 rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);
3577
3578 il4965_free_frame(il, frame);
3579
3580 return rc;
3581}
3582
/*
 * il4965_tfd_tb_get_addr - read the DMA address of TB @idx in a TFD
 *
 * The hardware stores a 36-bit address: 32 low bits in tb->lo plus the
 * low nibble of hi_n_len as bits 35:32.  The double 16-bit shift avoids
 * undefined behavior when dma_addr_t is only 32 bits wide.
 */
static inline dma_addr_t
il4965_tfd_tb_get_addr(struct il_tfd *tfd, u8 idx)
{
	struct il_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		    ((dma_addr_t) (le16_to_cpu(tb->hi_n_len) & 0xF) << 16) <<
		    16;

	return addr;
}
3596
3597static inline u16
3598il4965_tfd_tb_get_len(struct il_tfd *tfd, u8 idx)
3599{
3600 struct il_tfd_tb *tb = &tfd->tbs[idx];
3601
3602 return le16_to_cpu(tb->hi_n_len) >> 4;
3603}
3604
/*
 * il4965_tfd_set_tb - write TB @idx of a TFD: 36-bit @addr and @len bytes
 *
 * Packs the low 32 address bits into tb->lo and bits 35:32 plus the
 * 12-bit length into hi_n_len, then bumps num_tbs to cover this entry.
 */
static inline void
il4965_tfd_set_tb(struct il_tfd *tfd, u8 idx, dma_addr_t addr, u16 len)
{
	struct il_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	/* Split >>32 into two 16-bit shifts for 32-bit dma_addr_t builds */
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}
3619
3620static inline u8
3621il4965_tfd_get_num_tbs(struct il_tfd *tfd)
3622{
3623 return tfd->num_tbs & 0x1f;
3624}
3625
/**
 * il4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @il - driver private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write idxes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void
il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
{
	struct il_tfd *tfd_tmp = (struct il_tfd *)txq->tfds;
	struct il_tfd *tfd;
	struct pci_dev *dev = il->pci_dev;
	int idx = txq->q.read_ptr;
	int i;
	int num_tbs;

	tfd = &tfd_tmp[idx];

	/* Sanity check on number of chunks */
	num_tbs = il4965_tfd_get_num_tbs(tfd);

	if (num_tbs >= IL_NUM_OF_TBS) {
		IL_ERR("Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd (TB 0 is always the command; its mapping info is
	 * stored in txq->meta rather than recoverable from the TFD) */
	if (num_tbs)
		pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
				 dma_unmap_len(&txq->meta[idx], len),
				 PCI_DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		pci_unmap_single(dev, il4965_tfd_tb_get_addr(tfd, i),
				 il4965_tfd_tb_get_len(tfd, i),
				 PCI_DMA_TODEVICE);

	/* free SKB */
	if (txq->txb) {
		struct sk_buff *skb;

		skb = txq->txb[txq->q.read_ptr].skb;

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->txb[txq->q.read_ptr].skb = NULL;
		}
	}
}
3680
/*
 * il4965_hw_txq_attach_buf_to_tfd - append one DMA buffer to the
 * current (write_ptr) TFD of @txq
 *
 * @reset zeroes the TFD first (i.e. this is the first TB of a new
 * frame).  @pad is accepted but unused here.  Returns 0 or -EINVAL
 * when the TFD is already full.
 */
int
il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
				dma_addr_t addr, u16 len, u8 reset, u8 pad)
{
	struct il_queue *q;
	struct il_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = (struct il_tfd *)txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = il4965_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of IL_NUM_OF_TBS Tx buffers */
	if (num_tbs >= IL_NUM_OF_TBS) {
		IL_ERR("Error can not send more than %d chunks\n",
		       IL_NUM_OF_TBS);
		return -EINVAL;
	}

	/* Hardware addresses are 36-bit; 4965 also wants them aligned */
	BUG_ON(addr & ~DMA_BIT_MASK(36));
	if (unlikely(addr & ~IL_TX_DMA_MASK))
		IL_ERR("Unaligned address = %llx\n", (unsigned long long)addr);

	il4965_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}
3713
/*
 * Tell nic where to find circular buffer of Tx Frame Descriptors for
 * given Tx queue, and enable the DMA channel used for that queue.
 *
 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
 * channels supported in hardware.
 */
int
il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
{
	int txq_id = txq->q.id;

	/* Circular buffer (TFD queue in DRAM) physical base address;
	 * the register takes the address shifted right by 8 */
	il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), txq->q.dma_addr >> 8);

	return 0;
}
3731
3732/******************************************************************************
3733 *
3734 * Generic RX handler implementations
3735 *
3736 ******************************************************************************/
/*
 * il4965_hdl_alive - handle the uCode ALIVE notification
 *
 * Distinguishes the init-uCode ALIVE from the runtime one, caches the
 * response, and (if the status is valid) schedules the matching
 * *_alive_start work after a short delay.
 */
static void
il4965_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_alive_resp *palive;
	struct delayed_work *pwork;

	palive = &pkt->u.alive_frame;

	D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n",
	       palive->is_valid, palive->ver_type, palive->ver_subtype);

	if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
		D_INFO("Initialization Alive received.\n");
		memcpy(&il->card_alive_init, &pkt->u.alive_frame,
		       sizeof(struct il_init_alive_resp));
		pwork = &il->init_alive_start;
	} else {
		D_INFO("Runtime Alive received.\n");
		memcpy(&il->card_alive, &pkt->u.alive_frame,
		       sizeof(struct il_alive_resp));
		pwork = &il->alive_start;
	}

	/* We delay the ALIVE response by 5ms to
	 * give the HW RF Kill time to activate... */
	if (palive->is_valid == UCODE_VALID_OK)
		queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
	else
		IL_WARN("uCode did not respond OK.\n");
}
3768
/**
 * il4965_bg_stats_periodic - Timer callback to queue stats
 *
 * This callback is provided in order to send a stats request.
 *
 * This timer function is continually reset to execute within
 * REG_RECALIB_PERIOD seconds since the last N_STATS
 * was received. We need to ensure we receive the stats in order
 * to update the temperature used for calibrating the TXPOWER.
 */
static void
il4965_bg_stats_periodic(unsigned long data)
{
	struct il_priv *il = (struct il_priv *)data;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	/* dont send host command if rf-kill is on */
	if (!il_is_ready_rf(il))
		return;

	/* CMD_ASYNC: we are in timer (softirq) context */
	il_send_stats_request(il, CMD_ASYNC, false);
}
3793
/*
 * il4965_hdl_beacon - handle a beacon-transmit notification from uCode
 *
 * Records whether this device is currently the IBSS manager (beacon
 * sender); the rest of the payload is only used for debug logging.
 */
static void
il4965_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il4965_beacon_notif *beacon =
	    (struct il4965_beacon_notif *)pkt->u.raw;
#ifdef CONFIG_IWLEGACY_DEBUG
	u8 rate = il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	D_RX("beacon status %x retries %d iss %d " "tsf %d %d rate %d\n",
	     le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
	     beacon->beacon_notify_hdr.failure_frame,
	     le32_to_cpu(beacon->ibss_mgr_status),
	     le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
#endif

	il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
}
3812
/*
 * il4965_perform_ct_kill_task - react to a critical-temperature kill
 *
 * Stops all mac80211 queues, tells uCode the CT-kill exit GP bit, and
 * briefly grabs/releases NIC access — presumably to flush/settle the
 * host-device interface before the radio shuts down (TODO confirm).
 */
static void
il4965_perform_ct_kill_task(struct il_priv *il)
{
	unsigned long flags;

	D_POWER("Stop all queues\n");

	if (il->mac80211_registered)
		ieee80211_stop_queues(il->hw);

	_il_wr(il, CSR_UCODE_DRV_GP1_SET,
	       CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	/* Read back to flush the posted write */
	_il_rd(il, CSR_UCODE_DRV_GP1);

	spin_lock_irqsave(&il->reg_lock, flags);
	if (!_il_grab_nic_access(il))
		_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, flags);
}
3832
/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void
il4965_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	/* Snapshot of status bits, taken before we update them below, so
	 * we can detect an HW rfkill transition at the end. */
	unsigned long status = il->status;

	D_RF_KILL("Card state received: HW:%s SW:%s CT:%s\n",
		  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
		  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
		  (flags & CT_CARD_DISABLED) ? "Reached" : "Not reached");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | CT_CARD_DISABLED)) {

		/* Block host commands while the card is disabled... */
		_il_wr(il, CSR_UCODE_DRV_GP1_SET,
		       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		il_wr(il, HBUS_TARG_MBX_C, HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		/* ...but unblock again if RXON itself is not disabled. */
		if (!(flags & RXON_CARD_DISABLED)) {
			_il_wr(il, CSR_UCODE_DRV_GP1_CLR,
			       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			il_wr(il, HBUS_TARG_MBX_C,
			      HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
		}
	}

	if (flags & CT_CARD_DISABLED)
		il4965_perform_ct_kill_task(il);

	if (flags & HW_CARD_DISABLED)
		set_bit(S_RF_KILL_HW, &il->status);
	else
		clear_bit(S_RF_KILL_HW, &il->status);

	if (!(flags & RXON_CARD_DISABLED))
		il_scan_cancel(il);

	/* If the HW rfkill bit changed, tell the rfkill subsystem;
	 * otherwise wake anyone blocked waiting on a command, since the
	 * card state is unchanged and commands may proceed. */
	if ((test_bit(S_RF_KILL_HW, &status) !=
	     test_bit(S_RF_KILL_HW, &il->status)))
		wiphy_rfkill_set_hw_state(il->hw->wiphy,
					  test_bit(S_RF_KILL_HW, &il->status));
	else
		wake_up(&il->wait_command_queue);
}
3880
3881/**
3882 * il4965_setup_handlers - Initialize Rx handler callbacks
3883 *
3884 * Setup the RX handlers for each of the reply types sent from the uCode
3885 * to the host.
3886 *
3887 * This function chains into the hardware specific files for them to setup
3888 * any hardware specific handlers as well.
3889 */
3890static void
3891il4965_setup_handlers(struct il_priv *il)
3892{
3893 il->handlers[N_ALIVE] = il4965_hdl_alive;
3894 il->handlers[N_ERROR] = il_hdl_error;
3895 il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
3896 il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
3897 il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
3898 il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
3899 il->handlers[N_BEACON] = il4965_hdl_beacon;
3900
3901 /*
3902 * The same handler is used for both the REPLY to a discrete
3903 * stats request from the host as well as for the periodic
3904 * stats notifications (after received beacons) from the uCode.
3905 */
3906 il->handlers[C_STATS] = il4965_hdl_c_stats;
3907 il->handlers[N_STATS] = il4965_hdl_stats;
3908
3909 il_setup_rx_scan_handlers(il);
3910
3911 /* status change handler */
3912 il->handlers[N_CARD_STATE] = il4965_hdl_card_state;
3913
3914 il->handlers[N_MISSED_BEACONS] = il4965_hdl_missed_beacon;
3915 /* Rx handlers */
3916 il->handlers[N_RX_PHY] = il4965_hdl_rx_phy;
3917 il->handlers[N_RX_MPDU] = il4965_hdl_rx;
3918 /* block ack */
3919 il->handlers[N_COMPRESSED_BA] = il4965_hdl_compressed_ba;
3920 /* Set up hardware specific Rx handlers */
3921 il->cfg->ops->lib->handler_setup(il);
3922}
3923
3924/**
3925 * il4965_rx_handle - Main entry function for receiving responses from uCode
3926 *
3927 * Uses the il->handlers callback function array to invoke
3928 * the appropriate handlers, including command responses,
3929 * frame-received notifications, and other notifications.
3930 */
3931void
3932il4965_rx_handle(struct il_priv *il)
3933{
3934 struct il_rx_buf *rxb;
3935 struct il_rx_pkt *pkt;
3936 struct il_rx_queue *rxq = &il->rxq;
3937 u32 r, i;
3938 int reclaim;
3939 unsigned long flags;
3940 u8 fill_rx = 0;
3941 u32 count = 8;
3942 int total_empty;
3943
3944 /* uCode's read idx (stored in shared DRAM) indicates the last Rx
3945 * buffer that the driver may process (last buffer filled by ucode). */
3946 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
3947 i = rxq->read;
3948
3949 /* Rx interrupt, but nothing sent from uCode */
3950 if (i == r)
3951 D_RX("r = %d, i = %d\n", r, i);
3952
3953 /* calculate total frames need to be restock after handling RX */
3954 total_empty = r - rxq->write_actual;
3955 if (total_empty < 0)
3956 total_empty += RX_QUEUE_SIZE;
3957
3958 if (total_empty > (RX_QUEUE_SIZE / 2))
3959 fill_rx = 1;
3960
3961 while (i != r) {
3962 int len;
3963
3964 rxb = rxq->queue[i];
3965
3966 /* If an RXB doesn't have a Rx queue slot associated with it,
3967 * then a bug has been introduced in the queue refilling
3968 * routines -- catch it here */
3969 BUG_ON(rxb == NULL);
3970
3971 rxq->queue[i] = NULL;
3972
3973 pci_unmap_page(il->pci_dev, rxb->page_dma,
3974 PAGE_SIZE << il->hw_params.rx_page_order,
3975 PCI_DMA_FROMDEVICE);
3976 pkt = rxb_addr(rxb);
3977
3978 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
3979 len += sizeof(u32); /* account for status word */
3980
3981 /* Reclaim a command buffer only if this packet is a response
3982 * to a (driver-originated) command.
3983 * If the packet (e.g. Rx frame) originated from uCode,
3984 * there is no command buffer to reclaim.
3985 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
3986 * but apparently a few don't get set; catch them here. */
3987 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
3988 (pkt->hdr.cmd != N_RX_PHY) && (pkt->hdr.cmd != N_RX) &&
3989 (pkt->hdr.cmd != N_RX_MPDU) &&
3990 (pkt->hdr.cmd != N_COMPRESSED_BA) &&
3991 (pkt->hdr.cmd != N_STATS) && (pkt->hdr.cmd != C_TX);
3992
3993 /* Based on type of command response or notification,
3994 * handle those that need handling via function in
3995 * handlers table. See il4965_setup_handlers() */
3996 if (il->handlers[pkt->hdr.cmd]) {
3997 D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
3998 il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
3999 il->isr_stats.handlers[pkt->hdr.cmd]++;
4000 il->handlers[pkt->hdr.cmd] (il, rxb);
4001 } else {
4002 /* No handling needed */
4003 D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
4004 i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
4005 }
4006
4007 /*
4008 * XXX: After here, we should always check rxb->page
4009 * against NULL before touching it or its virtual
4010 * memory (pkt). Because some handler might have
4011 * already taken or freed the pages.
4012 */
4013
4014 if (reclaim) {
4015 /* Invoke any callbacks, transfer the buffer to caller,
4016 * and fire off the (possibly) blocking il_send_cmd()
4017 * as we reclaim the driver command queue */
4018 if (rxb->page)
4019 il_tx_cmd_complete(il, rxb);
4020 else
4021 IL_WARN("Claim null rxb?\n");
4022 }
4023
4024 /* Reuse the page if possible. For notification packets and
4025 * SKBs that fail to Rx correctly, add them back into the
4026 * rx_free list for reuse later. */
4027 spin_lock_irqsave(&rxq->lock, flags);
4028 if (rxb->page != NULL) {
4029 rxb->page_dma =
4030 pci_map_page(il->pci_dev, rxb->page, 0,
4031 PAGE_SIZE << il->hw_params.
4032 rx_page_order, PCI_DMA_FROMDEVICE);
4033 list_add_tail(&rxb->list, &rxq->rx_free);
4034 rxq->free_count++;
4035 } else
4036 list_add_tail(&rxb->list, &rxq->rx_used);
4037
4038 spin_unlock_irqrestore(&rxq->lock, flags);
4039
4040 i = (i + 1) & RX_QUEUE_MASK;
4041 /* If there are a lot of unused frames,
4042 * restock the Rx queue so ucode wont assert. */
4043 if (fill_rx) {
4044 count++;
4045 if (count >= 8) {
4046 rxq->read = i;
4047 il4965_rx_replenish_now(il);
4048 count = 0;
4049 }
4050 }
4051 }
4052
4053 /* Backtrack one entry */
4054 rxq->read = i;
4055 if (fill_rx)
4056 il4965_rx_replenish_now(il);
4057 else
4058 il4965_rx_queue_restock(il);
4059}
4060
/* call this function to flush any scheduled tasklet */
static inline void
il4965_synchronize_irq(struct il_priv *il)
{
	/* wait to make sure we flush pending tasklet */
	synchronize_irq(il->pci_dev->irq);
	/* the ISR may have scheduled the tasklet just before
	 * synchronize_irq() returned, so kill it afterwards */
	tasklet_kill(&il->irq_tasklet);
}
4069
/* Bottom half of the 4965 interrupt path: acknowledges all pending
 * interrupt causes and services each one in turn.  Runs as a tasklet
 * scheduled by the hard IRQ handler. */
static void
il4965_irq_tasklet(struct il_priv *il)
{
	u32 inta, handled = 0;
	u32 inta_fh;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLEGACY_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&il->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
	inta = _il_rd(il, CSR_INT);
	_il_wr(il, CSR_INT, inta);

	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
	 * Any new interrupts that happen after this, either while we're
	 * in this tasklet, or later, will show up in next ISR/tasklet. */
	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
	_il_wr(il, CSR_FH_INT_STATUS, inta_fh);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & IL_DL_ISR) {
		/* just for debug */
		inta_mask = _il_rd(il, CSR_INT_MASK);
		D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
		      inta_mask, inta_fh);
	}
#endif

	spin_unlock_irqrestore(&il->lock, flags);

	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
	 * atomic, make sure that inta covers all the interrupts that
	 * we've discovered, even if FH interrupt came in just after
	 * reading CSR_INT. */
	if (inta_fh & CSR49_FH_INT_RX_MASK)
		inta |= CSR_INT_BIT_FH_RX;
	if (inta_fh & CSR49_FH_INT_TX_MASK)
		inta |= CSR_INT_BIT_FH_TX;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IL_ERR("Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		il_disable_interrupts(il);

		il->isr_stats.hw++;
		il_irq_handle_error(il);

		handled |= CSR_INT_BIT_HW_ERR;

		/* NOTE(review): returns here with interrupts left disabled;
		 * recovery presumably happens via the il_irq_handle_error()
		 * restart path -- confirm against common.c. */
		return;
	}
#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			D_ISR("Scheduler finished to transmit "
			      "the frame/frames.\n");
			il->isr_stats.sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			D_ISR("Alive interrupt\n");
			il->isr_stats.alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		int hw_rf_kill = 0;
		/* GP_CNTRL bit clear means the HW switch is in "kill". */
		if (!
		    (_il_rd(il, CSR_GP_CNTRL) &
		     CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
			hw_rf_kill = 1;

		IL_WARN("RF_KILL bit toggled to %s.\n",
			hw_rf_kill ? "disable radio" : "enable radio");

		il->isr_stats.rfkill++;

		/* driver only loads ucode once setting the interface up.
		 * the driver allows loading the ucode even if the radio
		 * is killed. Hence update the killswitch state here. The
		 * rfkill handler will care about restarting if needed.
		 */
		if (!test_bit(S_ALIVE, &il->status)) {
			if (hw_rf_kill)
				set_bit(S_RF_KILL_HW, &il->status);
			else
				clear_bit(S_RF_KILL_HW, &il->status);
			wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IL_ERR("Microcode CT kill error detected.\n");
		il->isr_stats.ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IL_ERR("Microcode SW error detected. " " Restarting 0x%X.\n",
		       inta);
		il->isr_stats.sw++;
		il_irq_handle_error(il);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/*
	 * uCode wakes up after power-down sleep.
	 * Tell device about any new tx or host commands enqueued,
	 * and about any Rx buffers made available while asleep.
	 */
	if (inta & CSR_INT_BIT_WAKEUP) {
		D_ISR("Wakeup interrupt\n");
		il_rx_queue_update_write_ptr(il, &il->rxq);
		for (i = 0; i < il->hw_params.max_txq_num; i++)
			il_txq_update_write_ptr(il, &il->txq[i]);
		il->isr_stats.wakeup++;
		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
		il4965_rx_handle(il);
		il->isr_stats.rx++;
		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		D_ISR("uCode load interrupt\n");
		il->isr_stats.tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		il->ucode_write_complete = 1;
		wake_up(&il->wait_command_queue);
	}

	if (inta & ~handled) {
		IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
		il->isr_stats.unhandled++;
	}

	if (inta & ~(il->inta_mask)) {
		IL_WARN("Disabled INTA bits 0x%08x were pending\n",
			inta & ~il->inta_mask);
		IL_WARN("   with FH49_INT = 0x%08x\n", inta_fh);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(S_INT_ENABLED, &il->status))
		il_enable_interrupts(il);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		il_enable_rfkill_int(il);

#ifdef CONFIG_IWLEGACY_DEBUG
	if (il_get_debug_level(il) & (IL_DL_ISR)) {
		inta = _il_rd(il, CSR_INT);
		inta_mask = _il_rd(il, CSR_INT_MASK);
		inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
		D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
		      "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
	}
#endif
}
4255
4256/*****************************************************************************
4257 *
4258 * sysfs attributes
4259 *
4260 *****************************************************************************/
4261
4262#ifdef CONFIG_IWLEGACY_DEBUG
4263
4264/*
4265 * The following adds a new attribute to the sysfs representation
4266 * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
4267 * used for controlling the debug level.
4268 *
4269 * See the level definitions in iwl for details.
4270 *
4271 * The debug_level being managed using sysfs below is a per device debug
4272 * level that is used instead of the global debug level if it (the per
4273 * device debug level) is set.
4274 */
4275static ssize_t
4276il4965_show_debug_level(struct device *d, struct device_attribute *attr,
4277 char *buf)
4278{
4279 struct il_priv *il = dev_get_drvdata(d);
4280 return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
4281}
4282
4283static ssize_t
4284il4965_store_debug_level(struct device *d, struct device_attribute *attr,
4285 const char *buf, size_t count)
4286{
4287 struct il_priv *il = dev_get_drvdata(d);
4288 unsigned long val;
4289 int ret;
4290
4291 ret = strict_strtoul(buf, 0, &val);
4292 if (ret)
4293 IL_ERR("%s is not in hex or decimal form.\n", buf);
4294 else {
4295 il->debug_level = val;
4296 if (il_alloc_traffic_mem(il))
4297 IL_ERR("Not enough memory to generate traffic log\n");
4298 }
4299 return strnlen(buf, count);
4300}
4301
4302static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il4965_show_debug_level,
4303 il4965_store_debug_level);
4304
4305#endif /* CONFIG_IWLEGACY_DEBUG */
4306
4307static ssize_t
4308il4965_show_temperature(struct device *d, struct device_attribute *attr,
4309 char *buf)
4310{
4311 struct il_priv *il = dev_get_drvdata(d);
4312
4313 if (!il_is_alive(il))
4314 return -EAGAIN;
4315
4316 return sprintf(buf, "%d\n", il->temperature);
4317}
4318
4319static DEVICE_ATTR(temperature, S_IRUGO, il4965_show_temperature, NULL);
4320
4321static ssize_t
4322il4965_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
4323{
4324 struct il_priv *il = dev_get_drvdata(d);
4325
4326 if (!il_is_ready_rf(il))
4327 return sprintf(buf, "off\n");
4328 else
4329 return sprintf(buf, "%d\n", il->tx_power_user_lmt);
4330}
4331
4332static ssize_t
4333il4965_store_tx_power(struct device *d, struct device_attribute *attr,
4334 const char *buf, size_t count)
4335{
4336 struct il_priv *il = dev_get_drvdata(d);
4337 unsigned long val;
4338 int ret;
4339
4340 ret = strict_strtoul(buf, 10, &val);
4341 if (ret)
4342 IL_INFO("%s is not in decimal form.\n", buf);
4343 else {
4344 ret = il_set_tx_power(il, val, false);
4345 if (ret)
4346 IL_ERR("failed setting tx power (0x%d).\n", ret);
4347 else
4348 ret = count;
4349 }
4350 return ret;
4351}
4352
4353static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il4965_show_tx_power,
4354 il4965_store_tx_power);
4355
/* Attribute files exposed under the PCI device's sysfs directory;
 * debug_level appears only on debug builds. */
static struct attribute *il_sysfs_entries[] = {
	&dev_attr_temperature.attr,
	&dev_attr_tx_power.attr,
#ifdef CONFIG_IWLEGACY_DEBUG
	&dev_attr_debug_level.attr,
#endif
	NULL			/* sentinel */
};
4364
/* Registered via sysfs_create_group() in il4965_ucode_callback(). */
static struct attribute_group il_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = il_sysfs_entries,
};
4369
4370/******************************************************************************
4371 *
4372 * uCode download functions
4373 *
4374 ******************************************************************************/
4375
/* Free every DMA-coherent uCode image buffer.  Note the err_pci_alloc
 * path in il4965_ucode_callback() calls this after a *partial*
 * allocation, so il_free_fw_desc() must tolerate never-allocated
 * descriptors. */
static void
il4965_dealloc_ucode_pci(struct il_priv *il)
{
	il_free_fw_desc(il->pci_dev, &il->ucode_code);
	il_free_fw_desc(il->pci_dev, &il->ucode_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
	il_free_fw_desc(il->pci_dev, &il->ucode_init);
	il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
	il_free_fw_desc(il->pci_dev, &il->ucode_boot);
}
4386
/* Release the device from reset so the NIC starts running;
 * writing 0 to CSR_RESET clears all reset request bits. */
static void
il4965_nic_start(struct il_priv *il)
{
	/* Remove all resets to allow NIC to operate */
	_il_wr(il, CSR_RESET, 0);
}
4393
4394static void il4965_ucode_callback(const struct firmware *ucode_raw,
4395 void *context);
4396static int il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length);
4397
4398static int __must_check
4399il4965_request_firmware(struct il_priv *il, bool first)
4400{
4401 const char *name_pre = il->cfg->fw_name_pre;
4402 char tag[8];
4403
4404 if (first) {
4405 il->fw_idx = il->cfg->ucode_api_max;
4406 sprintf(tag, "%d", il->fw_idx);
4407 } else {
4408 il->fw_idx--;
4409 sprintf(tag, "%d", il->fw_idx);
4410 }
4411
4412 if (il->fw_idx < il->cfg->ucode_api_min) {
4413 IL_ERR("no suitable firmware found!\n");
4414 return -ENOENT;
4415 }
4416
4417 sprintf(il->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
4418
4419 D_INFO("attempting to load firmware '%s'\n", il->firmware_name);
4420
4421 return request_firmware_nowait(THIS_MODULE, 1, il->firmware_name,
4422 &il->pci_dev->dev, GFP_KERNEL, il,
4423 il4965_ucode_callback);
4424}
4425
/* Parsed view of a uCode image: pointers into the raw firmware blob
 * (not copies -- valid only while ucode_raw is held) plus the byte
 * length of each section. */
struct il4965_firmware_pieces {
	const void *inst, *data, *init, *init_data, *boot;
	size_t inst_size, data_size, init_size, init_data_size, boot_size;
};
4430
/* Parse the v1/v2 uCode header, validate the total file size against
 * the per-section sizes it declares, and slice the blob into `pieces`.
 * Returns 0 on success or -EINVAL on a malformed file. */
static int
il4965_load_firmware(struct il_priv *il, const struct firmware *ucode_raw,
		     struct il4965_firmware_pieces *pieces)
{
	struct il_ucode_header *ucode = (void *)ucode_raw->data;
	u32 api_ver, hdr_size;
	const u8 *src;

	il->ucode_ver = le32_to_cpu(ucode->ver);
	api_ver = IL_UCODE_API(il->ucode_ver);

	switch (api_ver) {
	default:
	case 0:
	case 1:
	case 2:
		/* All supported API versions share the 24-byte v1 header. */
		hdr_size = 24;
		if (ucode_raw->size < hdr_size) {
			IL_ERR("File size too small!\n");
			return -EINVAL;
		}
		pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
		pieces->data_size = le32_to_cpu(ucode->v1.data_size);
		pieces->init_size = le32_to_cpu(ucode->v1.init_size);
		pieces->init_data_size = le32_to_cpu(ucode->v1.init_data_size);
		pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
		src = ucode->v1.data;
		break;
	}

	/* Verify size of file vs. image size info in file's header.
	 * NOTE(review): the summed sizes come from the file itself; a
	 * crafted header could overflow size_t on 32-bit -- presumably
	 * acceptable since firmware files come from a trusted path. */
	if (ucode_raw->size !=
	    hdr_size + pieces->inst_size + pieces->data_size +
	    pieces->init_size + pieces->init_data_size + pieces->boot_size) {

		IL_ERR("uCode file size %d does not match expected size\n",
		       (int)ucode_raw->size);
		return -EINVAL;
	}

	/* Sections are laid out back-to-back after the header, in this
	 * fixed order: inst, data, init, init_data, boot. */
	pieces->inst = src;
	src += pieces->inst_size;
	pieces->data = src;
	src += pieces->data_size;
	pieces->init = src;
	src += pieces->init_size;
	pieces->init_data = src;
	src += pieces->init_data_size;
	pieces->boot = src;
	src += pieces->boot_size;

	return 0;
}
4484
4485/**
4486 * il4965_ucode_callback - callback when firmware was loaded
4487 *
4488 * If loaded successfully, copies the firmware into buffers
4489 * for the card to fetch (via DMA).
4490 */
4491static void
4492il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
4493{
4494 struct il_priv *il = context;
4495 struct il_ucode_header *ucode;
4496 int err;
4497 struct il4965_firmware_pieces pieces;
4498 const unsigned int api_max = il->cfg->ucode_api_max;
4499 const unsigned int api_min = il->cfg->ucode_api_min;
4500 u32 api_ver;
4501
4502 u32 max_probe_length = 200;
4503 u32 standard_phy_calibration_size =
4504 IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
4505
4506 memset(&pieces, 0, sizeof(pieces));
4507
4508 if (!ucode_raw) {
4509 if (il->fw_idx <= il->cfg->ucode_api_max)
4510 IL_ERR("request for firmware file '%s' failed.\n",
4511 il->firmware_name);
4512 goto try_again;
4513 }
4514
4515 D_INFO("Loaded firmware file '%s' (%zd bytes).\n", il->firmware_name,
4516 ucode_raw->size);
4517
4518 /* Make sure that we got at least the API version number */
4519 if (ucode_raw->size < 4) {
4520 IL_ERR("File size way too small!\n");
4521 goto try_again;
4522 }
4523
4524 /* Data from ucode file: header followed by uCode images */
4525 ucode = (struct il_ucode_header *)ucode_raw->data;
4526
4527 err = il4965_load_firmware(il, ucode_raw, &pieces);
4528
4529 if (err)
4530 goto try_again;
4531
4532 api_ver = IL_UCODE_API(il->ucode_ver);
4533
4534 /*
4535 * api_ver should match the api version forming part of the
4536 * firmware filename ... but we don't check for that and only rely
4537 * on the API version read from firmware header from here on forward
4538 */
4539 if (api_ver < api_min || api_ver > api_max) {
4540 IL_ERR("Driver unable to support your firmware API. "
4541 "Driver supports v%u, firmware is v%u.\n", api_max,
4542 api_ver);
4543 goto try_again;
4544 }
4545
4546 if (api_ver != api_max)
4547 IL_ERR("Firmware has old API version. Expected v%u, "
4548 "got v%u. New firmware can be obtained "
4549 "from http://www.intellinuxwireless.org.\n", api_max,
4550 api_ver);
4551
4552 IL_INFO("loaded firmware version %u.%u.%u.%u\n",
4553 IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
4554 IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));
4555
4556 snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
4557 "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
4558 IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
4559 IL_UCODE_SERIAL(il->ucode_ver));
4560
4561 /*
4562 * For any of the failures below (before allocating pci memory)
4563 * we will try to load a version with a smaller API -- maybe the
4564 * user just got a corrupted version of the latest API.
4565 */
4566
4567 D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
4568 D_INFO("f/w package hdr runtime inst size = %Zd\n", pieces.inst_size);
4569 D_INFO("f/w package hdr runtime data size = %Zd\n", pieces.data_size);
4570 D_INFO("f/w package hdr init inst size = %Zd\n", pieces.init_size);
4571 D_INFO("f/w package hdr init data size = %Zd\n", pieces.init_data_size);
4572 D_INFO("f/w package hdr boot inst size = %Zd\n", pieces.boot_size);
4573
4574 /* Verify that uCode images will fit in card's SRAM */
4575 if (pieces.inst_size > il->hw_params.max_inst_size) {
4576 IL_ERR("uCode instr len %Zd too large to fit in\n",
4577 pieces.inst_size);
4578 goto try_again;
4579 }
4580
4581 if (pieces.data_size > il->hw_params.max_data_size) {
4582 IL_ERR("uCode data len %Zd too large to fit in\n",
4583 pieces.data_size);
4584 goto try_again;
4585 }
4586
4587 if (pieces.init_size > il->hw_params.max_inst_size) {
4588 IL_ERR("uCode init instr len %Zd too large to fit in\n",
4589 pieces.init_size);
4590 goto try_again;
4591 }
4592
4593 if (pieces.init_data_size > il->hw_params.max_data_size) {
4594 IL_ERR("uCode init data len %Zd too large to fit in\n",
4595 pieces.init_data_size);
4596 goto try_again;
4597 }
4598
4599 if (pieces.boot_size > il->hw_params.max_bsm_size) {
4600 IL_ERR("uCode boot instr len %Zd too large to fit in\n",
4601 pieces.boot_size);
4602 goto try_again;
4603 }
4604
4605 /* Allocate ucode buffers for card's bus-master loading ... */
4606
4607 /* Runtime instructions and 2 copies of data:
4608 * 1) unmodified from disk
4609 * 2) backup cache for save/restore during power-downs */
4610 il->ucode_code.len = pieces.inst_size;
4611 il_alloc_fw_desc(il->pci_dev, &il->ucode_code);
4612
4613 il->ucode_data.len = pieces.data_size;
4614 il_alloc_fw_desc(il->pci_dev, &il->ucode_data);
4615
4616 il->ucode_data_backup.len = pieces.data_size;
4617 il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);
4618
4619 if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
4620 !il->ucode_data_backup.v_addr)
4621 goto err_pci_alloc;
4622
4623 /* Initialization instructions and data */
4624 if (pieces.init_size && pieces.init_data_size) {
4625 il->ucode_init.len = pieces.init_size;
4626 il_alloc_fw_desc(il->pci_dev, &il->ucode_init);
4627
4628 il->ucode_init_data.len = pieces.init_data_size;
4629 il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);
4630
4631 if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
4632 goto err_pci_alloc;
4633 }
4634
4635 /* Bootstrap (instructions only, no data) */
4636 if (pieces.boot_size) {
4637 il->ucode_boot.len = pieces.boot_size;
4638 il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);
4639
4640 if (!il->ucode_boot.v_addr)
4641 goto err_pci_alloc;
4642 }
4643
4644 /* Now that we can no longer fail, copy information */
4645
4646 il->sta_key_max_num = STA_KEY_MAX_NUM;
4647
4648 /* Copy images into buffers for card's bus-master reads ... */
4649
4650 /* Runtime instructions (first block of data in file) */
4651 D_INFO("Copying (but not loading) uCode instr len %Zd\n",
4652 pieces.inst_size);
4653 memcpy(il->ucode_code.v_addr, pieces.inst, pieces.inst_size);
4654
4655 D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
4656 il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);
4657
4658 /*
4659 * Runtime data
4660 * NOTE: Copy into backup buffer will be done in il_up()
4661 */
4662 D_INFO("Copying (but not loading) uCode data len %Zd\n",
4663 pieces.data_size);
4664 memcpy(il->ucode_data.v_addr, pieces.data, pieces.data_size);
4665 memcpy(il->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
4666
4667 /* Initialization instructions */
4668 if (pieces.init_size) {
4669 D_INFO("Copying (but not loading) init instr len %Zd\n",
4670 pieces.init_size);
4671 memcpy(il->ucode_init.v_addr, pieces.init, pieces.init_size);
4672 }
4673
4674 /* Initialization data */
4675 if (pieces.init_data_size) {
4676 D_INFO("Copying (but not loading) init data len %Zd\n",
4677 pieces.init_data_size);
4678 memcpy(il->ucode_init_data.v_addr, pieces.init_data,
4679 pieces.init_data_size);
4680 }
4681
4682 /* Bootstrap instructions */
4683 D_INFO("Copying (but not loading) boot instr len %Zd\n",
4684 pieces.boot_size);
4685 memcpy(il->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
4686
4687 /*
4688 * figure out the offset of chain noise reset and gain commands
4689 * base on the size of standard phy calibration commands table size
4690 */
4691 il->_4965.phy_calib_chain_noise_reset_cmd =
4692 standard_phy_calibration_size;
4693 il->_4965.phy_calib_chain_noise_gain_cmd =
4694 standard_phy_calibration_size + 1;
4695
4696 /**************************************************
4697 * This is still part of probe() in a sense...
4698 *
4699 * 9. Setup and register with mac80211 and debugfs
4700 **************************************************/
4701 err = il4965_mac_setup_register(il, max_probe_length);
4702 if (err)
4703 goto out_unbind;
4704
4705 err = il_dbgfs_register(il, DRV_NAME);
4706 if (err)
4707 IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
4708 err);
4709
4710 err = sysfs_create_group(&il->pci_dev->dev.kobj, &il_attribute_group);
4711 if (err) {
4712 IL_ERR("failed to create sysfs device attributes\n");
4713 goto out_unbind;
4714 }
4715
4716 /* We have our copies now, allow OS release its copies */
4717 release_firmware(ucode_raw);
4718 complete(&il->_4965.firmware_loading_complete);
4719 return;
4720
4721try_again:
4722 /* try next, if any */
4723 if (il4965_request_firmware(il, false))
4724 goto out_unbind;
4725 release_firmware(ucode_raw);
4726 return;
4727
4728err_pci_alloc:
4729 IL_ERR("failed to allocate pci memory\n");
4730 il4965_dealloc_ucode_pci(il);
4731out_unbind:
4732 complete(&il->_4965.firmware_loading_complete);
4733 device_release_driver(&il->pci_dev->dev);
4734 release_firmware(ucode_raw);
4735}
4736
/* Error descriptions indexed directly by the uCode error code read from
 * the device's error log (see il4965_desc_lookup()); codes beyond this
 * table fall back to advanced_lookup[]. */
static const char *const desc_lookup_text[] = {
	"OK",
	"FAIL",
	"BAD_PARAM",
	"BAD_CHECKSUM",
	"NMI_INTERRUPT_WDG",
	"SYSASSERT",
	"FATAL_ERROR",
	"BAD_COMMAND",
	"HW_ERROR_TUNE_LOCK",
	"HW_ERROR_TEMPERATURE",
	"ILLEGAL_CHAN_FREQ",
	"VCC_NOT_STBL",
	"FH49_ERROR",
	"NMI_INTERRUPT_HOST",
	"NMI_INTERRUPT_ACTION_PT",
	"NMI_INTERRUPT_UNKNOWN",
	"UCODE_VERSION_MISMATCH",
	"HW_ERROR_ABS_LOCK",
	"HW_ERROR_CAL_LOCK_FAIL",
	"NMI_INTERRUPT_INST_ACTION_PT",
	"NMI_INTERRUPT_DATA_ACTION_PT",
	"NMI_TRM_HW_ER",
	"NMI_INTERRUPT_TRM",
	"NMI_INTERRUPT_BREAK_POINT",
	"DEBUG_0",
	"DEBUG_1",
	"DEBUG_2",
	"DEBUG_3",
};
4767
4768static struct {
4769 char *name;
4770 u8 num;
4771} advanced_lookup[] = {
4772 {
4773 "NMI_INTERRUPT_WDG", 0x34}, {
4774 "SYSASSERT", 0x35}, {
4775 "UCODE_VERSION_MISMATCH", 0x37}, {
4776 "BAD_COMMAND", 0x38}, {
4777 "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C}, {
4778 "FATAL_ERROR", 0x3D}, {
4779 "NMI_TRM_HW_ERR", 0x46}, {
4780 "NMI_INTERRUPT_TRM", 0x4C}, {
4781 "NMI_INTERRUPT_BREAK_POINT", 0x54}, {
4782 "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C}, {
4783 "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64}, {
4784 "NMI_INTERRUPT_HOST", 0x66}, {
4785 "NMI_INTERRUPT_ACTION_PT", 0x7C}, {
4786 "NMI_INTERRUPT_UNKNOWN", 0x84}, {
4787 "NMI_INTERRUPT_INST_ACTION_PT", 0x86}, {
4788"ADVANCED_SYSASSERT", 0},};
4789
4790static const char *
4791il4965_desc_lookup(u32 num)
4792{
4793 int i;
4794 int max = ARRAY_SIZE(desc_lookup_text);
4795
4796 if (num < max)
4797 return desc_lookup_text[num];
4798
4799 max = ARRAY_SIZE(advanced_lookup) - 1;
4800 for (i = 0; i < max; i++) {
4801 if (advanced_lookup[i].num == num)
4802 break;
4803 }
4804 return advanced_lookup[i].name;
4805}
4806
/* On-device error log layout: one leading count word, then fixed-size
 * seven-word records. */
#define ERROR_START_OFFSET  (1 * sizeof(u32))
#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
4809
/* Read the uCode's error-event table out of device memory and print a
 * formatted dump.  The table pointer comes from the ALIVE response of
 * whichever uCode image (init or runtime) is currently loaded. */
void
il4965_dump_nic_error_log(struct il_priv *il)
{
	u32 data2, line;
	u32 desc, time, count, base, data1;
	u32 blink1, blink2, ilink1, ilink2;
	u32 pc, hcmd;

	if (il->ucode_type == UCODE_INIT)
		base = le32_to_cpu(il->card_alive_init.error_event_table_ptr);
	else
		base = le32_to_cpu(il->card_alive.error_event_table_ptr);

	if (!il->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
		IL_ERR("Not valid error log pointer 0x%08X for %s uCode\n",
		       base, (il->ucode_type == UCODE_INIT) ? "Init" : "RT");
		return;
	}

	/* First word of the table is the number of logged errors. */
	count = il_read_targ_mem(il, base);

	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
		IL_ERR("Start IWL Error Log Dump:\n");
		IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
	}

	/* Fixed word offsets into the uCode error-event table; offsets
	 * 10 and 12..21 are skipped deliberately (fields this dump does
	 * not report -- layout defined by the uCode, not visible here). */
	desc = il_read_targ_mem(il, base + 1 * sizeof(u32));
	il->isr_stats.err_code = desc;
	pc = il_read_targ_mem(il, base + 2 * sizeof(u32));
	blink1 = il_read_targ_mem(il, base + 3 * sizeof(u32));
	blink2 = il_read_targ_mem(il, base + 4 * sizeof(u32));
	ilink1 = il_read_targ_mem(il, base + 5 * sizeof(u32));
	ilink2 = il_read_targ_mem(il, base + 6 * sizeof(u32));
	data1 = il_read_targ_mem(il, base + 7 * sizeof(u32));
	data2 = il_read_targ_mem(il, base + 8 * sizeof(u32));
	line = il_read_targ_mem(il, base + 9 * sizeof(u32));
	time = il_read_targ_mem(il, base + 11 * sizeof(u32));
	hcmd = il_read_targ_mem(il, base + 22 * sizeof(u32));

	IL_ERR("Desc                                  Time       "
	       "data1      data2      line\n");
	IL_ERR("%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
	       il4965_desc_lookup(desc), desc, time, data1, data2, line);
	IL_ERR("pc      blink1  blink2  ilink1  ilink2  hcmd\n");
	IL_ERR("0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", pc, blink1,
	       blink2, ilink1, ilink2, hcmd);
}
4857
4858static void
4859il4965_rf_kill_ct_config(struct il_priv *il)
4860{
4861 struct il_ct_kill_config cmd;
4862 unsigned long flags;
4863 int ret = 0;
4864
4865 spin_lock_irqsave(&il->lock, flags);
4866 _il_wr(il, CSR_UCODE_DRV_GP1_CLR,
4867 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
4868 spin_unlock_irqrestore(&il->lock, flags);
4869
4870 cmd.critical_temperature_R =
4871 cpu_to_le32(il->hw_params.ct_kill_threshold);
4872
4873 ret = il_send_cmd_pdu(il, C_CT_KILL_CONFIG, sizeof(cmd), &cmd);
4874 if (ret)
4875 IL_ERR("C_CT_KILL_CONFIG failed\n");
4876 else
4877 D_INFO("C_CT_KILL_CONFIG " "succeeded, "
4878 "critical temperature is %d\n",
4879 il->hw_params.ct_kill_threshold);
4880}
4881
/* Default Tx-queue -> Tx-FIFO mapping: the four EDCA queues first, then
 * the command queue FIFO; the last two queues are unused by default.
 * il4965_alive_notify() asserts this table has exactly 7 entries. */
static const s8 default_queue_to_tx_fifo[] = {
	IL_TX_FIFO_VO,
	IL_TX_FIFO_VI,
	IL_TX_FIFO_BE,
	IL_TX_FIFO_BK,
	IL49_CMD_FIFO_NUM,
	IL_TX_FIFO_UNUSED,
	IL_TX_FIFO_UNUSED,
};
4891
/* Bitmask with bits [lo, hi] set (inclusive range). */
#define IL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))

/*
 * il4965_alive_notify - finish bringing up the device after ALIVE
 *
 * Initializes the 4965's internal Tx scheduler: clears its SRAM context
 * area, points it at the byte-count tables, enables the Tx DMA channels,
 * configures every Tx queue (window size, frame limit) and maps each
 * queue to its FIFO per default_queue_to_tx_fifo[].
 *
 * Returns 0 (cannot currently fail); runs entirely under il->lock.
 */
static int
il4965_alive_notify(struct il_priv *il)
{
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&il->lock, flags);

	/* Clear 4965's internal Tx Scheduler data base */
	il->scd_base_addr = il_rd_prph(il, IL49_SCD_SRAM_BASE_ADDR);
	a = il->scd_base_addr + IL49_SCD_CONTEXT_DATA_OFFSET;
	/* Zero three consecutive SRAM regions: context data, Tx status
	 * bitmap, and the translate table for all queues. */
	for (; a < il->scd_base_addr + IL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
		il_write_targ_mem(il, a, 0);
	for (; a < il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
		il_write_targ_mem(il, a, 0);
	for (;
	     a <
	     il->scd_base_addr +
	     IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(il->hw_params.max_txq_num);
	     a += 4)
		il_write_targ_mem(il, a, 0);

	/* Tel 4965 where to find Tx byte count tables */
	il_wr_prph(il, IL49_SCD_DRAM_BASE_ADDR, il->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH49_TCSR_CHNL_NUM; chan++)
		il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chan),
		      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = il_rd(il, FH49_TX_CHICKEN_BITS_REG);
	il_wr(il, FH49_TX_CHICKEN_BITS_REG,
	      reg_val | FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Disable chain mode for all queues */
	il_wr_prph(il, IL49_SCD_QUEUECHAIN_SEL, 0);

	/* Initialize each Tx queue (including the command queue) */
	for (i = 0; i < il->hw_params.max_txq_num; i++) {

		/* TFD circular buffer read/write idxes */
		il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(i), 0);
		il_wr(il, HBUS_TARG_WRPTR, 0 | (i << 8));

		/* Max Tx Window size for Scheduler-ACK mode */
		il_write_targ_mem(il,
				  il->scd_base_addr +
				  IL49_SCD_CONTEXT_QUEUE_OFFSET(i),
				  (SCD_WIN_SIZE <<
				   IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
				  IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

		/* Frame limit */
		il_write_targ_mem(il,
				  il->scd_base_addr +
				  IL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
				  sizeof(u32),
				  (SCD_FRAME_LIMIT <<
				   IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				  IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	}
	/* Enable scheduler interrupts for all configured queues */
	il_wr_prph(il, IL49_SCD_INTERRUPT_MASK,
		   (1 << il->hw_params.max_txq_num) - 1);

	/* Activate all Tx DMA/FIFO channels */
	il4965_txq_set_sched(il, IL_MASK(0, 6));

	il4965_set_wr_ptrs(il, IL_DEFAULT_CMD_QUEUE_NUM, 0);

	/* make sure all queue are not stopped */
	memset(&il->queue_stopped[0], 0, sizeof(il->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&il->queue_stop_count[i], 0);

	/* reset to 0 to enable all the queue first */
	il->txq_ctx_active_msk = 0;
	/* Map each Tx/cmd queue to its corresponding fifo */
	BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);

	for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
		int ac = default_queue_to_tx_fifo[i];

		il_txq_ctx_activate(il, i);

		if (ac == IL_TX_FIFO_UNUSED)
			continue;

		il4965_tx_queue_set_status(il, &il->txq[i], ac, 0);
	}

	spin_unlock_irqrestore(&il->lock, flags);

	return 0;
}
4993
4994/**
4995 * il4965_alive_start - called after N_ALIVE notification received
4996 * from protocol/runtime uCode (initialization uCode's
4997 * Alive gets handled by il_init_alive_start()).
4998 */
4999static void
5000il4965_alive_start(struct il_priv *il)
5001{
5002 int ret = 0;
5003 struct il_rxon_context *ctx = &il->ctx;
5004
5005 D_INFO("Runtime Alive received.\n");
5006
5007 if (il->card_alive.is_valid != UCODE_VALID_OK) {
5008 /* We had an error bringing up the hardware, so take it
5009 * all the way back down so we can try again */
5010 D_INFO("Alive failed.\n");
5011 goto restart;
5012 }
5013
5014 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
5015 * This is a paranoid check, because we would not have gotten the
5016 * "runtime" alive if code weren't properly loaded. */
5017 if (il4965_verify_ucode(il)) {
5018 /* Runtime instruction load was bad;
5019 * take it all the way back down so we can try again */
5020 D_INFO("Bad runtime uCode load.\n");
5021 goto restart;
5022 }
5023
5024 ret = il4965_alive_notify(il);
5025 if (ret) {
5026 IL_WARN("Could not complete ALIVE transition [ntf]: %d\n", ret);
5027 goto restart;
5028 }
5029
5030 /* After the ALIVE response, we can send host commands to the uCode */
5031 set_bit(S_ALIVE, &il->status);
5032
5033 /* Enable watchdog to monitor the driver tx queues */
5034 il_setup_watchdog(il);
5035
5036 if (il_is_rfkill(il))
5037 return;
5038
5039 ieee80211_wake_queues(il->hw);
5040
5041 il->active_rate = RATES_MASK;
5042
5043 if (il_is_associated_ctx(ctx)) {
5044 struct il_rxon_cmd *active_rxon =
5045 (struct il_rxon_cmd *)&ctx->active;
5046 /* apply any changes in staging */
5047 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
5048 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5049 } else {
5050 /* Initialize our rx_config data */
5051 il_connection_init_rx_config(il, &il->ctx);
5052
5053 if (il->cfg->ops->hcmd->set_rxon_chain)
5054 il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
5055 }
5056
5057 /* Configure bluetooth coexistence if enabled */
5058 il_send_bt_config(il);
5059
5060 il4965_reset_run_time_calib(il);
5061
5062 set_bit(S_READY, &il->status);
5063
5064 /* Configure the adapter for unassociated operation */
5065 il_commit_rxon(il, ctx);
5066
5067 /* At this point, the NIC is initialized and operational */
5068 il4965_rf_kill_ct_config(il);
5069
5070 D_INFO("ALIVE processing complete.\n");
5071 wake_up(&il->wait_command_queue);
5072
5073 il_power_update_mode(il, true);
5074 D_INFO("Updated power mode\n");
5075
5076 return;
5077
5078restart:
5079 queue_work(il->workqueue, &il->restart);
5080}
5081
5082static void il4965_cancel_deferred_work(struct il_priv *il);
5083
5084static void
5085__il4965_down(struct il_priv *il)
5086{
5087 unsigned long flags;
5088 int exit_pending;
5089
5090 D_INFO(DRV_NAME " is going down\n");
5091
5092 il_scan_cancel_timeout(il, 200);
5093
5094 exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);
5095
5096 /* Stop TX queues watchdog. We need to have S_EXIT_PENDING bit set
5097 * to prevent rearm timer */
5098 del_timer_sync(&il->watchdog);
5099
5100 il_clear_ucode_stations(il, NULL);
5101 il_dealloc_bcast_stations(il);
5102 il_clear_driver_stations(il);
5103
5104 /* Unblock any waiting calls */
5105 wake_up_all(&il->wait_command_queue);
5106
5107 /* Wipe out the EXIT_PENDING status bit if we are not actually
5108 * exiting the module */
5109 if (!exit_pending)
5110 clear_bit(S_EXIT_PENDING, &il->status);
5111
5112 /* stop and reset the on-board processor */
5113 _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
5114
5115 /* tell the device to stop sending interrupts */
5116 spin_lock_irqsave(&il->lock, flags);
5117 il_disable_interrupts(il);
5118 spin_unlock_irqrestore(&il->lock, flags);
5119 il4965_synchronize_irq(il);
5120
5121 if (il->mac80211_registered)
5122 ieee80211_stop_queues(il->hw);
5123
5124 /* If we have not previously called il_init() then
5125 * clear all bits but the RF Kill bit and return */
5126 if (!il_is_init(il)) {
5127 il->status =
5128 test_bit(S_RF_KILL_HW,
5129 &il->
5130 status) << S_RF_KILL_HW |
5131 test_bit(S_GEO_CONFIGURED,
5132 &il->
5133 status) << S_GEO_CONFIGURED |
5134 test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
5135 goto exit;
5136 }
5137
5138 /* ...otherwise clear out all the status bits but the RF Kill
5139 * bit and continue taking the NIC down. */
5140 il->status &=
5141 test_bit(S_RF_KILL_HW,
5142 &il->status) << S_RF_KILL_HW | test_bit(S_GEO_CONFIGURED,
5143 &il->
5144 status) <<
5145 S_GEO_CONFIGURED | test_bit(S_FW_ERROR,
5146 &il->
5147 status) << S_FW_ERROR |
5148 test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
5149
5150 il4965_txq_ctx_stop(il);
5151 il4965_rxq_stop(il);
5152
5153 /* Power-down device's busmaster DMA clocks */
5154 il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
5155 udelay(5);
5156
5157 /* Make sure (redundant) we've released our request to stay awake */
5158 il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
5159
5160 /* Stop the device, and put it in low power state */
5161 il_apm_stop(il);
5162
5163exit:
5164 memset(&il->card_alive, 0, sizeof(struct il_alive_resp));
5165
5166 dev_kfree_skb(il->beacon_skb);
5167 il->beacon_skb = NULL;
5168
5169 /* clear out any free frames */
5170 il4965_clear_free_frames(il);
5171}
5172
/*
 * il4965_down - mutex-taking wrapper around __il4965_down()
 *
 * Deferred work is cancelled only after the mutex is dropped, since the
 * workers themselves take il->mutex.
 */
static void
il4965_down(struct il_priv *il)
{
	mutex_lock(&il->mutex);
	__il4965_down(il);
	mutex_unlock(&il->mutex);

	il4965_cancel_deferred_work(il);
}
5182
5183#define HW_READY_TIMEOUT (50)
5184
5185static int
5186il4965_set_hw_ready(struct il_priv *il)
5187{
5188 int ret = 0;
5189
5190 il_set_bit(il, CSR_HW_IF_CONFIG_REG,
5191 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
5192
5193 /* See if we got it */
5194 ret =
5195 _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
5196 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
5197 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, HW_READY_TIMEOUT);
5198 if (ret != -ETIMEDOUT)
5199 il->hw_ready = true;
5200 else
5201 il->hw_ready = false;
5202
5203 D_INFO("hardware %s\n", (il->hw_ready == 1) ? "ready" : "not ready");
5204 return ret;
5205}
5206
/*
 * il4965_prepare_card_hw - bring the card to the "HW ready" state
 *
 * Tries il4965_set_hw_ready() once; if the device is not ready, sets the
 * PREPARE bit, waits (up to 150 ms) for PREPARE_DONE and retries the
 * ready handshake.  Result is reflected in il->hw_ready.
 */
static int
il4965_prepare_card_hw(struct il_priv *il)
{
	int ret = 0;

	D_INFO("il4965_prepare_card_hw enter\n");

	ret = il4965_set_hw_ready(il);
	if (il->hw_ready)
		return ret;

	/* If HW is not ready, prepare the conditions to check again */
	il_set_bit(il, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE);

	ret =
	    _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
			 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

	/* HW should be ready by now, check again. */
	if (ret != -ETIMEDOUT)
		il4965_set_hw_ready(il);

	return ret;
}
5232
/* Number of bootstrap-load attempts before giving up on the device */
#define MAX_HW_RESTARTS 5

/*
 * __il4965_up - bring the NIC up (caller holds il->mutex)
 *
 * Verifies preconditions (no pending exit, ucode present), allocates the
 * broadcast station, checks HW readiness and RF-kill state, initializes
 * the NIC, then tries up to MAX_HW_RESTARTS times to load the bootstrap
 * uCode and start the card.  Returns 0 on success (including the
 * RF-kill-blocked case) or a negative errno.
 */
static int
__il4965_up(struct il_priv *il)
{
	int i;
	int ret;

	if (test_bit(S_EXIT_PENDING, &il->status)) {
		IL_WARN("Exit pending; will not bring the NIC up\n");
		return -EIO;
	}

	if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
		IL_ERR("ucode not available for device bringup\n");
		return -EIO;
	}

	ret = il4965_alloc_bcast_station(il, &il->ctx);
	if (ret) {
		il_dealloc_bcast_stations(il);
		return ret;
	}

	il4965_prepare_card_hw(il);

	if (!il->hw_ready) {
		IL_WARN("Exit HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(S_RF_KILL_HW, &il->status);
	else
		set_bit(S_RF_KILL_HW, &il->status);

	if (il_is_rfkill(il)) {
		wiphy_rfkill_set_hw_state(il->hw->wiphy, true);

		il_enable_interrupts(il);
		IL_WARN("Radio disabled by HW RF Kill switch\n");
		return 0;
	}

	/* Ack/clear all pending interrupts before starting */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);

	/* must be initialised before il_hw_nic_init */
	il->cmd_queue = IL_DEFAULT_CMD_QUEUE_NUM;

	ret = il4965_hw_nic_init(il);
	if (ret) {
		IL_ERR("Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);
	il_enable_interrupts(il);

	/* really make sure rfkill handshake bits are cleared */
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Copy original ucode data image from disk into backup cache.
	 * This will be used to initialize the on-board processor's
	 * data SRAM for a clean start when the runtime program first loads. */
	memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
	       il->ucode_data.len);

	for (i = 0; i < MAX_HW_RESTARTS; i++) {

		/* load bootstrap state machine,
		 * load bootstrap program into processor's memory,
		 * prepare to load the "initialize" uCode */
		ret = il->cfg->ops->lib->load_ucode(il);

		if (ret) {
			IL_ERR("Unable to set up bootstrap uCode: %d\n", ret);
			continue;
		}

		/* start card; "initialize" will load runtime ucode */
		il4965_nic_start(il);

		D_INFO(DRV_NAME " is coming up\n");

		return 0;
	}

	/* All attempts failed: tear down cleanly under a temporary
	 * EXIT_PENDING so concurrent work bails out. */
	set_bit(S_EXIT_PENDING, &il->status);
	__il4965_down(il);
	clear_bit(S_EXIT_PENDING, &il->status);

	/* tried to restart and config the device for as long as our
	 * patience could withstand */
	IL_ERR("Unable to initialize device after %d attempts.\n", i);
	return -EIO;
}
5336
5337/*****************************************************************************
5338 *
5339 * Workqueue callbacks
5340 *
5341 *****************************************************************************/
5342
5343static void
5344il4965_bg_init_alive_start(struct work_struct *data)
5345{
5346 struct il_priv *il =
5347 container_of(data, struct il_priv, init_alive_start.work);
5348
5349 mutex_lock(&il->mutex);
5350 if (test_bit(S_EXIT_PENDING, &il->status))
5351 goto out;
5352
5353 il->cfg->ops->lib->init_alive_start(il);
5354out:
5355 mutex_unlock(&il->mutex);
5356}
5357
5358static void
5359il4965_bg_alive_start(struct work_struct *data)
5360{
5361 struct il_priv *il =
5362 container_of(data, struct il_priv, alive_start.work);
5363
5364 mutex_lock(&il->mutex);
5365 if (test_bit(S_EXIT_PENDING, &il->status))
5366 goto out;
5367
5368 il4965_alive_start(il);
5369out:
5370 mutex_unlock(&il->mutex);
5371}
5372
5373static void
5374il4965_bg_run_time_calib_work(struct work_struct *work)
5375{
5376 struct il_priv *il = container_of(work, struct il_priv,
5377 run_time_calib_work);
5378
5379 mutex_lock(&il->mutex);
5380
5381 if (test_bit(S_EXIT_PENDING, &il->status) ||
5382 test_bit(S_SCANNING, &il->status)) {
5383 mutex_unlock(&il->mutex);
5384 return;
5385 }
5386
5387 if (il->start_calib) {
5388 il4965_chain_noise_calibration(il, (void *)&il->_4965.stats);
5389 il4965_sensitivity_calibration(il, (void *)&il->_4965.stats);
5390 }
5391
5392 mutex_unlock(&il->mutex);
5393}
5394
/*
 * il4965_bg_restart - restart worker
 *
 * Two paths: on a firmware error (S_FW_ERROR set) the interface context
 * is dropped and mac80211 is asked to restart the whole hw; otherwise
 * the device is simply taken down and brought back up in place.
 */
static void
il4965_bg_restart(struct work_struct *data)
{
	struct il_priv *il = container_of(data, struct il_priv, restart);

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
		mutex_lock(&il->mutex);
		/* Drop vif/open state; mac80211 will re-add on restart */
		il->ctx.vif = NULL;
		il->is_open = 0;

		__il4965_down(il);

		mutex_unlock(&il->mutex);
		/* cancel work outside the mutex - workers take il->mutex */
		il4965_cancel_deferred_work(il);
		ieee80211_restart_hw(il->hw);
	} else {
		il4965_down(il);

		mutex_lock(&il->mutex);
		/* il4965_down() dropped the mutex; re-check for shutdown */
		if (test_bit(S_EXIT_PENDING, &il->status)) {
			mutex_unlock(&il->mutex);
			return;
		}

		__il4965_up(il);
		mutex_unlock(&il->mutex);
	}
}
5426
/*
 * il4965_bg_rx_replenish - worker that refills the Rx buffer pool,
 * scheduled when the interrupt path could not allocate buffers itself.
 */
static void
il4965_bg_rx_replenish(struct work_struct *data)
{
	struct il_priv *il = container_of(data, struct il_priv, rx_replenish);

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	mutex_lock(&il->mutex);
	il4965_rx_replenish(il);
	mutex_unlock(&il->mutex);
}
5439
5440/*****************************************************************************
5441 *
5442 * mac80211 entry point functions
5443 *
5444 *****************************************************************************/
5445
5446#define UCODE_READY_TIMEOUT (4 * HZ)
5447
5448/*
5449 * Not a mac80211 entry point function, but it fits in with all the
5450 * other mac80211 functions grouped here.
5451 */
5452static int
5453il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
5454{
5455 int ret;
5456 struct ieee80211_hw *hw = il->hw;
5457
5458 hw->rate_control_algorithm = "iwl-4965-rs";
5459
5460 /* Tell mac80211 our characteristics */
5461 hw->flags =
5462 IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
5463 IEEE80211_HW_NEED_DTIM_PERIOD | IEEE80211_HW_SPECTRUM_MGMT |
5464 IEEE80211_HW_REPORTS_TX_ACK_STATUS;
5465
5466 if (il->cfg->sku & IL_SKU_N)
5467 hw->flags |=
5468 IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
5469 IEEE80211_HW_SUPPORTS_STATIC_SMPS;
5470
5471 hw->sta_data_size = sizeof(struct il_station_priv);
5472 hw->vif_data_size = sizeof(struct il_vif_priv);
5473
5474 hw->wiphy->interface_modes |= il->ctx.interface_modes;
5475 hw->wiphy->interface_modes |= il->ctx.exclusive_interface_modes;
5476
5477 hw->wiphy->flags |=
5478 WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS;
5479
5480 /*
5481 * For now, disable PS by default because it affects
5482 * RX performance significantly.
5483 */
5484 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
5485
5486 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
5487 /* we create the 802.11 header and a zero-length SSID element */
5488 hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;
5489
5490 /* Default value; 4 EDCA QOS priorities */
5491 hw->queues = 4;
5492
5493 hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL;
5494
5495 if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
5496 il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5497 &il->bands[IEEE80211_BAND_2GHZ];
5498 if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
5499 il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5500 &il->bands[IEEE80211_BAND_5GHZ];
5501
5502 il_leds_init(il);
5503
5504 ret = ieee80211_register_hw(il->hw);
5505 if (ret) {
5506 IL_ERR("Failed to register hw (error %d)\n", ret);
5507 return ret;
5508 }
5509 il->mac80211_registered = 1;
5510
5511 return 0;
5512}
5513
/*
 * il4965_mac_start - mac80211 start() callback
 *
 * Brings the NIC up and, unless RF-kill is active, waits up to
 * UCODE_READY_TIMEOUT for the runtime uCode to report S_READY (set by
 * il4965_alive_start() via wait_command_queue).  Returns 0 on success,
 * the __il4965_up() error, or -ETIMEDOUT.
 */
int
il4965_mac_start(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;
	int ret;

	D_MAC80211("enter\n");

	/* we should be verifying the device is ready to be opened */
	mutex_lock(&il->mutex);
	ret = __il4965_up(il);
	mutex_unlock(&il->mutex);

	if (ret)
		return ret;

	/* RF-kill: report success now, finish bringup when radio enables */
	if (il_is_rfkill(il))
		goto out;

	D_INFO("Start UP work done.\n");

	/* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
	 * mac80211 will not be run successfully. */
	ret = wait_event_timeout(il->wait_command_queue,
				 test_bit(S_READY, &il->status),
				 UCODE_READY_TIMEOUT);
	if (!ret) {
		/* re-check: S_READY may have been set after the timeout */
		if (!test_bit(S_READY, &il->status)) {
			IL_ERR("START_ALIVE timeout after %dms.\n",
			       jiffies_to_msecs(UCODE_READY_TIMEOUT));
			return -ETIMEDOUT;
		}
	}

	il4965_led_enable(il);

out:
	il->is_open = 1;
	D_MAC80211("leave\n");
	return 0;
}
5555
/*
 * il4965_mac_stop - mac80211 stop() callback
 *
 * Takes the device down and flushes pending work.  Interrupts are
 * re-armed for RF-kill only so user space still sees rfkill state
 * changes while the interface is down.
 */
void
il4965_mac_stop(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;

	D_MAC80211("enter\n");

	if (!il->is_open)
		return;

	il->is_open = 0;

	il4965_down(il);

	flush_workqueue(il->workqueue);

	/* User space software may expect getting rfkill changes
	 * even if interface is down */
	_il_wr(il, CSR_INT, 0xFFFFFFFF);
	il_enable_rfkill_int(il);

	D_MAC80211("leave\n");
}
5579
/*
 * il4965_mac_tx - mac80211 tx() callback
 *
 * Hands the frame to il4965_tx_skb(); on failure the skb is freed here
 * since ownership was transferred to the driver.
 */
void
il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct il_priv *il = hw->priv;

	D_MACDUMP("enter\n");

	D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
	     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);

	if (il4965_tx_skb(il, skb))
		dev_kfree_skb_any(skb);

	D_MACDUMP("leave\n");
}
5595
/*
 * il4965_mac_update_tkip_key - mac80211 update_tkip_key() callback;
 * pushes the new TKIP phase-1 key material to the device.
 */
void
il4965_mac_update_tkip_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			   struct ieee80211_key_conf *keyconf,
			   struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
{
	struct il_priv *il = hw->priv;
	struct il_vif_priv *vif_priv = (void *)vif->drv_priv;

	D_MAC80211("enter\n");

	il4965_update_tkip_key(il, vif_priv->ctx, keyconf, sta, iv32,
			       phase1key);

	D_MAC80211("leave\n");
}
5611
/*
 * il4965_mac_set_key - mac80211 set_key() callback
 *
 * Installs or removes a hardware crypto key.  Refused entirely when the
 * sw_crypto module parameter is set.  WEP group keys with no pairwise
 * keys installed use the "default WEP key" host command path; everything
 * else uses dynamic key slots.  Returns 0, -EOPNOTSUPP, or -EINVAL.
 */
int
il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		   struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		   struct ieee80211_key_conf *key)
{
	struct il_priv *il = hw->priv;
	struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
	struct il_rxon_context *ctx = vif_priv->ctx;
	int ret;
	u8 sta_id;
	bool is_default_wep_key = false;

	D_MAC80211("enter\n");

	if (il->cfg->mod_params->sw_crypto) {
		D_MAC80211("leave - hwcrypto disabled\n");
		return -EOPNOTSUPP;
	}

	/* sta == NULL maps to the broadcast station */
	sta_id = il_sta_id_or_broadcast(il, vif_priv->ctx, sta);
	if (sta_id == IL_INVALID_STATION)
		return -EINVAL;

	mutex_lock(&il->mutex);
	il_scan_cancel_timeout(il, 100);

	/*
	 * If we are getting WEP group key and we didn't receive any key mapping
	 * so far, we are in legacy wep mode (group key only), otherwise we are
	 * in 1X mode.
	 * In legacy wep mode, we use another host command to the uCode.
	 */
	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
		if (cmd == SET_KEY)
			is_default_wep_key = !ctx->key_mapping_keys;
		else
			is_default_wep_key =
			    (key->hw_key_idx == HW_KEY_DEFAULT);
	}

	switch (cmd) {
	case SET_KEY:
		if (is_default_wep_key)
			ret =
			    il4965_set_default_wep_key(il, vif_priv->ctx, key);
		else
			ret =
			    il4965_set_dynamic_key(il, vif_priv->ctx, key,
						   sta_id);

		D_MAC80211("enable hwcrypto key\n");
		break;
	case DISABLE_KEY:
		if (is_default_wep_key)
			ret = il4965_remove_default_wep_key(il, ctx, key);
		else
			ret = il4965_remove_dynamic_key(il, ctx, key, sta_id);

		D_MAC80211("disable hwcrypto key\n");
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&il->mutex);
	D_MAC80211("leave\n");

	return ret;
}
5682
/*
 * il4965_mac_ampdu_action - mac80211 ampdu_action() callback
 *
 * Dispatches A-MPDU session start/stop for Rx and Tx.  Refused with
 * -EACCES on non-11n SKUs.  Stop requests during shutdown report
 * success so mac80211 teardown can proceed.  Unhandled actions return
 * the initial -EINVAL.
 */
int
il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			enum ieee80211_ampdu_mlme_action action,
			struct ieee80211_sta *sta, u16 tid, u16 * ssn,
			u8 buf_size)
{
	struct il_priv *il = hw->priv;
	int ret = -EINVAL;

	D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid);

	if (!(il->cfg->sku & IL_SKU_N))
		return -EACCES;

	mutex_lock(&il->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		D_HT("start Rx\n");
		ret = il4965_sta_rx_agg_start(il, sta, tid, *ssn);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		D_HT("stop Rx\n");
		ret = il4965_sta_rx_agg_stop(il, sta, tid);
		if (test_bit(S_EXIT_PENDING, &il->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_START:
		D_HT("start Tx\n");
		ret = il4965_tx_agg_start(il, vif, sta, tid, ssn);
		break;
	case IEEE80211_AMPDU_TX_STOP:
		D_HT("stop Tx\n");
		ret = il4965_tx_agg_stop(il, vif, sta, tid);
		if (test_bit(S_EXIT_PENDING, &il->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		/* nothing to program; aggregation already set up */
		ret = 0;
		break;
	}
	mutex_unlock(&il->mutex);

	return ret;
}
5728
/*
 * il4965_mac_sta_add - mac80211 sta_add() callback
 *
 * Adds the station to the device's station table and initializes rate
 * scaling for it.  Returns 0 or the il_add_station_common() error.
 */
int
il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct il_priv *il = hw->priv;
	struct il_station_priv *sta_priv = (void *)sta->drv_priv;
	struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
	/* NOTE(review): despite the name, this flags that *we* are a
	 * station, i.e. the peer being added is our AP - confirm against
	 * il_add_station_common() semantics */
	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
	int ret;
	u8 sta_id;

	D_INFO("received request to add station %pM\n", sta->addr);
	mutex_lock(&il->mutex);
	D_INFO("proceeding to add station %pM\n", sta->addr);
	sta_priv->common.sta_id = IL_INVALID_STATION;

	atomic_set(&sta_priv->pending_frames, 0);

	ret =
	    il_add_station_common(il, vif_priv->ctx, sta->addr, is_ap, sta,
				  &sta_id);
	if (ret) {
		IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
		/* Should we return success if return code is EEXIST ? */
		mutex_unlock(&il->mutex);
		return ret;
	}

	sta_priv->common.sta_id = sta_id;

	/* Initialize rate scaling */
	D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
	il4965_rs_rate_init(il, sta, sta_id);
	mutex_unlock(&il->mutex);

	return 0;
}
5766
/*
 * il4965_mac_channel_switch - mac80211 channel_switch() callback
 *
 * Prepares a CSA-driven channel switch: validates state (associated, not
 * scanning, no switch already pending, target channel valid and
 * different), updates the staging RXON / HT40 configuration, then hands
 * off to the device-specific set_channel_switch op.  On failure the
 * pending flag is cleared and mac80211 is told the switch aborted.
 */
void
il4965_mac_channel_switch(struct ieee80211_hw *hw,
			  struct ieee80211_channel_switch *ch_switch)
{
	struct il_priv *il = hw->priv;
	const struct il_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = ch_switch->channel;
	struct il_ht_config *ht_conf = &il->current_ht_config;

	struct il_rxon_context *ctx = &il->ctx;
	u16 ch;

	D_MAC80211("enter\n");

	mutex_lock(&il->mutex);

	if (il_is_rfkill(il))
		goto out;

	if (test_bit(S_EXIT_PENDING, &il->status) ||
	    test_bit(S_SCANNING, &il->status) ||
	    test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
		goto out;

	if (!il_is_associated_ctx(ctx))
		goto out;

	if (!il->cfg->ops->lib->set_channel_switch)
		goto out;

	/* nothing to do when already on the target channel */
	ch = channel->hw_value;
	if (le16_to_cpu(ctx->active.channel) == ch)
		goto out;

	ch_info = il_get_channel_info(il, channel->band, ch);
	if (!il_is_channel_valid(ch_info)) {
		D_MAC80211("invalid channel\n");
		goto out;
	}

	spin_lock_irq(&il->lock);

	il->current_ht_config.smps = conf->smps_mode;

	/* Configure HT40 channels */
	ctx->ht.enabled = conf_is_ht(conf);
	if (ctx->ht.enabled) {
		if (conf_is_ht40_minus(conf)) {
			ctx->ht.extension_chan_offset =
			    IEEE80211_HT_PARAM_CHA_SEC_BELOW;
			ctx->ht.is_40mhz = true;
		} else if (conf_is_ht40_plus(conf)) {
			ctx->ht.extension_chan_offset =
			    IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
			ctx->ht.is_40mhz = true;
		} else {
			ctx->ht.extension_chan_offset =
			    IEEE80211_HT_PARAM_CHA_SEC_NONE;
			ctx->ht.is_40mhz = false;
		}
	} else
		ctx->ht.is_40mhz = false;

	/* channel actually changing: start from clean staging flags */
	if ((le16_to_cpu(ctx->staging.channel) != ch))
		ctx->staging.flags = 0;

	il_set_rxon_channel(il, channel, ctx);
	il_set_rxon_ht(il, ht_conf);
	il_set_flags_for_band(il, ctx, channel->band, ctx->vif);

	spin_unlock_irq(&il->lock);

	il_set_rate(il);
	/*
	 * at this point, staging_rxon has the
	 * configuration for channel switch
	 */
	set_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
	il->switch_channel = cpu_to_le16(ch);
	if (il->cfg->ops->lib->set_channel_switch(il, ch_switch)) {
		clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
		il->switch_channel = 0;
		ieee80211_chswitch_done(ctx->vif, false);
	}

out:
	mutex_unlock(&il->mutex);
	D_MAC80211("leave\n");
}
5857
/*
 * il4965_configure_filter - mac80211 configure_filter() callback
 *
 * Translates mac80211 FIF_* filter flags into RXON filter bits in the
 * staging config (CHK collects set-bits into filter_or and clear-bits
 * into filter_nand).  The change is not committed here; it is picked up
 * by the next RXON commit.  *total_flags is trimmed to what the device
 * supports.
 */
void
il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
			unsigned int *total_flags, u64 multicast)
{
	struct il_priv *il = hw->priv;
	__le32 filter_or = 0, filter_nand = 0;

#define CHK(test, flag)	do { \
	if (*total_flags & (test)) \
		filter_or |= (flag); \
	else \
		filter_nand |= (flag); \
	} while (0)

	D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
		   *total_flags);

	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
	/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);

#undef CHK

	mutex_lock(&il->mutex);

	il->ctx.staging.filter_flags &= ~filter_nand;
	il->ctx.staging.filter_flags |= filter_or;

	/*
	 * Not committing directly because hardware can perform a scan,
	 * but we'll eventually commit the filter flags change anyway.
	 */

	mutex_unlock(&il->mutex);

	/*
	 * Receiving all multicast frames is always enabled by the
	 * default flags setup in il_connection_init_rx_config()
	 * since we currently do not support programming multicast
	 * filters into the device.
	 */
	*total_flags &=
	    FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
	    FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
5904
5905/*****************************************************************************
5906 *
5907 * driver setup and teardown
5908 *
5909 *****************************************************************************/
5910
5911static void
5912il4965_bg_txpower_work(struct work_struct *work)
5913{
5914 struct il_priv *il = container_of(work, struct il_priv,
5915 txpower_work);
5916
5917 mutex_lock(&il->mutex);
5918
5919 /* If a scan happened to start before we got here
5920 * then just return; the stats notification will
5921 * kick off another scheduled work to compensate for
5922 * any temperature delta we missed here. */
5923 if (test_bit(S_EXIT_PENDING, &il->status) ||
5924 test_bit(S_SCANNING, &il->status))
5925 goto out;
5926
5927 /* Regardless of if we are associated, we must reconfigure the
5928 * TX power since frames can be sent on non-radar channels while
5929 * not associated */
5930 il->cfg->ops->lib->send_tx_power(il);
5931
5932 /* Update last_temperature to keep is_calib_needed from running
5933 * when it isn't needed... */
5934 il->last_temperature = il->temperature;
5935out:
5936 mutex_unlock(&il->mutex);
5937}
5938
/*
 * il4965_setup_deferred_work - init work items, timers and the IRQ tasklet
 *
 * Creates the driver's private single-threaded workqueue and wires up
 * every deferred-work handler used by the 4965 driver.
 * NOTE(review): the create_singlethread_workqueue() return value is not
 * checked here -- confirm callers tolerate il->workqueue == NULL.
 */
static void
il4965_setup_deferred_work(struct il_priv *il)
{
	il->workqueue = create_singlethread_workqueue(DRV_NAME);

	init_waitqueue_head(&il->wait_command_queue);

	INIT_WORK(&il->restart, il4965_bg_restart);
	INIT_WORK(&il->rx_replenish, il4965_bg_rx_replenish);
	INIT_WORK(&il->run_time_calib_work, il4965_bg_run_time_calib_work);
	INIT_DELAYED_WORK(&il->init_alive_start, il4965_bg_init_alive_start);
	INIT_DELAYED_WORK(&il->alive_start, il4965_bg_alive_start);

	/* Scan-related deferred work is set up by common code */
	il_setup_scan_deferred_work(il);

	INIT_WORK(&il->txpower_work, il4965_bg_txpower_work);

	/* Periodic statistics timer; callback receives il via ->data */
	init_timer(&il->stats_periodic);
	il->stats_periodic.data = (unsigned long)il;
	il->stats_periodic.function = il4965_bg_stats_periodic;

	/* Stuck-queue watchdog timer */
	init_timer(&il->watchdog);
	il->watchdog.data = (unsigned long)il;
	il->watchdog.function = il_bg_watchdog;

	/* Bottom half for interrupt handling */
	tasklet_init(&il->irq_tasklet,
		     (void (*)(unsigned long))il4965_irq_tasklet,
		     (unsigned long)il);
}
5968
/*
 * il4965_cancel_deferred_work - stop deferred work/timers before teardown
 *
 * Counterpart of il4965_setup_deferred_work().
 * NOTE(review): alive_start is cancelled with the non-_sync variant while
 * the other items use _sync -- confirm a late-running alive_start handler
 * cannot race with the teardown path.
 */
static void
il4965_cancel_deferred_work(struct il_priv *il)
{
	cancel_work_sync(&il->txpower_work);
	cancel_delayed_work_sync(&il->init_alive_start);
	cancel_delayed_work(&il->alive_start);
	cancel_work_sync(&il->run_time_calib_work);

	il_cancel_scan_deferred_work(il);

	del_timer_sync(&il->stats_periodic);
}
5981
5982static void
5983il4965_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
5984{
5985 int i;
5986
5987 for (i = 0; i < RATE_COUNT_LEGACY; i++) {
5988 rates[i].bitrate = il_rates[i].ieee * 5;
5989 rates[i].hw_value = i; /* Rate scaling will work on idxes */
5990 rates[i].hw_value_short = i;
5991 rates[i].flags = 0;
5992 if ((i >= IL_FIRST_CCK_RATE) && (i <= IL_LAST_CCK_RATE)) {
5993 /*
5994 * If CCK != 1M then set short preamble rate flag.
5995 */
5996 rates[i].flags |=
5997 (il_rates[i].plcp ==
5998 RATE_1M_PLCP) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
5999 }
6000 }
6001}
6002
/*
 * il4965_set_wr_ptrs - point hardware at a new idx for a Tx queue
 *
 * Writes the (byte-ranged) idx plus queue id to the HBUS write-pointer
 * register, then updates the scheduler's read pointer for the same queue.
 *
 * Acquire il->lock before calling this function !
 */
void
il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx)
{
	il_wr(il, HBUS_TARG_WRPTR, (idx & 0xff) | (txq_id << 8));
	il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(txq_id), idx);
}
6012
6013void
6014il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
6015 int tx_fifo_id, int scd_retry)
6016{
6017 int txq_id = txq->q.id;
6018
6019 /* Find out whether to activate Tx queue */
6020 int active = test_bit(txq_id, &il->txq_ctx_active_msk) ? 1 : 0;
6021
6022 /* Set up and activate */
6023 il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
6024 (active << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
6025 (tx_fifo_id << IL49_SCD_QUEUE_STTS_REG_POS_TXF) |
6026 (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_WSL) |
6027 (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
6028 IL49_SCD_QUEUE_STTS_REG_MSK);
6029
6030 txq->sched_retry = scd_retry;
6031
6032 D_INFO("%s %s Queue %d on AC %d\n", active ? "Activate" : "Deactivate",
6033 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
6034}
6035
/*
 * il4965_init_drv - init locks, defaults, channel map and geo/rate data
 *
 * Called from probe after EEPROM and HW params are set up.  Returns 0 on
 * success or a negative error code; on failure everything allocated here
 * is released via the goto-cleanup chain.
 */
static int
il4965_init_drv(struct il_priv *il)
{
	int ret;

	spin_lock_init(&il->sta_lock);
	spin_lock_init(&il->hcmd_lock);

	INIT_LIST_HEAD(&il->free_frames);

	mutex_init(&il->mutex);

	/* Channel/rate arrays are allocated by il_init_geos() below */
	il->ieee_channels = NULL;
	il->ieee_rates = NULL;
	il->band = IEEE80211_BAND_2GHZ;

	il->iw_mode = NL80211_IFTYPE_STATION;
	il->current_ht_config.smps = IEEE80211_SMPS_STATIC;
	il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;

	/* initialize force reset */
	il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;

	/* Choose which receivers/antennas to use */
	if (il->cfg->ops->hcmd->set_rxon_chain)
		il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);

	il_init_scan_params(il);

	/* Build regulatory channel map from EEPROM data */
	ret = il_init_channel_map(il);
	if (ret) {
		IL_ERR("initializing regulatory failed: %d\n", ret);
		goto err;
	}

	ret = il_init_geos(il);
	if (ret) {
		IL_ERR("initializing geos failed: %d\n", ret);
		goto err_free_channel_map;
	}
	il4965_init_hw_rates(il, il->ieee_rates);

	return 0;

err_free_channel_map:
	il_free_channel_map(il);
err:
	return ret;
}
6085
/*
 * il4965_uninit_drv - release everything allocated by il4965_init_drv()
 * (plus calibration results and any pending scan command buffer).
 */
static void
il4965_uninit_drv(struct il_priv *il)
{
	il4965_calib_free_results(il);
	il_free_geos(il);
	il_free_channel_map(il);
	kfree(il->scan_cmd);
}
6094
/*
 * il4965_hw_detect - cache hardware/PCI revision identifiers in il
 */
static void
il4965_hw_detect(struct il_priv *il)
{
	il->hw_rev = _il_rd(il, CSR_HW_REV);
	il->hw_wa_rev = _il_rd(il, CSR_HW_REV_WA_REG);
	il->rev_id = il->pci_dev->revision;
	D_INFO("HW Revision ID = 0x%X\n", il->rev_id);
}
6103
6104static int
6105il4965_set_hw_params(struct il_priv *il)
6106{
6107 il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
6108 il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
6109 if (il->cfg->mod_params->amsdu_size_8K)
6110 il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_8K);
6111 else
6112 il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_4K);
6113
6114 il->hw_params.max_beacon_itrvl = IL_MAX_UCODE_BEACON_INTERVAL;
6115
6116 if (il->cfg->mod_params->disable_11n)
6117 il->cfg->sku &= ~IL_SKU_N;
6118
6119 /* Device-specific setup */
6120 return il->cfg->ops->lib->set_hw_params(il);
6121}
6122
/* Map mac80211 AC idx (VO, VI, BE, BK) to uCode Tx FIFO */
static const u8 il4965_bss_ac_to_fifo[] = {
	IL_TX_FIFO_VO,
	IL_TX_FIFO_VI,
	IL_TX_FIFO_BE,
	IL_TX_FIFO_BK,
};

/* Map mac80211 AC idx to driver Tx queue number */
static const u8 il4965_bss_ac_to_queue[] = {
	0, 1, 2, 3,
};
6133
6134static int
6135il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6136{
6137 int err = 0;
6138 struct il_priv *il;
6139 struct ieee80211_hw *hw;
6140 struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
6141 unsigned long flags;
6142 u16 pci_cmd;
6143
6144 /************************
6145 * 1. Allocating HW data
6146 ************************/
6147
6148 hw = il_alloc_all(cfg);
6149 if (!hw) {
6150 err = -ENOMEM;
6151 goto out;
6152 }
6153 il = hw->priv;
6154 /* At this point both hw and il are allocated. */
6155
6156 il->ctx.ctxid = 0;
6157
6158 il->ctx.always_active = true;
6159 il->ctx.is_active = true;
6160 il->ctx.rxon_cmd = C_RXON;
6161 il->ctx.rxon_timing_cmd = C_RXON_TIMING;
6162 il->ctx.rxon_assoc_cmd = C_RXON_ASSOC;
6163 il->ctx.qos_cmd = C_QOS_PARAM;
6164 il->ctx.ap_sta_id = IL_AP_ID;
6165 il->ctx.wep_key_cmd = C_WEPKEY;
6166 il->ctx.ac_to_fifo = il4965_bss_ac_to_fifo;
6167 il->ctx.ac_to_queue = il4965_bss_ac_to_queue;
6168 il->ctx.exclusive_interface_modes = BIT(NL80211_IFTYPE_ADHOC);
6169 il->ctx.interface_modes = BIT(NL80211_IFTYPE_STATION);
6170 il->ctx.ap_devtype = RXON_DEV_TYPE_AP;
6171 il->ctx.ibss_devtype = RXON_DEV_TYPE_IBSS;
6172 il->ctx.station_devtype = RXON_DEV_TYPE_ESS;
6173 il->ctx.unused_devtype = RXON_DEV_TYPE_ESS;
6174
6175 SET_IEEE80211_DEV(hw, &pdev->dev);
6176
6177 D_INFO("*** LOAD DRIVER ***\n");
6178 il->cfg = cfg;
6179 il->pci_dev = pdev;
6180 il->inta_mask = CSR_INI_SET_MASK;
6181
6182 if (il_alloc_traffic_mem(il))
6183 IL_ERR("Not enough memory to generate traffic log\n");
6184
6185 /**************************
6186 * 2. Initializing PCI bus
6187 **************************/
6188 pci_disable_link_state(pdev,
6189 PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
6190 PCIE_LINK_STATE_CLKPM);
6191
6192 if (pci_enable_device(pdev)) {
6193 err = -ENODEV;
6194 goto out_ieee80211_free_hw;
6195 }
6196
6197 pci_set_master(pdev);
6198
6199 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
6200 if (!err)
6201 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
6202 if (err) {
6203 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6204 if (!err)
6205 err =
6206 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
6207 /* both attempts failed: */
6208 if (err) {
6209 IL_WARN("No suitable DMA available.\n");
6210 goto out_pci_disable_device;
6211 }
6212 }
6213
6214 err = pci_request_regions(pdev, DRV_NAME);
6215 if (err)
6216 goto out_pci_disable_device;
6217
6218 pci_set_drvdata(pdev, il);
6219
6220 /***********************
6221 * 3. Read REV register
6222 ***********************/
6223 il->hw_base = pci_iomap(pdev, 0, 0);
6224 if (!il->hw_base) {
6225 err = -ENODEV;
6226 goto out_pci_release_regions;
6227 }
6228
6229 D_INFO("pci_resource_len = 0x%08llx\n",
6230 (unsigned long long)pci_resource_len(pdev, 0));
6231 D_INFO("pci_resource_base = %p\n", il->hw_base);
6232
6233 /* these spin locks will be used in apm_ops.init and EEPROM access
6234 * we should init now
6235 */
6236 spin_lock_init(&il->reg_lock);
6237 spin_lock_init(&il->lock);
6238
6239 /*
6240 * stop and reset the on-board processor just in case it is in a
6241 * strange state ... like being left stranded by a primary kernel
6242 * and this is now the kdump kernel trying to start up
6243 */
6244 _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
6245
6246 il4965_hw_detect(il);
6247 IL_INFO("Detected %s, REV=0x%X\n", il->cfg->name, il->hw_rev);
6248
6249 /* We disable the RETRY_TIMEOUT register (0x41) to keep
6250 * PCI Tx retries from interfering with C3 CPU state */
6251 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
6252
6253 il4965_prepare_card_hw(il);
6254 if (!il->hw_ready) {
6255 IL_WARN("Failed, HW not ready\n");
6256 goto out_iounmap;
6257 }
6258
6259 /*****************
6260 * 4. Read EEPROM
6261 *****************/
6262 /* Read the EEPROM */
6263 err = il_eeprom_init(il);
6264 if (err) {
6265 IL_ERR("Unable to init EEPROM\n");
6266 goto out_iounmap;
6267 }
6268 err = il4965_eeprom_check_version(il);
6269 if (err)
6270 goto out_free_eeprom;
6271
6272 if (err)
6273 goto out_free_eeprom;
6274
6275 /* extract MAC Address */
6276 il4965_eeprom_get_mac(il, il->addresses[0].addr);
6277 D_INFO("MAC address: %pM\n", il->addresses[0].addr);
6278 il->hw->wiphy->addresses = il->addresses;
6279 il->hw->wiphy->n_addresses = 1;
6280
6281 /************************
6282 * 5. Setup HW constants
6283 ************************/
6284 if (il4965_set_hw_params(il)) {
6285 IL_ERR("failed to set hw parameters\n");
6286 goto out_free_eeprom;
6287 }
6288
6289 /*******************
6290 * 6. Setup il
6291 *******************/
6292
6293 err = il4965_init_drv(il);
6294 if (err)
6295 goto out_free_eeprom;
6296 /* At this point both hw and il are initialized. */
6297
6298 /********************
6299 * 7. Setup services
6300 ********************/
6301 spin_lock_irqsave(&il->lock, flags);
6302 il_disable_interrupts(il);
6303 spin_unlock_irqrestore(&il->lock, flags);
6304
6305 pci_enable_msi(il->pci_dev);
6306
6307 err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
6308 if (err) {
6309 IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
6310 goto out_disable_msi;
6311 }
6312
6313 il4965_setup_deferred_work(il);
6314 il4965_setup_handlers(il);
6315
6316 /*********************************************
6317 * 8. Enable interrupts and read RFKILL state
6318 *********************************************/
6319
6320 /* enable rfkill interrupt: hw bug w/a */
6321 pci_read_config_word(il->pci_dev, PCI_COMMAND, &pci_cmd);
6322 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
6323 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
6324 pci_write_config_word(il->pci_dev, PCI_COMMAND, pci_cmd);
6325 }
6326
6327 il_enable_rfkill_int(il);
6328
6329 /* If platform's RF_KILL switch is NOT set to KILL */
6330 if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
6331 clear_bit(S_RF_KILL_HW, &il->status);
6332 else
6333 set_bit(S_RF_KILL_HW, &il->status);
6334
6335 wiphy_rfkill_set_hw_state(il->hw->wiphy,
6336 test_bit(S_RF_KILL_HW, &il->status));
6337
6338 il_power_initialize(il);
6339
6340 init_completion(&il->_4965.firmware_loading_complete);
6341
6342 err = il4965_request_firmware(il, true);
6343 if (err)
6344 goto out_destroy_workqueue;
6345
6346 return 0;
6347
6348out_destroy_workqueue:
6349 destroy_workqueue(il->workqueue);
6350 il->workqueue = NULL;
6351 free_irq(il->pci_dev->irq, il);
6352out_disable_msi:
6353 pci_disable_msi(il->pci_dev);
6354 il4965_uninit_drv(il);
6355out_free_eeprom:
6356 il_eeprom_free(il);
6357out_iounmap:
6358 pci_iounmap(pdev, il->hw_base);
6359out_pci_release_regions:
6360 pci_set_drvdata(pdev, NULL);
6361 pci_release_regions(pdev);
6362out_pci_disable_device:
6363 pci_disable_device(pdev);
6364out_ieee80211_free_hw:
6365 il_free_traffic_mem(il);
6366 ieee80211_free_hw(il->hw);
6367out:
6368 return err;
6369}
6370
/*
 * il4965_pci_remove - PCI removal: tear down in (roughly) reverse probe order
 */
static void __devexit
il4965_pci_remove(struct pci_dev *pdev)
{
	struct il_priv *il = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!il)
		return;

	/* Firmware load runs asynchronously; don't tear down under it */
	wait_for_completion(&il->_4965.firmware_loading_complete);

	D_INFO("*** UNLOAD DRIVER ***\n");

	il_dbgfs_unregister(il);
	sysfs_remove_group(&pdev->dev.kobj, &il_attribute_group);

	/* ieee80211_unregister_hw call will cause il_mac_stop to
	 * be called and il4965_down since we are removing the device
	 * we need to set S_EXIT_PENDING bit.
	 */
	set_bit(S_EXIT_PENDING, &il->status);

	il_leds_exit(il);

	if (il->mac80211_registered) {
		ieee80211_unregister_hw(il->hw);
		il->mac80211_registered = 0;
	} else {
		il4965_down(il);
	}

	/*
	 * Make sure device is reset to low power before unloading driver.
	 * This may be redundant with il4965_down(), but there are paths to
	 * run il4965_down() without calling apm_ops.stop(), and there are
	 * paths to avoid running il4965_down() at all before leaving driver.
	 * This (inexpensive) call *makes sure* device is reset.
	 */
	il_apm_stop(il);

	/* make sure we flush any pending irq or
	 * tasklet for the driver
	 */
	spin_lock_irqsave(&il->lock, flags);
	il_disable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);

	il4965_synchronize_irq(il);

	il4965_dealloc_ucode_pci(il);

	/* Free RX/TX DMA rings and buffers */
	if (il->rxq.bd)
		il4965_rx_queue_free(il, &il->rxq);
	il4965_hw_txq_ctx_free(il);

	il_eeprom_free(il);

	/*netif_stop_queue(dev); */
	flush_workqueue(il->workqueue);

	/* ieee80211_unregister_hw calls il_mac_stop, which flushes
	 * il->workqueue... so we can't take down the workqueue
	 * until now... */
	destroy_workqueue(il->workqueue);
	il->workqueue = NULL;
	il_free_traffic_mem(il);

	free_irq(il->pci_dev->irq, il);
	pci_disable_msi(il->pci_dev);
	pci_iounmap(pdev, il->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	il4965_uninit_drv(il);

	dev_kfree_skb(il->beacon_skb);

	ieee80211_free_hw(il->hw);
}
6451
/*
 * il4965_txq_set_sched - activate/deactivate Tx DMA/FIFO channels
 *
 * Writes @mask (one bit per Tx FIFO) to the scheduler's TXFACT register.
 * Must be called under il->lock and mac access.
 */
void
il4965_txq_set_sched(struct il_priv *il, u32 mask)
{
	il_wr_prph(il, IL49_SCD_TXFACT, mask);
}
6461
6462/*****************************************************************************
6463 *
6464 * driver and module entry point
6465 *
6466 *****************************************************************************/
6467
/* Hardware specific file defines the PCI IDs table for that hardware module */
static DEFINE_PCI_DEVICE_TABLE(il4965_hw_card_ids) = {
	{IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il4965_cfg)},
	{IL_PCI_DEVICE(0x4230, PCI_ANY_ID, il4965_cfg)},
	{0}	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, il4965_hw_card_ids);
6475
/* PCI driver glue: probe/remove plus legacy power-management hooks */
static struct pci_driver il4965_driver = {
	.name = DRV_NAME,
	.id_table = il4965_hw_card_ids,
	.probe = il4965_pci_probe,
	.remove = __devexit_p(il4965_pci_remove),
	.driver.pm = IL_LEGACY_PM_OPS,
};
6483
6484static int __init
6485il4965_init(void)
6486{
6487
6488 int ret;
6489 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
6490 pr_info(DRV_COPYRIGHT "\n");
6491
6492 ret = il4965_rate_control_register();
6493 if (ret) {
6494 pr_err("Unable to register rate control algorithm: %d\n", ret);
6495 return ret;
6496 }
6497
6498 ret = pci_register_driver(&il4965_driver);
6499 if (ret) {
6500 pr_err("Unable to initialize PCI module\n");
6501 goto error_register;
6502 }
6503
6504 return ret;
6505
6506error_register:
6507 il4965_rate_control_unregister();
6508 return ret;
6509}
6510
/*
 * il4965_exit - module exit: unregister PCI driver first (stops device
 * binding and triggers remove for bound devices), then the rate-control
 * algorithm.
 */
static void __exit
il4965_exit(void)
{
	pci_unregister_driver(&il4965_driver);
	il4965_rate_control_unregister();
}
6517
module_exit(il4965_exit);
module_init(il4965_init);

#ifdef CONFIG_IWLEGACY_DEBUG
/* Runtime-adjustable debug mask (writable via sysfs) */
module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "debug output mask");
#endif

/* Module parameters; all read-only after load (S_IRUGO) */
module_param_named(swcrypto, il4965_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(queues_num, il4965_mod_params.num_of_queues, int, S_IRUGO);
MODULE_PARM_DESC(queues_num, "number of hw queues.");
module_param_named(11n_disable, il4965_mod_params.disable_11n, int, S_IRUGO);
MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K, int,
		   S_IRUGO);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
module_param_named(fw_restart, il4965_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
new file mode 100644
index 000000000000..467d0cb14ecd
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -0,0 +1,2860 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26#include <linux/kernel.h>
27#include <linux/init.h>
28#include <linux/skbuff.h>
29#include <linux/slab.h>
30#include <net/mac80211.h>
31
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/delay.h>
35
36#include <linux/workqueue.h>
37
38#include "common.h"
39#include "4965.h"
40
#define IL4965_RS_NAME "iwl-4965-rs"

/* Tx attempts on one antenna before toggling to another */
#define NUM_TRY_BEFORE_ANT_TOGGLE 1
#define IL_NUMBER_TRY 1
#define IL_HT_NUMBER_TRY 3

#define RATE_MAX_WINDOW 62	/* # tx in history win */
#define RATE_MIN_FAILURE_TH 6	/* min failures to calc tpt */
#define RATE_MIN_SUCCESS_TH 8	/* min successes to calc tpt */

/* max allowed rate miss before sync LQ cmd */
#define IL_MISSED_RATE_MAX 15
/* max time to accum history -- NOTE(review): original note said
 * "2 seconds" but the value is 3*HZ; confirm intended interval */
#define RATE_SCALE_FLUSH_INTVL (3*HZ)
55
/* Map each HT rate idx to a fallback legacy (CCK/OFDM) rate idx */
static u8 rs_ht_to_legacy[] = {
	RATE_6M_IDX, RATE_6M_IDX,
	RATE_6M_IDX, RATE_6M_IDX,
	RATE_6M_IDX,
	RATE_6M_IDX, RATE_9M_IDX,
	RATE_12M_IDX, RATE_18M_IDX,
	RATE_24M_IDX, RATE_36M_IDX,
	RATE_48M_IDX, RATE_54M_IDX
};
65
/* Next antenna configuration to try, indexed by the current ANT_* value */
static const u8 ant_toggle_lookup[] = {
	/*ANT_NONE -> */ ANT_NONE,
	/*ANT_A -> */ ANT_B,
	/*ANT_B -> */ ANT_C,
	/*ANT_AB -> */ ANT_BC,
	/*ANT_C -> */ ANT_A,
	/*ANT_AC -> */ ANT_AB,
	/*ANT_BC -> */ ANT_AC,
	/*ANT_ABC -> */ ANT_ABC,
};
76
/*
 * IL_DECLARE_RATE_INFO - build one il_rates[] entry from token-pasted
 * rate names: legacy/SISO/MIMO2 PLCP values, the IEEE (500 kbps units)
 * value, and prev/next rate idxes for both plain and toggle stepping.
 */
#define IL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
	[RATE_##r##M_IDX] = { RATE_##r##M_PLCP, \
			      RATE_SISO_##s##M_PLCP, \
			      RATE_MIMO2_##s##M_PLCP,\
			      RATE_##r##M_IEEE, \
			      RATE_##ip##M_IDX, \
			      RATE_##in##M_IDX, \
			      RATE_##rp##M_IDX, \
			      RATE_##rn##M_IDX, \
			      RATE_##pp##M_IDX, \
			      RATE_##np##M_IDX }

/*
 * Parameter order:
 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
 *
 * If there isn't a valid next or previous rate then INV is used which
 * maps to RATE_INVALID
 *
 */
const struct il_rate_info il_rates[RATE_COUNT] = {
	IL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2),	/* 1mbps */
	IL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5),		/* 2mbps */
	IL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11),	/*5.5mbps */
	IL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18),	/* 11mbps */
	IL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11),		/* 6mbps */
	IL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11),	/* 9mbps */
	IL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18),	/* 12mbps */
	IL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24),	/* 18mbps */
	IL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36),	/* 24mbps */
	IL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48),	/* 36mbps */
	IL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54),	/* 48mbps */
	IL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
	IL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
};
112
113static int
114il4965_hwrate_to_plcp_idx(u32 rate_n_flags)
115{
116 int idx = 0;
117
118 /* HT rate format */
119 if (rate_n_flags & RATE_MCS_HT_MSK) {
120 idx = (rate_n_flags & 0xff);
121
122 if (idx >= RATE_MIMO2_6M_PLCP)
123 idx = idx - RATE_MIMO2_6M_PLCP;
124
125 idx += IL_FIRST_OFDM_RATE;
126 /* skip 9M not supported in ht */
127 if (idx >= RATE_9M_IDX)
128 idx += 1;
129 if (idx >= IL_FIRST_OFDM_RATE && idx <= IL_LAST_OFDM_RATE)
130 return idx;
131
132 /* legacy rate format, search for match in table */
133 } else {
134 for (idx = 0; idx < ARRAY_SIZE(il_rates); idx++)
135 if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
136 return idx;
137 }
138
139 return -1;
140}
141
/* Forward declarations for the rate-scale core, defined later in this file */
static void il4965_rs_rate_scale_perform(struct il_priv *il,
					 struct sk_buff *skb,
					 struct ieee80211_sta *sta,
					 struct il_lq_sta *lq_sta);
static void il4965_rs_fill_link_cmd(struct il_priv *il,
				    struct il_lq_sta *lq_sta, u32 rate_n_flags);
static void il4965_rs_stay_in_table(struct il_lq_sta *lq_sta,
				    bool force_search);

#ifdef CONFIG_MAC80211_DEBUGFS
static void il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta,
				    u32 *rate_n_flags, int idx);
#else
/* No-op stub when debugfs support is compiled out */
static void
il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx)
{
}
#endif
160
161/**
162 * The following tables contain the expected throughput metrics for all rates
163 *
164 * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
165 *
166 * where invalid entries are zeros.
167 *
168 * CCK rates are only valid in legacy table and will only be used in G
169 * (2.4 GHz) band.
170 */
171
/* Expected throughput for legacy (CCK + OFDM) rates; see comment above */
static s32 expected_tpt_legacy[RATE_COUNT] = {
	7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
};

/* SISO 20 MHz: rows are Norm / SGI / AGG / AGG+SGI */
static s32 expected_tpt_siso20MHz[4][RATE_COUNT] = {
	{0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202},	/* Norm */
	{0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210},	/* SGI */
	{0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381},	/* AGG */
	{0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413},	/* AGG+SGI */
};

/* SISO 40 MHz: rows are Norm / SGI / AGG / AGG+SGI */
static s32 expected_tpt_siso40MHz[4][RATE_COUNT] = {
	{0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257},	/* Norm */
	{0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264},	/* SGI */
	{0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640},	/* AGG */
	{0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683},	/* AGG+SGI */
};

/* MIMO2 20 MHz: rows are Norm / SGI / AGG / AGG+SGI */
static s32 expected_tpt_mimo2_20MHz[4][RATE_COUNT] = {
	{0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250},	/* Norm */
	{0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256},	/* SGI */
	{0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619},	/* AGG */
	{0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660},	/* AGG+SGI */
};

/* MIMO2 40 MHz: rows are Norm / SGI / AGG / AGG+SGI */
static s32 expected_tpt_mimo2_40MHz[4][RATE_COUNT] = {
	{0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289},	/* Norm */
	{0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293},	/* SGI */
	{0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922},	/* AGG */
	{0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966},	/* AGG+SGI */
};

/* mbps, mcs -- human-readable rate descriptions (used for debug output) */
static const struct il_rate_mcs_info il_rate_mcs[RATE_COUNT] = {
	{"1", "BPSK DSSS"},
	{"2", "QPSK DSSS"},
	{"5.5", "BPSK CCK"},
	{"11", "QPSK CCK"},
	{"6", "BPSK 1/2"},
	{"9", "BPSK 1/2"},
	{"12", "QPSK 1/2"},
	{"18", "QPSK 3/4"},
	{"24", "16QAM 1/2"},
	{"36", "16QAM 3/4"},
	{"48", "64QAM 2/3"},
	{"54", "64QAM 3/4"},
	{"60", "64QAM 5/6"},
};

#define MCS_IDX_PER_STREAM (8)
222
/* Low byte of rate_n_flags holds the PLCP rate / MCS value */
static inline u8
il4965_rs_extract_rate(u32 rate_n_flags)
{
	return (u8) (rate_n_flags & 0xFF);
}
228
/*
 * il4965_rs_rate_scale_clear_win - reset one success-history sliding win
 *
 * Clears the bitmask and counters and marks ratio/throughput as
 * IL_INVALID_VALUE so they are recomputed from fresh data.
 */
static void
il4965_rs_rate_scale_clear_win(struct il_rate_scale_data *win)
{
	win->data = 0;
	win->success_counter = 0;
	win->success_ratio = IL_INVALID_VALUE;
	win->counter = 0;
	win->average_tpt = IL_INVALID_VALUE;
	win->stamp = 0;
}
239
240static inline u8
241il4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
242{
243 return (ant_type & valid_antenna) == ant_type;
244}
245
/*
 * removes the old data from the stats. All data that is older than
 * TID_MAX_TIME_DIFF, will be deleted.
 */
static void
il4965_rs_tl_rm_old_stats(struct il_traffic_load *tl, u32 curr_time)
{
	/* The oldest age we want to keep */
	u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;

	/* Pop cells from the head of the ring until the remaining
	 * history is within the allowed age. */
	while (tl->queue_count && tl->time_stamp < oldest_time) {
		tl->total -= tl->packet_count[tl->head];
		tl->packet_count[tl->head] = 0;
		tl->time_stamp += TID_QUEUE_CELL_SPACING;
		tl->queue_count--;
		tl->head++;
		if (tl->head >= TID_QUEUE_MAX_SIZE)
			tl->head = 0;	/* wrap the ring */
	}
}
266
/*
 * increment traffic load value for tid and also remove
 * any old values if passed the certain time period
 *
 * Returns the frame's TID, or MAX_TID_COUNT for non-QoS-data frames,
 * out-of-range TIDs, and the very first packet of a TID.
 */
static u8
il4965_rs_tl_add_packet(struct il_lq_sta *lq_data, struct ieee80211_hdr *hdr)
{
	u32 curr_time = jiffies_to_msecs(jiffies);
	u32 time_diff;
	s32 idx;
	struct il_traffic_load *tl = NULL;
	u8 tid;

	/* Only QoS data frames carry a TID */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & 0xf;
	} else
		return MAX_TID_COUNT;

	if (unlikely(tid >= TID_MAX_LOAD_COUNT))
		return MAX_TID_COUNT;

	tl = &lq_data->load[tid];

	/* Quantize time to TID_ROUND_VALUE granularity */
	curr_time -= curr_time % TID_ROUND_VALUE;

	/* Happens only for the first packet. Initialize the data */
	if (!(tl->queue_count)) {
		tl->total = 1;
		tl->time_stamp = curr_time;
		tl->queue_count = 1;
		tl->head = 0;
		tl->packet_count[0] = 1;
		return MAX_TID_COUNT;
	}

	time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
	idx = time_diff / TID_QUEUE_CELL_SPACING;

	/* The history is too long: remove data that is older than */
	/* TID_MAX_TIME_DIFF */
	/* NOTE(review): idx is not recomputed after the trim below;
	 * this matches the original behavior -- confirm intended. */
	if (idx >= TID_QUEUE_MAX_SIZE)
		il4965_rs_tl_rm_old_stats(tl, curr_time);

	idx = (tl->head + idx) % TID_QUEUE_MAX_SIZE;
	tl->packet_count[idx] = tl->packet_count[idx] + 1;
	tl->total = tl->total + 1;

	if ((idx + 1) > tl->queue_count)
		tl->queue_count = idx + 1;

	return tid;
}
320
/*
 * get the traffic load value for tid
 *
 * Trims stale history first, then returns the total packet count kept
 * for @tid (0 for out-of-range TIDs or when no history exists).
 */
static u32
il4965_rs_tl_get_load(struct il_lq_sta *lq_data, u8 tid)
{
	u32 curr_time = jiffies_to_msecs(jiffies);
	u32 time_diff;
	s32 idx;
	struct il_traffic_load *tl = NULL;

	if (tid >= TID_MAX_LOAD_COUNT)
		return 0;

	tl = &(lq_data->load[tid]);

	/* Quantize time to TID_ROUND_VALUE granularity */
	curr_time -= curr_time % TID_ROUND_VALUE;

	if (!(tl->queue_count))
		return 0;

	time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
	idx = time_diff / TID_QUEUE_CELL_SPACING;

	/* The history is too long: remove data that is older than */
	/* TID_MAX_TIME_DIFF */
	if (idx >= TID_QUEUE_MAX_SIZE)
		il4965_rs_tl_rm_old_stats(tl, curr_time);

	return tl->total;
}
352
/*
 * il4965_rs_tl_turn_on_agg_for_tid - start a Tx BA session if load is high
 *
 * Returns the ieee80211_start_tx_ba_session() result when the traffic
 * load exceeds IL_AGG_LOAD_THRESHOLD, otherwise -EAGAIN.
 */
static int
il4965_rs_tl_turn_on_agg_for_tid(struct il_priv *il, struct il_lq_sta *lq_data,
				 u8 tid, struct ieee80211_sta *sta)
{
	int ret = -EAGAIN;
	u32 load;

	load = il4965_rs_tl_get_load(lq_data, tid);

	if (load > IL_AGG_LOAD_THRESHOLD) {
		D_HT("Starting Tx agg: STA: %pM tid: %d\n", sta->addr, tid);
		ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
		if (ret == -EAGAIN) {
			/*
			 * driver and mac80211 is out of sync
			 * this might be cause by reloading firmware
			 * stop the tx ba session here
			 */
			IL_ERR("Fail start Tx agg on tid: %d\n", tid);
			ieee80211_stop_tx_ba_session(sta, tid);
		}
	} else
		D_HT("Aggregation not enabled for tid %d because load = %u\n",
		     tid, load);

	return ret;
}
380
381static void
382il4965_rs_tl_turn_on_agg(struct il_priv *il, u8 tid, struct il_lq_sta *lq_data,
383 struct ieee80211_sta *sta)
384{
385 if (tid < TID_MAX_LOAD_COUNT)
386 il4965_rs_tl_turn_on_agg_for_tid(il, lq_data, tid, sta);
387 else
388 IL_ERR("tid exceeds max load count: %d/%d\n", tid,
389 TID_MAX_LOAD_COUNT);
390}
391
392static inline int
393il4965_get_il4965_num_of_ant_from_rate(u32 rate_n_flags)
394{
395 return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
396 !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
397 !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
398}
399
400/*
401 * Static function to get the expected throughput from an il_scale_tbl_info
402 * that wraps a NULL pointer check
403 */
404static s32
405il4965_get_expected_tpt(struct il_scale_tbl_info *tbl, int rs_idx)
406{
407 if (tbl->expected_tpt)
408 return tbl->expected_tpt[rs_idx];
409 return 0;
410}
411
/**
 * il4965_rs_collect_tx_data - Update the success/failure sliding win
 *
 * We keep a sliding win of the last 62 packets transmitted
 * at this rate. win->data contains the bitmask of successful
 * packets.
 *
 * Returns 0, or -EINVAL for an out-of-range scale_idx.
 */
static int
il4965_rs_collect_tx_data(struct il_scale_tbl_info *tbl, int scale_idx,
			  int attempts, int successes)
{
	struct il_rate_scale_data *win = NULL;
	/* Mask selecting the oldest slot in the 62-bit history */
	static const u64 mask = (((u64) 1) << (RATE_MAX_WINDOW - 1));
	s32 fail_count, tpt;

	if (scale_idx < 0 || scale_idx >= RATE_COUNT)
		return -EINVAL;

	/* Select win for current tx bit rate */
	win = &(tbl->win[scale_idx]);

	/* Get expected throughput */
	tpt = il4965_get_expected_tpt(tbl, scale_idx);

	/*
	 * Keep track of only the latest 62 tx frame attempts in this rate's
	 * history win; anything older isn't really relevant any more.
	 * If we have filled up the sliding win, drop the oldest attempt;
	 * if the oldest attempt (highest bit in bitmap) shows "success",
	 * subtract "1" from the success counter (this is the main reason
	 * we keep these bitmaps!).
	 */
	while (attempts > 0) {
		if (win->counter >= RATE_MAX_WINDOW) {

			/* remove earliest */
			win->counter = RATE_MAX_WINDOW - 1;

			if (win->data & mask) {
				win->data &= ~mask;
				win->success_counter--;
			}
		}

		/* Increment frames-attempted counter */
		win->counter++;

		/* Shift bitmap by one frame to throw away oldest history */
		win->data <<= 1;

		/* Mark the most recent #successes attempts as successful */
		if (successes > 0) {
			win->success_counter++;
			win->data |= 0x1;
			successes--;
		}

		attempts--;
	}

	/* Calculate current success ratio, avoid divide-by-0! */
	/* ratio is scaled by 128 to keep precision in integer math */
	if (win->counter > 0)
		win->success_ratio =
		    128 * (100 * win->success_counter) / win->counter;
	else
		win->success_ratio = IL_INVALID_VALUE;

	fail_count = win->counter - win->success_counter;

	/* Calculate average throughput, if we have enough history. */
	if (fail_count >= RATE_MIN_FAILURE_TH ||
	    win->success_counter >= RATE_MIN_SUCCESS_TH)
		win->average_tpt = (win->success_ratio * tpt + 64) / 128;
	else
		win->average_tpt = IL_INVALID_VALUE;

	/* Tag this win as having been updated */
	win->stamp = jiffies;

	return 0;
}
493
494/*
495 * Fill uCode API rate_n_flags field, based on "search" or "active" table.
496 */
497static u32
498il4965_rate_n_flags_from_tbl(struct il_priv *il, struct il_scale_tbl_info *tbl,
499 int idx, u8 use_green)
500{
501 u32 rate_n_flags = 0;
502
503 if (is_legacy(tbl->lq_type)) {
504 rate_n_flags = il_rates[idx].plcp;
505 if (idx >= IL_FIRST_CCK_RATE && idx <= IL_LAST_CCK_RATE)
506 rate_n_flags |= RATE_MCS_CCK_MSK;
507
508 } else if (is_Ht(tbl->lq_type)) {
509 if (idx > IL_LAST_OFDM_RATE) {
510 IL_ERR("Invalid HT rate idx %d\n", idx);
511 idx = IL_LAST_OFDM_RATE;
512 }
513 rate_n_flags = RATE_MCS_HT_MSK;
514
515 if (is_siso(tbl->lq_type))
516 rate_n_flags |= il_rates[idx].plcp_siso;
517 else
518 rate_n_flags |= il_rates[idx].plcp_mimo2;
519 } else {
520 IL_ERR("Invalid tbl->lq_type %d\n", tbl->lq_type);
521 }
522
523 rate_n_flags |=
524 ((tbl->ant_type << RATE_MCS_ANT_POS) & RATE_MCS_ANT_ABC_MSK);
525
526 if (is_Ht(tbl->lq_type)) {
527 if (tbl->is_ht40) {
528 if (tbl->is_dup)
529 rate_n_flags |= RATE_MCS_DUP_MSK;
530 else
531 rate_n_flags |= RATE_MCS_HT40_MSK;
532 }
533 if (tbl->is_SGI)
534 rate_n_flags |= RATE_MCS_SGI_MSK;
535
536 if (use_green) {
537 rate_n_flags |= RATE_MCS_GF_MSK;
538 if (is_siso(tbl->lq_type) && tbl->is_SGI) {
539 rate_n_flags &= ~RATE_MCS_SGI_MSK;
540 IL_ERR("GF was set with SGI:SISO\n");
541 }
542 }
543 }
544 return rate_n_flags;
545}
546
547/*
548 * Interpret uCode API's rate_n_flags format,
549 * fill "search" or "active" tx mode table.
550 */
551static int
552il4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
553 enum ieee80211_band band,
554 struct il_scale_tbl_info *tbl, int *rate_idx)
555{
556 u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
557 u8 il4965_num_of_ant =
558 il4965_get_il4965_num_of_ant_from_rate(rate_n_flags);
559 u8 mcs;
560
561 memset(tbl, 0, sizeof(struct il_scale_tbl_info));
562 *rate_idx = il4965_hwrate_to_plcp_idx(rate_n_flags);
563
564 if (*rate_idx == RATE_INVALID) {
565 *rate_idx = -1;
566 return -EINVAL;
567 }
568 tbl->is_SGI = 0; /* default legacy setup */
569 tbl->is_ht40 = 0;
570 tbl->is_dup = 0;
571 tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
572 tbl->lq_type = LQ_NONE;
573 tbl->max_search = IL_MAX_SEARCH;
574
575 /* legacy rate format */
576 if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
577 if (il4965_num_of_ant == 1) {
578 if (band == IEEE80211_BAND_5GHZ)
579 tbl->lq_type = LQ_A;
580 else
581 tbl->lq_type = LQ_G;
582 }
583 /* HT rate format */
584 } else {
585 if (rate_n_flags & RATE_MCS_SGI_MSK)
586 tbl->is_SGI = 1;
587
588 if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
589 (rate_n_flags & RATE_MCS_DUP_MSK))
590 tbl->is_ht40 = 1;
591
592 if (rate_n_flags & RATE_MCS_DUP_MSK)
593 tbl->is_dup = 1;
594
595 mcs = il4965_rs_extract_rate(rate_n_flags);
596
597 /* SISO */
598 if (mcs <= RATE_SISO_60M_PLCP) {
599 if (il4965_num_of_ant == 1)
600 tbl->lq_type = LQ_SISO; /*else NONE */
601 /* MIMO2 */
602 } else {
603 if (il4965_num_of_ant == 2)
604 tbl->lq_type = LQ_MIMO2;
605 }
606 }
607 return 0;
608}
609
610/* switch to another antenna/antennas and return 1 */
611/* if no other valid antenna found, return 0 */
612static int
613il4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
614 struct il_scale_tbl_info *tbl)
615{
616 u8 new_ant_type;
617
618 if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
619 return 0;
620
621 if (!il4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
622 return 0;
623
624 new_ant_type = ant_toggle_lookup[tbl->ant_type];
625
626 while (new_ant_type != tbl->ant_type &&
627 !il4965_rs_is_valid_ant(valid_ant, new_ant_type))
628 new_ant_type = ant_toggle_lookup[new_ant_type];
629
630 if (new_ant_type == tbl->ant_type)
631 return 0;
632
633 tbl->ant_type = new_ant_type;
634 *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
635 *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
636 return 1;
637}
638
639/**
640 * Green-field mode is valid if the station supports it and
641 * there are no non-GF stations present in the BSS.
642 */
643static bool
644il4965_rs_use_green(struct ieee80211_sta *sta)
645{
646 struct il_station_priv *sta_priv = (void *)sta->drv_priv;
647 struct il_rxon_context *ctx = sta_priv->common.ctx;
648
649 return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
650 !(ctx->ht.non_gf_sta_present);
651}
652
653/**
654 * il4965_rs_get_supported_rates - get the available rates
655 *
656 * if management frame or broadcast frame only return
657 * basic available rates.
658 *
659 */
660static u16
661il4965_rs_get_supported_rates(struct il_lq_sta *lq_sta,
662 struct ieee80211_hdr *hdr,
663 enum il_table_type rate_type)
664{
665 if (is_legacy(rate_type)) {
666 return lq_sta->active_legacy_rate;
667 } else {
668 if (is_siso(rate_type))
669 return lq_sta->active_siso_rate;
670 else
671 return lq_sta->active_mimo2_rate;
672 }
673}
674
675static u16
676il4965_rs_get_adjacent_rate(struct il_priv *il, u8 idx, u16 rate_mask,
677 int rate_type)
678{
679 u8 high = RATE_INVALID;
680 u8 low = RATE_INVALID;
681
682 /* 802.11A or ht walks to the next literal adjacent rate in
683 * the rate table */
684 if (is_a_band(rate_type) || !is_legacy(rate_type)) {
685 int i;
686 u32 mask;
687
688 /* Find the previous rate that is in the rate mask */
689 i = idx - 1;
690 for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
691 if (rate_mask & mask) {
692 low = i;
693 break;
694 }
695 }
696
697 /* Find the next rate that is in the rate mask */
698 i = idx + 1;
699 for (mask = (1 << i); i < RATE_COUNT; i++, mask <<= 1) {
700 if (rate_mask & mask) {
701 high = i;
702 break;
703 }
704 }
705
706 return (high << 8) | low;
707 }
708
709 low = idx;
710 while (low != RATE_INVALID) {
711 low = il_rates[low].prev_rs;
712 if (low == RATE_INVALID)
713 break;
714 if (rate_mask & (1 << low))
715 break;
716 D_RATE("Skipping masked lower rate: %d\n", low);
717 }
718
719 high = idx;
720 while (high != RATE_INVALID) {
721 high = il_rates[high].next_rs;
722 if (high == RATE_INVALID)
723 break;
724 if (rate_mask & (1 << high))
725 break;
726 D_RATE("Skipping masked higher rate: %d\n", high);
727 }
728
729 return (high << 8) | low;
730}
731
/*
 * il4965_rs_get_lower_rate - pick the next lower rate for a failing link
 *
 * May first force a switch from HT back to legacy (when HT is no longer
 * possible or we're already at the lowest HT rate), rewriting *tbl in
 * place, then returns the rate_n_flags word for the chosen lower rate.
 */
static u32
il4965_rs_get_lower_rate(struct il_lq_sta *lq_sta,
			 struct il_scale_tbl_info *tbl, u8 scale_idx,
			 u8 ht_possible)
{
	s32 low;
	u16 rate_mask;
	u16 high_low;
	u8 switch_to_legacy = 0;
	u8 is_green = lq_sta->is_green;
	struct il_priv *il = lq_sta->drv;

	/* check if we need to switch from HT to legacy rates.
	 * assumption is that mandatory rates (1Mbps or 6Mbps)
	 * are always supported (spec demand) */
	if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_idx)) {
		switch_to_legacy = 1;
		/* map HT rate idx onto the legacy rate table */
		scale_idx = rs_ht_to_legacy[scale_idx];
		if (lq_sta->band == IEEE80211_BAND_5GHZ)
			tbl->lq_type = LQ_A;
		else
			tbl->lq_type = LQ_G;

		/* legacy tx uses a single antenna */
		if (il4965_num_of_ant(tbl->ant_type) > 1)
			tbl->ant_type =
			    il4965_first_antenna(il->hw_params.valid_tx_ant);

		tbl->is_ht40 = 0;
		tbl->is_SGI = 0;
		tbl->max_search = IL_MAX_SEARCH;
	}

	rate_mask = il4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);

	/* Mask with station rate restriction */
	if (is_legacy(tbl->lq_type)) {
		/* supp_rates has no CCK bits in A mode */
		if (lq_sta->band == IEEE80211_BAND_5GHZ)
			rate_mask =
			    (u16) (rate_mask &
				   (lq_sta->supp_rates << IL_FIRST_OFDM_RATE));
		else
			rate_mask = (u16) (rate_mask & lq_sta->supp_rates);
	}

	/* If we switched from HT to legacy, check current rate */
	if (switch_to_legacy && (rate_mask & (1 << scale_idx))) {
		low = scale_idx;
		goto out;
	}

	high_low =
	    il4965_rs_get_adjacent_rate(lq_sta->drv, scale_idx, rate_mask,
					tbl->lq_type);
	/* low byte of the pair is the lower adjacent rate */
	low = high_low & 0xff;

	/* no lower rate available: stay at the current one */
	if (low == RATE_INVALID)
		low = scale_idx;

out:
	return il4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
}
794
795/*
796 * Simple function to compare two rate scale table types
797 */
798static bool
799il4965_table_type_matches(struct il_scale_tbl_info *a,
800 struct il_scale_tbl_info *b)
801{
802 return (a->lq_type == b->lq_type && a->ant_type == b->ant_type &&
803 a->is_SGI == b->is_SGI);
804}
805
806/*
807 * mac80211 sends us Tx status
808 */
809static void
810il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband,
811 struct ieee80211_sta *sta, void *il_sta,
812 struct sk_buff *skb)
813{
814 int legacy_success;
815 int retries;
816 int rs_idx, mac_idx, i;
817 struct il_lq_sta *lq_sta = il_sta;
818 struct il_link_quality_cmd *table;
819 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
820 struct il_priv *il = (struct il_priv *)il_r;
821 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
822 enum mac80211_rate_control_flags mac_flags;
823 u32 tx_rate;
824 struct il_scale_tbl_info tbl_type;
825 struct il_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
826 struct il_station_priv *sta_priv = (void *)sta->drv_priv;
827 struct il_rxon_context *ctx = sta_priv->common.ctx;
828
829 D_RATE("get frame ack response, update rate scale win\n");
830
831 /* Treat uninitialized rate scaling data same as non-existing. */
832 if (!lq_sta) {
833 D_RATE("Station rate scaling not created yet.\n");
834 return;
835 } else if (!lq_sta->drv) {
836 D_RATE("Rate scaling not initialized yet.\n");
837 return;
838 }
839
840 if (!ieee80211_is_data(hdr->frame_control) ||
841 (info->flags & IEEE80211_TX_CTL_NO_ACK))
842 return;
843
844 /* This packet was aggregated but doesn't carry status info */
845 if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
846 !(info->flags & IEEE80211_TX_STAT_AMPDU))
847 return;
848
849 /*
850 * Ignore this Tx frame response if its initial rate doesn't match
851 * that of latest Link Quality command. There may be stragglers
852 * from a previous Link Quality command, but we're no longer interested
853 * in those; they're either from the "active" mode while we're trying
854 * to check "search" mode, or a prior "search" mode after we've moved
855 * to a new "search" mode (which might become the new "active" mode).
856 */
857 table = &lq_sta->lq;
858 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
859 il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type, &rs_idx);
860 if (il->band == IEEE80211_BAND_5GHZ)
861 rs_idx -= IL_FIRST_OFDM_RATE;
862 mac_flags = info->status.rates[0].flags;
863 mac_idx = info->status.rates[0].idx;
864 /* For HT packets, map MCS to PLCP */
865 if (mac_flags & IEEE80211_TX_RC_MCS) {
866 mac_idx &= RATE_MCS_CODE_MSK; /* Remove # of streams */
867 if (mac_idx >= (RATE_9M_IDX - IL_FIRST_OFDM_RATE))
868 mac_idx++;
869 /*
870 * mac80211 HT idx is always zero-idxed; we need to move
871 * HT OFDM rates after CCK rates in 2.4 GHz band
872 */
873 if (il->band == IEEE80211_BAND_2GHZ)
874 mac_idx += IL_FIRST_OFDM_RATE;
875 }
876 /* Here we actually compare this rate to the latest LQ command */
877 if (mac_idx < 0 ||
878 tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI) ||
879 tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ||
880 tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA) ||
881 tbl_type.ant_type != info->antenna_sel_tx ||
882 !!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS)
883 || !!(tx_rate & RATE_MCS_GF_MSK) !=
884 !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD) || rs_idx != mac_idx) {
885 D_RATE("initial rate %d does not match %d (0x%x)\n", mac_idx,
886 rs_idx, tx_rate);
887 /*
888 * Since rates mis-match, the last LQ command may have failed.
889 * After IL_MISSED_RATE_MAX mis-matches, resync the uCode with
890 * ... driver.
891 */
892 lq_sta->missed_rate_counter++;
893 if (lq_sta->missed_rate_counter > IL_MISSED_RATE_MAX) {
894 lq_sta->missed_rate_counter = 0;
895 il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
896 }
897 /* Regardless, ignore this status info for outdated rate */
898 return;
899 } else
900 /* Rate did match, so reset the missed_rate_counter */
901 lq_sta->missed_rate_counter = 0;
902
903 /* Figure out if rate scale algorithm is in active or search table */
904 if (il4965_table_type_matches
905 (&tbl_type, &(lq_sta->lq_info[lq_sta->active_tbl]))) {
906 curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
907 other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
908 } else
909 if (il4965_table_type_matches
910 (&tbl_type, &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
911 curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
912 other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
913 } else {
914 D_RATE("Neither active nor search matches tx rate\n");
915 tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
916 D_RATE("active- lq:%x, ant:%x, SGI:%d\n", tmp_tbl->lq_type,
917 tmp_tbl->ant_type, tmp_tbl->is_SGI);
918 tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
919 D_RATE("search- lq:%x, ant:%x, SGI:%d\n", tmp_tbl->lq_type,
920 tmp_tbl->ant_type, tmp_tbl->is_SGI);
921 D_RATE("actual- lq:%x, ant:%x, SGI:%d\n", tbl_type.lq_type,
922 tbl_type.ant_type, tbl_type.is_SGI);
923 /*
924 * no matching table found, let's by-pass the data collection
925 * and continue to perform rate scale to find the rate table
926 */
927 il4965_rs_stay_in_table(lq_sta, true);
928 goto done;
929 }
930
931 /*
932 * Updating the frame history depends on whether packets were
933 * aggregated.
934 *
935 * For aggregation, all packets were transmitted at the same rate, the
936 * first idx into rate scale table.
937 */
938 if (info->flags & IEEE80211_TX_STAT_AMPDU) {
939 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
940 il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type,
941 &rs_idx);
942 il4965_rs_collect_tx_data(curr_tbl, rs_idx,
943 info->status.ampdu_len,
944 info->status.ampdu_ack_len);
945
946 /* Update success/fail counts if not searching for new mode */
947 if (lq_sta->stay_in_tbl) {
948 lq_sta->total_success += info->status.ampdu_ack_len;
949 lq_sta->total_failed +=
950 (info->status.ampdu_len -
951 info->status.ampdu_ack_len);
952 }
953 } else {
954 /*
955 * For legacy, update frame history with for each Tx retry.
956 */
957 retries = info->status.rates[0].count - 1;
958 /* HW doesn't send more than 15 retries */
959 retries = min(retries, 15);
960
961 /* The last transmission may have been successful */
962 legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
963 /* Collect data for each rate used during failed TX attempts */
964 for (i = 0; i <= retries; ++i) {
965 tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
966 il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band,
967 &tbl_type, &rs_idx);
968 /*
969 * Only collect stats if retried rate is in the same RS
970 * table as active/search.
971 */
972 if (il4965_table_type_matches(&tbl_type, curr_tbl))
973 tmp_tbl = curr_tbl;
974 else if (il4965_table_type_matches
975 (&tbl_type, other_tbl))
976 tmp_tbl = other_tbl;
977 else
978 continue;
979 il4965_rs_collect_tx_data(tmp_tbl, rs_idx, 1,
980 i <
981 retries ? 0 : legacy_success);
982 }
983
984 /* Update success/fail counts if not searching for new mode */
985 if (lq_sta->stay_in_tbl) {
986 lq_sta->total_success += legacy_success;
987 lq_sta->total_failed += retries + (1 - legacy_success);
988 }
989 }
990 /* The last TX rate is cached in lq_sta; it's set in if/else above */
991 lq_sta->last_rate_n_flags = tx_rate;
992done:
993 /* See if there's a better rate or modulation mode to try. */
994 if (sta->supp_rates[sband->band])
995 il4965_rs_rate_scale_perform(il, skb, sta, lq_sta);
996}
997
998/*
999 * Begin a period of staying with a selected modulation mode.
1000 * Set "stay_in_tbl" flag to prevent any mode switches.
1001 * Set frame tx success limits according to legacy vs. high-throughput,
1002 * and reset overall (spanning all rates) tx success history stats.
1003 * These control how long we stay using same modulation mode before
1004 * searching for a new mode.
1005 */
1006static void
1007il4965_rs_set_stay_in_table(struct il_priv *il, u8 is_legacy,
1008 struct il_lq_sta *lq_sta)
1009{
1010 D_RATE("we are staying in the same table\n");
1011 lq_sta->stay_in_tbl = 1; /* only place this gets set */
1012 if (is_legacy) {
1013 lq_sta->table_count_limit = IL_LEGACY_TBL_COUNT;
1014 lq_sta->max_failure_limit = IL_LEGACY_FAILURE_LIMIT;
1015 lq_sta->max_success_limit = IL_LEGACY_SUCCESS_LIMIT;
1016 } else {
1017 lq_sta->table_count_limit = IL_NONE_LEGACY_TBL_COUNT;
1018 lq_sta->max_failure_limit = IL_NONE_LEGACY_FAILURE_LIMIT;
1019 lq_sta->max_success_limit = IL_NONE_LEGACY_SUCCESS_LIMIT;
1020 }
1021 lq_sta->table_count = 0;
1022 lq_sta->total_failed = 0;
1023 lq_sta->total_success = 0;
1024 lq_sta->flush_timer = jiffies;
1025 lq_sta->action_counter = 0;
1026}
1027
1028/*
1029 * Find correct throughput table for given mode of modulation
1030 */
1031static void
1032il4965_rs_set_expected_tpt_table(struct il_lq_sta *lq_sta,
1033 struct il_scale_tbl_info *tbl)
1034{
1035 /* Used to choose among HT tables */
1036 s32(*ht_tbl_pointer)[RATE_COUNT];
1037
1038 /* Check for invalid LQ type */
1039 if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
1040 tbl->expected_tpt = expected_tpt_legacy;
1041 return;
1042 }
1043
1044 /* Legacy rates have only one table */
1045 if (is_legacy(tbl->lq_type)) {
1046 tbl->expected_tpt = expected_tpt_legacy;
1047 return;
1048 }
1049
1050 /* Choose among many HT tables depending on number of streams
1051 * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
1052 * status */
1053 if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1054 ht_tbl_pointer = expected_tpt_siso20MHz;
1055 else if (is_siso(tbl->lq_type))
1056 ht_tbl_pointer = expected_tpt_siso40MHz;
1057 else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1058 ht_tbl_pointer = expected_tpt_mimo2_20MHz;
1059 else /* if (is_mimo2(tbl->lq_type)) <-- must be true */
1060 ht_tbl_pointer = expected_tpt_mimo2_40MHz;
1061
1062 if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
1063 tbl->expected_tpt = ht_tbl_pointer[0];
1064 else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
1065 tbl->expected_tpt = ht_tbl_pointer[1];
1066 else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
1067 tbl->expected_tpt = ht_tbl_pointer[2];
1068 else /* AGG+SGI */
1069 tbl->expected_tpt = ht_tbl_pointer[3];
1070}
1071
1072/*
1073 * Find starting rate for new "search" high-throughput mode of modulation.
1074 * Goal is to find lowest expected rate (under perfect conditions) that is
1075 * above the current measured throughput of "active" mode, to give new mode
1076 * a fair chance to prove itself without too many challenges.
1077 *
1078 * This gets called when transitioning to more aggressive modulation
1079 * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
1080 * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
1081 * to decrease to match "active" throughput. When moving from MIMO to SISO,
1082 * bit rate will typically need to increase, but not if performance was bad.
1083 */
1084static s32
1085il4965_rs_get_best_rate(struct il_priv *il, struct il_lq_sta *lq_sta,
1086 struct il_scale_tbl_info *tbl, /* "search" */
1087 u16 rate_mask, s8 idx)
1088{
1089 /* "active" values */
1090 struct il_scale_tbl_info *active_tbl =
1091 &(lq_sta->lq_info[lq_sta->active_tbl]);
1092 s32 active_sr = active_tbl->win[idx].success_ratio;
1093 s32 active_tpt = active_tbl->expected_tpt[idx];
1094
1095 /* expected "search" throughput */
1096 s32 *tpt_tbl = tbl->expected_tpt;
1097
1098 s32 new_rate, high, low, start_hi;
1099 u16 high_low;
1100 s8 rate = idx;
1101
1102 new_rate = high = low = start_hi = RATE_INVALID;
1103
1104 for (;;) {
1105 high_low =
1106 il4965_rs_get_adjacent_rate(il, rate, rate_mask,
1107 tbl->lq_type);
1108
1109 low = high_low & 0xff;
1110 high = (high_low >> 8) & 0xff;
1111
1112 /*
1113 * Lower the "search" bit rate, to give new "search" mode
1114 * approximately the same throughput as "active" if:
1115 *
1116 * 1) "Active" mode has been working modestly well (but not
1117 * great), and expected "search" throughput (under perfect
1118 * conditions) at candidate rate is above the actual
1119 * measured "active" throughput (but less than expected
1120 * "active" throughput under perfect conditions).
1121 * OR
1122 * 2) "Active" mode has been working perfectly or very well
1123 * and expected "search" throughput (under perfect
1124 * conditions) at candidate rate is above expected
1125 * "active" throughput (under perfect conditions).
1126 */
1127 if ((100 * tpt_tbl[rate] > lq_sta->last_tpt &&
1128 (active_sr > RATE_DECREASE_TH && active_sr <= RATE_HIGH_TH
1129 && tpt_tbl[rate] <= active_tpt)) ||
1130 (active_sr >= RATE_SCALE_SWITCH &&
1131 tpt_tbl[rate] > active_tpt)) {
1132
1133 /* (2nd or later pass)
1134 * If we've already tried to raise the rate, and are
1135 * now trying to lower it, use the higher rate. */
1136 if (start_hi != RATE_INVALID) {
1137 new_rate = start_hi;
1138 break;
1139 }
1140
1141 new_rate = rate;
1142
1143 /* Loop again with lower rate */
1144 if (low != RATE_INVALID)
1145 rate = low;
1146
1147 /* Lower rate not available, use the original */
1148 else
1149 break;
1150
1151 /* Else try to raise the "search" rate to match "active" */
1152 } else {
1153 /* (2nd or later pass)
1154 * If we've already tried to lower the rate, and are
1155 * now trying to raise it, use the lower rate. */
1156 if (new_rate != RATE_INVALID)
1157 break;
1158
1159 /* Loop again with higher rate */
1160 else if (high != RATE_INVALID) {
1161 start_hi = high;
1162 rate = high;
1163
1164 /* Higher rate not available, use the original */
1165 } else {
1166 new_rate = rate;
1167 break;
1168 }
1169 }
1170 }
1171
1172 return new_rate;
1173}
1174
1175/*
1176 * Set up search table for MIMO2
1177 */
1178static int
1179il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta,
1180 struct ieee80211_conf *conf,
1181 struct ieee80211_sta *sta,
1182 struct il_scale_tbl_info *tbl, int idx)
1183{
1184 u16 rate_mask;
1185 s32 rate;
1186 s8 is_green = lq_sta->is_green;
1187 struct il_station_priv *sta_priv = (void *)sta->drv_priv;
1188 struct il_rxon_context *ctx = sta_priv->common.ctx;
1189
1190 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1191 return -1;
1192
1193 if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2) ==
1194 WLAN_HT_CAP_SM_PS_STATIC)
1195 return -1;
1196
1197 /* Need both Tx chains/antennas to support MIMO */
1198 if (il->hw_params.tx_chains_num < 2)
1199 return -1;
1200
1201 D_RATE("LQ: try to switch to MIMO2\n");
1202
1203 tbl->lq_type = LQ_MIMO2;
1204 tbl->is_dup = lq_sta->is_dup;
1205 tbl->action = 0;
1206 tbl->max_search = IL_MAX_SEARCH;
1207 rate_mask = lq_sta->active_mimo2_rate;
1208
1209 if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
1210 tbl->is_ht40 = 1;
1211 else
1212 tbl->is_ht40 = 0;
1213
1214 il4965_rs_set_expected_tpt_table(lq_sta, tbl);
1215
1216 rate = il4965_rs_get_best_rate(il, lq_sta, tbl, rate_mask, idx);
1217
1218 D_RATE("LQ: MIMO2 best rate %d mask %X\n", rate, rate_mask);
1219 if (rate == RATE_INVALID || !((1 << rate) & rate_mask)) {
1220 D_RATE("Can't switch with idx %d rate mask %x\n", rate,
1221 rate_mask);
1222 return -1;
1223 }
1224 tbl->current_rate =
1225 il4965_rate_n_flags_from_tbl(il, tbl, rate, is_green);
1226
1227 D_RATE("LQ: Switch to new mcs %X idx is green %X\n", tbl->current_rate,
1228 is_green);
1229 return 0;
1230}
1231
1232/*
1233 * Set up search table for SISO
1234 */
1235static int
1236il4965_rs_switch_to_siso(struct il_priv *il, struct il_lq_sta *lq_sta,
1237 struct ieee80211_conf *conf, struct ieee80211_sta *sta,
1238 struct il_scale_tbl_info *tbl, int idx)
1239{
1240 u16 rate_mask;
1241 u8 is_green = lq_sta->is_green;
1242 s32 rate;
1243 struct il_station_priv *sta_priv = (void *)sta->drv_priv;
1244 struct il_rxon_context *ctx = sta_priv->common.ctx;
1245
1246 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1247 return -1;
1248
1249 D_RATE("LQ: try to switch to SISO\n");
1250
1251 tbl->is_dup = lq_sta->is_dup;
1252 tbl->lq_type = LQ_SISO;
1253 tbl->action = 0;
1254 tbl->max_search = IL_MAX_SEARCH;
1255 rate_mask = lq_sta->active_siso_rate;
1256
1257 if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
1258 tbl->is_ht40 = 1;
1259 else
1260 tbl->is_ht40 = 0;
1261
1262 if (is_green)
1263 tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield */
1264
1265 il4965_rs_set_expected_tpt_table(lq_sta, tbl);
1266 rate = il4965_rs_get_best_rate(il, lq_sta, tbl, rate_mask, idx);
1267
1268 D_RATE("LQ: get best rate %d mask %X\n", rate, rate_mask);
1269 if (rate == RATE_INVALID || !((1 << rate) & rate_mask)) {
1270 D_RATE("can not switch with idx %d rate mask %x\n", rate,
1271 rate_mask);
1272 return -1;
1273 }
1274 tbl->current_rate =
1275 il4965_rate_n_flags_from_tbl(il, tbl, rate, is_green);
1276 D_RATE("LQ: Switch to new mcs %X idx is green %X\n", tbl->current_rate,
1277 is_green);
1278 return 0;
1279}
1280
1281/*
1282 * Try to switch to new modulation mode from legacy
1283 */
1284static int
1285il4965_rs_move_legacy_other(struct il_priv *il, struct il_lq_sta *lq_sta,
1286 struct ieee80211_conf *conf,
1287 struct ieee80211_sta *sta, int idx)
1288{
1289 struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1290 struct il_scale_tbl_info *search_tbl =
1291 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1292 struct il_rate_scale_data *win = &(tbl->win[idx]);
1293 u32 sz =
1294 (sizeof(struct il_scale_tbl_info) -
1295 (sizeof(struct il_rate_scale_data) * RATE_COUNT));
1296 u8 start_action;
1297 u8 valid_tx_ant = il->hw_params.valid_tx_ant;
1298 u8 tx_chains_num = il->hw_params.tx_chains_num;
1299 int ret = 0;
1300 u8 update_search_tbl_counter = 0;
1301
1302 tbl->action = IL_LEGACY_SWITCH_SISO;
1303
1304 start_action = tbl->action;
1305 for (;;) {
1306 lq_sta->action_counter++;
1307 switch (tbl->action) {
1308 case IL_LEGACY_SWITCH_ANTENNA1:
1309 case IL_LEGACY_SWITCH_ANTENNA2:
1310 D_RATE("LQ: Legacy toggle Antenna\n");
1311
1312 if ((tbl->action == IL_LEGACY_SWITCH_ANTENNA1 &&
1313 tx_chains_num <= 1) ||
1314 (tbl->action == IL_LEGACY_SWITCH_ANTENNA2 &&
1315 tx_chains_num <= 2))
1316 break;
1317
1318 /* Don't change antenna if success has been great */
1319 if (win->success_ratio >= IL_RS_GOOD_RATIO)
1320 break;
1321
1322 /* Set up search table to try other antenna */
1323 memcpy(search_tbl, tbl, sz);
1324
1325 if (il4965_rs_toggle_antenna
1326 (valid_tx_ant, &search_tbl->current_rate,
1327 search_tbl)) {
1328 update_search_tbl_counter = 1;
1329 il4965_rs_set_expected_tpt_table(lq_sta,
1330 search_tbl);
1331 goto out;
1332 }
1333 break;
1334 case IL_LEGACY_SWITCH_SISO:
1335 D_RATE("LQ: Legacy switch to SISO\n");
1336
1337 /* Set up search table to try SISO */
1338 memcpy(search_tbl, tbl, sz);
1339 search_tbl->is_SGI = 0;
1340 ret =
1341 il4965_rs_switch_to_siso(il, lq_sta, conf, sta,
1342 search_tbl, idx);
1343 if (!ret) {
1344 lq_sta->action_counter = 0;
1345 goto out;
1346 }
1347
1348 break;
1349 case IL_LEGACY_SWITCH_MIMO2_AB:
1350 case IL_LEGACY_SWITCH_MIMO2_AC:
1351 case IL_LEGACY_SWITCH_MIMO2_BC:
1352 D_RATE("LQ: Legacy switch to MIMO2\n");
1353
1354 /* Set up search table to try MIMO */
1355 memcpy(search_tbl, tbl, sz);
1356 search_tbl->is_SGI = 0;
1357
1358 if (tbl->action == IL_LEGACY_SWITCH_MIMO2_AB)
1359 search_tbl->ant_type = ANT_AB;
1360 else if (tbl->action == IL_LEGACY_SWITCH_MIMO2_AC)
1361 search_tbl->ant_type = ANT_AC;
1362 else
1363 search_tbl->ant_type = ANT_BC;
1364
1365 if (!il4965_rs_is_valid_ant
1366 (valid_tx_ant, search_tbl->ant_type))
1367 break;
1368
1369 ret =
1370 il4965_rs_switch_to_mimo2(il, lq_sta, conf, sta,
1371 search_tbl, idx);
1372 if (!ret) {
1373 lq_sta->action_counter = 0;
1374 goto out;
1375 }
1376 break;
1377 }
1378 tbl->action++;
1379 if (tbl->action > IL_LEGACY_SWITCH_MIMO2_BC)
1380 tbl->action = IL_LEGACY_SWITCH_ANTENNA1;
1381
1382 if (tbl->action == start_action)
1383 break;
1384
1385 }
1386 search_tbl->lq_type = LQ_NONE;
1387 return 0;
1388
1389out:
1390 lq_sta->search_better_tbl = 1;
1391 tbl->action++;
1392 if (tbl->action > IL_LEGACY_SWITCH_MIMO2_BC)
1393 tbl->action = IL_LEGACY_SWITCH_ANTENNA1;
1394 if (update_search_tbl_counter)
1395 search_tbl->action = tbl->action;
1396 return 0;
1397
1398}
1399
1400/*
1401 * Try to switch to new modulation mode from SISO
1402 */
1403static int
1404il4965_rs_move_siso_to_other(struct il_priv *il, struct il_lq_sta *lq_sta,
1405 struct ieee80211_conf *conf,
1406 struct ieee80211_sta *sta, int idx)
1407{
1408 u8 is_green = lq_sta->is_green;
1409 struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1410 struct il_scale_tbl_info *search_tbl =
1411 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1412 struct il_rate_scale_data *win = &(tbl->win[idx]);
1413 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1414 u32 sz =
1415 (sizeof(struct il_scale_tbl_info) -
1416 (sizeof(struct il_rate_scale_data) * RATE_COUNT));
1417 u8 start_action;
1418 u8 valid_tx_ant = il->hw_params.valid_tx_ant;
1419 u8 tx_chains_num = il->hw_params.tx_chains_num;
1420 u8 update_search_tbl_counter = 0;
1421 int ret;
1422
1423 start_action = tbl->action;
1424
1425 for (;;) {
1426 lq_sta->action_counter++;
1427 switch (tbl->action) {
1428 case IL_SISO_SWITCH_ANTENNA1:
1429 case IL_SISO_SWITCH_ANTENNA2:
1430 D_RATE("LQ: SISO toggle Antenna\n");
1431 if ((tbl->action == IL_SISO_SWITCH_ANTENNA1 &&
1432 tx_chains_num <= 1) ||
1433 (tbl->action == IL_SISO_SWITCH_ANTENNA2 &&
1434 tx_chains_num <= 2))
1435 break;
1436
1437 if (win->success_ratio >= IL_RS_GOOD_RATIO)
1438 break;
1439
1440 memcpy(search_tbl, tbl, sz);
1441 if (il4965_rs_toggle_antenna
1442 (valid_tx_ant, &search_tbl->current_rate,
1443 search_tbl)) {
1444 update_search_tbl_counter = 1;
1445 goto out;
1446 }
1447 break;
1448 case IL_SISO_SWITCH_MIMO2_AB:
1449 case IL_SISO_SWITCH_MIMO2_AC:
1450 case IL_SISO_SWITCH_MIMO2_BC:
1451 D_RATE("LQ: SISO switch to MIMO2\n");
1452 memcpy(search_tbl, tbl, sz);
1453 search_tbl->is_SGI = 0;
1454
1455 if (tbl->action == IL_SISO_SWITCH_MIMO2_AB)
1456 search_tbl->ant_type = ANT_AB;
1457 else if (tbl->action == IL_SISO_SWITCH_MIMO2_AC)
1458 search_tbl->ant_type = ANT_AC;
1459 else
1460 search_tbl->ant_type = ANT_BC;
1461
1462 if (!il4965_rs_is_valid_ant
1463 (valid_tx_ant, search_tbl->ant_type))
1464 break;
1465
1466 ret =
1467 il4965_rs_switch_to_mimo2(il, lq_sta, conf, sta,
1468 search_tbl, idx);
1469 if (!ret)
1470 goto out;
1471 break;
1472 case IL_SISO_SWITCH_GI:
1473 if (!tbl->is_ht40 &&
1474 !(ht_cap->cap & IEEE80211_HT_CAP_SGI_20))
1475 break;
1476 if (tbl->is_ht40 &&
1477 !(ht_cap->cap & IEEE80211_HT_CAP_SGI_40))
1478 break;
1479
1480 D_RATE("LQ: SISO toggle SGI/NGI\n");
1481
1482 memcpy(search_tbl, tbl, sz);
1483 if (is_green) {
1484 if (!tbl->is_SGI)
1485 break;
1486 else
1487 IL_ERR("SGI was set in GF+SISO\n");
1488 }
1489 search_tbl->is_SGI = !tbl->is_SGI;
1490 il4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
1491 if (tbl->is_SGI) {
1492 s32 tpt = lq_sta->last_tpt / 100;
1493 if (tpt >= search_tbl->expected_tpt[idx])
1494 break;
1495 }
1496 search_tbl->current_rate =
1497 il4965_rate_n_flags_from_tbl(il, search_tbl, idx,
1498 is_green);
1499 update_search_tbl_counter = 1;
1500 goto out;
1501 }
1502 tbl->action++;
1503 if (tbl->action > IL_SISO_SWITCH_GI)
1504 tbl->action = IL_SISO_SWITCH_ANTENNA1;
1505
1506 if (tbl->action == start_action)
1507 break;
1508 }
1509 search_tbl->lq_type = LQ_NONE;
1510 return 0;
1511
1512out:
1513 lq_sta->search_better_tbl = 1;
1514 tbl->action++;
1515 if (tbl->action > IL_SISO_SWITCH_GI)
1516 tbl->action = IL_SISO_SWITCH_ANTENNA1;
1517 if (update_search_tbl_counter)
1518 search_tbl->action = tbl->action;
1519
1520 return 0;
1521}
1522
1523/*
1524 * Try to switch to new modulation mode from MIMO2
1525 */
1526static int
1527il4965_rs_move_mimo2_to_other(struct il_priv *il, struct il_lq_sta *lq_sta,
1528 struct ieee80211_conf *conf,
1529 struct ieee80211_sta *sta, int idx)
1530{
1531 s8 is_green = lq_sta->is_green;
1532 struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1533 struct il_scale_tbl_info *search_tbl =
1534 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1535 struct il_rate_scale_data *win = &(tbl->win[idx]);
1536 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1537 u32 sz =
1538 (sizeof(struct il_scale_tbl_info) -
1539 (sizeof(struct il_rate_scale_data) * RATE_COUNT));
1540 u8 start_action;
1541 u8 valid_tx_ant = il->hw_params.valid_tx_ant;
1542 u8 tx_chains_num = il->hw_params.tx_chains_num;
1543 u8 update_search_tbl_counter = 0;
1544 int ret;
1545
1546 start_action = tbl->action;
1547 for (;;) {
1548 lq_sta->action_counter++;
1549 switch (tbl->action) {
1550 case IL_MIMO2_SWITCH_ANTENNA1:
1551 case IL_MIMO2_SWITCH_ANTENNA2:
1552 D_RATE("LQ: MIMO2 toggle Antennas\n");
1553
1554 if (tx_chains_num <= 2)
1555 break;
1556
1557 if (win->success_ratio >= IL_RS_GOOD_RATIO)
1558 break;
1559
1560 memcpy(search_tbl, tbl, sz);
1561 if (il4965_rs_toggle_antenna
1562 (valid_tx_ant, &search_tbl->current_rate,
1563 search_tbl)) {
1564 update_search_tbl_counter = 1;
1565 goto out;
1566 }
1567 break;
1568 case IL_MIMO2_SWITCH_SISO_A:
1569 case IL_MIMO2_SWITCH_SISO_B:
1570 case IL_MIMO2_SWITCH_SISO_C:
1571 D_RATE("LQ: MIMO2 switch to SISO\n");
1572
1573 /* Set up new search table for SISO */
1574 memcpy(search_tbl, tbl, sz);
1575
1576 if (tbl->action == IL_MIMO2_SWITCH_SISO_A)
1577 search_tbl->ant_type = ANT_A;
1578 else if (tbl->action == IL_MIMO2_SWITCH_SISO_B)
1579 search_tbl->ant_type = ANT_B;
1580 else
1581 search_tbl->ant_type = ANT_C;
1582
1583 if (!il4965_rs_is_valid_ant
1584 (valid_tx_ant, search_tbl->ant_type))
1585 break;
1586
1587 ret =
1588 il4965_rs_switch_to_siso(il, lq_sta, conf, sta,
1589 search_tbl, idx);
1590 if (!ret)
1591 goto out;
1592
1593 break;
1594
1595 case IL_MIMO2_SWITCH_GI:
1596 if (!tbl->is_ht40 &&
1597 !(ht_cap->cap & IEEE80211_HT_CAP_SGI_20))
1598 break;
1599 if (tbl->is_ht40 &&
1600 !(ht_cap->cap & IEEE80211_HT_CAP_SGI_40))
1601 break;
1602
1603 D_RATE("LQ: MIMO2 toggle SGI/NGI\n");
1604
1605 /* Set up new search table for MIMO2 */
1606 memcpy(search_tbl, tbl, sz);
1607 search_tbl->is_SGI = !tbl->is_SGI;
1608 il4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
1609 /*
1610 * If active table already uses the fastest possible
1611 * modulation (dual stream with short guard interval),
1612 * and it's working well, there's no need to look
1613 * for a better type of modulation!
1614 */
1615 if (tbl->is_SGI) {
1616 s32 tpt = lq_sta->last_tpt / 100;
1617 if (tpt >= search_tbl->expected_tpt[idx])
1618 break;
1619 }
1620 search_tbl->current_rate =
1621 il4965_rate_n_flags_from_tbl(il, search_tbl, idx,
1622 is_green);
1623 update_search_tbl_counter = 1;
1624 goto out;
1625
1626 }
1627 tbl->action++;
1628 if (tbl->action > IL_MIMO2_SWITCH_GI)
1629 tbl->action = IL_MIMO2_SWITCH_ANTENNA1;
1630
1631 if (tbl->action == start_action)
1632 break;
1633 }
1634 search_tbl->lq_type = LQ_NONE;
1635 return 0;
1636out:
1637 lq_sta->search_better_tbl = 1;
1638 tbl->action++;
1639 if (tbl->action > IL_MIMO2_SWITCH_GI)
1640 tbl->action = IL_MIMO2_SWITCH_ANTENNA1;
1641 if (update_search_tbl_counter)
1642 search_tbl->action = tbl->action;
1643
1644 return 0;
1645
1646}
1647
1648/*
1649 * Check whether we should continue using same modulation mode, or
1650 * begin search for a new mode, based on:
1651 * 1) # tx successes or failures while using this mode
1652 * 2) # times calling this function
1653 * 3) elapsed time in this mode (not used, for now)
1654 */
1655static void
1656il4965_rs_stay_in_table(struct il_lq_sta *lq_sta, bool force_search)
1657{
1658 struct il_scale_tbl_info *tbl;
1659 int i;
1660 int active_tbl;
1661 int flush_interval_passed = 0;
1662 struct il_priv *il;
1663
1664 il = lq_sta->drv;
1665 active_tbl = lq_sta->active_tbl;
1666
1667 tbl = &(lq_sta->lq_info[active_tbl]);
1668
1669 /* If we've been disallowing search, see if we should now allow it */
1670 if (lq_sta->stay_in_tbl) {
1671
1672 /* Elapsed time using current modulation mode */
1673 if (lq_sta->flush_timer)
1674 flush_interval_passed =
1675 time_after(jiffies,
1676 (unsigned long)(lq_sta->flush_timer +
1677 RATE_SCALE_FLUSH_INTVL));
1678
1679 /*
1680 * Check if we should allow search for new modulation mode.
1681 * If many frames have failed or succeeded, or we've used
1682 * this same modulation for a long time, allow search, and
1683 * reset history stats that keep track of whether we should
1684 * allow a new search. Also (below) reset all bitmaps and
1685 * stats in active history.
1686 */
1687 if (force_search ||
1688 lq_sta->total_failed > lq_sta->max_failure_limit ||
1689 lq_sta->total_success > lq_sta->max_success_limit ||
1690 (!lq_sta->search_better_tbl && lq_sta->flush_timer &&
1691 flush_interval_passed)) {
1692 D_RATE("LQ: stay is expired %d %d %d\n:",
1693 lq_sta->total_failed, lq_sta->total_success,
1694 flush_interval_passed);
1695
1696 /* Allow search for new mode */
1697 lq_sta->stay_in_tbl = 0; /* only place reset */
1698 lq_sta->total_failed = 0;
1699 lq_sta->total_success = 0;
1700 lq_sta->flush_timer = 0;
1701
1702 /*
1703 * Else if we've used this modulation mode enough repetitions
1704 * (regardless of elapsed time or success/failure), reset
1705 * history bitmaps and rate-specific stats for all rates in
1706 * active table.
1707 */
1708 } else {
1709 lq_sta->table_count++;
1710 if (lq_sta->table_count >= lq_sta->table_count_limit) {
1711 lq_sta->table_count = 0;
1712
1713 D_RATE("LQ: stay in table clear win\n");
1714 for (i = 0; i < RATE_COUNT; i++)
1715 il4965_rs_rate_scale_clear_win(&
1716 (tbl->
1717 win
1718 [i]));
1719 }
1720 }
1721
1722 /* If transitioning to allow "search", reset all history
1723 * bitmaps and stats in active table (this will become the new
1724 * "search" table). */
1725 if (!lq_sta->stay_in_tbl) {
1726 for (i = 0; i < RATE_COUNT; i++)
1727 il4965_rs_rate_scale_clear_win(&(tbl->win[i]));
1728 }
1729 }
1730}
1731
1732/*
1733 * setup rate table in uCode
1734 */
1735static void
1736il4965_rs_update_rate_tbl(struct il_priv *il, struct il_rxon_context *ctx,
1737 struct il_lq_sta *lq_sta,
1738 struct il_scale_tbl_info *tbl, int idx, u8 is_green)
1739{
1740 u32 rate;
1741
1742 /* Update uCode's rate table. */
1743 rate = il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green);
1744 il4965_rs_fill_link_cmd(il, lq_sta, rate);
1745 il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
1746}
1747
1748/*
1749 * Do rate scaling and search for new modulation mode.
1750 */
1751static void
1752il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb,
1753 struct ieee80211_sta *sta,
1754 struct il_lq_sta *lq_sta)
1755{
1756 struct ieee80211_hw *hw = il->hw;
1757 struct ieee80211_conf *conf = &hw->conf;
1758 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1759 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1760 int low = RATE_INVALID;
1761 int high = RATE_INVALID;
1762 int idx;
1763 int i;
1764 struct il_rate_scale_data *win = NULL;
1765 int current_tpt = IL_INVALID_VALUE;
1766 int low_tpt = IL_INVALID_VALUE;
1767 int high_tpt = IL_INVALID_VALUE;
1768 u32 fail_count;
1769 s8 scale_action = 0;
1770 u16 rate_mask;
1771 u8 update_lq = 0;
1772 struct il_scale_tbl_info *tbl, *tbl1;
1773 u16 rate_scale_idx_msk = 0;
1774 u8 is_green = 0;
1775 u8 active_tbl = 0;
1776 u8 done_search = 0;
1777 u16 high_low;
1778 s32 sr;
1779 u8 tid = MAX_TID_COUNT;
1780 struct il_tid_data *tid_data;
1781 struct il_station_priv *sta_priv = (void *)sta->drv_priv;
1782 struct il_rxon_context *ctx = sta_priv->common.ctx;
1783
1784 D_RATE("rate scale calculate new rate for skb\n");
1785
1786 /* Send management frames and NO_ACK data using lowest rate. */
1787 /* TODO: this could probably be improved.. */
1788 if (!ieee80211_is_data(hdr->frame_control) ||
1789 (info->flags & IEEE80211_TX_CTL_NO_ACK))
1790 return;
1791
1792 lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
1793
1794 tid = il4965_rs_tl_add_packet(lq_sta, hdr);
1795 if (tid != MAX_TID_COUNT && (lq_sta->tx_agg_tid_en & (1 << tid))) {
1796 tid_data = &il->stations[lq_sta->lq.sta_id].tid[tid];
1797 if (tid_data->agg.state == IL_AGG_OFF)
1798 lq_sta->is_agg = 0;
1799 else
1800 lq_sta->is_agg = 1;
1801 } else
1802 lq_sta->is_agg = 0;
1803
1804 /*
1805 * Select rate-scale / modulation-mode table to work with in
1806 * the rest of this function: "search" if searching for better
1807 * modulation mode, or "active" if doing rate scaling within a mode.
1808 */
1809 if (!lq_sta->search_better_tbl)
1810 active_tbl = lq_sta->active_tbl;
1811 else
1812 active_tbl = 1 - lq_sta->active_tbl;
1813
1814 tbl = &(lq_sta->lq_info[active_tbl]);
1815 if (is_legacy(tbl->lq_type))
1816 lq_sta->is_green = 0;
1817 else
1818 lq_sta->is_green = il4965_rs_use_green(sta);
1819 is_green = lq_sta->is_green;
1820
1821 /* current tx rate */
1822 idx = lq_sta->last_txrate_idx;
1823
1824 D_RATE("Rate scale idx %d for type %d\n", idx, tbl->lq_type);
1825
1826 /* rates available for this association, and for modulation mode */
1827 rate_mask = il4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
1828
1829 D_RATE("mask 0x%04X\n", rate_mask);
1830
1831 /* mask with station rate restriction */
1832 if (is_legacy(tbl->lq_type)) {
1833 if (lq_sta->band == IEEE80211_BAND_5GHZ)
1834 /* supp_rates has no CCK bits in A mode */
1835 rate_scale_idx_msk =
1836 (u16) (rate_mask &
1837 (lq_sta->supp_rates << IL_FIRST_OFDM_RATE));
1838 else
1839 rate_scale_idx_msk =
1840 (u16) (rate_mask & lq_sta->supp_rates);
1841
1842 } else
1843 rate_scale_idx_msk = rate_mask;
1844
1845 if (!rate_scale_idx_msk)
1846 rate_scale_idx_msk = rate_mask;
1847
1848 if (!((1 << idx) & rate_scale_idx_msk)) {
1849 IL_ERR("Current Rate is not valid\n");
1850 if (lq_sta->search_better_tbl) {
1851 /* revert to active table if search table is not valid */
1852 tbl->lq_type = LQ_NONE;
1853 lq_sta->search_better_tbl = 0;
1854 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1855 /* get "active" rate info */
1856 idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
1857 il4965_rs_update_rate_tbl(il, ctx, lq_sta, tbl, idx,
1858 is_green);
1859 }
1860 return;
1861 }
1862
1863 /* Get expected throughput table and history win for current rate */
1864 if (!tbl->expected_tpt) {
1865 IL_ERR("tbl->expected_tpt is NULL\n");
1866 return;
1867 }
1868
1869 /* force user max rate if set by user */
1870 if (lq_sta->max_rate_idx != -1 && lq_sta->max_rate_idx < idx) {
1871 idx = lq_sta->max_rate_idx;
1872 update_lq = 1;
1873 win = &(tbl->win[idx]);
1874 goto lq_update;
1875 }
1876
1877 win = &(tbl->win[idx]);
1878
1879 /*
1880 * If there is not enough history to calculate actual average
1881 * throughput, keep analyzing results of more tx frames, without
1882 * changing rate or mode (bypass most of the rest of this function).
1883 * Set up new rate table in uCode only if old rate is not supported
1884 * in current association (use new rate found above).
1885 */
1886 fail_count = win->counter - win->success_counter;
1887 if (fail_count < RATE_MIN_FAILURE_TH &&
1888 win->success_counter < RATE_MIN_SUCCESS_TH) {
1889 D_RATE("LQ: still below TH. succ=%d total=%d " "for idx %d\n",
1890 win->success_counter, win->counter, idx);
1891
1892 /* Can't calculate this yet; not enough history */
1893 win->average_tpt = IL_INVALID_VALUE;
1894
1895 /* Should we stay with this modulation mode,
1896 * or search for a new one? */
1897 il4965_rs_stay_in_table(lq_sta, false);
1898
1899 goto out;
1900 }
1901 /* Else we have enough samples; calculate estimate of
1902 * actual average throughput */
1903 if (win->average_tpt !=
1904 ((win->success_ratio * tbl->expected_tpt[idx] + 64) / 128)) {
1905 IL_ERR("expected_tpt should have been calculated by now\n");
1906 win->average_tpt =
1907 ((win->success_ratio * tbl->expected_tpt[idx] + 64) / 128);
1908 }
1909
1910 /* If we are searching for better modulation mode, check success. */
1911 if (lq_sta->search_better_tbl) {
1912 /* If good success, continue using the "search" mode;
1913 * no need to send new link quality command, since we're
1914 * continuing to use the setup that we've been trying. */
1915 if (win->average_tpt > lq_sta->last_tpt) {
1916
1917 D_RATE("LQ: SWITCHING TO NEW TBL "
1918 "suc=%d cur-tpt=%d old-tpt=%d\n",
1919 win->success_ratio, win->average_tpt,
1920 lq_sta->last_tpt);
1921
1922 if (!is_legacy(tbl->lq_type))
1923 lq_sta->enable_counter = 1;
1924
1925 /* Swap tables; "search" becomes "active" */
1926 lq_sta->active_tbl = active_tbl;
1927 current_tpt = win->average_tpt;
1928
1929 /* Else poor success; go back to mode in "active" table */
1930 } else {
1931
1932 D_RATE("LQ: GOING BACK TO THE OLD TBL "
1933 "suc=%d cur-tpt=%d old-tpt=%d\n",
1934 win->success_ratio, win->average_tpt,
1935 lq_sta->last_tpt);
1936
1937 /* Nullify "search" table */
1938 tbl->lq_type = LQ_NONE;
1939
1940 /* Revert to "active" table */
1941 active_tbl = lq_sta->active_tbl;
1942 tbl = &(lq_sta->lq_info[active_tbl]);
1943
1944 /* Revert to "active" rate and throughput info */
1945 idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
1946 current_tpt = lq_sta->last_tpt;
1947
1948 /* Need to set up a new rate table in uCode */
1949 update_lq = 1;
1950 }
1951
1952 /* Either way, we've made a decision; modulation mode
1953 * search is done, allow rate adjustment next time. */
1954 lq_sta->search_better_tbl = 0;
1955 done_search = 1; /* Don't switch modes below! */
1956 goto lq_update;
1957 }
1958
1959 /* (Else) not in search of better modulation mode, try for better
1960 * starting rate, while staying in this mode. */
1961 high_low =
1962 il4965_rs_get_adjacent_rate(il, idx, rate_scale_idx_msk,
1963 tbl->lq_type);
1964 low = high_low & 0xff;
1965 high = (high_low >> 8) & 0xff;
1966
1967 /* If user set max rate, dont allow higher than user constrain */
1968 if (lq_sta->max_rate_idx != -1 && lq_sta->max_rate_idx < high)
1969 high = RATE_INVALID;
1970
1971 sr = win->success_ratio;
1972
1973 /* Collect measured throughputs for current and adjacent rates */
1974 current_tpt = win->average_tpt;
1975 if (low != RATE_INVALID)
1976 low_tpt = tbl->win[low].average_tpt;
1977 if (high != RATE_INVALID)
1978 high_tpt = tbl->win[high].average_tpt;
1979
1980 scale_action = 0;
1981
1982 /* Too many failures, decrease rate */
1983 if (sr <= RATE_DECREASE_TH || current_tpt == 0) {
1984 D_RATE("decrease rate because of low success_ratio\n");
1985 scale_action = -1;
1986
1987 /* No throughput measured yet for adjacent rates; try increase. */
1988 } else if (low_tpt == IL_INVALID_VALUE && high_tpt == IL_INVALID_VALUE) {
1989
1990 if (high != RATE_INVALID && sr >= RATE_INCREASE_TH)
1991 scale_action = 1;
1992 else if (low != RATE_INVALID)
1993 scale_action = 0;
1994 }
1995
1996 /* Both adjacent throughputs are measured, but neither one has better
1997 * throughput; we're using the best rate, don't change it! */
1998 else if (low_tpt != IL_INVALID_VALUE && high_tpt != IL_INVALID_VALUE &&
1999 low_tpt < current_tpt && high_tpt < current_tpt)
2000 scale_action = 0;
2001
2002 /* At least one adjacent rate's throughput is measured,
2003 * and may have better performance. */
2004 else {
2005 /* Higher adjacent rate's throughput is measured */
2006 if (high_tpt != IL_INVALID_VALUE) {
2007 /* Higher rate has better throughput */
2008 if (high_tpt > current_tpt && sr >= RATE_INCREASE_TH)
2009 scale_action = 1;
2010 else
2011 scale_action = 0;
2012
2013 /* Lower adjacent rate's throughput is measured */
2014 } else if (low_tpt != IL_INVALID_VALUE) {
2015 /* Lower rate has better throughput */
2016 if (low_tpt > current_tpt) {
2017 D_RATE("decrease rate because of low tpt\n");
2018 scale_action = -1;
2019 } else if (sr >= RATE_INCREASE_TH) {
2020 scale_action = 1;
2021 }
2022 }
2023 }
2024
2025 /* Sanity check; asked for decrease, but success rate or throughput
2026 * has been good at old rate. Don't change it. */
2027 if (scale_action == -1 && low != RATE_INVALID &&
2028 (sr > RATE_HIGH_TH || current_tpt > 100 * tbl->expected_tpt[low]))
2029 scale_action = 0;
2030
2031 switch (scale_action) {
2032 case -1:
2033 /* Decrease starting rate, update uCode's rate table */
2034 if (low != RATE_INVALID) {
2035 update_lq = 1;
2036 idx = low;
2037 }
2038
2039 break;
2040 case 1:
2041 /* Increase starting rate, update uCode's rate table */
2042 if (high != RATE_INVALID) {
2043 update_lq = 1;
2044 idx = high;
2045 }
2046
2047 break;
2048 case 0:
2049 /* No change */
2050 default:
2051 break;
2052 }
2053
2054 D_RATE("choose rate scale idx %d action %d low %d " "high %d type %d\n",
2055 idx, scale_action, low, high, tbl->lq_type);
2056
2057lq_update:
2058 /* Replace uCode's rate table for the destination station. */
2059 if (update_lq)
2060 il4965_rs_update_rate_tbl(il, ctx, lq_sta, tbl, idx,
2061 is_green);
2062
2063 /* Should we stay with this modulation mode,
2064 * or search for a new one? */
2065 il4965_rs_stay_in_table(lq_sta, false);
2066
2067 /*
2068 * Search for new modulation mode if we're:
2069 * 1) Not changing rates right now
2070 * 2) Not just finishing up a search
2071 * 3) Allowing a new search
2072 */
2073 if (!update_lq && !done_search && !lq_sta->stay_in_tbl && win->counter) {
2074 /* Save current throughput to compare with "search" throughput */
2075 lq_sta->last_tpt = current_tpt;
2076
2077 /* Select a new "search" modulation mode to try.
2078 * If one is found, set up the new "search" table. */
2079 if (is_legacy(tbl->lq_type))
2080 il4965_rs_move_legacy_other(il, lq_sta, conf, sta, idx);
2081 else if (is_siso(tbl->lq_type))
2082 il4965_rs_move_siso_to_other(il, lq_sta, conf, sta,
2083 idx);
2084 else /* (is_mimo2(tbl->lq_type)) */
2085 il4965_rs_move_mimo2_to_other(il, lq_sta, conf, sta,
2086 idx);
2087
2088 /* If new "search" mode was selected, set up in uCode table */
2089 if (lq_sta->search_better_tbl) {
2090 /* Access the "search" table, clear its history. */
2091 tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
2092 for (i = 0; i < RATE_COUNT; i++)
2093 il4965_rs_rate_scale_clear_win(&(tbl->win[i]));
2094
2095 /* Use new "search" start rate */
2096 idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
2097
2098 D_RATE("Switch current mcs: %X idx: %d\n",
2099 tbl->current_rate, idx);
2100 il4965_rs_fill_link_cmd(il, lq_sta, tbl->current_rate);
2101 il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
2102 } else
2103 done_search = 1;
2104 }
2105
2106 if (done_search && !lq_sta->stay_in_tbl) {
2107 /* If the "active" (non-search) mode was legacy,
2108 * and we've tried switching antennas,
2109 * but we haven't been able to try HT modes (not available),
2110 * stay with best antenna legacy modulation for a while
2111 * before next round of mode comparisons. */
2112 tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
2113 if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
2114 lq_sta->action_counter > tbl1->max_search) {
2115 D_RATE("LQ: STAY in legacy table\n");
2116 il4965_rs_set_stay_in_table(il, 1, lq_sta);
2117 }
2118
2119 /* If we're in an HT mode, and all 3 mode switch actions
2120 * have been tried and compared, stay in this best modulation
2121 * mode for a while before next round of mode comparisons. */
2122 if (lq_sta->enable_counter &&
2123 lq_sta->action_counter >= tbl1->max_search) {
2124 if (lq_sta->last_tpt > IL_AGG_TPT_THREHOLD &&
2125 (lq_sta->tx_agg_tid_en & (1 << tid)) &&
2126 tid != MAX_TID_COUNT) {
2127 tid_data =
2128 &il->stations[lq_sta->lq.sta_id].tid[tid];
2129 if (tid_data->agg.state == IL_AGG_OFF) {
2130 D_RATE("try to aggregate tid %d\n",
2131 tid);
2132 il4965_rs_tl_turn_on_agg(il, tid,
2133 lq_sta, sta);
2134 }
2135 }
2136 il4965_rs_set_stay_in_table(il, 0, lq_sta);
2137 }
2138 }
2139
2140out:
2141 tbl->current_rate =
2142 il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green);
2143 i = idx;
2144 lq_sta->last_txrate_idx = i;
2145}
2146
2147/**
2148 * il4965_rs_initialize_lq - Initialize a station's hardware rate table
2149 *
2150 * The uCode's station table contains a table of fallback rates
2151 * for automatic fallback during transmission.
2152 *
2153 * NOTE: This sets up a default set of values. These will be replaced later
2154 * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
2155 * rc80211_simple.
2156 *
2157 * NOTE: Run C_ADD_STA command to set up station table entry, before
2158 * calling this function (which runs C_TX_LINK_QUALITY_CMD,
2159 * which requires station table entry to exist).
2160 */
2161static void
2162il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
2163 struct ieee80211_sta *sta, struct il_lq_sta *lq_sta)
2164{
2165 struct il_scale_tbl_info *tbl;
2166 int rate_idx;
2167 int i;
2168 u32 rate;
2169 u8 use_green = il4965_rs_use_green(sta);
2170 u8 active_tbl = 0;
2171 u8 valid_tx_ant;
2172 struct il_station_priv *sta_priv;
2173 struct il_rxon_context *ctx;
2174
2175 if (!sta || !lq_sta)
2176 return;
2177
2178 sta_priv = (void *)sta->drv_priv;
2179 ctx = sta_priv->common.ctx;
2180
2181 i = lq_sta->last_txrate_idx;
2182
2183 valid_tx_ant = il->hw_params.valid_tx_ant;
2184
2185 if (!lq_sta->search_better_tbl)
2186 active_tbl = lq_sta->active_tbl;
2187 else
2188 active_tbl = 1 - lq_sta->active_tbl;
2189
2190 tbl = &(lq_sta->lq_info[active_tbl]);
2191
2192 if (i < 0 || i >= RATE_COUNT)
2193 i = 0;
2194
2195 rate = il_rates[i].plcp;
2196 tbl->ant_type = il4965_first_antenna(valid_tx_ant);
2197 rate |= tbl->ant_type << RATE_MCS_ANT_POS;
2198
2199 if (i >= IL_FIRST_CCK_RATE && i <= IL_LAST_CCK_RATE)
2200 rate |= RATE_MCS_CCK_MSK;
2201
2202 il4965_rs_get_tbl_info_from_mcs(rate, il->band, tbl, &rate_idx);
2203 if (!il4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
2204 il4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);
2205
2206 rate = il4965_rate_n_flags_from_tbl(il, tbl, rate_idx, use_green);
2207 tbl->current_rate = rate;
2208 il4965_rs_set_expected_tpt_table(lq_sta, tbl);
2209 il4965_rs_fill_link_cmd(NULL, lq_sta, rate);
2210 il->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
2211 il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_SYNC, true);
2212}
2213
2214static void
2215il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
2216 struct ieee80211_tx_rate_control *txrc)
2217{
2218
2219 struct sk_buff *skb = txrc->skb;
2220 struct ieee80211_supported_band *sband = txrc->sband;
2221 struct il_priv *il __maybe_unused = (struct il_priv *)il_r;
2222 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2223 struct il_lq_sta *lq_sta = il_sta;
2224 int rate_idx;
2225
2226 D_RATE("rate scale calculate new rate for skb\n");
2227
2228 /* Get max rate if user set max rate */
2229 if (lq_sta) {
2230 lq_sta->max_rate_idx = txrc->max_rate_idx;
2231 if (sband->band == IEEE80211_BAND_5GHZ &&
2232 lq_sta->max_rate_idx != -1)
2233 lq_sta->max_rate_idx += IL_FIRST_OFDM_RATE;
2234 if (lq_sta->max_rate_idx < 0 ||
2235 lq_sta->max_rate_idx >= RATE_COUNT)
2236 lq_sta->max_rate_idx = -1;
2237 }
2238
2239 /* Treat uninitialized rate scaling data same as non-existing. */
2240 if (lq_sta && !lq_sta->drv) {
2241 D_RATE("Rate scaling not initialized yet.\n");
2242 il_sta = NULL;
2243 }
2244
2245 /* Send management frames and NO_ACK data using lowest rate. */
2246 if (rate_control_send_low(sta, il_sta, txrc))
2247 return;
2248
2249 if (!lq_sta)
2250 return;
2251
2252 rate_idx = lq_sta->last_txrate_idx;
2253
2254 if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
2255 rate_idx -= IL_FIRST_OFDM_RATE;
2256 /* 6M and 9M shared same MCS idx */
2257 rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
2258 if (il4965_rs_extract_rate(lq_sta->last_rate_n_flags) >=
2259 RATE_MIMO2_6M_PLCP)
2260 rate_idx = rate_idx + MCS_IDX_PER_STREAM;
2261 info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
2262 if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
2263 info->control.rates[0].flags |=
2264 IEEE80211_TX_RC_SHORT_GI;
2265 if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
2266 info->control.rates[0].flags |=
2267 IEEE80211_TX_RC_DUP_DATA;
2268 if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
2269 info->control.rates[0].flags |=
2270 IEEE80211_TX_RC_40_MHZ_WIDTH;
2271 if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
2272 info->control.rates[0].flags |=
2273 IEEE80211_TX_RC_GREEN_FIELD;
2274 } else {
2275 /* Check for invalid rates */
2276 if (rate_idx < 0 || rate_idx >= RATE_COUNT_LEGACY ||
2277 (sband->band == IEEE80211_BAND_5GHZ &&
2278 rate_idx < IL_FIRST_OFDM_RATE))
2279 rate_idx = rate_lowest_index(sband, sta);
2280 /* On valid 5 GHz rate, adjust idx */
2281 else if (sband->band == IEEE80211_BAND_5GHZ)
2282 rate_idx -= IL_FIRST_OFDM_RATE;
2283 info->control.rates[0].flags = 0;
2284 }
2285 info->control.rates[0].idx = rate_idx;
2286
2287}
2288
2289static void *
2290il4965_rs_alloc_sta(void *il_rate, struct ieee80211_sta *sta, gfp_t gfp)
2291{
2292 struct il_station_priv *sta_priv =
2293 (struct il_station_priv *)sta->drv_priv;
2294 struct il_priv *il;
2295
2296 il = (struct il_priv *)il_rate;
2297 D_RATE("create station rate scale win\n");
2298
2299 return &sta_priv->lq_sta;
2300}
2301
2302/*
2303 * Called after adding a new station to initialize rate scaling
2304 */
2305void
2306il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
2307{
2308 int i, j;
2309 struct ieee80211_hw *hw = il->hw;
2310 struct ieee80211_conf *conf = &il->hw->conf;
2311 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2312 struct il_station_priv *sta_priv;
2313 struct il_lq_sta *lq_sta;
2314 struct ieee80211_supported_band *sband;
2315
2316 sta_priv = (struct il_station_priv *)sta->drv_priv;
2317 lq_sta = &sta_priv->lq_sta;
2318 sband = hw->wiphy->bands[conf->channel->band];
2319
2320 lq_sta->lq.sta_id = sta_id;
2321
2322 for (j = 0; j < LQ_SIZE; j++)
2323 for (i = 0; i < RATE_COUNT; i++)
2324 il4965_rs_rate_scale_clear_win(&lq_sta->lq_info[j].
2325 win[i]);
2326
2327 lq_sta->flush_timer = 0;
2328 lq_sta->supp_rates = sta->supp_rates[sband->band];
2329 for (j = 0; j < LQ_SIZE; j++)
2330 for (i = 0; i < RATE_COUNT; i++)
2331 il4965_rs_rate_scale_clear_win(&lq_sta->lq_info[j].
2332 win[i]);
2333
2334 D_RATE("LQ:" "*** rate scale station global init for station %d ***\n",
2335 sta_id);
2336 /* TODO: what is a good starting rate for STA? About middle? Maybe not
2337 * the lowest or the highest rate.. Could consider using RSSI from
2338 * previous packets? Need to have IEEE 802.1X auth succeed immediately
2339 * after assoc.. */
2340
2341 lq_sta->is_dup = 0;
2342 lq_sta->max_rate_idx = -1;
2343 lq_sta->missed_rate_counter = IL_MISSED_RATE_MAX;
2344 lq_sta->is_green = il4965_rs_use_green(sta);
2345 lq_sta->active_legacy_rate = il->active_rate & ~(0x1000);
2346 lq_sta->band = il->band;
2347 /*
2348 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
2349 * supp_rates[] does not; shift to convert format, force 9 MBits off.
2350 */
2351 lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
2352 lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
2353 lq_sta->active_siso_rate &= ~((u16) 0x2);
2354 lq_sta->active_siso_rate <<= IL_FIRST_OFDM_RATE;
2355
2356 /* Same here */
2357 lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
2358 lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
2359 lq_sta->active_mimo2_rate &= ~((u16) 0x2);
2360 lq_sta->active_mimo2_rate <<= IL_FIRST_OFDM_RATE;
2361
2362 /* These values will be overridden later */
2363 lq_sta->lq.general_params.single_stream_ant_msk =
2364 il4965_first_antenna(il->hw_params.valid_tx_ant);
2365 lq_sta->lq.general_params.dual_stream_ant_msk =
2366 il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params.
2367 valid_tx_ant);
2368 if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
2369 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
2370 } else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
2371 lq_sta->lq.general_params.dual_stream_ant_msk =
2372 il->hw_params.valid_tx_ant;
2373 }
2374
2375 /* as default allow aggregation for all tids */
2376 lq_sta->tx_agg_tid_en = IL_AGG_ALL_TID;
2377 lq_sta->drv = il;
2378
2379 /* Set last_txrate_idx to lowest rate */
2380 lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
2381 if (sband->band == IEEE80211_BAND_5GHZ)
2382 lq_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
2383 lq_sta->is_agg = 0;
2384
2385#ifdef CONFIG_MAC80211_DEBUGFS
2386 lq_sta->dbg_fixed_rate = 0;
2387#endif
2388
2389 il4965_rs_initialize_lq(il, conf, sta, lq_sta);
2390}
2391
/*
 * Fill the uCode link-quality command's fallback rate table.
 *
 * Starting from new_rate (a rate_n_flags value), fills all
 * LINK_QUAL_MAX_RETRY_NUM entries: each rate is repeated (with
 * per-try antenna toggling in legacy mode), then stepped down via
 * il4965_rs_get_lower_rate().  HT rates are only allowed for the
 * first pass; afterwards the table falls back to legacy rates.
 *
 * NOTE: il may be NULL (il4965_rs_initialize_lq() calls it that way);
 * in that case antenna toggling is skipped.
 */
static void
il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
			u32 new_rate)
{
	struct il_scale_tbl_info tbl_type;
	int idx = 0;
	int rate_idx;
	int repeat_rate = 0;
	u8 ant_toggle_cnt = 0;
	u8 use_ht_possible = 1;
	u8 valid_tx_ant = 0;
	struct il_link_quality_cmd *lq_cmd = &lq_sta->lq;

	/* Override starting rate (idx 0) if needed for debug purposes */
	il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);

	/* Interpret new_rate (rate_n_flags) */
	il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
					&rate_idx);

	/* How many times should we repeat the initial rate? */
	if (is_legacy(tbl_type.lq_type)) {
		ant_toggle_cnt = 1;
		repeat_rate = IL_NUMBER_TRY;
	} else {
		repeat_rate = IL_HT_NUMBER_TRY;
	}

	lq_cmd->general_params.mimo_delimiter =
	    is_mimo(tbl_type.lq_type) ? 1 : 0;

	/* Fill 1st table entry (idx 0) */
	lq_cmd->rs_table[idx].rate_n_flags = cpu_to_le32(new_rate);

	if (il4965_num_of_ant(tbl_type.ant_type) == 1) {
		lq_cmd->general_params.single_stream_ant_msk =
		    tbl_type.ant_type;
	} else if (il4965_num_of_ant(tbl_type.ant_type) == 2) {
		lq_cmd->general_params.dual_stream_ant_msk = tbl_type.ant_type;
	}
	/* otherwise we don't modify the existing value */
	idx++;
	repeat_rate--;
	if (il)
		valid_tx_ant = il->hw_params.valid_tx_ant;

	/* Fill rest of rate table */
	while (idx < LINK_QUAL_MAX_RETRY_NUM) {
		/* Repeat initial/next rate.
		 * For legacy IL_NUMBER_TRY == 1, this loop will not execute.
		 * For HT IL_HT_NUMBER_TRY == 3, this executes twice. */
		while (repeat_rate > 0 && idx < LINK_QUAL_MAX_RETRY_NUM) {
			if (is_legacy(tbl_type.lq_type)) {
				/* toggle antenna only after enough tries on
				 * the current one (and only if il != NULL) */
				if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
					ant_toggle_cnt++;
				else if (il &&
					 il4965_rs_toggle_antenna(valid_tx_ant,
								  &new_rate,
								  &tbl_type))
					ant_toggle_cnt = 1;
			}

			/* Override next rate if needed for debug purposes */
			il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);

			/* Fill next table entry */
			lq_cmd->rs_table[idx].rate_n_flags =
			    cpu_to_le32(new_rate);
			repeat_rate--;
			idx++;
		}

		il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
						&tbl_type, &rate_idx);

		/* Indicate to uCode which entries might be MIMO.
		 * If initial rate was MIMO, this will finally end up
		 * as (IL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
		if (is_mimo(tbl_type.lq_type))
			lq_cmd->general_params.mimo_delimiter = idx;

		/* Get next rate */
		new_rate =
		    il4965_rs_get_lower_rate(lq_sta, &tbl_type, rate_idx,
					     use_ht_possible);

		/* How many times should we repeat the next rate? */
		if (is_legacy(tbl_type.lq_type)) {
			if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
				ant_toggle_cnt++;
			else if (il &&
				 il4965_rs_toggle_antenna(valid_tx_ant,
							  &new_rate, &tbl_type))
				ant_toggle_cnt = 1;

			repeat_rate = IL_NUMBER_TRY;
		} else {
			repeat_rate = IL_HT_NUMBER_TRY;
		}

		/* Don't allow HT rates after next pass.
		 * il4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */
		use_ht_possible = 0;

		/* Override next rate if needed for debug purposes */
		il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);

		/* Fill next table entry */
		lq_cmd->rs_table[idx].rate_n_flags = cpu_to_le32(new_rate);

		idx++;
		repeat_rate--;
	}

	/* Aggregation limits for the uCode scheduler */
	lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;

	lq_cmd->agg_params.agg_time_limit =
	    cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
}
2512
2513static void *
2514il4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
2515{
2516 return hw->priv;
2517}
2518
/* mac80211 requires rate scaling to implement a free hook; nothing was
 * allocated by il4965_rs_alloc(), so there is nothing to release. */
static void
il4965_rs_free(void *il_rate)
{
}
2525
/* Per-station free hook: there is no per-station state to release here;
 * only entry/exit tracing is emitted. */
static void
il4965_rs_free_sta(void *il_r, struct ieee80211_sta *sta, void *il_sta)
{
	/* 'il' appears unused, but the D_RATE() macros below presumably
	 * expand to a reference to a local named 'il' when debugging is
	 * built in — hence __maybe_unused. TODO confirm macro expansion. */
	struct il_priv *il __maybe_unused = il_r;

	D_RATE("enter\n");
	D_RATE("leave\n");
}
2534
2535#ifdef CONFIG_MAC80211_DEBUGFS
/* Generic debugfs open: stash the inode's private pointer (the lq_sta
 * passed at file-creation time) so read/write handlers can fetch it
 * from file->private_data. */
static int
il4965_open_file_generic(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
2542
2543static void
2544il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx)
2545{
2546 struct il_priv *il;
2547 u8 valid_tx_ant;
2548 u8 ant_sel_tx;
2549
2550 il = lq_sta->drv;
2551 valid_tx_ant = il->hw_params.valid_tx_ant;
2552 if (lq_sta->dbg_fixed_rate) {
2553 ant_sel_tx =
2554 ((lq_sta->
2555 dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >>
2556 RATE_MCS_ANT_POS);
2557 if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
2558 *rate_n_flags = lq_sta->dbg_fixed_rate;
2559 D_RATE("Fixed rate ON\n");
2560 } else {
2561 lq_sta->dbg_fixed_rate = 0;
2562 IL_ERR
2563 ("Invalid antenna selection 0x%X, Valid is 0x%X\n",
2564 ant_sel_tx, valid_tx_ant);
2565 D_RATE("Fixed rate OFF\n");
2566 }
2567 } else {
2568 D_RATE("Fixed rate OFF\n");
2569 }
2570}
2571
/*
 * debugfs write handler for "rate_scale_table": parse a hex rate_n_flags
 * value from userspace and install it as a fixed tx rate. Any value that
 * fails to parse (or 0) clears the override and returns control to the
 * rate-scaling algorithm. Returns @count on success, -EFAULT if the user
 * buffer cannot be copied.
 */
static ssize_t
il4965_rs_sta_dbgfs_scale_table_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct il_lq_sta *lq_sta = file->private_data;
	/* 'il' appears unused below; presumably the D_RATE() macro expands
	 * to a reference to a local named 'il' — TODO confirm. */
	struct il_priv *il;
	char buf[64];
	size_t buf_size;
	u32 parsed_rate;
	struct il_station_priv *sta_priv =
	    container_of(lq_sta, struct il_station_priv, lq_sta);
	struct il_rxon_context *ctx = sta_priv->common.ctx;

	il = lq_sta->drv;
	/* Bounded, NUL-terminated copy of the user input. */
	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	if (sscanf(buf, "%x", &parsed_rate) == 1)
		lq_sta->dbg_fixed_rate = parsed_rate;
	else
		lq_sta->dbg_fixed_rate = 0;

	/* Re-open the full rate masks so the fixed rate is never filtered. */
	lq_sta->active_legacy_rate = 0x0FFF;	/* 1 - 54 MBits, includes CCK */
	lq_sta->active_siso_rate = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
	lq_sta->active_mimo2_rate = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */

	D_RATE("sta_id %d rate 0x%X\n", lq_sta->lq.sta_id,
	       lq_sta->dbg_fixed_rate);

	/* Push the new fixed-rate link-quality table to the uCode now. */
	if (lq_sta->dbg_fixed_rate) {
		il4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
		il_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC, false);
	}

	return count;
}
2611
/*
 * debugfs read handler for "rate_scale_table": dump the station's current
 * link-quality state (antennas, lq type, aggregation params, and the full
 * per-retry rate table) as text.
 *
 * NOTE(review): the buffer is a fixed 1024 bytes with unchecked sprintf()
 * appends — verify this cannot overflow for LINK_QUAL_MAX_RETRY_NUM rows.
 * Also "rate=0%X" below looks like it was meant to be "0x%X" — confirm
 * before changing, as it alters debugfs output.
 */
static ssize_t
il4965_rs_sta_dbgfs_scale_table_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	char *buff;
	int desc = 0;	/* number of bytes formatted so far */
	int i = 0;
	int idx = 0;
	ssize_t ret;

	struct il_lq_sta *lq_sta = file->private_data;
	struct il_priv *il;
	struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);

	il = lq_sta->drv;
	buff = kmalloc(1024, GFP_KERNEL);
	if (!buff)
		return -ENOMEM;

	desc += sprintf(buff + desc, "sta_id %d\n", lq_sta->lq.sta_id);
	desc +=
	    sprintf(buff + desc, "failed=%d success=%d rate=0%X\n",
		    lq_sta->total_failed, lq_sta->total_success,
		    lq_sta->active_legacy_rate);
	desc +=
	    sprintf(buff + desc, "fixed rate 0x%X\n", lq_sta->dbg_fixed_rate);
	desc +=
	    sprintf(buff + desc, "valid_tx_ant %s%s%s\n",
		    (il->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
		    (il->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
		    (il->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
	desc +=
	    sprintf(buff + desc, "lq type %s\n",
		    (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
	/* HT-only details: stream count, channel width, SGI/GF/AGG flags. */
	if (is_Ht(tbl->lq_type)) {
		desc +=
		    sprintf(buff + desc, " %s",
			    (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
		desc +=
		    sprintf(buff + desc, " %s",
			    (tbl->is_ht40) ? "40MHz" : "20MHz");
		desc +=
		    sprintf(buff + desc, " %s %s %s\n",
			    (tbl->is_SGI) ? "SGI" : "",
			    (lq_sta->is_green) ? "GF enabled" : "",
			    (lq_sta->is_agg) ? "AGG on" : "");
	}
	desc +=
	    sprintf(buff + desc, "last tx rate=0x%X\n",
		    lq_sta->last_rate_n_flags);
	desc +=
	    sprintf(buff + desc,
		    "general:" "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
		    lq_sta->lq.general_params.flags,
		    lq_sta->lq.general_params.mimo_delimiter,
		    lq_sta->lq.general_params.single_stream_ant_msk,
		    lq_sta->lq.general_params.dual_stream_ant_msk);

	desc +=
	    sprintf(buff + desc,
		    "agg:"
		    "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
		    le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
		    lq_sta->lq.agg_params.agg_dis_start_th,
		    lq_sta->lq.agg_params.agg_frame_cnt_limit);

	desc +=
	    sprintf(buff + desc,
		    "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
		    lq_sta->lq.general_params.start_rate_idx[0],
		    lq_sta->lq.general_params.start_rate_idx[1],
		    lq_sta->lq.general_params.start_rate_idx[2],
		    lq_sta->lq.general_params.start_rate_idx[3]);

	/* One line per retry-table entry; legacy rates have no MCS string. */
	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
		idx =
		    il4965_hwrate_to_plcp_idx(le32_to_cpu
					      (lq_sta->lq.rs_table[i].
					       rate_n_flags));
		if (is_legacy(tbl->lq_type)) {
			desc +=
			    sprintf(buff + desc, " rate[%d] 0x%X %smbps\n", i,
				    le32_to_cpu(lq_sta->lq.rs_table[i].
						rate_n_flags),
				    il_rate_mcs[idx].mbps);
		} else {
			desc +=
			    sprintf(buff + desc, " rate[%d] 0x%X %smbps (%s)\n",
				    i,
				    le32_to_cpu(lq_sta->lq.rs_table[i].
						rate_n_flags),
				    il_rate_mcs[idx].mbps,
				    il_rate_mcs[idx].mcs);
		}
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
	kfree(buff);
	return ret;
}
2712
/* fops for the read/write "rate_scale_table" debugfs entry. */
static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
	.write = il4965_rs_sta_dbgfs_scale_table_write,
	.read = il4965_rs_sta_dbgfs_scale_table_read,
	.open = il4965_open_file_generic,
	.llseek = default_llseek,
};
2719
/*
 * debugfs read handler for "rate_stats_table": dump both scale tables
 * ('*' marks the active one) plus each rate's success-history window.
 *
 * NOTE(review): fixed 1024-byte buffer with unchecked sprintf() appends;
 * verify LQ_SIZE * RATE_COUNT lines cannot exceed it.
 */
static ssize_t
il4965_rs_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	char *buff;
	int desc = 0;
	int i, j;
	ssize_t ret;

	struct il_lq_sta *lq_sta = file->private_data;

	buff = kmalloc(1024, GFP_KERNEL);
	if (!buff)
		return -ENOMEM;

	for (i = 0; i < LQ_SIZE; i++) {
		desc +=
		    sprintf(buff + desc,
			    "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
			    "rate=0x%X\n", lq_sta->active_tbl == i ? "*" : "x",
			    lq_sta->lq_info[i].lq_type,
			    lq_sta->lq_info[i].is_SGI,
			    lq_sta->lq_info[i].is_ht40,
			    lq_sta->lq_info[i].is_dup, lq_sta->is_green,
			    lq_sta->lq_info[i].current_rate);
		/* Per-rate tx history window for this table. */
		for (j = 0; j < RATE_COUNT; j++) {
			desc +=
			    sprintf(buff + desc,
				    "counter=%d success=%d %%=%d\n",
				    lq_sta->lq_info[i].win[j].counter,
				    lq_sta->lq_info[i].win[j].success_counter,
				    lq_sta->lq_info[i].win[j].success_ratio);
		}
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
	kfree(buff);
	return ret;
}
2758
/* fops for the read-only "rate_stats_table" debugfs entry. */
static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
	.read = il4965_rs_sta_dbgfs_stats_table_read,
	.open = il4965_open_file_generic,
	.llseek = default_llseek,
};
2764
2765static ssize_t
2766il4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file,
2767 char __user *user_buf, size_t count,
2768 loff_t *ppos)
2769{
2770 char buff[120];
2771 int desc = 0;
2772 struct il_lq_sta *lq_sta = file->private_data;
2773 struct il_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
2774
2775 if (is_Ht(tbl->lq_type))
2776 desc +=
2777 sprintf(buff + desc, "Bit Rate= %d Mb/s\n",
2778 tbl->expected_tpt[lq_sta->last_txrate_idx]);
2779 else
2780 desc +=
2781 sprintf(buff + desc, "Bit Rate= %d Mb/s\n",
2782 il_rates[lq_sta->last_txrate_idx].ieee >> 1);
2783
2784 return simple_read_from_buffer(user_buf, count, ppos, buff, desc);
2785}
2786
/* fops for the read-only "rate_scale_data" debugfs entry. */
static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
	.read = il4965_rs_sta_dbgfs_rate_scale_data_read,
	.open = il4965_open_file_generic,
	.llseek = default_llseek,
};
2792
/*
 * mac80211 add_sta_debugfs hook: create the per-station rate-scaling
 * debugfs entries under @dir, keeping the dentries in lq_sta so
 * il4965_rs_remove_debugfs() can tear them down.
 */
static void
il4965_rs_add_debugfs(void *il, void *il_sta, struct dentry *dir)
{
	struct il_lq_sta *lq_sta = il_sta;
	/* rw: current LQ table / fixed-rate override */
	lq_sta->rs_sta_dbgfs_scale_table_file =
	    debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
				lq_sta, &rs_sta_dbgfs_scale_table_ops);
	/* ro: per-rate success statistics */
	lq_sta->rs_sta_dbgfs_stats_table_file =
	    debugfs_create_file("rate_stats_table", S_IRUSR, dir, lq_sta,
				&rs_sta_dbgfs_stats_table_ops);
	/* ro: last tx bitrate */
	lq_sta->rs_sta_dbgfs_rate_scale_data_file =
	    debugfs_create_file("rate_scale_data", S_IRUSR, dir, lq_sta,
				&rs_sta_dbgfs_rate_scale_data_ops);
	/* rw: per-TID tx aggregation enable bitmask */
	lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
	    debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
			      &lq_sta->tx_agg_tid_en);

}
2811
2812static void
2813il4965_rs_remove_debugfs(void *il, void *il_sta)
2814{
2815 struct il_lq_sta *lq_sta = il_sta;
2816 debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
2817 debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
2818 debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
2819 debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
2820}
2821#endif
2822
/*
 * Initialization of rate scaling information is done by driver after
 * the station is added. Since mac80211 calls this function before a
 * station is added we ignore it.
 */
static void
il4965_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
			 struct ieee80211_sta *sta, void *il_sta)
{
	/* intentionally empty — see comment above */
}
2833
/* mac80211 rate-control entry points for the 4965 rate-scaling algorithm;
 * registered via il4965_rate_control_register(). */
static struct rate_control_ops rs_4965_ops = {
	.module = NULL,
	.name = IL4965_RS_NAME,
	.tx_status = il4965_rs_tx_status,
	.get_rate = il4965_rs_get_rate,
	.rate_init = il4965_rs_rate_init_stub,
	.alloc = il4965_rs_alloc,
	.free = il4965_rs_free,
	.alloc_sta = il4965_rs_alloc_sta,
	.free_sta = il4965_rs_free_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
	.add_sta_debugfs = il4965_rs_add_debugfs,
	.remove_sta_debugfs = il4965_rs_remove_debugfs,
#endif
};
2849
/* Register the 4965 rate-scaling algorithm with mac80211.
 * Returns 0 on success or a negative errno from mac80211. */
int
il4965_rate_control_register(void)
{
	return ieee80211_rate_control_register(&rs_4965_ops);
}
2855
/* Unregister the 4965 rate-scaling algorithm from mac80211. */
void
il4965_rate_control_unregister(void)
{
	ieee80211_rate_control_unregister(&rs_4965_ops);
}
diff --git a/drivers/net/wireless/iwlegacy/4965.c b/drivers/net/wireless/iwlegacy/4965.c
new file mode 100644
index 000000000000..84c54dccf195
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965.c
@@ -0,0 +1,2421 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/sched.h>
34#include <linux/skbuff.h>
35#include <linux/netdevice.h>
36#include <net/mac80211.h>
37#include <linux/etherdevice.h>
38#include <asm/unaligned.h>
39
40#include "common.h"
41#include "4965.h"
42
/**
 * il4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
 * using sample data 100 bytes apart. If these sample points are good,
 * it's a pretty good bet that everything between them is good, too.
 *
 * Returns 0 when all sampled words match, -EIO on any mismatch.
 */
static int
il4965_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len)
{
	u32 val;
	int ret = 0;
	u32 errcnt = 0;
	u32 i;

	D_INFO("ucode inst image size is %u\n", len);

	for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) {
		/* read data comes through single port, auto-incr addr */
		/* NOTE: Use the debugless read so we don't flood kernel log
		 * if IL_DL_IO is set */
		il_wr(il, HBUS_TARG_MEM_RADDR, i + IL4965_RTC_INST_LOWER_BOUND);
		val = _il_rd(il, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image)) {
			ret = -EIO;
			/* a few mismatches are enough to reject the image */
			errcnt++;
			if (errcnt >= 3)
				break;
		}
	}

	return ret;
}
74
/**
 * il4965_verify_inst_full - verify runtime uCode image in card vs. host,
 * looking at all data.
 *
 * Returns 0 if the whole image matches, -EIO otherwise (logging up to 20
 * mismatching words for diagnosis).
 */
static int
il4965_verify_inst_full(struct il_priv *il, __le32 * image, u32 len)
{
	u32 val;
	u32 save_len = len;	/* original length, for offset reporting */
	int ret = 0;
	u32 errcnt;

	D_INFO("ucode inst image size is %u\n", len);

	il_wr(il, HBUS_TARG_MEM_RADDR, IL4965_RTC_INST_LOWER_BOUND);

	errcnt = 0;
	for (; len > 0; len -= sizeof(u32), image++) {
		/* read data comes through single port, auto-incr addr */
		/* NOTE: Use the debugless read so we don't flood kernel log
		 * if IL_DL_IO is set */
		val = _il_rd(il, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image)) {
			IL_ERR("uCode INST section is invalid at "
			       "offset 0x%x, is 0x%x, s/b 0x%x\n",
			       save_len - len, val, le32_to_cpu(*image));
			ret = -EIO;
			errcnt++;
			if (errcnt >= 20)
				break;
		}
	}

	if (!errcnt)
		D_INFO("ucode image in INSTRUCTION memory is good\n");

	return ret;
}
113
/**
 * il4965_verify_ucode - determine which instruction image is in SRAM,
 * and verify its contents
 *
 * Tries each host-side image (bootstrap, initialize, runtime) with a
 * cheap sparse check; returns 0 on the first match. If none match,
 * does a full verify of the bootstrap image purely for diagnostics
 * and returns its (non-zero) result.
 */
int
il4965_verify_ucode(struct il_priv *il)
{
	__le32 *image;
	u32 len;
	int ret;

	/* Try bootstrap */
	image = (__le32 *) il->ucode_boot.v_addr;
	len = il->ucode_boot.len;
	ret = il4965_verify_inst_sparse(il, image, len);
	if (!ret) {
		D_INFO("Bootstrap uCode is good in inst SRAM\n");
		return 0;
	}

	/* Try initialize */
	image = (__le32 *) il->ucode_init.v_addr;
	len = il->ucode_init.len;
	ret = il4965_verify_inst_sparse(il, image, len);
	if (!ret) {
		D_INFO("Initialize uCode is good in inst SRAM\n");
		return 0;
	}

	/* Try runtime/protocol */
	image = (__le32 *) il->ucode_code.v_addr;
	len = il->ucode_code.len;
	ret = il4965_verify_inst_sparse(il, image, len);
	if (!ret) {
		D_INFO("Runtime uCode is good in inst SRAM\n");
		return 0;
	}

	IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");

	/* Since nothing seems to match, show first several data entries in
	 * instruction SRAM, so maybe visual inspection will give a clue.
	 * Selection of bootstrap image (vs. other images) is arbitrary. */
	image = (__le32 *) il->ucode_boot.v_addr;
	len = il->ucode_boot.len;
	ret = il4965_verify_inst_full(il, image, len);

	return ret;
}
163
164/******************************************************************************
165 *
166 * EEPROM related functions
167 *
168******************************************************************************/
169
/*
 * The device's EEPROM semaphore prevents conflicts between driver and uCode
 * when accessing the EEPROM; each access is a series of pulses to/from the
 * EEPROM chip, not a single event, so even reads could conflict if they
 * weren't arbitrated by the semaphore.
 */
int
il4965_eeprom_acquire_semaphore(struct il_priv *il)
{
	u16 count;
	int ret;

	/* NOTE(review): 'ret' would be returned uninitialized if
	 * EEPROM_SEM_RETRY_LIMIT were ever 0 — confirm it is > 0. */
	for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
		/* Request semaphore */
		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);

		/* See if we got it */
		ret =
		    _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
				 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
				 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
				 EEPROM_SEM_TIMEOUT);
		/* >= 0 means the own-semaphore bit latched in time */
		if (ret >= 0)
			return ret;
	}

	/* all retries timed out; return the last poll result */
	return ret;
}
199
/* Release the EEPROM semaphore taken by
 * il4965_eeprom_acquire_semaphore(), letting uCode access the EEPROM. */
void
il4965_eeprom_release_semaphore(struct il_priv *il)
{
	il_clear_bit(il, CSR_HW_IF_CONFIG_REG,
		     CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);

}
207
208int
209il4965_eeprom_check_version(struct il_priv *il)
210{
211 u16 eeprom_ver;
212 u16 calib_ver;
213
214 eeprom_ver = il_eeprom_query16(il, EEPROM_VERSION);
215 calib_ver = il_eeprom_query16(il, EEPROM_4965_CALIB_VERSION_OFFSET);
216
217 if (eeprom_ver < il->cfg->eeprom_ver ||
218 calib_ver < il->cfg->eeprom_calib_ver)
219 goto err;
220
221 IL_INFO("device EEPROM VER=0x%x, CALIB=0x%x\n", eeprom_ver, calib_ver);
222
223 return 0;
224err:
225 IL_ERR("Unsupported (too old) EEPROM VER=0x%x < 0x%x "
226 "CALIB=0x%x < 0x%x\n", eeprom_ver, il->cfg->eeprom_ver,
227 calib_ver, il->cfg->eeprom_calib_ver);
228 return -EINVAL;
229
230}
231
/* Copy the device MAC address out of the cached EEPROM image into @mac
 * (ETH_ALEN bytes). */
void
il4965_eeprom_get_mac(const struct il_priv *il, u8 * mac)
{
	const u8 *addr = il_eeprom_query_addr(il,
					      EEPROM_MAC_ADDRESS);
	memcpy(mac, addr, ETH_ALEN);
}
239
/* Send led command: asynchronously issue C_LEDS, first making sure the
 * LED CSR carries only the BSM-control bits the uCode expects. */
static int
il4965_send_led_cmd(struct il_priv *il, struct il_led_cmd *led_cmd)
{
	struct il_host_cmd cmd = {
		.id = C_LEDS,
		.len = sizeof(struct il_led_cmd),
		.data = led_cmd,
		.flags = CMD_ASYNC,	/* fire and forget */
		.callback = NULL,
	};
	u32 reg;

	/* Clear any stray non-BSM-control bits before handing the LED
	 * to the uCode. */
	reg = _il_rd(il, CSR_LED_REG);
	if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
		_il_wr(il, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);

	return il_send_cmd(il, &cmd);
}
259
/* Turn the LED register on (CSR_LED_REG_TRUN_ON); the previous comment
 * said "off", which contradicted the macro name. */
void
il4965_led_enable(struct il_priv *il)
{
	_il_wr(il, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
}
266
/* LED operations exported to the common iwlegacy LED code. */
const struct il_led_ops il4965_led_ops = {
	.cmd = il4965_send_led_cmd,
};
270
271static int il4965_send_tx_power(struct il_priv *il);
272static int il4965_hw_get_temperature(struct il_priv *il);
273
274/* Highest firmware API version supported */
275#define IL4965_UCODE_API_MAX 2
276
277/* Lowest firmware API version supported */
278#define IL4965_UCODE_API_MIN 2
279
280#define IL4965_FW_PRE "iwlwifi-4965-"
281#define _IL4965_MODULE_FIRMWARE(api) IL4965_FW_PRE #api ".ucode"
282#define IL4965_MODULE_FIRMWARE(api) _IL4965_MODULE_FIRMWARE(api)
283
/* check contents of special bootstrap uCode SRAM: read back every word
 * written by il4965_load_bsm() and compare against the host image.
 * Returns 0 on match, -EIO on the first mismatch. */
static int
il4965_verify_bsm(struct il_priv *il)
{
	__le32 *image = il->ucode_boot.v_addr;
	u32 len = il->ucode_boot.len;
	u32 reg;
	u32 val;

	D_INFO("Begin verify bsm\n");

	/* verify BSM SRAM contents */
	/* NOTE(review): this first read's result is overwritten in the
	 * loop below — possibly a deliberate priming read of the BSM;
	 * confirm before removing. */
	val = il_rd_prph(il, BSM_WR_DWCOUNT_REG);
	for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len;
	     reg += sizeof(u32), image++) {
		val = il_rd_prph(il, reg);
		if (val != le32_to_cpu(*image)) {
			IL_ERR("BSM uCode verification failed at "
			       "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
			       BSM_SRAM_LOWER_BOUND, reg - BSM_SRAM_LOWER_BOUND,
			       len, val, le32_to_cpu(*image));
			return -EIO;
		}
	}

	D_INFO("BSM bootstrap uCode image OK\n");

	return 0;
}
313
314/**
315 * il4965_load_bsm - Load bootstrap instructions
316 *
317 * BSM operation:
318 *
319 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
320 * in special SRAM that does not power down during RFKILL. When powering back
321 * up after power-saving sleeps (or during initial uCode load), the BSM loads
322 * the bootstrap program into the on-board processor, and starts it.
323 *
324 * The bootstrap program loads (via DMA) instructions and data for a new
325 * program from host DRAM locations indicated by the host driver in the
326 * BSM_DRAM_* registers. Once the new program is loaded, it starts
327 * automatically.
328 *
329 * When initializing the NIC, the host driver points the BSM to the
330 * "initialize" uCode image. This uCode sets up some internal data, then
331 * notifies host via "initialize alive" that it is complete.
332 *
333 * The host then replaces the BSM_DRAM_* pointer values to point to the
334 * normal runtime uCode instructions and a backup uCode data cache buffer
335 * (filled initially with starting data values for the on-board processor),
336 * then triggers the "initialize" uCode to load and launch the runtime uCode,
337 * which begins normal operation.
338 *
339 * When doing a power-save shutdown, runtime uCode saves data SRAM into
340 * the backup data cache in DRAM before SRAM is powered down.
341 *
342 * When powering back up, the BSM loads the bootstrap program. This reloads
343 * the runtime uCode instructions and the backup data cache into SRAM,
344 * and re-launches the runtime uCode from where it left off.
345 */
static int
il4965_load_bsm(struct il_priv *il)
{
	__le32 *image = il->ucode_boot.v_addr;
	u32 len = il->ucode_boot.len;
	dma_addr_t pinst;
	dma_addr_t pdata;
	u32 inst_len;
	u32 data_len;
	int i;
	u32 done;
	u32 reg_offset;
	int ret;

	D_INFO("Begin load bsm\n");

	il->ucode_type = UCODE_RT;

	/* make sure bootstrap program is no larger than BSM's SRAM size */
	if (len > IL49_MAX_BSM_SIZE)
		return -EINVAL;

	/* Tell bootstrap uCode where to find the "Initialize" uCode
	 * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
	 * NOTE: il_init_alive_start() will replace these values,
	 * after the "initialize" uCode has run, to point to
	 * runtime/protocol instructions and backup data cache.
	 */
	pinst = il->ucode_init.p_addr >> 4;
	pdata = il->ucode_init_data.p_addr >> 4;
	inst_len = il->ucode_init.len;
	data_len = il->ucode_init_data.len;

	il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
	il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
	il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
	il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);

	/* Fill BSM memory with bootstrap instructions */
	for (reg_offset = BSM_SRAM_LOWER_BOUND;
	     reg_offset < BSM_SRAM_LOWER_BOUND + len;
	     reg_offset += sizeof(u32), image++)
		_il_wr_prph(il, reg_offset, le32_to_cpu(*image));

	/* read back and compare what we just wrote */
	ret = il4965_verify_bsm(il);
	if (ret)
		return ret;

	/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
	il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0);
	il_wr_prph(il, BSM_WR_MEM_DST_REG, IL49_RTC_INST_LOWER_BOUND);
	il_wr_prph(il, BSM_WR_DWCOUNT_REG, len / sizeof(u32));

	/* Load bootstrap code into instruction SRAM now,
	 * to prepare to load "initialize" uCode */
	il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);

	/* Wait for load of bootstrap uCode to finish: the START bit
	 * self-clears when the copy completes (poll up to ~1ms). */
	for (i = 0; i < 100; i++) {
		done = il_rd_prph(il, BSM_WR_CTRL_REG);
		if (!(done & BSM_WR_CTRL_REG_BIT_START))
			break;
		udelay(10);
	}
	if (i < 100)
		D_INFO("BSM write complete, poll %d iterations\n", i);
	else {
		IL_ERR("BSM write did not complete!\n");
		return -EIO;
	}

	/* Enable future boot loads whenever power management unit triggers it
	 * (e.g. when powering back up after power-save shutdown) */
	il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);

	return 0;
}
423
424/**
425 * il4965_set_ucode_ptrs - Set uCode address location
426 *
427 * Tell initialization uCode where to find runtime uCode.
428 *
429 * BSM registers initially contain pointers to initialization uCode.
430 * We need to replace them to load runtime uCode inst and data,
431 * and to save runtime data when powering down.
432 */
433static int
434il4965_set_ucode_ptrs(struct il_priv *il)
435{
436 dma_addr_t pinst;
437 dma_addr_t pdata;
438 int ret = 0;
439
440 /* bits 35:4 for 4965 */
441 pinst = il->ucode_code.p_addr >> 4;
442 pdata = il->ucode_data_backup.p_addr >> 4;
443
444 /* Tell bootstrap uCode where to find image to load */
445 il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
446 il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
447 il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len);
448
449 /* Inst byte count must be last to set up, bit 31 signals uCode
450 * that all new ptr/size info is in place */
451 il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG,
452 il->ucode_code.len | BSM_DRAM_INST_LOAD);
453 D_INFO("Runtime uCode pointers are set.\n");
454
455 return ret;
456}
457
458/**
459 * il4965_init_alive_start - Called after N_ALIVE notification received
460 *
461 * Called after N_ALIVE notification received from "initialize" uCode.
462 *
463 * The 4965 "initialize" ALIVE reply contains calibration data for:
464 * Voltage, temperature, and MIMO tx gain correction, now stored in il
465 * (3945 does not contain this data).
466 *
467 * Tell "initialize" uCode to go ahead and load the runtime uCode.
468*/
static void
il4965_init_alive_start(struct il_priv *il)
{
	/* Bootstrap uCode has loaded initialize uCode ... verify inst image.
	 * This is a paranoid check, because we would not have gotten the
	 * "initialize" alive if code weren't properly loaded. */
	if (il4965_verify_ucode(il)) {
		/* Runtime instruction load was bad;
		 * take it all the way back down so we can try again */
		D_INFO("Bad \"initialize\" uCode load.\n");
		goto restart;
	}

	/* Calculate temperature */
	il->temperature = il4965_hw_get_temperature(il);

	/* Send pointers to protocol/runtime uCode image ... init code will
	 * load and launch runtime uCode, which will send us another "Alive"
	 * notification. */
	D_INFO("Initialization Alive received.\n");
	if (il4965_set_ucode_ptrs(il)) {
		/* Runtime instruction load won't happen;
		 * take it all the way back down so we can try again */
		D_INFO("Couldn't set up uCode pointers.\n");
		goto restart;
	}
	return;

restart:
	/* full restart: tear the device down and retry the load sequence */
	queue_work(il->workqueue, &il->restart);
}
500
501static bool
502iw4965_is_ht40_channel(__le32 rxon_flags)
503{
504 int chan_mod =
505 le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK) >>
506 RXON_FLG_CHANNEL_MODE_POS;
507 return (chan_mod == CHANNEL_MODE_PURE_40 ||
508 chan_mod == CHANNEL_MODE_MIXED);
509}
510
/* Program radio-configuration bits from EEPROM into the hardware
 * interface CSR and cache the txpower calibration block pointer.
 * Runs under il->lock since it read-modify-writes shared CSRs. */
static void
il4965_nic_config(struct il_priv *il)
{
	unsigned long flags;
	u16 radio_cfg;

	spin_lock_irqsave(&il->lock, flags);

	radio_cfg = il_eeprom_query16(il, EEPROM_RADIO_CONFIG);

	/* write radio config values to register */
	if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
			   EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
			   EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
			   EEPROM_RF_CFG_DASH_MSK(radio_cfg));

	/* set CSR_HW_CONFIG_REG for uCode use */
	il_set_bit(il, CSR_HW_IF_CONFIG_REG,
		   CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
		   CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);

	/* cache pointer into the EEPROM's txpower calibration block */
	il->calib_info =
	    (struct il_eeprom_calib_info *)
	    il_eeprom_query_addr(il, EEPROM_4965_CALIB_TXPOWER_OFFSET);

	spin_unlock_irqrestore(&il->lock, flags);
}
539
/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
 * Called after every association, but this runs only once!
 * ... once chain noise is calibrated the first time, it's good forever. */
static void
il4965_chain_noise_reset(struct il_priv *il)
{
	struct il_chain_noise_data *data = &(il->chain_noise_data);

	/* Only (re)start calibration when in the ALIVE state and actually
	 * associated; otherwise leave existing calibration untouched. */
	if (data->state == IL_CHAIN_NOISE_ALIVE && il_is_any_associated(il)) {
		struct il_calib_diff_gain_cmd cmd;

		/* clear data for chain noise calibration algorithm */
		data->chain_noise_a = 0;
		data->chain_noise_b = 0;
		data->chain_noise_c = 0;
		data->chain_signal_a = 0;
		data->chain_signal_b = 0;
		data->chain_signal_c = 0;
		data->beacon_count = 0;

		/* zero all differential gains before accumulating samples */
		memset(&cmd, 0, sizeof(cmd));
		cmd.hdr.op_code = IL_PHY_CALIBRATE_DIFF_GAIN_CMD;
		cmd.diff_gain_a = 0;
		cmd.diff_gain_b = 0;
		cmd.diff_gain_c = 0;
		if (il_send_cmd_pdu(il, C_PHY_CALIBRATION, sizeof(cmd), &cmd))
			IL_ERR("Could not send C_PHY_CALIBRATION\n");
		data->state = IL_CHAIN_NOISE_ACCUMULATE;
		D_CALIB("Run chain_noise_calibrate\n");
	}
}
571
/* 4965-specific sensitivity calibration ranges, handed to the common
 * sensitivity code via il->hw_params.sens in il4965_hw_set_hw_params(). */
static struct il_sensitivity_ranges il4965_sensitivity = {
	.min_nrg_cck = 97,
	.max_nrg_cck = 0,	/* not used, set to 0 */

	.auto_corr_min_ofdm = 85,
	.auto_corr_min_ofdm_mrc = 170,
	.auto_corr_min_ofdm_x1 = 105,
	.auto_corr_min_ofdm_mrc_x1 = 220,

	.auto_corr_max_ofdm = 120,
	.auto_corr_max_ofdm_mrc = 210,
	.auto_corr_max_ofdm_x1 = 140,
	.auto_corr_max_ofdm_mrc_x1 = 270,

	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 200,
	.auto_corr_min_cck_mrc = 200,
	.auto_corr_max_cck_mrc = 400,

	.nrg_th_cck = 100,
	.nrg_th_ofdm = 100,

	.barker_corr_th_min = 190,
	.barker_corr_th_min_mrc = 390,
	.nrg_th_cca = 62,
};
598
/* Set the critical-temperature (CT) kill threshold, converted from the
 * legacy Celsius constant to the Kelvin value the hardware expects. */
static void
il4965_set_ct_threshold(struct il_priv *il)
{
	/* want Kelvin */
	il->hw_params.ct_kill_threshold =
	    CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
}
606
/**
 * il4965_hw_set_hw_params
 *
 * Called when initializing driver: populates il->hw_params with the
 * 4965-specific queue counts, buffer sizes, antenna configuration and
 * sensitivity ranges. Always returns 0.
 */
static int
il4965_hw_set_hw_params(struct il_priv *il)
{
	/* honor the num_of_queues module parameter when in range.
	 * NOTE(review): this writes through il->cfg->base_params, which
	 * looks shared between devices of the same config — confirm that
	 * mutating it per-device is intended. */
	if (il->cfg->mod_params->num_of_queues >= IL_MIN_NUM_QUEUES &&
	    il->cfg->mod_params->num_of_queues <= IL49_NUM_QUEUES)
		il->cfg->base_params->num_of_queues =
		    il->cfg->mod_params->num_of_queues;

	il->hw_params.max_txq_num = il->cfg->base_params->num_of_queues;
	il->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
	il->hw_params.scd_bc_tbls_size =
	    il->cfg->base_params->num_of_queues *
	    sizeof(struct il4965_scd_bc_tbl);
	il->hw_params.tfd_size = sizeof(struct il_tfd);
	il->hw_params.max_stations = IL4965_STATION_COUNT;
	il->ctx.bcast_sta_id = IL4965_BROADCAST_ID;
	il->hw_params.max_data_size = IL49_RTC_DATA_SIZE;
	il->hw_params.max_inst_size = IL49_RTC_INST_SIZE;
	il->hw_params.max_bsm_size = BSM_SRAM_SIZE;
	/* HT40 supported on the 5 GHz band only */
	il->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);

	il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR;

	/* antenna chain counts/masks come from the device config */
	il->hw_params.tx_chains_num = il4965_num_of_ant(il->cfg->valid_tx_ant);
	il->hw_params.rx_chains_num = il4965_num_of_ant(il->cfg->valid_rx_ant);
	il->hw_params.valid_tx_ant = il->cfg->valid_tx_ant;
	il->hw_params.valid_rx_ant = il->cfg->valid_rx_ant;

	il4965_set_ct_threshold(il);

	il->hw_params.sens = &il4965_sensitivity;
	il->hw_params.beacon_time_tsf_bits = IL4965_EXT_BEACON_TIME_POS;

	return 0;
}
647
/**
 * il4965_math_div_round - signed integer division with round-to-nearest
 * @num: numerator
 * @denom: denominator; must be non-zero (a zero value divides by zero,
 *	same as the original code — callers pass non-zero constants)
 * @res: receives round(num / denom), with the combined sign applied
 *
 * Rounds half away from zero.  Always returns 1; no caller in this file
 * checks the return value.
 */
static s32
il4965_math_div_round(s32 num, s32 denom, s32 *res)
{
	s32 sign = 1;

	/* work on magnitudes, remember the combined sign */
	if (num < 0) {
		sign = -sign;
		num = -num;
	}
	if (denom < 0) {
		sign = -sign;
		denom = -denom;
	}
	/* (num + denom/2) / denom, scaled by 2 so the +0.5 rounding bias
	 * stays integral.  (Removed a dead "*res = 1;" store that was
	 * immediately overwritten by this assignment.) */
	*res = ((num * 2 + denom) / (denom * 2)) * sign;

	return 1;
}
666
667/**
668 * il4965_get_voltage_compensation - Power supply voltage comp for txpower
669 *
670 * Determines power supply voltage compensation for txpower calculations.
671 * Returns number of 1/2-dB steps to subtract from gain table idx,
672 * to compensate for difference between power supply voltage during
673 * factory measurements, vs. current power supply voltage.
674 *
675 * Voltage indication is higher for lower voltage.
676 * Lower voltage requires more gain (lower gain table idx).
677 */
678static s32
679il4965_get_voltage_compensation(s32 eeprom_voltage, s32 current_voltage)
680{
681 s32 comp = 0;
682
683 if (TX_POWER_IL_ILLEGAL_VOLTAGE == eeprom_voltage ||
684 TX_POWER_IL_ILLEGAL_VOLTAGE == current_voltage)
685 return 0;
686
687 il4965_math_div_round(current_voltage - eeprom_voltage,
688 TX_POWER_IL_VOLTAGE_CODES_PER_03V, &comp);
689
690 if (current_voltage > eeprom_voltage)
691 comp *= 2;
692 if ((comp < -2) || (comp > 2))
693 comp = 0;
694
695 return comp;
696}
697
698static s32
699il4965_get_tx_atten_grp(u16 channel)
700{
701 if (channel >= CALIB_IL_TX_ATTEN_GR5_FCH &&
702 channel <= CALIB_IL_TX_ATTEN_GR5_LCH)
703 return CALIB_CH_GROUP_5;
704
705 if (channel >= CALIB_IL_TX_ATTEN_GR1_FCH &&
706 channel <= CALIB_IL_TX_ATTEN_GR1_LCH)
707 return CALIB_CH_GROUP_1;
708
709 if (channel >= CALIB_IL_TX_ATTEN_GR2_FCH &&
710 channel <= CALIB_IL_TX_ATTEN_GR2_LCH)
711 return CALIB_CH_GROUP_2;
712
713 if (channel >= CALIB_IL_TX_ATTEN_GR3_FCH &&
714 channel <= CALIB_IL_TX_ATTEN_GR3_LCH)
715 return CALIB_CH_GROUP_3;
716
717 if (channel >= CALIB_IL_TX_ATTEN_GR4_FCH &&
718 channel <= CALIB_IL_TX_ATTEN_GR4_LCH)
719 return CALIB_CH_GROUP_4;
720
721 return -EINVAL;
722}
723
724static u32
725il4965_get_sub_band(const struct il_priv *il, u32 channel)
726{
727 s32 b = -1;
728
729 for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
730 if (il->calib_info->band_info[b].ch_from == 0)
731 continue;
732
733 if (channel >= il->calib_info->band_info[b].ch_from &&
734 channel <= il->calib_info->band_info[b].ch_to)
735 break;
736 }
737
738 return b;
739}
740
741static s32
742il4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
743{
744 s32 val;
745
746 if (x2 == x1)
747 return y1;
748 else {
749 il4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
750 return val + y2;
751 }
752}
753
754/**
755 * il4965_interpolate_chan - Interpolate factory measurements for one channel
756 *
757 * Interpolates factory measurements from the two sample channels within a
758 * sub-band, to apply to channel of interest. Interpolation is proportional to
759 * differences in channel frequencies, which is proportional to differences
760 * in channel number.
761 */
762static int
763il4965_interpolate_chan(struct il_priv *il, u32 channel,
764 struct il_eeprom_calib_ch_info *chan_info)
765{
766 s32 s = -1;
767 u32 c;
768 u32 m;
769 const struct il_eeprom_calib_measure *m1;
770 const struct il_eeprom_calib_measure *m2;
771 struct il_eeprom_calib_measure *omeas;
772 u32 ch_i1;
773 u32 ch_i2;
774
775 s = il4965_get_sub_band(il, channel);
776 if (s >= EEPROM_TX_POWER_BANDS) {
777 IL_ERR("Tx Power can not find channel %d\n", channel);
778 return -1;
779 }
780
781 ch_i1 = il->calib_info->band_info[s].ch1.ch_num;
782 ch_i2 = il->calib_info->band_info[s].ch2.ch_num;
783 chan_info->ch_num = (u8) channel;
784
785 D_TXPOWER("channel %d subband %d factory cal ch %d & %d\n", channel, s,
786 ch_i1, ch_i2);
787
788 for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
789 for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
790 m1 = &(il->calib_info->band_info[s].ch1.
791 measurements[c][m]);
792 m2 = &(il->calib_info->band_info[s].ch2.
793 measurements[c][m]);
794 omeas = &(chan_info->measurements[c][m]);
795
796 omeas->actual_pow =
797 (u8) il4965_interpolate_value(channel, ch_i1,
798 m1->actual_pow, ch_i2,
799 m2->actual_pow);
800 omeas->gain_idx =
801 (u8) il4965_interpolate_value(channel, ch_i1,
802 m1->gain_idx, ch_i2,
803 m2->gain_idx);
804 omeas->temperature =
805 (u8) il4965_interpolate_value(channel, ch_i1,
806 m1->temperature,
807 ch_i2,
808 m2->temperature);
809 omeas->pa_det =
810 (s8) il4965_interpolate_value(channel, ch_i1,
811 m1->pa_det, ch_i2,
812 m2->pa_det);
813
814 D_TXPOWER("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c,
815 m, m1->actual_pow, m2->actual_pow,
816 omeas->actual_pow);
817 D_TXPOWER("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c,
818 m, m1->gain_idx, m2->gain_idx,
819 omeas->gain_idx);
820 D_TXPOWER("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c,
821 m, m1->pa_det, m2->pa_det, omeas->pa_det);
822 D_TXPOWER("chain %d meas %d T1=%d T2=%d T=%d\n", c,
823 m, m1->temperature, m2->temperature,
824 omeas->temperature);
825 }
826 }
827
828 return 0;
829}
830
/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
 * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates.
 * 33 entries (4 groups of 8 OFDM/HT rates + 1 CCK), idxed by the same
 * rate idx il4965_fill_txpower_tbl() iterates over — presumably equal
 * to POWER_TBL_NUM_ENTRIES; confirm against the command definitions. */
static s32 back_off_table[] = {
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 20 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 20 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 40 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 40 MHz */
	10			/* CCK */
};
840
841/* Thermal compensation values for txpower for various frequency ranges ...
842 * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
843static struct il4965_txpower_comp_entry {
844 s32 degrees_per_05db_a;
845 s32 degrees_per_05db_a_denom;
846} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
847 {
848 9, 2}, /* group 0 5.2, ch 34-43 */
849 {
850 4, 1}, /* group 1 5.2, ch 44-70 */
851 {
852 4, 1}, /* group 2 5.2, ch 71-124 */
853 {
854 4, 1}, /* group 3 5.2, ch 125-200 */
855 {
856 3, 1} /* group 4 2.4, ch all */
857};
858
859static s32
860get_min_power_idx(s32 rate_power_idx, u32 band)
861{
862 if (!band) {
863 if ((rate_power_idx & 7) <= 4)
864 return MIN_TX_GAIN_IDX_52GHZ_EXT;
865 }
866 return MIN_TX_GAIN_IDX;
867}
868
/* One dsp-attenuation / radio-gain pair per txpower gain-table idx. */
struct gain_entry {
	u8 dsp;
	u8 radio;
};

/* Hardware gain tables: [0] = 5.2 GHz, [1] = 2.4 GHz; idx 0 is the
 * highest txpower and larger idxes give progressively lower power
 * (il4965_fill_txpower_tbl() clamps idxes to 0..107).  Values are
 * factory-defined register settings — do not edit. */
static const struct gain_entry gain_table[2][108] = {
	/* 5.2GHz power gain idx table */
	{
	 {123, 0x3F},		/* highest txpower */
	 {117, 0x3F},
	 {110, 0x3F},
	 {104, 0x3F},
	 {98, 0x3F},
	 {110, 0x3E},
	 {104, 0x3E},
	 {98, 0x3E},
	 {110, 0x3D},
	 {104, 0x3D},
	 {98, 0x3D},
	 {110, 0x3C},
	 {104, 0x3C},
	 {98, 0x3C},
	 {110, 0x3B},
	 {104, 0x3B},
	 {98, 0x3B},
	 {110, 0x3A},
	 {104, 0x3A},
	 {98, 0x3A},
	 {110, 0x39},
	 {104, 0x39},
	 {98, 0x39},
	 {110, 0x38},
	 {104, 0x38},
	 {98, 0x38},
	 {110, 0x37},
	 {104, 0x37},
	 {98, 0x37},
	 {110, 0x36},
	 {104, 0x36},
	 {98, 0x36},
	 {110, 0x35},
	 {104, 0x35},
	 {98, 0x35},
	 {110, 0x34},
	 {104, 0x34},
	 {98, 0x34},
	 {110, 0x33},
	 {104, 0x33},
	 {98, 0x33},
	 {110, 0x32},
	 {104, 0x32},
	 {98, 0x32},
	 {110, 0x31},
	 {104, 0x31},
	 {98, 0x31},
	 {110, 0x30},
	 {104, 0x30},
	 {98, 0x30},
	 {110, 0x25},
	 {104, 0x25},
	 {98, 0x25},
	 {110, 0x24},
	 {104, 0x24},
	 {98, 0x24},
	 {110, 0x23},
	 {104, 0x23},
	 {98, 0x23},
	 {110, 0x22},
	 {104, 0x18},
	 {98, 0x18},
	 {110, 0x17},
	 {104, 0x17},
	 {98, 0x17},
	 {110, 0x16},
	 {104, 0x16},
	 {98, 0x16},
	 {110, 0x15},
	 {104, 0x15},
	 {98, 0x15},
	 {110, 0x14},
	 {104, 0x14},
	 {98, 0x14},
	 {110, 0x13},
	 {104, 0x13},
	 {98, 0x13},
	 {110, 0x12},
	 {104, 0x08},
	 {98, 0x08},
	 {110, 0x07},
	 {104, 0x07},
	 {98, 0x07},
	 {110, 0x06},
	 {104, 0x06},
	 {98, 0x06},
	 {110, 0x05},
	 {104, 0x05},
	 {98, 0x05},
	 {110, 0x04},
	 {104, 0x04},
	 {98, 0x04},
	 {110, 0x03},
	 {104, 0x03},
	 {98, 0x03},
	 {110, 0x02},
	 {104, 0x02},
	 {98, 0x02},
	 {110, 0x01},
	 {104, 0x01},
	 {98, 0x01},
	 {110, 0x00},
	 {104, 0x00},
	 {98, 0x00},
	 {93, 0x00},
	 {88, 0x00},
	 {83, 0x00},
	 {78, 0x00},
	 },
	/* 2.4GHz power gain idx table */
	{
	 {110, 0x3f},		/* highest txpower */
	 {104, 0x3f},
	 {98, 0x3f},
	 {110, 0x3e},
	 {104, 0x3e},
	 {98, 0x3e},
	 {110, 0x3d},
	 {104, 0x3d},
	 {98, 0x3d},
	 {110, 0x3c},
	 {104, 0x3c},
	 {98, 0x3c},
	 {110, 0x3b},
	 {104, 0x3b},
	 {98, 0x3b},
	 {110, 0x3a},
	 {104, 0x3a},
	 {98, 0x3a},
	 {110, 0x39},
	 {104, 0x39},
	 {98, 0x39},
	 {110, 0x38},
	 {104, 0x38},
	 {98, 0x38},
	 {110, 0x37},
	 {104, 0x37},
	 {98, 0x37},
	 {110, 0x36},
	 {104, 0x36},
	 {98, 0x36},
	 {110, 0x35},
	 {104, 0x35},
	 {98, 0x35},
	 {110, 0x34},
	 {104, 0x34},
	 {98, 0x34},
	 {110, 0x33},
	 {104, 0x33},
	 {98, 0x33},
	 {110, 0x32},
	 {104, 0x32},
	 {98, 0x32},
	 {110, 0x31},
	 {104, 0x31},
	 {98, 0x31},
	 {110, 0x30},
	 {104, 0x30},
	 {98, 0x30},
	 {110, 0x6},
	 {104, 0x6},
	 {98, 0x6},
	 {110, 0x5},
	 {104, 0x5},
	 {98, 0x5},
	 {110, 0x4},
	 {104, 0x4},
	 {98, 0x4},
	 {110, 0x3},
	 {104, 0x3},
	 {98, 0x3},
	 {110, 0x2},
	 {104, 0x2},
	 {98, 0x2},
	 {110, 0x1},
	 {104, 0x1},
	 {98, 0x1},
	 {110, 0x0},
	 {104, 0x0},
	 {98, 0x0},
	 {97, 0},
	 {96, 0},
	 {95, 0},
	 {94, 0},
	 {93, 0},
	 {92, 0},
	 {91, 0},
	 {90, 0},
	 {89, 0},
	 {88, 0},
	 {87, 0},
	 {86, 0},
	 {85, 0},
	 {84, 0},
	 {83, 0},
	 {82, 0},
	 {81, 0},
	 {80, 0},
	 {79, 0},
	 {78, 0},
	 {77, 0},
	 {76, 0},
	 {75, 0},
	 {74, 0},
	 {73, 0},
	 {72, 0},
	 {71, 0},
	 {70, 0},
	 {69, 0},
	 {68, 0},
	 {67, 0},
	 {66, 0},
	 {65, 0},
	 {64, 0},
	 {63, 0},
	 {62, 0},
	 {61, 0},
	 {60, 0},
	 {59, 0},
	 }
};
1098
1099static int
1100il4965_fill_txpower_tbl(struct il_priv *il, u8 band, u16 channel, u8 is_ht40,
1101 u8 ctrl_chan_high,
1102 struct il4965_tx_power_db *tx_power_tbl)
1103{
1104 u8 saturation_power;
1105 s32 target_power;
1106 s32 user_target_power;
1107 s32 power_limit;
1108 s32 current_temp;
1109 s32 reg_limit;
1110 s32 current_regulatory;
1111 s32 txatten_grp = CALIB_CH_GROUP_MAX;
1112 int i;
1113 int c;
1114 const struct il_channel_info *ch_info = NULL;
1115 struct il_eeprom_calib_ch_info ch_eeprom_info;
1116 const struct il_eeprom_calib_measure *measurement;
1117 s16 voltage;
1118 s32 init_voltage;
1119 s32 voltage_compensation;
1120 s32 degrees_per_05db_num;
1121 s32 degrees_per_05db_denom;
1122 s32 factory_temp;
1123 s32 temperature_comp[2];
1124 s32 factory_gain_idx[2];
1125 s32 factory_actual_pwr[2];
1126 s32 power_idx;
1127
1128 /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units
1129 * are used for idxing into txpower table) */
1130 user_target_power = 2 * il->tx_power_user_lmt;
1131
1132 /* Get current (RXON) channel, band, width */
1133 D_TXPOWER("chan %d band %d is_ht40 %d\n", channel, band, is_ht40);
1134
1135 ch_info = il_get_channel_info(il, il->band, channel);
1136
1137 if (!il_is_channel_valid(ch_info))
1138 return -EINVAL;
1139
1140 /* get txatten group, used to select 1) thermal txpower adjustment
1141 * and 2) mimo txpower balance between Tx chains. */
1142 txatten_grp = il4965_get_tx_atten_grp(channel);
1143 if (txatten_grp < 0) {
1144 IL_ERR("Can't find txatten group for channel %d.\n", channel);
1145 return txatten_grp;
1146 }
1147
1148 D_TXPOWER("channel %d belongs to txatten group %d\n", channel,
1149 txatten_grp);
1150
1151 if (is_ht40) {
1152 if (ctrl_chan_high)
1153 channel -= 2;
1154 else
1155 channel += 2;
1156 }
1157
1158 /* hardware txpower limits ...
1159 * saturation (clipping distortion) txpowers are in half-dBm */
1160 if (band)
1161 saturation_power = il->calib_info->saturation_power24;
1162 else
1163 saturation_power = il->calib_info->saturation_power52;
1164
1165 if (saturation_power < IL_TX_POWER_SATURATION_MIN ||
1166 saturation_power > IL_TX_POWER_SATURATION_MAX) {
1167 if (band)
1168 saturation_power = IL_TX_POWER_DEFAULT_SATURATION_24;
1169 else
1170 saturation_power = IL_TX_POWER_DEFAULT_SATURATION_52;
1171 }
1172
1173 /* regulatory txpower limits ... reg_limit values are in half-dBm,
1174 * max_power_avg values are in dBm, convert * 2 */
1175 if (is_ht40)
1176 reg_limit = ch_info->ht40_max_power_avg * 2;
1177 else
1178 reg_limit = ch_info->max_power_avg * 2;
1179
1180 if ((reg_limit < IL_TX_POWER_REGULATORY_MIN) ||
1181 (reg_limit > IL_TX_POWER_REGULATORY_MAX)) {
1182 if (band)
1183 reg_limit = IL_TX_POWER_DEFAULT_REGULATORY_24;
1184 else
1185 reg_limit = IL_TX_POWER_DEFAULT_REGULATORY_52;
1186 }
1187
1188 /* Interpolate txpower calibration values for this channel,
1189 * based on factory calibration tests on spaced channels. */
1190 il4965_interpolate_chan(il, channel, &ch_eeprom_info);
1191
1192 /* calculate tx gain adjustment based on power supply voltage */
1193 voltage = le16_to_cpu(il->calib_info->voltage);
1194 init_voltage = (s32) le32_to_cpu(il->card_alive_init.voltage);
1195 voltage_compensation =
1196 il4965_get_voltage_compensation(voltage, init_voltage);
1197
1198 D_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n", init_voltage,
1199 voltage, voltage_compensation);
1200
1201 /* get current temperature (Celsius) */
1202 current_temp = max(il->temperature, IL_TX_POWER_TEMPERATURE_MIN);
1203 current_temp = min(il->temperature, IL_TX_POWER_TEMPERATURE_MAX);
1204 current_temp = KELVIN_TO_CELSIUS(current_temp);
1205
1206 /* select thermal txpower adjustment params, based on channel group
1207 * (same frequency group used for mimo txatten adjustment) */
1208 degrees_per_05db_num =
1209 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
1210 degrees_per_05db_denom =
1211 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
1212
1213 /* get per-chain txpower values from factory measurements */
1214 for (c = 0; c < 2; c++) {
1215 measurement = &ch_eeprom_info.measurements[c][1];
1216
1217 /* txgain adjustment (in half-dB steps) based on difference
1218 * between factory and current temperature */
1219 factory_temp = measurement->temperature;
1220 il4965_math_div_round((current_temp -
1221 factory_temp) * degrees_per_05db_denom,
1222 degrees_per_05db_num,
1223 &temperature_comp[c]);
1224
1225 factory_gain_idx[c] = measurement->gain_idx;
1226 factory_actual_pwr[c] = measurement->actual_pow;
1227
1228 D_TXPOWER("chain = %d\n", c);
1229 D_TXPOWER("fctry tmp %d, " "curr tmp %d, comp %d steps\n",
1230 factory_temp, current_temp, temperature_comp[c]);
1231
1232 D_TXPOWER("fctry idx %d, fctry pwr %d\n", factory_gain_idx[c],
1233 factory_actual_pwr[c]);
1234 }
1235
1236 /* for each of 33 bit-rates (including 1 for CCK) */
1237 for (i = 0; i < POWER_TBL_NUM_ENTRIES; i++) {
1238 u8 is_mimo_rate;
1239 union il4965_tx_power_dual_stream tx_power;
1240
1241 /* for mimo, reduce each chain's txpower by half
1242 * (3dB, 6 steps), so total output power is regulatory
1243 * compliant. */
1244 if (i & 0x8) {
1245 current_regulatory =
1246 reg_limit -
1247 IL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
1248 is_mimo_rate = 1;
1249 } else {
1250 current_regulatory = reg_limit;
1251 is_mimo_rate = 0;
1252 }
1253
1254 /* find txpower limit, either hardware or regulatory */
1255 power_limit = saturation_power - back_off_table[i];
1256 if (power_limit > current_regulatory)
1257 power_limit = current_regulatory;
1258
1259 /* reduce user's txpower request if necessary
1260 * for this rate on this channel */
1261 target_power = user_target_power;
1262 if (target_power > power_limit)
1263 target_power = power_limit;
1264
1265 D_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n", i,
1266 saturation_power - back_off_table[i],
1267 current_regulatory, user_target_power, target_power);
1268
1269 /* for each of 2 Tx chains (radio transmitters) */
1270 for (c = 0; c < 2; c++) {
1271 s32 atten_value;
1272
1273 if (is_mimo_rate)
1274 atten_value =
1275 (s32) le32_to_cpu(il->card_alive_init.
1276 tx_atten[txatten_grp][c]);
1277 else
1278 atten_value = 0;
1279
1280 /* calculate idx; higher idx means lower txpower */
1281 power_idx =
1282 (u8) (factory_gain_idx[c] -
1283 (target_power - factory_actual_pwr[c]) -
1284 temperature_comp[c] - voltage_compensation +
1285 atten_value);
1286
1287/* D_TXPOWER("calculated txpower idx %d\n",
1288 power_idx); */
1289
1290 if (power_idx < get_min_power_idx(i, band))
1291 power_idx = get_min_power_idx(i, band);
1292
1293 /* adjust 5 GHz idx to support negative idxes */
1294 if (!band)
1295 power_idx += 9;
1296
1297 /* CCK, rate 32, reduce txpower for CCK */
1298 if (i == POWER_TBL_CCK_ENTRY)
1299 power_idx +=
1300 IL_TX_POWER_CCK_COMPENSATION_C_STEP;
1301
1302 /* stay within the table! */
1303 if (power_idx > 107) {
1304 IL_WARN("txpower idx %d > 107\n", power_idx);
1305 power_idx = 107;
1306 }
1307 if (power_idx < 0) {
1308 IL_WARN("txpower idx %d < 0\n", power_idx);
1309 power_idx = 0;
1310 }
1311
1312 /* fill txpower command for this rate/chain */
1313 tx_power.s.radio_tx_gain[c] =
1314 gain_table[band][power_idx].radio;
1315 tx_power.s.dsp_predis_atten[c] =
1316 gain_table[band][power_idx].dsp;
1317
1318 D_TXPOWER("chain %d mimo %d idx %d "
1319 "gain 0x%02x dsp %d\n", c, atten_value,
1320 power_idx, tx_power.s.radio_tx_gain[c],
1321 tx_power.s.dsp_predis_atten[c]);
1322 } /* for each chain */
1323
1324 tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
1325
1326 } /* for each rate */
1327
1328 return 0;
1329}
1330
1331/**
1332 * il4965_send_tx_power - Configure the TXPOWER level user limit
1333 *
1334 * Uses the active RXON for channel, band, and characteristics (ht40, high)
1335 * The power limit is taken from il->tx_power_user_lmt.
1336 */
1337static int
1338il4965_send_tx_power(struct il_priv *il)
1339{
1340 struct il4965_txpowertable_cmd cmd = { 0 };
1341 int ret;
1342 u8 band = 0;
1343 bool is_ht40 = false;
1344 u8 ctrl_chan_high = 0;
1345 struct il_rxon_context *ctx = &il->ctx;
1346
1347 if (WARN_ONCE
1348 (test_bit(S_SCAN_HW, &il->status),
1349 "TX Power requested while scanning!\n"))
1350 return -EAGAIN;
1351
1352 band = il->band == IEEE80211_BAND_2GHZ;
1353
1354 is_ht40 = iw4965_is_ht40_channel(ctx->active.flags);
1355
1356 if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
1357 ctrl_chan_high = 1;
1358
1359 cmd.band = band;
1360 cmd.channel = ctx->active.channel;
1361
1362 ret =
1363 il4965_fill_txpower_tbl(il, band, le16_to_cpu(ctx->active.channel),
1364 is_ht40, ctrl_chan_high, &cmd.tx_power);
1365 if (ret)
1366 goto out;
1367
1368 ret = il_send_cmd_pdu(il, C_TX_PWR_TBL, sizeof(cmd), &cmd);
1369
1370out:
1371 return ret;
1372}
1373
1374static int
1375il4965_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
1376{
1377 int ret = 0;
1378 struct il4965_rxon_assoc_cmd rxon_assoc;
1379 const struct il_rxon_cmd *rxon1 = &ctx->staging;
1380 const struct il_rxon_cmd *rxon2 = &ctx->active;
1381
1382 if (rxon1->flags == rxon2->flags &&
1383 rxon1->filter_flags == rxon2->filter_flags &&
1384 rxon1->cck_basic_rates == rxon2->cck_basic_rates &&
1385 rxon1->ofdm_ht_single_stream_basic_rates ==
1386 rxon2->ofdm_ht_single_stream_basic_rates &&
1387 rxon1->ofdm_ht_dual_stream_basic_rates ==
1388 rxon2->ofdm_ht_dual_stream_basic_rates &&
1389 rxon1->rx_chain == rxon2->rx_chain &&
1390 rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates) {
1391 D_INFO("Using current RXON_ASSOC. Not resending.\n");
1392 return 0;
1393 }
1394
1395 rxon_assoc.flags = ctx->staging.flags;
1396 rxon_assoc.filter_flags = ctx->staging.filter_flags;
1397 rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
1398 rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
1399 rxon_assoc.reserved = 0;
1400 rxon_assoc.ofdm_ht_single_stream_basic_rates =
1401 ctx->staging.ofdm_ht_single_stream_basic_rates;
1402 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
1403 ctx->staging.ofdm_ht_dual_stream_basic_rates;
1404 rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
1405
1406 ret =
1407 il_send_cmd_pdu_async(il, C_RXON_ASSOC, sizeof(rxon_assoc),
1408 &rxon_assoc, NULL);
1409
1410 return ret;
1411}
1412
/**
 * il4965_commit_rxon - commit the staging RXON configuration to the uCode
 * @il: driver private data
 * @ctx: RXON context whose staging config should become active
 *
 * Sends either a lightweight RXON_ASSOC (when only filter/rate fields
 * changed) or a full RXON command.  A full RXON with the "unassociated"
 * filter clears the uCode station table, so stations and default WEP
 * keys are restored afterward.  Returns 0 on success or a negative
 * error code.
 */
static int
il4965_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
{
	/* cast away the const for active_rxon in this function */
	struct il_rxon_cmd *active_rxon = (void *)&ctx->active;
	int ret;
	bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);

	if (!il_is_alive(il))
		return -EBUSY;

	if (!ctx->is_active)
		return 0;

	/* always get timestamp with Rx frame */
	ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;

	ret = il_check_rxon_cmd(il, ctx);
	if (ret) {
		IL_ERR("Invalid RXON configuration. Not committing.\n");
		return -EINVAL;
	}

	/*
	 * receive commit_rxon request
	 * abort any previous channel switch if still in process
	 */
	if (test_bit(S_CHANNEL_SWITCH_PENDING, &il->status) &&
	    il->switch_channel != ctx->staging.channel) {
		D_11H("abort channel switch on %d\n",
		      le16_to_cpu(il->switch_channel));
		il_chswitch_done(il, false);
	}

	/* If we don't need to send a full RXON, we can use
	 * il_rxon_assoc_cmd which is used to reconfigure filter
	 * and other flags for the current radio configuration. */
	if (!il_full_rxon_required(il, ctx)) {
		ret = il_send_rxon_assoc(il, ctx);
		if (ret) {
			IL_ERR("Error setting RXON_ASSOC (%d)\n", ret);
			return ret;
		}

		memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
		il_print_rx_config_cmd(il, ctx);
		/*
		 * We do not commit tx power settings while channel changing,
		 * do it now if tx power changed.
		 */
		il_set_tx_power(il, il->tx_power_next, false);
		return 0;
	}

	/* If we are currently associated and the new config requires
	 * an RXON_ASSOC and the new config wants the associated mask enabled,
	 * we must clear the associated from the active configuration
	 * before we apply the new config */
	if (il_is_associated_ctx(ctx) && new_assoc) {
		D_INFO("Toggling associated bit on current RXON\n");
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;

		ret =
		    il_send_cmd_pdu(il, ctx->rxon_cmd,
				    sizeof(struct il_rxon_cmd), active_rxon);

		/* If the mask clearing failed then we set
		 * active_rxon back to what it was previously */
		if (ret) {
			active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
			IL_ERR("Error clearing ASSOC_MSK (%d)\n", ret);
			return ret;
		}
		/* the unassoc RXON above wiped the uCode station table */
		il_clear_ucode_stations(il, ctx);
		il_restore_stations(il, ctx);
		ret = il4965_restore_default_wep_keys(il, ctx);
		if (ret) {
			IL_ERR("Failed to restore WEP keys (%d)\n", ret);
			return ret;
		}
	}

	D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n"
	       "* channel = %d\n" "* bssid = %pM\n", (new_assoc ? "" : "out"),
	       le16_to_cpu(ctx->staging.channel), ctx->staging.bssid_addr);

	il_set_rxon_hwcrypto(il, ctx, !il->cfg->mod_params->sw_crypto);

	/* Apply the new configuration
	 * RXON unassoc clears the station table in uCode so restoration of
	 * stations is needed after it (the RXON command) completes
	 */
	if (!new_assoc) {
		ret =
		    il_send_cmd_pdu(il, ctx->rxon_cmd,
				    sizeof(struct il_rxon_cmd), &ctx->staging);
		if (ret) {
			IL_ERR("Error setting new RXON (%d)\n", ret);
			return ret;
		}
		D_INFO("Return from !new_assoc RXON.\n");
		memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
		il_clear_ucode_stations(il, ctx);
		il_restore_stations(il, ctx);
		ret = il4965_restore_default_wep_keys(il, ctx);
		if (ret) {
			IL_ERR("Failed to restore WEP keys (%d)\n", ret);
			return ret;
		}
	}
	if (new_assoc) {
		il->start_calib = 0;
		/* Apply the new configuration
		 * RXON assoc doesn't clear the station table in uCode,
		 */
		ret =
		    il_send_cmd_pdu(il, ctx->rxon_cmd,
				    sizeof(struct il_rxon_cmd), &ctx->staging);
		if (ret) {
			IL_ERR("Error setting new RXON (%d)\n", ret);
			return ret;
		}
		memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
	}
	il_print_rx_config_cmd(il, ctx);

	/* sensitivity calibration restarts from scratch on a new channel */
	il4965_init_sensitivity(il);

	/* If we issue a new RXON command which required a tune then we must
	 * send a new TXPOWER command or we won't be able to Tx any frames */
	ret = il_set_tx_power(il, il->tx_power_next, true);
	if (ret) {
		IL_ERR("Error sending TX power (%d)\n", ret);
		return ret;
	}

	return 0;
}
1551
/**
 * il4965_hw_channel_switch - program the uCode for a pending channel switch
 * @il: driver private data
 * @ch_switch: mac80211 switch request (target channel, count, timestamp)
 *
 * Builds and sends a C_CHANNEL_SWITCH command: derives the uCode switch
 * time from the beacon interval and the request's count/TSF, and fills
 * the txpower table for the target channel.  Returns 0 on success or a
 * negative error.
 */
static int
il4965_hw_channel_switch(struct il_priv *il,
			 struct ieee80211_channel_switch *ch_switch)
{
	struct il_rxon_context *ctx = &il->ctx;
	int rc;
	u8 band = 0;
	bool is_ht40 = false;
	u8 ctrl_chan_high = 0;
	/* NOTE(review): cmd is not zeroed; every field sent appears to be
	 * assigned below, but confirm the struct has no padding/reserved
	 * fields that would leak stack data to the device. */
	struct il4965_channel_switch_cmd cmd;
	const struct il_channel_info *ch_info;
	u32 switch_time_in_usec, ucode_switch_time;
	u16 ch;
	u32 tsf_low;
	u8 switch_count;
	u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
	struct ieee80211_vif *vif = ctx->vif;
	band = il->band == IEEE80211_BAND_2GHZ;

	is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags);

	if (is_ht40 && (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
		ctrl_chan_high = 1;

	cmd.band = band;
	cmd.expect_beacon = 0;
	ch = ch_switch->channel->hw_value;
	cmd.channel = cpu_to_le16(ch);
	cmd.rxon_flags = ctx->staging.flags;
	cmd.rxon_filter_flags = ctx->staging.filter_flags;
	switch_count = ch_switch->count;
	tsf_low = ch_switch->timestamp & 0x0ffffffff;
	/*
	 * calculate the ucode channel switch time
	 * adding TSF as one of the factor for when to switch
	 */
	if (il->ucode_beacon_time > tsf_low && beacon_interval) {
		/* reduce the count by the beacons already elapsed since
		 * the TSF snapshot, clamping at zero */
		if (switch_count >
		    ((il->ucode_beacon_time - tsf_low) / beacon_interval)) {
			switch_count -=
			    (il->ucode_beacon_time - tsf_low) / beacon_interval;
		} else
			switch_count = 0;
	}
	if (switch_count <= 1)
		cmd.switch_time = cpu_to_le32(il->ucode_beacon_time);
	else {
		switch_time_in_usec =
		    vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
		ucode_switch_time =
		    il_usecs_to_beacons(il, switch_time_in_usec,
					beacon_interval);
		cmd.switch_time =
		    il_add_beacon_time(il, il->ucode_beacon_time,
				       ucode_switch_time, beacon_interval);
	}
	D_11H("uCode time for the switch is 0x%x\n", cmd.switch_time);
	ch_info = il_get_channel_info(il, il->band, ch);
	if (ch_info)
		cmd.expect_beacon = il_is_channel_radar(ch_info);
	else {
		IL_ERR("invalid channel switch from %u to %u\n",
		       ctx->active.channel, ch);
		return -EFAULT;
	}

	/* txpower table must match the target channel, not the current one */
	rc = il4965_fill_txpower_tbl(il, band, ch, is_ht40, ctrl_chan_high,
				     &cmd.tx_power);
	if (rc) {
		D_11H("error:%d fill txpower_tbl\n", rc);
		return rc;
	}

	return il_send_cmd_pdu(il, C_CHANNEL_SWITCH, sizeof(cmd), &cmd);
}
1627
1628/**
1629 * il4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
1630 */
1631static void
1632il4965_txq_update_byte_cnt_tbl(struct il_priv *il, struct il_tx_queue *txq,
1633 u16 byte_cnt)
1634{
1635 struct il4965_scd_bc_tbl *scd_bc_tbl = il->scd_bc_tbls.addr;
1636 int txq_id = txq->q.id;
1637 int write_ptr = txq->q.write_ptr;
1638 int len = byte_cnt + IL_TX_CRC_SIZE + IL_TX_DELIMITER_SIZE;
1639 __le16 bc_ent;
1640
1641 WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
1642
1643 bc_ent = cpu_to_le16(len & 0xFFF);
1644 /* Set up byte count within first 256 entries */
1645 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
1646
1647 /* If within first 64 entries, duplicate at end */
1648 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
1649 scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
1650 bc_ent;
1651}
1652
1653/**
1654 * il4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
1655 * @stats: Provides the temperature reading from the uCode
1656 *
1657 * A return of <0 indicates bogus data in the stats
1658 */
1659static int
1660il4965_hw_get_temperature(struct il_priv *il)
1661{
1662 s32 temperature;
1663 s32 vt;
1664 s32 R1, R2, R3;
1665 u32 R4;
1666
1667 if (test_bit(S_TEMPERATURE, &il->status) &&
1668 (il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)) {
1669 D_TEMP("Running HT40 temperature calibration\n");
1670 R1 = (s32) le32_to_cpu(il->card_alive_init.therm_r1[1]);
1671 R2 = (s32) le32_to_cpu(il->card_alive_init.therm_r2[1]);
1672 R3 = (s32) le32_to_cpu(il->card_alive_init.therm_r3[1]);
1673 R4 = le32_to_cpu(il->card_alive_init.therm_r4[1]);
1674 } else {
1675 D_TEMP("Running temperature calibration\n");
1676 R1 = (s32) le32_to_cpu(il->card_alive_init.therm_r1[0]);
1677 R2 = (s32) le32_to_cpu(il->card_alive_init.therm_r2[0]);
1678 R3 = (s32) le32_to_cpu(il->card_alive_init.therm_r3[0]);
1679 R4 = le32_to_cpu(il->card_alive_init.therm_r4[0]);
1680 }
1681
1682 /*
1683 * Temperature is only 23 bits, so sign extend out to 32.
1684 *
1685 * NOTE If we haven't received a stats notification yet
1686 * with an updated temperature, use R4 provided to us in the
1687 * "initialize" ALIVE response.
1688 */
1689 if (!test_bit(S_TEMPERATURE, &il->status))
1690 vt = sign_extend32(R4, 23);
1691 else
1692 vt = sign_extend32(le32_to_cpu
1693 (il->_4965.stats.general.common.temperature),
1694 23);
1695
1696 D_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
1697
1698 if (R3 == R1) {
1699 IL_ERR("Calibration conflict R1 == R3\n");
1700 return -1;
1701 }
1702
1703 /* Calculate temperature in degrees Kelvin, adjust by 97%.
1704 * Add offset to center the adjustment around 0 degrees Centigrade. */
1705 temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
1706 temperature /= (R3 - R1);
1707 temperature =
1708 (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
1709
1710 D_TEMP("Calibrated temperature: %dK, %dC\n", temperature,
1711 KELVIN_TO_CELSIUS(temperature));
1712
1713 return temperature;
1714}
1715
/* Adjust Txpower only if temperature variance is greater than threshold.
 * Threshold is in Kelvin/Celsius degrees (the two scales share step size). */
#define IL_TEMPERATURE_THRESHOLD   3

/**
 * il4965_is_temp_calib_needed - determines if new calibration is needed
 *
 * If the temperature has changed sufficiently, then a recalibration
 * is needed.
 *
 * Assumes caller will replace il->last_temperature once calibration
 * executed.
 *
 * Returns 1 when thermal txpower calibration should run, 0 otherwise.
 */
static int
il4965_is_temp_calib_needed(struct il_priv *il)
{
	int temp_diff;

	/* temperature readings are only meaningful once a stats
	 * notification has been received */
	if (!test_bit(S_STATS, &il->status)) {
		D_TEMP("Temperature not updated -- no stats.\n");
		return 0;
	}

	temp_diff = il->temperature - il->last_temperature;

	/* get absolute value */
	if (temp_diff < 0) {
		D_POWER("Getting cooler, delta %d\n", temp_diff);
		temp_diff = -temp_diff;
	} else if (temp_diff == 0)
		D_POWER("Temperature unchanged\n");
	else
		D_POWER("Getting warmer, delta %d\n", temp_diff);

	if (temp_diff < IL_TEMPERATURE_THRESHOLD) {
		D_POWER(" => thermal txpower calib not needed\n");
		return 0;
	}

	D_POWER(" => thermal txpower calib needed\n");

	return 1;
}
1758
1759static void
1760il4965_temperature_calib(struct il_priv *il)
1761{
1762 s32 temp;
1763
1764 temp = il4965_hw_get_temperature(il);
1765 if (IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
1766 return;
1767
1768 if (il->temperature != temp) {
1769 if (il->temperature)
1770 D_TEMP("Temperature changed " "from %dC to %dC\n",
1771 KELVIN_TO_CELSIUS(il->temperature),
1772 KELVIN_TO_CELSIUS(temp));
1773 else
1774 D_TEMP("Temperature " "initialized to %dC\n",
1775 KELVIN_TO_CELSIUS(temp));
1776 }
1777
1778 il->temperature = temp;
1779 set_bit(S_TEMPERATURE, &il->status);
1780
1781 if (!il->disable_tx_power_cal &&
1782 unlikely(!test_bit(S_SCANNING, &il->status)) &&
1783 il4965_is_temp_calib_needed(il))
1784 queue_work(il->workqueue, &il->txpower_work);
1785}
1786
1787static u16
1788il4965_get_hcmd_size(u8 cmd_id, u16 len)
1789{
1790 switch (cmd_id) {
1791 case C_RXON:
1792 return (u16) sizeof(struct il4965_rxon_cmd);
1793 default:
1794 return len;
1795 }
1796}
1797
1798static u16
1799il4965_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 * data)
1800{
1801 struct il4965_addsta_cmd *addsta = (struct il4965_addsta_cmd *)data;
1802 addsta->mode = cmd->mode;
1803 memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
1804 memcpy(&addsta->key, &cmd->key, sizeof(struct il4965_keyinfo));
1805 addsta->station_flags = cmd->station_flags;
1806 addsta->station_flags_msk = cmd->station_flags_msk;
1807 addsta->tid_disable_tx = cmd->tid_disable_tx;
1808 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
1809 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
1810 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
1811 addsta->sleep_tx_count = cmd->sleep_tx_count;
1812 addsta->reserved1 = cpu_to_le16(0);
1813 addsta->reserved2 = cpu_to_le16(0);
1814
1815 return (u16) sizeof(struct il4965_addsta_cmd);
1816}
1817
1818static inline u32
1819il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp)
1820{
1821 return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
1822}
1823
1824static inline u32
1825il4965_tx_status_to_mac80211(u32 status)
1826{
1827 status &= TX_STATUS_MSK;
1828
1829 switch (status) {
1830 case TX_STATUS_SUCCESS:
1831 case TX_STATUS_DIRECT_DONE:
1832 return IEEE80211_TX_STAT_ACK;
1833 case TX_STATUS_FAIL_DEST_PS:
1834 return IEEE80211_TX_STAT_TX_FILTERED;
1835 default:
1836 return 0;
1837 }
1838}
1839
1840static inline bool
1841il4965_is_tx_success(u32 status)
1842{
1843 status &= TX_STATUS_MSK;
1844 return (status == TX_STATUS_SUCCESS || status == TX_STATUS_DIRECT_DONE);
1845}
1846
/**
 * il4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
 *
 * Records the Tx-command status of an aggregate into @agg. A one-frame
 * "aggregate" is final (no block-ack will follow); for two or more frames
 * a 64-bit bitmap of still-pending frames is built and agg->wait_for_ba is
 * set so the compressed block-ack handler can reconcile it later.
 *
 * Returns 0 on success, -1 when a firmware-reported idx does not map to a
 * valid frame in the Tx queue.
 */
static int
il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
			  struct il4965_tx_resp *tx_resp, int txq_id,
			  u16 start_idx)
{
	u16 status;
	struct agg_tx_status *frame_status = tx_resp->u.agg_status;
	struct ieee80211_tx_info *info = NULL;
	struct ieee80211_hdr *hdr = NULL;
	u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
	int i, sh, idx;
	u16 seq;
	if (agg->wait_for_ba)
		D_TX_REPLY("got tx response w/o block-ack\n");

	agg->frame_count = tx_resp->frame_count;
	agg->start_idx = start_idx;
	agg->rate_n_flags = rate_n_flags;
	agg->bitmap = 0;

	/* num frames attempted by Tx command */
	if (agg->frame_count == 1) {
		/* Only one frame was attempted; no block-ack will arrive */
		status = le16_to_cpu(frame_status[0].status);
		idx = start_idx;

		D_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
			   agg->frame_count, agg->start_idx, idx);

		/* Report status for the single frame directly to mac80211 */
		info = IEEE80211_SKB_CB(il->txq[txq_id].txb[idx].skb);
		info->status.rates[0].count = tx_resp->failure_frame + 1;
		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
		info->flags |= il4965_tx_status_to_mac80211(status);
		il4965_hwrate_to_tx_control(il, rate_n_flags, info);

		D_TX_REPLY("1 Frame 0x%x failure :%d\n", status & 0xff,
			   tx_resp->failure_frame);
		D_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags);

		agg->wait_for_ba = 0;
	} else {
		/* Two or more frames were attempted; expect block-ack */
		u64 bitmap = 0;
		int start = agg->start_idx;

		/* Construct bit-map of pending frames within Tx win */
		for (i = 0; i < agg->frame_count; i++) {
			u16 sc;
			status = le16_to_cpu(frame_status[i].status);
			seq = le16_to_cpu(frame_status[i].sequence);
			/* Each status entry carries the queue/idx it used */
			idx = SEQ_TO_IDX(seq);
			txq_id = SEQ_TO_QUEUE(seq);

			/* Skip entries the firmware never put on the air */
			if (status &
			    (AGG_TX_STATE_FEW_BYTES_MSK |
			     AGG_TX_STATE_ABORT_MSK))
				continue;

			D_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
				   agg->frame_count, txq_id, idx);

			hdr = il_tx_queue_get_hdr(il, txq_id, idx);
			if (!hdr) {
				IL_ERR("BUG_ON idx doesn't point to valid skb"
				       " idx=%d, txq_id=%d\n", idx, txq_id);
				return -1;
			}

			/* Cross-check queue idx against the 802.11 seq num */
			sc = le16_to_cpu(hdr->seq_ctrl);
			if (idx != (SEQ_TO_SN(sc) & 0xff)) {
				IL_ERR("BUG_ON idx doesn't match seq control"
				       " idx=%d, seq_idx=%d, seq=%d\n", idx,
				       SEQ_TO_SN(sc), hdr->seq_ctrl);
				return -1;
			}

			D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx,
				   SEQ_TO_SN(sc));

			/* Fold this frame into the window bitmap; slide the
			 * window start when idx wraps around the 256-entry
			 * queue (0xff) or precedes the current start */
			sh = idx - start;
			if (sh > 64) {
				sh = (start - idx) + 0xff;
				bitmap = bitmap << sh;
				sh = 0;
				start = idx;
			} else if (sh < -64)
				sh = 0xff - (start - idx);
			else if (sh < 0) {
				sh = start - idx;
				start = idx;
				bitmap = bitmap << sh;
				sh = 0;
			}
			bitmap |= 1ULL << sh;
			D_TX_REPLY("start=%d bitmap=0x%llx\n", start,
				   (unsigned long long)bitmap);
		}

		agg->bitmap = bitmap;
		agg->start_idx = start;
		D_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
			   agg->frame_count, agg->start_idx,
			   (unsigned long long)agg->bitmap);

		/* A non-empty bitmap means a block-ack is outstanding */
		if (bitmap)
			agg->wait_for_ba = 1;
	}
	return 0;
}
1959
1960static u8
1961il4965_find_station(struct il_priv *il, const u8 * addr)
1962{
1963 int i;
1964 int start = 0;
1965 int ret = IL_INVALID_STATION;
1966 unsigned long flags;
1967
1968 if ((il->iw_mode == NL80211_IFTYPE_ADHOC))
1969 start = IL_STA_ID;
1970
1971 if (is_broadcast_ether_addr(addr))
1972 return il->ctx.bcast_sta_id;
1973
1974 spin_lock_irqsave(&il->sta_lock, flags);
1975 for (i = start; i < il->hw_params.max_stations; i++)
1976 if (il->stations[i].used &&
1977 (!compare_ether_addr(il->stations[i].sta.sta.addr, addr))) {
1978 ret = i;
1979 goto out;
1980 }
1981
1982 D_ASSOC("can not find STA %pM total %d\n", addr, il->num_stations);
1983
1984out:
1985 /*
1986 * It may be possible that more commands interacting with stations
1987 * arrive before we completed processing the adding of
1988 * station
1989 */
1990 if (ret != IL_INVALID_STATION &&
1991 (!(il->stations[ret].used & IL_STA_UCODE_ACTIVE) ||
1992 ((il->stations[ret].used & IL_STA_UCODE_ACTIVE) &&
1993 (il->stations[ret].used & IL_STA_UCODE_INPROGRESS)))) {
1994 IL_ERR("Requested station info for sta %d before ready.\n",
1995 ret);
1996 ret = IL_INVALID_STATION;
1997 }
1998 spin_unlock_irqrestore(&il->sta_lock, flags);
1999 return ret;
2000}
2001
2002static int
2003il4965_get_ra_sta_id(struct il_priv *il, struct ieee80211_hdr *hdr)
2004{
2005 if (il->iw_mode == NL80211_IFTYPE_STATION) {
2006 return IL_AP_ID;
2007 } else {
2008 u8 *da = ieee80211_get_DA(hdr);
2009 return il4965_find_station(il, da);
2010 }
2011}
2012
/**
 * il4965_hdl_tx - Handle standard (non-aggregation) Tx response
 *
 * Fills in the mac80211 tx_info status for the completed frame(s),
 * reclaims them from the Tx queue, and wakes the queue once enough
 * space has been freed.  Covers both the scheduler-retry (aggregation)
 * queue path and the plain queue path.
 */
static void
il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int idx = SEQ_TO_IDX(sequence);
	struct il_tx_queue *txq = &il->txq[txq_id];
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *info;
	struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	u32 status = le32_to_cpu(tx_resp->u.status);
	/* tid is assigned only for QoS-data frames (see below).
	 * NOTE(review): the sched_retry path reads tid after only a
	 * WARN_ON(!qc) -- presumably aggregation implies QoS data, but
	 * tid would be uninitialized otherwise; confirm. */
	int uninitialized_var(tid);
	int sta_id;
	int freed;
	u8 *qc = NULL;
	unsigned long flags;

	/* Sanity-check the firmware-supplied idx against the queue */
	if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
		IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
		       "is out of range [0-%d] %d %d\n", txq_id, idx,
		       txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
		return;
	}

	txq->time_stamp = jiffies;
	info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
	memset(&info->status, 0, sizeof(info->status));

	hdr = il_tx_queue_get_hdr(il, txq_id, idx);
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & 0xf;
	}

	sta_id = il4965_get_ra_sta_id(il, hdr);
	if (txq->sched_retry && unlikely(sta_id == IL_INVALID_STATION)) {
		IL_ERR("Station not known\n");
		return;
	}

	spin_lock_irqsave(&il->sta_lock, flags);
	if (txq->sched_retry) {
		/* Aggregation queue: record per-frame status, then reclaim
		 * up to the scheduler's sequence number */
		const u32 scd_ssn = il4965_get_scd_ssn(tx_resp);
		struct il_ht_agg *agg = NULL;
		WARN_ON(!qc);

		agg = &il->stations[sta_id].tid[tid].agg;

		il4965_tx_status_reply_tx(il, agg, tx_resp, txq_id, idx);

		/* check if BAR is needed */
		if ((tx_resp->frame_count == 1) &&
		    !il4965_is_tx_success(status))
			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

		if (txq->q.read_ptr != (scd_ssn & 0xff)) {
			idx = il_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
			D_TX_REPLY("Retry scheduler reclaim scd_ssn "
				   "%d idx %d\n", scd_ssn, idx);
			freed = il4965_tx_queue_reclaim(il, txq_id, idx);
			if (qc)
				il4965_free_tfds_in_queue(il, sta_id, tid,
							  freed);

			/* Wake the queue unless it is being torn down */
			if (il->mac80211_registered &&
			    il_queue_space(&txq->q) > txq->q.low_mark &&
			    agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
				il_wake_queue(il, txq);
		}
	} else {
		/* Plain queue: report status for the single frame */
		info->status.rates[0].count = tx_resp->failure_frame + 1;
		info->flags |= il4965_tx_status_to_mac80211(status);
		il4965_hwrate_to_tx_control(il,
					    le32_to_cpu(tx_resp->rate_n_flags),
					    info);

		D_TX_REPLY("TXQ %d status %s (0x%08x) "
			   "rate_n_flags 0x%x retries %d\n", txq_id,
			   il4965_get_tx_fail_reason(status), status,
			   le32_to_cpu(tx_resp->rate_n_flags),
			   tx_resp->failure_frame);

		freed = il4965_tx_queue_reclaim(il, txq_id, idx);
		if (qc && likely(sta_id != IL_INVALID_STATION))
			il4965_free_tfds_in_queue(il, sta_id, tid, freed);
		else if (sta_id == IL_INVALID_STATION)
			D_TX_REPLY("Station not known\n");

		if (il->mac80211_registered &&
		    il_queue_space(&txq->q) > txq->q.low_mark)
			il_wake_queue(il, txq);
	}
	if (qc && likely(sta_id != IL_INVALID_STATION))
		il4965_txq_check_empty(il, sta_id, tid, txq_id);

	il4965_check_abort_status(il, tx_resp->frame_count, status);

	spin_unlock_irqrestore(&il->sta_lock, flags);
}
2116
/*
 * Beacon-transmit notification: record whether this device is currently
 * the IBSS beacon manager and dump the uCode beacon Tx status.
 */
static void
il4965_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il4965_beacon_notif *beacon = (void *)pkt->u.raw;
	/* rate is only consumed by the debug print below */
	u8 rate __maybe_unused =
	    il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	D_RX("beacon status %#x, retries:%d ibssmgr:%d "
	     "tsf:0x%.8x%.8x rate:%d\n",
	     le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
	     beacon->beacon_notify_hdr.failure_frame,
	     le32_to_cpu(beacon->ibss_mgr_status),
	     le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);

	il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
}
2134
/* Set up 4965-specific Rx frame reply handlers */
static void
il4965_handler_setup(struct il_priv *il)
{
	/* Plug 4965 handlers into the shared dispatch table; all other
	 * notifications keep the iwlegacy core defaults. */
	/* Legacy Rx frames */
	il->handlers[N_RX] = il4965_hdl_rx;
	/* Tx response */
	il->handlers[C_TX] = il4965_hdl_tx;
	/* Beacon Tx notification */
	il->handlers[N_BEACON] = il4965_hdl_beacon;
}
2145
/* 4965 host-command callbacks plugged into the shared iwlegacy core */
static struct il_hcmd_ops il4965_hcmd = {
	.rxon_assoc = il4965_send_rxon_assoc,
	.commit_rxon = il4965_commit_rxon,
	.set_rxon_chain = il4965_set_rxon_chain,
};
2151
2152static void
2153il4965_post_scan(struct il_priv *il)
2154{
2155 struct il_rxon_context *ctx = &il->ctx;
2156
2157 /*
2158 * Since setting the RXON may have been deferred while
2159 * performing the scan, fire one off if needed
2160 */
2161 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
2162 il_commit_rxon(il, ctx);
2163}
2164
/*
 * Finish association: push the assoc RXON, timing, HT config and chain
 * settings to the device, then kick off the calibrations.  The order of
 * the RXON commits below matters (unassoc first, then timing, then the
 * assoc RXON).
 */
static void
il4965_post_associate(struct il_priv *il)
{
	struct il_rxon_context *ctx = &il->ctx;
	struct ieee80211_vif *vif = ctx->vif;
	struct ieee80211_conf *conf = NULL;
	int ret = 0;

	if (!vif || !il->is_open)
		return;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	il_scan_cancel_timeout(il, 200);

	conf = &il->hw->conf;

	/* Commit an unassociated RXON first so the timing command below
	 * is accepted by the uCode */
	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	il_commit_rxon(il, ctx);

	ret = il_send_rxon_timing(il, ctx);
	if (ret)
		IL_WARN("RXON timing - " "Attempting to continue.\n");

	ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;

	il_set_rxon_ht(il, &il->current_ht_config);

	if (il->cfg->ops->hcmd->set_rxon_chain)
		il->cfg->ops->hcmd->set_rxon_chain(il, ctx);

	ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);

	D_ASSOC("assoc id %d beacon interval %d\n", vif->bss_conf.aid,
		vif->bss_conf.beacon_int);

	/* Mirror the BSS's short-preamble / short-slot settings */
	if (vif->bss_conf.use_short_preamble)
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;

	if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
		if (vif->bss_conf.use_short_slot)
			ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
	}

	il_commit_rxon(il, ctx);

	D_ASSOC("Associated as %d to: %pM\n", vif->bss_conf.aid,
		ctx->active.bssid_addr);

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		break;
	case NL80211_IFTYPE_ADHOC:
		il4965_send_beacon_cmd(il);
		break;
	default:
		IL_ERR("%s Should not be called in %d mode\n", __func__,
		       vif->type);
		break;
	}

	/* the chain noise calibration will enable PM upon completion
	 * If chain noise has already been run, then we need to enable
	 * power management here */
	if (il->chain_noise_data.state == IL_CHAIN_NOISE_DONE)
		il_power_update_mode(il, false);

	/* Enable Rx differential gain and sensitivity calibrations */
	il4965_chain_noise_reset(il);
	il->start_calib = 1;
}
2241
/*
 * Bring up (or refresh) AP/IBSS operation: on first call commit the
 * unassoc RXON, timing, HT/chain settings and the beacon template, then
 * re-commit the assoc RXON.  Caller must hold il->mutex.
 */
static void
il4965_config_ap(struct il_priv *il)
{
	struct il_rxon_context *ctx = &il->ctx;
	struct ieee80211_vif *vif = ctx->vif;
	int ret = 0;

	lockdep_assert_held(&il->mutex);

	if (test_bit(S_EXIT_PENDING, &il->status))
		return;

	/* The following should be done only at AP bring up */
	if (!il_is_associated_ctx(ctx)) {

		/* RXON - unassoc (to set timing command) */
		ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
		il_commit_rxon(il, ctx);

		/* RXON Timing */
		ret = il_send_rxon_timing(il, ctx);
		if (ret)
			IL_WARN("RXON timing failed - "
				"Attempting to continue.\n");

		/* AP has all antennas */
		il->chain_noise_data.active_chains = il->hw_params.valid_rx_ant;
		il_set_rxon_ht(il, &il->current_ht_config);
		if (il->cfg->ops->hcmd->set_rxon_chain)
			il->cfg->ops->hcmd->set_rxon_chain(il, ctx);

		ctx->staging.assoc_id = 0;

		/* Mirror the BSS's short-preamble / short-slot settings */
		if (vif->bss_conf.use_short_preamble)
			ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;

		if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
			if (vif->bss_conf.use_short_slot)
				ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
			else
				ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
		}
		/* need to send beacon cmd before committing assoc RXON! */
		il4965_send_beacon_cmd(il);
		/* restore RXON assoc */
		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		il_commit_rxon(il, ctx);
	}
	/* NOTE(review): beacon cmd is also sent unconditionally here, even
	 * when already associated -- presumably to refresh the beacon
	 * template; confirm against caller expectations */
	il4965_send_beacon_cmd(il);
}
2294
/* 4965 helpers used by the core when building/sizing host commands
 * and driving scans */
static struct il_hcmd_utils_ops il4965_hcmd_utils = {
	.get_hcmd_size = il4965_get_hcmd_size,
	.build_addsta_hcmd = il4965_build_addsta_hcmd,
	.request_scan = il4965_request_scan,
	.post_scan = il4965_post_scan,
};
2301
/* 4965 low-level library callbacks: Tx queue management, uCode load,
 * EEPROM access, APM config, temperature and (optionally) debugfs hooks */
static struct il_lib_ops il4965_lib = {
	.set_hw_params = il4965_hw_set_hw_params,
	.txq_update_byte_cnt_tbl = il4965_txq_update_byte_cnt_tbl,
	.txq_attach_buf_to_tfd = il4965_hw_txq_attach_buf_to_tfd,
	.txq_free_tfd = il4965_hw_txq_free_tfd,
	.txq_init = il4965_hw_tx_queue_init,
	.handler_setup = il4965_handler_setup,
	.is_valid_rtc_data_addr = il4965_hw_valid_rtc_data_addr,
	.init_alive_start = il4965_init_alive_start,
	.load_ucode = il4965_load_bsm,
	.dump_nic_error_log = il4965_dump_nic_error_log,
	.dump_fh = il4965_dump_fh,
	.set_channel_switch = il4965_hw_channel_switch,
	.apm_ops = {
		    .init = il_apm_init,
		    .config = il4965_nic_config,
		    },
	.eeprom_ops = {
		       /* per-band channel tables as laid out in EEPROM */
		       .regulatory_bands = {
					    EEPROM_REGULATORY_BAND_1_CHANNELS,
					    EEPROM_REGULATORY_BAND_2_CHANNELS,
					    EEPROM_REGULATORY_BAND_3_CHANNELS,
					    EEPROM_REGULATORY_BAND_4_CHANNELS,
					    EEPROM_REGULATORY_BAND_5_CHANNELS,
					    EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
					    EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS},
		       .acquire_semaphore = il4965_eeprom_acquire_semaphore,
		       .release_semaphore = il4965_eeprom_release_semaphore,
		       },
	.send_tx_power = il4965_send_tx_power,
	.update_chain_flags = il4965_update_chain_flags,
	.temp_ops = {
		     .temperature = il4965_temperature_calib,
		     },
#ifdef CONFIG_IWLEGACY_DEBUGFS
	.debugfs_ops = {
			.rx_stats_read = il4965_ucode_rx_stats_read,
			.tx_stats_read = il4965_ucode_tx_stats_read,
			.general_stats_read = il4965_ucode_general_stats_read,
			},
#endif
};
2344
/* Hooks the iwlegacy core calls back into for 4965-specific behavior */
static const struct il_legacy_ops il4965_legacy_ops = {
	.post_associate = il4965_post_associate,
	.config_ap = il4965_config_ap,
	.manage_ibss_station = il4965_manage_ibss_station,
	.update_bcast_stations = il4965_update_bcast_stations,
};
2351
/* mac80211 entry points for the 4965; a mix of 4965-specific handlers
 * and shared iwlegacy (il_mac_*) implementations */
struct ieee80211_ops il4965_hw_ops = {
	.tx = il4965_mac_tx,
	.start = il4965_mac_start,
	.stop = il4965_mac_stop,
	.add_interface = il_mac_add_interface,
	.remove_interface = il_mac_remove_interface,
	.change_interface = il_mac_change_interface,
	.config = il_mac_config,
	.configure_filter = il4965_configure_filter,
	.set_key = il4965_mac_set_key,
	.update_tkip_key = il4965_mac_update_tkip_key,
	.conf_tx = il_mac_conf_tx,
	.reset_tsf = il_mac_reset_tsf,
	.bss_info_changed = il_mac_bss_info_changed,
	.ampdu_action = il4965_mac_ampdu_action,
	.hw_scan = il_mac_hw_scan,
	.sta_add = il4965_mac_sta_add,
	.sta_remove = il_mac_sta_remove,
	.channel_switch = il4965_mac_channel_switch,
	.tx_last_beacon = il_mac_tx_last_beacon,
};
2373
/* Aggregates all 4965 op tables for registration with the core */
static const struct il_ops il4965_ops = {
	.lib = &il4965_lib,
	.hcmd = &il4965_hcmd,
	.utils = &il4965_hcmd_utils,
	.led = &il4965_led_ops,
	.legacy = &il4965_legacy_ops,
	.ieee80211_ops = &il4965_hw_ops,
};
2382
/* Hardware/firmware parameters common to all 4965 SKUs */
static struct il_base_params il4965_base_params = {
	.eeprom_size = IL4965_EEPROM_IMG_SIZE,
	.num_of_queues = IL49_NUM_QUEUES,
	.num_of_ampdu_queues = IL49_NUM_AMPDU_QUEUES,
	.pll_cfg_val = 0,
	.set_l0s = true,
	/* 4965 loads uCode via the bootstrap state machine */
	.use_bsm = true,
	.led_compensation = 61,
	.chain_noise_num_beacons = IL4965_CAL_NUM_BEACONS,
	.wd_timeout = IL_DEF_WD_TIMEOUT,
	/* temperature readings are reported in Kelvin */
	.temperature_kelvin = true,
	.ucode_tracing = true,
	.sensitivity_calib_by_driver = true,
	.chain_noise_calib_by_driver = true,
};
2398
/* Device configuration matched against the PCI id table at probe time */
struct il_cfg il4965_cfg = {
	.name = "Intel(R) Wireless WiFi Link 4965AGN",
	.fw_name_pre = IL4965_FW_PRE,
	.ucode_api_max = IL4965_UCODE_API_MAX,
	.ucode_api_min = IL4965_UCODE_API_MIN,
	.sku = IL_SKU_A | IL_SKU_G | IL_SKU_N,
	.valid_tx_ant = ANT_AB,
	.valid_rx_ant = ANT_ABC,
	.eeprom_ver = EEPROM_4965_EEPROM_VERSION,
	.eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
	.ops = &il4965_ops,
	.mod_params = &il4965_mod_params,
	.base_params = &il4965_base_params,
	.led_mode = IL_LED_BLINK,
	/*
	 * Force use of chains B and C for scan RX on 5 GHz band
	 * because the device has off-channel reception on chain A.
	 */
	.scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
};
2419
2420/* Module firmware */
2421MODULE_FIRMWARE(IL4965_MODULE_FIRMWARE(IL4965_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h
new file mode 100644
index 000000000000..74472314bc37
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965.h
@@ -0,0 +1,1309 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#ifndef __il_4965_h__
31#define __il_4965_h__
32
33struct il_rx_queue;
34struct il_rx_buf;
35struct il_rx_pkt;
36struct il_tx_queue;
37struct il_rxon_context;
38
39/* configuration for the _4965 devices */
40extern struct il_cfg il4965_cfg;
41
42extern struct il_mod_params il4965_mod_params;
43
44extern struct ieee80211_ops il4965_hw_ops;
45
46/* tx queue */
47void il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid,
48 int freed);
49
50/* RXON */
51void il4965_set_rxon_chain(struct il_priv *il, struct il_rxon_context *ctx);
52
53/* uCode */
54int il4965_verify_ucode(struct il_priv *il);
55
56/* lib */
57void il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status);
58
59void il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
60int il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq);
61int il4965_hw_nic_init(struct il_priv *il);
62int il4965_dump_fh(struct il_priv *il, char **buf, bool display);
63
64/* rx */
65void il4965_rx_queue_restock(struct il_priv *il);
66void il4965_rx_replenish(struct il_priv *il);
67void il4965_rx_replenish_now(struct il_priv *il);
68void il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq);
69int il4965_rxq_stop(struct il_priv *il);
70int il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
71void il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb);
72void il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb);
73void il4965_rx_handle(struct il_priv *il);
74
75/* tx */
76void il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
77int il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
78 dma_addr_t addr, u16 len, u8 reset, u8 pad);
79int il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
80void il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
81 struct ieee80211_tx_info *info);
82int il4965_tx_skb(struct il_priv *il, struct sk_buff *skb);
83int il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
84 struct ieee80211_sta *sta, u16 tid, u16 * ssn);
85int il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
86 struct ieee80211_sta *sta, u16 tid);
87int il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id);
88void il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb);
89int il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx);
90void il4965_hw_txq_ctx_free(struct il_priv *il);
91int il4965_txq_ctx_alloc(struct il_priv *il);
92void il4965_txq_ctx_reset(struct il_priv *il);
93void il4965_txq_ctx_stop(struct il_priv *il);
94void il4965_txq_set_sched(struct il_priv *il, u32 mask);
95
96/*
97 * Acquire il->lock before calling this function !
98 */
99void il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx);
100/**
101 * il4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
102 * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
103 * @scd_retry: (1) Indicates queue will be used in aggregation mode
104 *
105 * NOTE: Acquire il->lock before calling this function !
106 */
107void il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
108 int tx_fifo_id, int scd_retry);
109
110u8 il4965_toggle_tx_ant(struct il_priv *il, u8 ant_idx, u8 valid);
111
112/* rx */
113void il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb);
114bool il4965_good_plcp_health(struct il_priv *il, struct il_rx_pkt *pkt);
115void il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
116void il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
117
118/* scan */
119int il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
120
121/* station mgmt */
122int il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
123 bool add);
124
125/* hcmd */
126int il4965_send_beacon_cmd(struct il_priv *il);
127
#ifdef CONFIG_IWLEGACY_DEBUG
/* Translate a uCode Tx failure status into a printable name */
const char *il4965_get_tx_fail_reason(u32 status);
#else
/* Debugging disabled: keep callers compiling with an empty string */
static inline const char *
il4965_get_tx_fail_reason(u32 status)
{
	return "";
}
#endif
137
138/* station management */
139int il4965_alloc_bcast_station(struct il_priv *il, struct il_rxon_context *ctx);
140int il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx,
141 const u8 *addr, u8 *sta_id_r);
142int il4965_remove_default_wep_key(struct il_priv *il,
143 struct il_rxon_context *ctx,
144 struct ieee80211_key_conf *key);
145int il4965_set_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
146 struct ieee80211_key_conf *key);
147int il4965_restore_default_wep_keys(struct il_priv *il,
148 struct il_rxon_context *ctx);
149int il4965_set_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
150 struct ieee80211_key_conf *key, u8 sta_id);
151int il4965_remove_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
152 struct ieee80211_key_conf *key, u8 sta_id);
153void il4965_update_tkip_key(struct il_priv *il, struct il_rxon_context *ctx,
154 struct ieee80211_key_conf *keyconf,
155 struct ieee80211_sta *sta, u32 iv32,
156 u16 *phase1key);
157int il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid);
158int il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta,
159 int tid, u16 ssn);
160int il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta,
161 int tid);
162void il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt);
163int il4965_update_bcast_stations(struct il_priv *il);
164
165/* rate */
166static inline u8
167il4965_hw_get_rate(__le32 rate_n_flags)
168{
169 return le32_to_cpu(rate_n_flags) & 0xFF;
170}
171
172static inline __le32
173il4965_hw_set_rate_n_flags(u8 rate, u32 flags)
174{
175 return cpu_to_le32(flags | (u32) rate);
176}
177
178/* eeprom */
179void il4965_eeprom_get_mac(const struct il_priv *il, u8 * mac);
180int il4965_eeprom_acquire_semaphore(struct il_priv *il);
181void il4965_eeprom_release_semaphore(struct il_priv *il);
182int il4965_eeprom_check_version(struct il_priv *il);
183
184/* mac80211 handlers (for 4965) */
185void il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
186int il4965_mac_start(struct ieee80211_hw *hw);
187void il4965_mac_stop(struct ieee80211_hw *hw);
188void il4965_configure_filter(struct ieee80211_hw *hw,
189 unsigned int changed_flags,
190 unsigned int *total_flags, u64 multicast);
191int il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
192 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
193 struct ieee80211_key_conf *key);
194void il4965_mac_update_tkip_key(struct ieee80211_hw *hw,
195 struct ieee80211_vif *vif,
196 struct ieee80211_key_conf *keyconf,
197 struct ieee80211_sta *sta, u32 iv32,
198 u16 *phase1key);
199int il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
200 enum ieee80211_ampdu_mlme_action action,
201 struct ieee80211_sta *sta, u16 tid, u16 * ssn,
202 u8 buf_size);
203int il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
204 struct ieee80211_sta *sta);
205void il4965_mac_channel_switch(struct ieee80211_hw *hw,
206 struct ieee80211_channel_switch *ch_switch);
207
208void il4965_led_enable(struct il_priv *il);
209
210/* EEPROM */
211#define IL4965_EEPROM_IMG_SIZE 1024
212
213/*
214 * uCode queue management definitions ...
215 * The first queue used for block-ack aggregation is #7 (4965 only).
216 * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
217 */
218#define IL49_FIRST_AMPDU_QUEUE 7
219
220/* Sizes and addresses for instruction and data memory (SRAM) in
221 * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
222#define IL49_RTC_INST_LOWER_BOUND (0x000000)
223#define IL49_RTC_INST_UPPER_BOUND (0x018000)
224
225#define IL49_RTC_DATA_LOWER_BOUND (0x800000)
226#define IL49_RTC_DATA_UPPER_BOUND (0x80A000)
227
228#define IL49_RTC_INST_SIZE (IL49_RTC_INST_UPPER_BOUND - \
229 IL49_RTC_INST_LOWER_BOUND)
230#define IL49_RTC_DATA_SIZE (IL49_RTC_DATA_UPPER_BOUND - \
231 IL49_RTC_DATA_LOWER_BOUND)
232
233#define IL49_MAX_INST_SIZE IL49_RTC_INST_SIZE
234#define IL49_MAX_DATA_SIZE IL49_RTC_DATA_SIZE
235
236/* Size of uCode instruction memory in bootstrap state machine */
237#define IL49_MAX_BSM_SIZE BSM_SRAM_SIZE
238
239static inline int
240il4965_hw_valid_rtc_data_addr(u32 addr)
241{
242 return (addr >= IL49_RTC_DATA_LOWER_BOUND &&
243 addr < IL49_RTC_DATA_UPPER_BOUND);
244}
245
246/********************* START TEMPERATURE *************************************/
247
248/**
249 * 4965 temperature calculation.
250 *
251 * The driver must calculate the device temperature before calculating
252 * a txpower setting (amplifier gain is temperature dependent). The
253 * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration
254 * values used for the life of the driver, and one of which (R4) is the
255 * real-time temperature indicator.
256 *
257 * uCode provides all 4 values to the driver via the "initialize alive"
258 * notification (see struct il4965_init_alive_resp). After the runtime uCode
259 * image loads, uCode updates the R4 value via stats notifications
260 * (see N_STATS), which occur after each received beacon
261 * when associated, or can be requested via C_STATS.
262 *
263 * NOTE: uCode provides the R4 value as a 23-bit signed value. Driver
264 * must sign-extend to 32 bits before applying formula below.
265 *
266 * Formula:
267 *
268 * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8
269 *
 * NOTE: The basic formula is 259 * (R4-R2) / (R3-R1). The 97/100 is
 * an additional correction, which should be centered around 0 degrees
 * Celsius (273 degrees Kelvin). The 8 (3 percent of 273) compensates for
 * centering the 97/100 correction around 0 degrees C (273 degrees K).
 *
 * Subtract 273 from the Kelvin value to find degrees Celsius, for comparing
 * current temperature with factory-measured temperatures when calculating
 * txpower settings.
278 */
279#define TEMPERATURE_CALIB_KELVIN_OFFSET 8
280#define TEMPERATURE_CALIB_A_VAL 259
281
282/* Limit range of calculated temperature to be between these Kelvin values */
283#define IL_TX_POWER_TEMPERATURE_MIN (263)
284#define IL_TX_POWER_TEMPERATURE_MAX (410)
285
286#define IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \
287 ((t) < IL_TX_POWER_TEMPERATURE_MIN || \
288 (t) > IL_TX_POWER_TEMPERATURE_MAX)
289
290/********************* END TEMPERATURE ***************************************/
291
292/********************* START TXPOWER *****************************************/
293
294/**
295 * 4965 txpower calculations rely on information from three sources:
296 *
297 * 1) EEPROM
298 * 2) "initialize" alive notification
299 * 3) stats notifications
300 *
301 * EEPROM data consists of:
302 *
303 * 1) Regulatory information (max txpower and channel usage flags) is provided
 *     separately for each channel that can possibly be supported by 4965.
305 * 40 MHz wide (.11n HT40) channels are listed separately from 20 MHz
306 * (legacy) channels.
307 *
308 * See struct il4965_eeprom_channel for format, and struct il4965_eeprom
309 * for locations in EEPROM.
310 *
311 * 2) Factory txpower calibration information is provided separately for
312 * sub-bands of contiguous channels. 2.4GHz has just one sub-band,
313 * but 5 GHz has several sub-bands.
314 *
315 * In addition, per-band (2.4 and 5 Ghz) saturation txpowers are provided.
316 *
317 * See struct il4965_eeprom_calib_info (and the tree of structures
318 * contained within it) for format, and struct il4965_eeprom for
319 * locations in EEPROM.
320 *
321 * "Initialization alive" notification (see struct il4965_init_alive_resp)
322 * consists of:
323 *
324 * 1) Temperature calculation parameters.
325 *
326 * 2) Power supply voltage measurement.
327 *
328 * 3) Tx gain compensation to balance 2 transmitters for MIMO use.
329 *
330 * Statistics notifications deliver:
331 *
332 * 1) Current values for temperature param R4.
333 */
334
335/**
336 * To calculate a txpower setting for a given desired target txpower, channel,
337 * modulation bit rate, and transmitter chain (4965 has 2 transmitters to
338 * support MIMO and transmit diversity), driver must do the following:
339 *
340 * 1) Compare desired txpower vs. (EEPROM) regulatory limit for this channel.
341 * Do not exceed regulatory limit; reduce target txpower if necessary.
342 *
343 * If setting up txpowers for MIMO rates (rate idxes 8-15, 24-31),
344 * 2 transmitters will be used simultaneously; driver must reduce the
345 * regulatory limit by 3 dB (half-power) for each transmitter, so the
346 * combined total output of the 2 transmitters is within regulatory limits.
347 *
348 *
349 * 2) Compare target txpower vs. (EEPROM) saturation txpower *reduced by
350 * backoff for this bit rate*. Do not exceed (saturation - backoff[rate]);
351 * reduce target txpower if necessary.
352 *
353 * Backoff values below are in 1/2 dB units (equivalent to steps in
354 * txpower gain tables):
355 *
356 * OFDM 6 - 36 MBit: 10 steps (5 dB)
357 * OFDM 48 MBit: 15 steps (7.5 dB)
358 * OFDM 54 MBit: 17 steps (8.5 dB)
359 * OFDM 60 MBit: 20 steps (10 dB)
360 * CCK all rates: 10 steps (5 dB)
361 *
362 * Backoff values apply to saturation txpower on a per-transmitter basis;
363 * when using MIMO (2 transmitters), each transmitter uses the same
364 * saturation level provided in EEPROM, and the same backoff values;
365 * no reduction (such as with regulatory txpower limits) is required.
366 *
367 * Saturation and Backoff values apply equally to 20 Mhz (legacy) channel
368 * widths and 40 Mhz (.11n HT40) channel widths; there is no separate
369 * factory measurement for ht40 channels.
370 *
371 * The result of this step is the final target txpower. The rest of
372 * the steps figure out the proper settings for the device to achieve
373 * that target txpower.
374 *
375 *
376 * 3) Determine (EEPROM) calibration sub band for the target channel, by
377 * comparing against first and last channels in each sub band
378 * (see struct il4965_eeprom_calib_subband_info).
379 *
380 *
381 * 4) Linearly interpolate (EEPROM) factory calibration measurement sets,
382 * referencing the 2 factory-measured (sample) channels within the sub band.
383 *
384 * Interpolation is based on difference between target channel's frequency
385 * and the sample channels' frequencies. Since channel numbers are based
386 * on frequency (5 MHz between each channel number), this is equivalent
387 * to interpolating based on channel number differences.
388 *
389 * Note that the sample channels may or may not be the channels at the
390 * edges of the sub band. The target channel may be "outside" of the
391 * span of the sampled channels.
392 *
393 * Driver may choose the pair (for 2 Tx chains) of measurements (see
394 * struct il4965_eeprom_calib_ch_info) for which the actual measured
395 * txpower comes closest to the desired txpower. Usually, though,
396 * the middle set of measurements is closest to the regulatory limits,
397 * and is therefore a good choice for all txpower calculations (this
398 * assumes that high accuracy is needed for maximizing legal txpower,
399 * while lower txpower configurations do not need as much accuracy).
400 *
401 * Driver should interpolate both members of the chosen measurement pair,
402 * i.e. for both Tx chains (radio transmitters), unless the driver knows
403 * that only one of the chains will be used (e.g. only one tx antenna
404 * connected, but this should be unusual). The rate scaling algorithm
405 * switches antennas to find best performance, so both Tx chains will
406 * be used (although only one at a time) even for non-MIMO transmissions.
407 *
408 * Driver should interpolate factory values for temperature, gain table
409 * idx, and actual power. The power amplifier detector values are
410 * not used by the driver.
411 *
412 * Sanity check: If the target channel happens to be one of the sample
413 * channels, the results should agree with the sample channel's
414 * measurements!
415 *
416 *
417 * 5) Find difference between desired txpower and (interpolated)
418 * factory-measured txpower. Using (interpolated) factory gain table idx
419 * (shown elsewhere) as a starting point, adjust this idx lower to
420 * increase txpower, or higher to decrease txpower, until the target
421 * txpower is reached. Each step in the gain table is 1/2 dB.
422 *
423 * For example, if factory measured txpower is 16 dBm, and target txpower
424 * is 13 dBm, add 6 steps to the factory gain idx to reduce txpower
425 * by 3 dB.
426 *
427 *
428 * 6) Find difference between current device temperature and (interpolated)
429 * factory-measured temperature for sub-band. Factory values are in
430 * degrees Celsius. To calculate current temperature, see comments for
431 * "4965 temperature calculation".
432 *
433 * If current temperature is higher than factory temperature, driver must
 *     increase gain (lower gain table idx), and vice versa.
435 *
436 * Temperature affects gain differently for different channels:
437 *
438 * 2.4 GHz all channels: 3.5 degrees per half-dB step
439 * 5 GHz channels 34-43: 4.5 degrees per half-dB step
440 * 5 GHz channels >= 44: 4.0 degrees per half-dB step
441 *
442 * NOTE: Temperature can increase rapidly when transmitting, especially
443 * with heavy traffic at high txpowers. Driver should update
444 * temperature calculations often under these conditions to
445 * maintain strong txpower in the face of rising temperature.
446 *
447 *
448 * 7) Find difference between current power supply voltage indicator
449 * (from "initialize alive") and factory-measured power supply voltage
450 * indicator (EEPROM).
451 *
452 * If the current voltage is higher (indicator is lower) than factory
453 * voltage, gain should be reduced (gain table idx increased) by:
454 *
455 * (eeprom - current) / 7
456 *
457 * If the current voltage is lower (indicator is higher) than factory
458 * voltage, gain should be increased (gain table idx decreased) by:
459 *
460 * 2 * (current - eeprom) / 7
461 *
462 * If number of idx steps in either direction turns out to be > 2,
463 * something is wrong ... just use 0.
464 *
465 * NOTE: Voltage compensation is independent of band/channel.
466 *
467 * NOTE: "Initialize" uCode measures current voltage, which is assumed
468 * to be constant after this initial measurement. Voltage
469 * compensation for txpower (number of steps in gain table)
470 * may be calculated once and used until the next uCode bootload.
471 *
472 *
473 * 8) If setting up txpowers for MIMO rates (rate idxes 8-15, 24-31),
474 * adjust txpower for each transmitter chain, so txpower is balanced
475 * between the two chains. There are 5 pairs of tx_atten[group][chain]
476 * values in "initialize alive", one pair for each of 5 channel ranges:
477 *
478 * Group 0: 5 GHz channel 34-43
479 * Group 1: 5 GHz channel 44-70
480 * Group 2: 5 GHz channel 71-124
481 * Group 3: 5 GHz channel 125-200
482 * Group 4: 2.4 GHz all channels
483 *
484 * Add the tx_atten[group][chain] value to the idx for the target chain.
485 * The values are signed, but are in pairs of 0 and a non-negative number,
486 * so as to reduce gain (if necessary) of the "hotter" channel. This
487 * avoids any need to double-check for regulatory compliance after
488 * this step.
489 *
490 *
491 * 9) If setting up for a CCK rate, lower the gain by adding a CCK compensation
492 * value to the idx:
493 *
494 * Hardware rev B: 9 steps (4.5 dB)
495 * Hardware rev C: 5 steps (2.5 dB)
496 *
497 * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
498 * bits [3:2], 1 = B, 2 = C.
499 *
500 * NOTE: This compensation is in addition to any saturation backoff that
501 * might have been applied in an earlier step.
502 *
503 *
504 * 10) Select the gain table, based on band (2.4 vs 5 GHz).
505 *
506 * Limit the adjusted idx to stay within the table!
507 *
508 *
509 * 11) Read gain table entries for DSP and radio gain, place into appropriate
510 * location(s) in command (struct il4965_txpowertable_cmd).
511 */
512
513/**
514 * When MIMO is used (2 transmitters operating simultaneously), driver should
515 * limit each transmitter to deliver a max of 3 dB below the regulatory limit
516 * for the device. That is, use half power for each transmitter, so total
517 * txpower is within regulatory limits.
518 *
519 * The value "6" represents number of steps in gain table to reduce power 3 dB.
520 * Each step is 1/2 dB.
521 */
522#define IL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6)
523
524/**
525 * CCK gain compensation.
526 *
527 * When calculating txpowers for CCK, after making sure that the target power
528 * is within regulatory and saturation limits, driver must additionally
529 * back off gain by adding these values to the gain table idx.
530 *
531 * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
532 * bits [3:2], 1 = B, 2 = C.
533 */
534#define IL_TX_POWER_CCK_COMPENSATION_B_STEP (9)
535#define IL_TX_POWER_CCK_COMPENSATION_C_STEP (5)
536
537/*
538 * 4965 power supply voltage compensation for txpower
539 */
540#define TX_POWER_IL_VOLTAGE_CODES_PER_03V (7)
541
542/**
543 * Gain tables.
544 *
545 * The following tables contain pair of values for setting txpower, i.e.
546 * gain settings for the output of the device's digital signal processor (DSP),
547 * and for the analog gain structure of the transmitter.
548 *
549 * Each entry in the gain tables represents a step of 1/2 dB. Note that these
550 * are *relative* steps, not indications of absolute output power. Output
551 * power varies with temperature, voltage, and channel frequency, and also
552 * requires consideration of average power (to satisfy regulatory constraints),
553 * and peak power (to avoid distortion of the output signal).
554 *
555 * Each entry contains two values:
556 * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
557 * linear value that multiplies the output of the digital signal processor,
558 * before being sent to the analog radio.
559 * 2) Radio gain. This sets the analog gain of the radio Tx path.
560 * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
561 *
562 * EEPROM contains factory calibration data for txpower. This maps actual
563 * measured txpower levels to gain settings in the "well known" tables
564 * below ("well-known" means here that both factory calibration *and* the
565 * driver work with the same table).
566 *
567 * There are separate tables for 2.4 GHz and 5 GHz bands. The 5 GHz table
568 * has an extension (into negative idxes), in case the driver needs to
569 * boost power setting for high device temperatures (higher than would be
570 * present during factory calibration). A 5 Ghz EEPROM idx of "40"
571 * corresponds to the 49th entry in the table used by the driver.
572 */
573#define MIN_TX_GAIN_IDX (0) /* highest gain, lowest idx, 2.4 */
574#define MIN_TX_GAIN_IDX_52GHZ_EXT (-9) /* highest gain, lowest idx, 5 */
575
576/**
577 * 2.4 GHz gain table
578 *
579 * Index Dsp gain Radio gain
580 * 0 110 0x3f (highest gain)
581 * 1 104 0x3f
582 * 2 98 0x3f
583 * 3 110 0x3e
584 * 4 104 0x3e
585 * 5 98 0x3e
586 * 6 110 0x3d
587 * 7 104 0x3d
588 * 8 98 0x3d
589 * 9 110 0x3c
590 * 10 104 0x3c
591 * 11 98 0x3c
592 * 12 110 0x3b
593 * 13 104 0x3b
594 * 14 98 0x3b
595 * 15 110 0x3a
596 * 16 104 0x3a
597 * 17 98 0x3a
598 * 18 110 0x39
599 * 19 104 0x39
600 * 20 98 0x39
601 * 21 110 0x38
602 * 22 104 0x38
603 * 23 98 0x38
604 * 24 110 0x37
605 * 25 104 0x37
606 * 26 98 0x37
607 * 27 110 0x36
608 * 28 104 0x36
609 * 29 98 0x36
610 * 30 110 0x35
611 * 31 104 0x35
612 * 32 98 0x35
613 * 33 110 0x34
614 * 34 104 0x34
615 * 35 98 0x34
616 * 36 110 0x33
617 * 37 104 0x33
618 * 38 98 0x33
619 * 39 110 0x32
620 * 40 104 0x32
621 * 41 98 0x32
622 * 42 110 0x31
623 * 43 104 0x31
624 * 44 98 0x31
625 * 45 110 0x30
626 * 46 104 0x30
627 * 47 98 0x30
628 * 48 110 0x6
629 * 49 104 0x6
630 * 50 98 0x6
631 * 51 110 0x5
632 * 52 104 0x5
633 * 53 98 0x5
634 * 54 110 0x4
635 * 55 104 0x4
636 * 56 98 0x4
637 * 57 110 0x3
638 * 58 104 0x3
639 * 59 98 0x3
640 * 60 110 0x2
641 * 61 104 0x2
642 * 62 98 0x2
643 * 63 110 0x1
644 * 64 104 0x1
645 * 65 98 0x1
646 * 66 110 0x0
647 * 67 104 0x0
648 * 68 98 0x0
649 * 69 97 0
650 * 70 96 0
651 * 71 95 0
652 * 72 94 0
653 * 73 93 0
654 * 74 92 0
655 * 75 91 0
656 * 76 90 0
657 * 77 89 0
658 * 78 88 0
659 * 79 87 0
660 * 80 86 0
661 * 81 85 0
662 * 82 84 0
663 * 83 83 0
664 * 84 82 0
665 * 85 81 0
666 * 86 80 0
667 * 87 79 0
668 * 88 78 0
669 * 89 77 0
670 * 90 76 0
671 * 91 75 0
672 * 92 74 0
673 * 93 73 0
674 * 94 72 0
675 * 95 71 0
676 * 96 70 0
677 * 97 69 0
678 * 98 68 0
679 */
680
681/**
682 * 5 GHz gain table
683 *
684 * Index Dsp gain Radio gain
685 * -9 123 0x3F (highest gain)
686 * -8 117 0x3F
687 * -7 110 0x3F
688 * -6 104 0x3F
689 * -5 98 0x3F
690 * -4 110 0x3E
691 * -3 104 0x3E
692 * -2 98 0x3E
693 * -1 110 0x3D
694 * 0 104 0x3D
695 * 1 98 0x3D
696 * 2 110 0x3C
697 * 3 104 0x3C
698 * 4 98 0x3C
699 * 5 110 0x3B
700 * 6 104 0x3B
701 * 7 98 0x3B
702 * 8 110 0x3A
703 * 9 104 0x3A
704 * 10 98 0x3A
705 * 11 110 0x39
706 * 12 104 0x39
707 * 13 98 0x39
708 * 14 110 0x38
709 * 15 104 0x38
710 * 16 98 0x38
711 * 17 110 0x37
712 * 18 104 0x37
713 * 19 98 0x37
714 * 20 110 0x36
715 * 21 104 0x36
716 * 22 98 0x36
717 * 23 110 0x35
718 * 24 104 0x35
719 * 25 98 0x35
720 * 26 110 0x34
721 * 27 104 0x34
722 * 28 98 0x34
723 * 29 110 0x33
724 * 30 104 0x33
725 * 31 98 0x33
726 * 32 110 0x32
727 * 33 104 0x32
728 * 34 98 0x32
729 * 35 110 0x31
730 * 36 104 0x31
731 * 37 98 0x31
732 * 38 110 0x30
733 * 39 104 0x30
734 * 40 98 0x30
735 * 41 110 0x25
736 * 42 104 0x25
737 * 43 98 0x25
738 * 44 110 0x24
739 * 45 104 0x24
740 * 46 98 0x24
741 * 47 110 0x23
742 * 48 104 0x23
743 * 49 98 0x23
744 * 50 110 0x22
745 * 51 104 0x18
746 * 52 98 0x18
747 * 53 110 0x17
748 * 54 104 0x17
749 * 55 98 0x17
750 * 56 110 0x16
751 * 57 104 0x16
752 * 58 98 0x16
753 * 59 110 0x15
754 * 60 104 0x15
755 * 61 98 0x15
756 * 62 110 0x14
757 * 63 104 0x14
758 * 64 98 0x14
759 * 65 110 0x13
760 * 66 104 0x13
761 * 67 98 0x13
762 * 68 110 0x12
763 * 69 104 0x08
764 * 70 98 0x08
765 * 71 110 0x07
766 * 72 104 0x07
767 * 73 98 0x07
768 * 74 110 0x06
769 * 75 104 0x06
770 * 76 98 0x06
771 * 77 110 0x05
772 * 78 104 0x05
773 * 79 98 0x05
774 * 80 110 0x04
775 * 81 104 0x04
776 * 82 98 0x04
777 * 83 110 0x03
778 * 84 104 0x03
779 * 85 98 0x03
780 * 86 110 0x02
781 * 87 104 0x02
782 * 88 98 0x02
783 * 89 110 0x01
784 * 90 104 0x01
785 * 91 98 0x01
786 * 92 110 0x00
787 * 93 104 0x00
788 * 94 98 0x00
789 * 95 93 0x00
790 * 96 88 0x00
791 * 97 83 0x00
792 * 98 78 0x00
793 */
794
795/**
796 * Sanity checks and default values for EEPROM regulatory levels.
797 * If EEPROM values fall outside MIN/MAX range, use default values.
798 *
799 * Regulatory limits refer to the maximum average txpower allowed by
800 * regulatory agencies in the geographies in which the device is meant
801 * to be operated. These limits are SKU-specific (i.e. geography-specific),
802 * and channel-specific; each channel has an individual regulatory limit
803 * listed in the EEPROM.
804 *
805 * Units are in half-dBm (i.e. "34" means 17 dBm).
806 */
807#define IL_TX_POWER_DEFAULT_REGULATORY_24 (34)
808#define IL_TX_POWER_DEFAULT_REGULATORY_52 (34)
809#define IL_TX_POWER_REGULATORY_MIN (0)
810#define IL_TX_POWER_REGULATORY_MAX (34)
811
812/**
813 * Sanity checks and default values for EEPROM saturation levels.
814 * If EEPROM values fall outside MIN/MAX range, use default values.
815 *
816 * Saturation is the highest level that the output power amplifier can produce
817 * without significant clipping distortion. This is a "peak" power level.
818 * Different types of modulation (i.e. various "rates", and OFDM vs. CCK)
819 * require differing amounts of backoff, relative to their average power output,
820 * in order to avoid clipping distortion.
821 *
822 * Driver must make sure that it is violating neither the saturation limit,
823 * nor the regulatory limit, when calculating Tx power settings for various
824 * rates.
825 *
826 * Units are in half-dBm (i.e. "38" means 19 dBm).
827 */
828#define IL_TX_POWER_DEFAULT_SATURATION_24 (38)
829#define IL_TX_POWER_DEFAULT_SATURATION_52 (38)
830#define IL_TX_POWER_SATURATION_MIN (20)
831#define IL_TX_POWER_SATURATION_MAX (50)
832
833/**
834 * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance)
835 * and thermal Txpower calibration.
836 *
837 * When calculating txpower, driver must compensate for current device
838 * temperature; higher temperature requires higher gain. Driver must calculate
839 * current temperature (see "4965 temperature calculation"), then compare vs.
840 * factory calibration temperature in EEPROM; if current temperature is higher
841 * than factory temperature, driver must *increase* gain by proportions shown
842 * in table below. If current temperature is lower than factory, driver must
843 * *decrease* gain.
844 *
845 * Different frequency ranges require different compensation, as shown below.
846 */
847/* Group 0, 5.2 GHz ch 34-43: 4.5 degrees per 1/2 dB. */
848#define CALIB_IL_TX_ATTEN_GR1_FCH 34
849#define CALIB_IL_TX_ATTEN_GR1_LCH 43
850
851/* Group 1, 5.3 GHz ch 44-70: 4.0 degrees per 1/2 dB. */
852#define CALIB_IL_TX_ATTEN_GR2_FCH 44
853#define CALIB_IL_TX_ATTEN_GR2_LCH 70
854
855/* Group 2, 5.5 GHz ch 71-124: 4.0 degrees per 1/2 dB. */
856#define CALIB_IL_TX_ATTEN_GR3_FCH 71
857#define CALIB_IL_TX_ATTEN_GR3_LCH 124
858
859/* Group 3, 5.7 GHz ch 125-200: 4.0 degrees per 1/2 dB. */
860#define CALIB_IL_TX_ATTEN_GR4_FCH 125
861#define CALIB_IL_TX_ATTEN_GR4_LCH 200
862
863/* Group 4, 2.4 GHz all channels: 3.5 degrees per 1/2 dB. */
864#define CALIB_IL_TX_ATTEN_GR5_FCH 1
865#define CALIB_IL_TX_ATTEN_GR5_LCH 20
866
/*
 * Index of each Tx-attenuation channel group (see CALIB_IL_TX_ATTEN_GR*_FCH/
 * _LCH ranges above) within calibration arrays.
 */
enum {
	CALIB_CH_GROUP_1 = 0,	/* 5.2 GHz ch 34-43 (GR1) */
	CALIB_CH_GROUP_2 = 1,	/* 5.3 GHz ch 44-70 (GR2) */
	CALIB_CH_GROUP_3 = 2,	/* 5.5 GHz ch 71-124 (GR3) */
	CALIB_CH_GROUP_4 = 3,	/* 5.7 GHz ch 125-200 (GR4) */
	CALIB_CH_GROUP_5 = 4,	/* 2.4 GHz all channels (GR5) */
	CALIB_CH_GROUP_MAX	/* number of channel groups */
};
875
876/********************* END TXPOWER *****************************************/
877
878/**
879 * Tx/Rx Queues
880 *
881 * Most communication between driver and 4965 is via queues of data buffers.
882 * For example, all commands that the driver issues to device's embedded
883 * controller (uCode) are via the command queue (one of the Tx queues). All
884 * uCode command responses/replies/notifications, including Rx frames, are
885 * conveyed from uCode to driver via the Rx queue.
886 *
887 * Most support for these queues, including handshake support, resides in
888 * structures in host DRAM, shared between the driver and the device. When
889 * allocating this memory, the driver must make sure that data written by
890 * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's
891 * cache memory), so DRAM and cache are consistent, and the device can
892 * immediately see changes made by the driver.
893 *
894 * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via
895 * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array
896 * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
897 */
898#define IL49_NUM_FIFOS 7
899#define IL49_CMD_FIFO_NUM 4
900#define IL49_NUM_QUEUES 16
901#define IL49_NUM_AMPDU_QUEUES 8
902
903/**
 * struct il4965_scd_bc_tbl
905 *
906 * Byte Count table
907 *
908 * Each Tx queue uses a byte-count table containing 320 entries:
909 * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that
910 * duplicate the first 64 entries (to avoid wrap-around within a Tx win;
911 * max Tx win is 64 TFDs).
912 *
913 * When driver sets up a new TFD, it must also enter the total byte count
914 * of the frame to be transmitted into the corresponding entry in the byte
915 * count table for the chosen Tx queue. If the TFD idx is 0-63, the driver
916 * must duplicate the byte count entry in corresponding idx 256-319.
917 *
918 * padding puts each byte count table on a 1024-byte boundary;
919 * 4965 assumes tables are separated by 1024 bytes.
920 */
struct il4965_scd_bc_tbl {
	__le16 tfd_offset[TFD_QUEUE_BC_SIZE];	/* byte count per TFD; per the table
						 * comment above, entries 256-319
						 * duplicate entries 0-63 */
	u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];	/* pad table to the
								 * 1024-byte boundary
								 * 4965 expects */
} __packed;
925
926#define IL4965_RTC_INST_LOWER_BOUND (0x000000)
927
928/* RSSI to dBm */
929#define IL4965_RSSI_OFFSET 44
930
931/* PCI registers */
932#define PCI_CFG_RETRY_TIMEOUT 0x041
933
934/* PCI register values */
935#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
936#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
937
938#define IL4965_DEFAULT_TX_RETRY 15
939
940/* EEPROM */
941#define IL4965_FIRST_AMPDU_QUEUE 10
942
943/* Calibration */
944void il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp);
945void il4965_sensitivity_calibration(struct il_priv *il, void *resp);
946void il4965_init_sensitivity(struct il_priv *il);
947void il4965_reset_run_time_calib(struct il_priv *il);
948void il4965_calib_free_results(struct il_priv *il);
949
950/* Debug */
951#ifdef CONFIG_IWLEGACY_DEBUGFS
952ssize_t il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
953 size_t count, loff_t *ppos);
954ssize_t il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
955 size_t count, loff_t *ppos);
956ssize_t il4965_ucode_general_stats_read(struct file *file,
957 char __user *user_buf, size_t count,
958 loff_t *ppos);
959#endif
960
961/****************************/
962/* Flow Handler Definitions */
963/****************************/
964
965/**
966 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
967 * Addresses are offsets from device's PCI hardware base address.
968 */
969#define FH49_MEM_LOWER_BOUND (0x1000)
970#define FH49_MEM_UPPER_BOUND (0x2000)
971
972/**
973 * Keep-Warm (KW) buffer base address.
974 *
975 * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
976 * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
977 * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host
978 * from going into a power-savings mode that would cause higher DRAM latency,
979 * and possible data over/under-runs, before all Tx/Rx is complete.
980 *
981 * Driver loads FH49_KW_MEM_ADDR_REG with the physical address (bits 35:4)
982 * of the buffer, which must be 4K aligned. Once this is set up, the 4965
983 * automatically invokes keep-warm accesses when normal accesses might not
984 * be sufficient to maintain fast DRAM response.
985 *
986 * Bit fields:
987 * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
988 */
989#define FH49_KW_MEM_ADDR_REG (FH49_MEM_LOWER_BOUND + 0x97C)
990
991/**
992 * TFD Circular Buffers Base (CBBC) addresses
993 *
994 * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
995 * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
996 * (see struct il_tfd_frame). These 16 pointer registers are offset by 0x04
997 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
998 * aligned (address bits 0-7 must be 0).
999 *
1000 * Bit fields in each pointer register:
1001 * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
1002 */
1003#define FH49_MEM_CBBC_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x9D0)
1004#define FH49_MEM_CBBC_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xA10)
1005
1006/* Find TFD CB base pointer for given queue (range 0-15). */
1007#define FH49_MEM_CBBC_QUEUE(x) (FH49_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
1008
1009/**
1010 * Rx SRAM Control and Status Registers (RSCSR)
1011 *
1012 * These registers provide handshake between driver and 4965 for the Rx queue
1013 * (this queue handles *all* command responses, notifications, Rx data, etc.
1014 * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx
1015 * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
1016 * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
1017 * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
1018 * mapping between RBDs and RBs.
1019 *
1020 * Driver must allocate host DRAM memory for the following, and set the
1021 * physical address of each into 4965 registers:
1022 *
1023 * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
1024 * entries (although any power of 2, up to 4096, is selectable by driver).
1025 * Each entry (1 dword) points to a receive buffer (RB) of consistent size
1026 * (typically 4K, although 8K or 16K are also selectable by driver).
1027 * Driver sets up RB size and number of RBDs in the CB via Rx config
1028 * register FH49_MEM_RCSR_CHNL0_CONFIG_REG.
1029 *
1030 * Bit fields within one RBD:
1031 * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
1032 *
1033 * Driver sets physical address [35:8] of base of RBD circular buffer
1034 * into FH49_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
1035 *
1036 * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
1037 * (RBs) have been filled, via a "write pointer", actually the idx of
1038 * the RB's corresponding RBD within the circular buffer. Driver sets
1039 * physical address [35:4] into FH49_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
1040 *
1041 * Bit fields in lower dword of Rx status buffer (upper dword not used
1042 * by driver; see struct il4965_shared, val0):
1043 * 31-12: Not used by driver
1044 * 11- 0: Index of last filled Rx buffer descriptor
1045 * (4965 writes, driver reads this value)
1046 *
1047 * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
1048 * enter pointers to these RBs into contiguous RBD circular buffer entries,
1049 * and update the 4965's "write" idx register,
1050 * FH49_RSCSR_CHNL0_RBDCB_WPTR_REG.
1051 *
1052 * This "write" idx corresponds to the *next* RBD that the driver will make
1053 * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
1054 * the circular buffer. This value should initially be 0 (before preparing any
1055 * RBs), should be 8 after preparing the first 8 RBs (for example), and must
1056 * wrap back to 0 at the end of the circular buffer (but don't wrap before
1057 * "read" idx has advanced past 1! See below).
1058 * NOTE: 4965 EXPECTS THE WRITE IDX TO BE INCREMENTED IN MULTIPLES OF 8.
1059 *
1060 * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
1061 * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
1062 * to tell the driver the idx of the latest filled RBD. The driver must
1063 * read this "read" idx from DRAM after receiving an Rx interrupt from 4965.
1064 *
1065 * The driver must also internally keep track of a third idx, which is the
1066 * next RBD to process. When receiving an Rx interrupt, driver should process
1067 * all filled but unprocessed RBs up to, but not including, the RB
1068 * corresponding to the "read" idx. For example, if "read" idx becomes "1",
1069 * driver may process the RB pointed to by RBD 0. Depending on volume of
1070 * traffic, there may be many RBs to process.
1071 *
1072 * If read idx == write idx, 4965 thinks there is no room to put new data.
1073 * Due to this, the maximum number of filled RBs is 255, instead of 256. To
1074 * be safe, make sure that there is a gap of at least 2 RBDs between "write"
1075 * and "read" idxes; that is, make sure that there are no more than 254
1076 * buffers waiting to be filled.
1077 */
1078#define FH49_MEM_RSCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xBC0)
1079#define FH49_MEM_RSCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xC00)
1080#define FH49_MEM_RSCSR_CHNL0 (FH49_MEM_RSCSR_LOWER_BOUND)
1081
1082/**
1083 * Physical base address of 8-byte Rx Status buffer.
1084 * Bit fields:
 *  31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
1086 */
1087#define FH49_RSCSR_CHNL0_STTS_WPTR_REG (FH49_MEM_RSCSR_CHNL0)
1088
1089/**
1090 * Physical base address of Rx Buffer Descriptor Circular Buffer.
1091 * Bit fields:
 *  27-0: RBD CB physical base address [35:8], must be 256-byte aligned.
1093 */
1094#define FH49_RSCSR_CHNL0_RBDCB_BASE_REG (FH49_MEM_RSCSR_CHNL0 + 0x004)
1095
1096/**
1097 * Rx write pointer (idx, really!).
1098 * Bit fields:
1099 * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
1100 * NOTE: For 256-entry circular buffer, use only bits [7:0].
1101 */
1102#define FH49_RSCSR_CHNL0_RBDCB_WPTR_REG (FH49_MEM_RSCSR_CHNL0 + 0x008)
1103#define FH49_RSCSR_CHNL0_WPTR (FH49_RSCSR_CHNL0_RBDCB_WPTR_REG)
1104
1105/**
1106 * Rx Config/Status Registers (RCSR)
1107 * Rx Config Reg for channel 0 (only channel used)
1108 *
1109 * Driver must initialize FH49_MEM_RCSR_CHNL0_CONFIG_REG as follows for
1110 * normal operation (see bit fields).
1111 *
1112 * Clearing FH49_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
1113 * Driver should poll FH49_MEM_RSSR_RX_STATUS_REG for
1114 * FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
1115 *
1116 * Bit fields:
1117 * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
1118 * '10' operate normally
1119 * 29-24: reserved
1120 * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
1121 * min "5" for 32 RBDs, max "12" for 4096 RBDs.
1122 * 19-18: reserved
1123 * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
1124 * '10' 12K, '11' 16K.
1125 * 15-14: reserved
1126 * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
1127 * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
1128 * typical value 0x10 (about 1/2 msec)
1129 * 3- 0: reserved
1130 */
1131#define FH49_MEM_RCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xC00)
1132#define FH49_MEM_RCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xCC0)
1133#define FH49_MEM_RCSR_CHNL0 (FH49_MEM_RCSR_LOWER_BOUND)
1134
1135#define FH49_MEM_RCSR_CHNL0_CONFIG_REG (FH49_MEM_RCSR_CHNL0)
1136
1137#define FH49_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
1138#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bit 12; NOTE(review): field doc above says 13-12 but mask covers bit 12 only -- confirm */
1139#define FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
1140#define FH49_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */
1141#define FH49_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
1142#define FH49_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31 */
1143
1144#define FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
1145#define FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
1146#define RX_RB_TIMEOUT (0x10) /* 0x10 * 32 usec units = ~512 usec (see timeout field doc above) */
1147
1148#define FH49_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
1149#define FH49_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
1150#define FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
1151
1152#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
1153#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
1154#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
1155#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
1156
1157#define FH49_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004)
1158#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
1159#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
1160
1161/**
1162 * Rx Shared Status Registers (RSSR)
1163 *
1164 * After stopping Rx DMA channel (writing 0 to
1165 * FH49_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
1166 * FH49_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
1167 *
1168 * Bit fields:
1169 * 24: 1 = Channel 0 is idle
1170 *
1171 * FH49_MEM_RSSR_SHARED_CTRL_REG and FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
1172 * contain default values that should not be altered by the driver.
1173 */
1174#define FH49_MEM_RSSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xC40)
1175#define FH49_MEM_RSSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xD00)
1176
1177#define FH49_MEM_RSSR_SHARED_CTRL_REG (FH49_MEM_RSSR_LOWER_BOUND)
1178#define FH49_MEM_RSSR_RX_STATUS_REG (FH49_MEM_RSSR_LOWER_BOUND + 0x004)
1179#define FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
1180 (FH49_MEM_RSSR_LOWER_BOUND + 0x008)
1181
1182#define FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
1183
1184#define FH49_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
1185
1186/* TFDB Area - TFDs buffer table */
1187#define FH49_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
1188#define FH49_TFDIB_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x900)
1189#define FH49_TFDIB_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0x958)
1190#define FH49_TFDIB_CTRL0_REG(_chnl) (FH49_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
1191#define FH49_TFDIB_CTRL1_REG(_chnl) (FH49_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
1192
1193/**
1194 * Transmit DMA Channel Control/Status Registers (TCSR)
1195 *
1196 * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
1197 * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
1198 * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
1199 *
1200 * To use a Tx DMA channel, driver must initialize its
1201 * FH49_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
1202 *
1203 * FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1204 * FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL
1205 *
1206 * All other bits should be 0.
1207 *
1208 * Bit fields:
1209 * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
1210 * '10' operate normally
1211 * 29- 4: Reserved, set to "0"
1212 * 3: Enable internal DMA requests (1, normal operation), disable (0)
1213 * 2- 0: Reserved, set to "0"
1214 */
1215#define FH49_TCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xD00)
1216#define FH49_TCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xE60)
1217
1218/* Find Control/Status reg for given Tx DMA/FIFO channel */
1219#define FH49_TCSR_CHNL_NUM (7) /* NOTE(review): comment above says 8 channels; value here is 7 -- confirm 4965 channel count */
1220#define FH50_TCSR_CHNL_NUM (8)
1221
1222/* TCSR: tx_config register values */
1223#define FH49_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
1224 (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl))
1225#define FH49_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
1226 (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
1227#define FH49_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
1228 (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
1229
1230#define FH49_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
1231#define FH49_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001)
1232
1233#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000)
1234#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008)
1235
1236#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
1237#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
1238#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
1239
1240#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
1241#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000)
1242#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000)
1243
1244#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
1245#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
1246#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
1247
1248#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
1249#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
1250#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
1251
1252#define FH49_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
1253#define FH49_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
1254
1255/**
1256 * Tx Shared Status Registers (TSSR)
1257 *
1258 * After stopping Tx DMA channel (writing 0 to
1259 * FH49_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
1260 * FH49_TSSR_TX_STATUS_REG until selected Tx channel is idle
1261 * (channel's buffers empty | no pending requests).
1262 *
1263 * Bit fields:
1264 * 31-24: 1 = Channel buffers empty (channel 7:0)
1265 * 23-16: 1 = No pending requests (channel 7:0)
1266 */
1267#define FH49_TSSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xEA0)
1268#define FH49_TSSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xEC0)
1269
1270#define FH49_TSSR_TX_STATUS_REG (FH49_TSSR_LOWER_BOUND + 0x010)
1271
1272/**
1273 * Bit fields for TSSR(Tx Shared Status & Control) error status register:
1274 * 31: Indicates an address error when internal memory is accessed
1275 * uCode/driver must write "1" in order to clear this flag
1276 * 30: Indicates that Host did not send the expected number of dwords to FH
1277 * uCode/driver must write "1" in order to clear this flag
1278 * 16-9:Each status bit is for one channel. Indicates that an (Error) ActDMA
1279 * command was received from the scheduler while the TRB was already full
1280 * with previous command
1281 * uCode/driver must write "1" in order to clear this flag
1282 * 7-0: Each status bit indicates a channel's TxCredit error. When an error
1283 * bit is set, it indicates that the FH has received a full indication
1284 * from the RTC TxFIFO and the current value of the TxCredit counter was
1285 * not equal to zero. This means that the credit mechanism was not
1286 * synchronized to the TxFIFO status
1287 * uCode/driver must write "1" in order to clear this flag
1288 */
1289#define FH49_TSSR_TX_ERROR_REG (FH49_TSSR_LOWER_BOUND + 0x018)
1290
/* NOTE(review): this tests only the "no pending requests" field (bits 23-16
 * per the TSSR doc above); the "buffers empty" field (31-24) is not checked
 * -- confirm that is the intended idle condition. */
1291#define FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
1292
1293/* Tx service channels */
1294#define FH49_SRVC_CHNL (9)
1295#define FH49_SRVC_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x9C8)
1296#define FH49_SRVC_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0x9D0)
1297#define FH49_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
1298 (FH49_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
1299
1300#define FH49_TX_CHICKEN_BITS_REG (FH49_MEM_LOWER_BOUND + 0xE98)
1301/* Instruct FH to increment the retry count of a packet when
1302 * it is brought from the memory to TX-FIFO
1303 */
1304#define FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
1305
1306/* Keep Warm Size */
1307#define IL_KW_SIZE 0x1000 /* 4k */
1308
1309#endif /* __il_4965_h__ */
diff --git a/drivers/net/wireless/iwlegacy/Kconfig b/drivers/net/wireless/iwlegacy/Kconfig
index aef65cd47661..05bd375cb845 100644
--- a/drivers/net/wireless/iwlegacy/Kconfig
+++ b/drivers/net/wireless/iwlegacy/Kconfig
@@ -1,4 +1,4 @@
1config IWLWIFI_LEGACY 1config IWLEGACY
2 tristate 2 tristate
3 select FW_LOADER 3 select FW_LOADER
4 select NEW_LEDS 4 select NEW_LEDS
@@ -7,13 +7,13 @@ config IWLWIFI_LEGACY
7 select MAC80211_LEDS 7 select MAC80211_LEDS
8 8
9menu "Debugging Options" 9menu "Debugging Options"
10 depends on IWLWIFI_LEGACY 10 depends on IWLEGACY
11 11
12config IWLWIFI_LEGACY_DEBUG 12config IWLEGACY_DEBUG
13 bool "Enable full debugging output in 4965 and 3945 drivers" 13 bool "Enable full debugging output in iwlegacy (iwl 3945/4965) drivers"
14 depends on IWLWIFI_LEGACY 14 depends on IWLEGACY
15 ---help--- 15 ---help---
16 This option will enable debug tracing output for the iwlwifilegacy 16 This option will enable debug tracing output for the iwlegacy
17 drivers. 17 drivers.
18 18
19 This will result in the kernel module being ~100k larger. You can 19 This will result in the kernel module being ~100k larger. You can
@@ -29,43 +29,26 @@ config IWLWIFI_LEGACY_DEBUG
29 % echo 0x43fff > /sys/class/net/wlan0/device/debug_level 29 % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
30 30
31 You can find the list of debug mask values in: 31 You can find the list of debug mask values in:
32 drivers/net/wireless/iwlwifilegacy/iwl-debug.h 32 drivers/net/wireless/iwlegacy/common.h
33 33
34 If this is your first time using this driver, you should say Y here 34 If this is your first time using this driver, you should say Y here
35 as the debug information can assist others in helping you resolve 35 as the debug information can assist others in helping you resolve
36 any problems you may encounter. 36 any problems you may encounter.
37 37
38config IWLWIFI_LEGACY_DEBUGFS 38config IWLEGACY_DEBUGFS
39 bool "4965 and 3945 debugfs support" 39 bool "iwlegacy (iwl 3945/4965) debugfs support"
40 depends on IWLWIFI_LEGACY && MAC80211_DEBUGFS 40 depends on IWLEGACY && MAC80211_DEBUGFS
41 ---help--- 41 ---help---
42 Enable creation of debugfs files for the iwlwifilegacy drivers. This 42 Enable creation of debugfs files for the iwlegacy drivers. This
43 is a low-impact option that allows getting insight into the 43 is a low-impact option that allows getting insight into the
44 driver's state at runtime. 44 driver's state at runtime.
45 45
46config IWLWIFI_LEGACY_DEVICE_TRACING
47 bool "iwlwifilegacy legacy device access tracing"
48 depends on IWLWIFI_LEGACY
49 depends on EVENT_TRACING
50 help
51 Say Y here to trace all commands, including TX frames and IO
52 accesses, sent to the device. If you say yes, iwlwifilegacy will
53 register with the ftrace framework for event tracing and dump
54 all this information to the ringbuffer, you may need to
55 increase the ringbuffer size. See the ftrace documentation
56 for more information.
57
58 When tracing is not enabled, this option still has some
59 (though rather small) overhead.
60
61 If unsure, say Y so we can help you better when problems
62 occur.
63endmenu 46endmenu
64 47
65config IWL4965 48config IWL4965
66 tristate "Intel Wireless WiFi 4965AGN (iwl4965)" 49 tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
67 depends on PCI && MAC80211 50 depends on PCI && MAC80211
68 select IWLWIFI_LEGACY 51 select IWLEGACY
69 ---help--- 52 ---help---
70 This option enables support for 53 This option enables support for
71 54
@@ -93,7 +76,7 @@ config IWL4965
93config IWL3945 76config IWL3945
94 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)" 77 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
95 depends on PCI && MAC80211 78 depends on PCI && MAC80211
96 select IWLWIFI_LEGACY 79 select IWLEGACY
97 ---help--- 80 ---help---
98 Select to build the driver supporting the: 81 Select to build the driver supporting the:
99 82
diff --git a/drivers/net/wireless/iwlegacy/Makefile b/drivers/net/wireless/iwlegacy/Makefile
index d56aeb38c211..c985a01a0731 100644
--- a/drivers/net/wireless/iwlegacy/Makefile
+++ b/drivers/net/wireless/iwlegacy/Makefile
@@ -1,25 +1,17 @@
1obj-$(CONFIG_IWLWIFI_LEGACY) += iwl-legacy.o 1obj-$(CONFIG_IWLEGACY) += iwlegacy.o
2iwl-legacy-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o 2iwlegacy-objs := common.o
3iwl-legacy-objs += iwl-rx.o iwl-tx.o iwl-sta.o 3iwlegacy-$(CONFIG_IWLEGACY_DEBUGFS) += debug.o
4iwl-legacy-objs += iwl-scan.o iwl-led.o
5iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-debugfs.o
6iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) += iwl-devtrace.o
7 4
8iwl-legacy-objs += $(iwl-legacy-m) 5iwlegacy-objs += $(iwlegacy-m)
9
10CFLAGS_iwl-devtrace.o := -I$(src)
11 6
12# 4965 7# 4965
13obj-$(CONFIG_IWL4965) += iwl4965.o 8obj-$(CONFIG_IWL4965) += iwl4965.o
14iwl4965-objs := iwl-4965.o iwl4965-base.o iwl-4965-rs.o iwl-4965-led.o 9iwl4965-objs := 4965.o 4965-mac.o 4965-rs.o 4965-calib.o
15iwl4965-objs += iwl-4965-ucode.o iwl-4965-tx.o 10iwl4965-$(CONFIG_IWLEGACY_DEBUGFS) += 4965-debug.o
16iwl4965-objs += iwl-4965-lib.o iwl-4965-rx.o iwl-4965-calib.o
17iwl4965-objs += iwl-4965-sta.o iwl-4965-eeprom.o
18iwl4965-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-4965-debugfs.o
19 11
20# 3945 12# 3945
21obj-$(CONFIG_IWL3945) += iwl3945.o 13obj-$(CONFIG_IWL3945) += iwl3945.o
22iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o 14iwl3945-objs := 3945-mac.o 3945.o 3945-rs.o
23iwl3945-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-3945-debugfs.o 15iwl3945-$(CONFIG_IWLEGACY_DEBUGFS) += 3945-debug.o
24 16
25ccflags-y += -D__CHECK_ENDIAN__ 17ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwlegacy/iwl-commands.h b/drivers/net/wireless/iwlegacy/commands.h
index 89904054473f..25dd7d28d022 100644
--- a/drivers/net/wireless/iwlegacy/iwl-commands.h
+++ b/drivers/net/wireless/iwlegacy/commands.h
@@ -60,100 +60,96 @@
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63/*
64 * Please use this file (iwl-commands.h) only for uCode API definitions.
65 * Please use iwl-xxxx-hw.h for hardware-related definitions.
66 * Please use iwl-dev.h for driver implementation definitions.
67 */
68 63
69#ifndef __iwl_legacy_commands_h__ 64#ifndef __il_commands_h__
70#define __iwl_legacy_commands_h__ 65#define __il_commands_h__
71 66
72struct iwl_priv; 67#include <linux/ieee80211.h>
73 68
74/* uCode version contains 4 values: Major/Minor/API/Serial */ 69struct il_priv;
75#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
76#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
77#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
78#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
79 70
71/* uCode version contains 4 values: Major/Minor/API/Serial */
72#define IL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
73#define IL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
74#define IL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
75#define IL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
80 76
81/* Tx rates */ 77/* Tx rates */
82#define IWL_CCK_RATES 4 78#define IL_CCK_RATES 4
83#define IWL_OFDM_RATES 8 79#define IL_OFDM_RATES 8
84#define IWL_MAX_RATES (IWL_CCK_RATES + IWL_OFDM_RATES) 80#define IL_MAX_RATES (IL_CCK_RATES + IL_OFDM_RATES)
85 81
86enum { 82enum {
87 REPLY_ALIVE = 0x1, 83 N_ALIVE = 0x1,
88 REPLY_ERROR = 0x2, 84 N_ERROR = 0x2,
89 85
90 /* RXON and QOS commands */ 86 /* RXON and QOS commands */
91 REPLY_RXON = 0x10, 87 C_RXON = 0x10,
92 REPLY_RXON_ASSOC = 0x11, 88 C_RXON_ASSOC = 0x11,
93 REPLY_QOS_PARAM = 0x13, 89 C_QOS_PARAM = 0x13,
94 REPLY_RXON_TIMING = 0x14, 90 C_RXON_TIMING = 0x14,
95 91
96 /* Multi-Station support */ 92 /* Multi-Station support */
97 REPLY_ADD_STA = 0x18, 93 C_ADD_STA = 0x18,
98 REPLY_REMOVE_STA = 0x19, 94 C_REM_STA = 0x19,
99 95
100 /* Security */ 96 /* Security */
101 REPLY_WEPKEY = 0x20, 97 C_WEPKEY = 0x20,
102 98
103 /* RX, TX, LEDs */ 99 /* RX, TX, LEDs */
104 REPLY_3945_RX = 0x1b, /* 3945 only */ 100 N_3945_RX = 0x1b, /* 3945 only */
105 REPLY_TX = 0x1c, 101 C_TX = 0x1c,
106 REPLY_RATE_SCALE = 0x47, /* 3945 only */ 102 C_RATE_SCALE = 0x47, /* 3945 only */
107 REPLY_LEDS_CMD = 0x48, 103 C_LEDS = 0x48,
108 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */ 104 C_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 */
109 105
110 /* 802.11h related */ 106 /* 802.11h related */
111 REPLY_CHANNEL_SWITCH = 0x72, 107 C_CHANNEL_SWITCH = 0x72,
112 CHANNEL_SWITCH_NOTIFICATION = 0x73, 108 N_CHANNEL_SWITCH = 0x73,
113 REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74, 109 C_SPECTRUM_MEASUREMENT = 0x74,
114 SPECTRUM_MEASURE_NOTIFICATION = 0x75, 110 N_SPECTRUM_MEASUREMENT = 0x75,
115 111
116 /* Power Management */ 112 /* Power Management */
117 POWER_TABLE_CMD = 0x77, 113 C_POWER_TBL = 0x77,
118 PM_SLEEP_NOTIFICATION = 0x7A, 114 N_PM_SLEEP = 0x7A,
119 PM_DEBUG_STATISTIC_NOTIFIC = 0x7B, 115 N_PM_DEBUG_STATS = 0x7B,
120 116
121 /* Scan commands and notifications */ 117 /* Scan commands and notifications */
122 REPLY_SCAN_CMD = 0x80, 118 C_SCAN = 0x80,
123 REPLY_SCAN_ABORT_CMD = 0x81, 119 C_SCAN_ABORT = 0x81,
124 SCAN_START_NOTIFICATION = 0x82, 120 N_SCAN_START = 0x82,
125 SCAN_RESULTS_NOTIFICATION = 0x83, 121 N_SCAN_RESULTS = 0x83,
126 SCAN_COMPLETE_NOTIFICATION = 0x84, 122 N_SCAN_COMPLETE = 0x84,
127 123
128 /* IBSS/AP commands */ 124 /* IBSS/AP commands */
129 BEACON_NOTIFICATION = 0x90, 125 N_BEACON = 0x90,
130 REPLY_TX_BEACON = 0x91, 126 C_TX_BEACON = 0x91,
131 127
132 /* Miscellaneous commands */ 128 /* Miscellaneous commands */
133 REPLY_TX_PWR_TABLE_CMD = 0x97, 129 C_TX_PWR_TBL = 0x97,
134 130
135 /* Bluetooth device coexistence config command */ 131 /* Bluetooth device coexistence config command */
136 REPLY_BT_CONFIG = 0x9b, 132 C_BT_CONFIG = 0x9b,
137 133
138 /* Statistics */ 134 /* Statistics */
139 REPLY_STATISTICS_CMD = 0x9c, 135 C_STATS = 0x9c,
140 STATISTICS_NOTIFICATION = 0x9d, 136 N_STATS = 0x9d,
141 137
142 /* RF-KILL commands and notifications */ 138 /* RF-KILL commands and notifications */
143 CARD_STATE_NOTIFICATION = 0xa1, 139 N_CARD_STATE = 0xa1,
144 140
145 /* Missed beacons notification */ 141 /* Missed beacons notification */
146 MISSED_BEACONS_NOTIFICATION = 0xa2, 142 N_MISSED_BEACONS = 0xa2,
147 143
148 REPLY_CT_KILL_CONFIG_CMD = 0xa4, 144 C_CT_KILL_CONFIG = 0xa4,
149 SENSITIVITY_CMD = 0xa8, 145 C_SENSITIVITY = 0xa8,
150 REPLY_PHY_CALIBRATION_CMD = 0xb0, 146 C_PHY_CALIBRATION = 0xb0,
151 REPLY_RX_PHY_CMD = 0xc0, 147 N_RX_PHY = 0xc0,
152 REPLY_RX_MPDU_CMD = 0xc1, 148 N_RX_MPDU = 0xc1,
153 REPLY_RX = 0xc3, 149 N_RX = 0xc3,
154 REPLY_COMPRESSED_BA = 0xc5, 150 N_COMPRESSED_BA = 0xc5,
155 151
156 REPLY_MAX = 0xff 152 IL_CN_MAX = 0xff
157}; 153};
158 154
159/****************************************************************************** 155/******************************************************************************
@@ -163,25 +159,25 @@ enum {
163 * 159 *
164 *****************************************************************************/ 160 *****************************************************************************/
165 161
166/* iwl_cmd_header flags value */ 162/* il_cmd_header flags value */
167#define IWL_CMD_FAILED_MSK 0x40 163#define IL_CMD_FAILED_MSK 0x40
168 164
169#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f) 165#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
170#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8) 166#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
171#define SEQ_TO_INDEX(s) ((s) & 0xff) 167#define SEQ_TO_IDX(s) ((s) & 0xff)
172#define INDEX_TO_SEQ(i) ((i) & 0xff) 168#define IDX_TO_SEQ(i) ((i) & 0xff)
173#define SEQ_HUGE_FRAME cpu_to_le16(0x4000) 169#define SEQ_HUGE_FRAME cpu_to_le16(0x4000)
174#define SEQ_RX_FRAME cpu_to_le16(0x8000) 170#define SEQ_RX_FRAME cpu_to_le16(0x8000)
175 171
176/** 172/**
177 * struct iwl_cmd_header 173 * struct il_cmd_header
178 * 174 *
179 * This header format appears in the beginning of each command sent from the 175 * This header format appears in the beginning of each command sent from the
180 * driver, and each response/notification received from uCode. 176 * driver, and each response/notification received from uCode.
181 */ 177 */
182struct iwl_cmd_header { 178struct il_cmd_header {
183 u8 cmd; /* Command ID: REPLY_RXON, etc. */ 179 u8 cmd; /* Command ID: C_RXON, etc. */
184 u8 flags; /* 0:5 reserved, 6 abort, 7 internal */ 180 u8 flags; /* 0:5 reserved, 6 abort, 7 internal */
185 /* 181 /*
186 * The driver sets up the sequence number to values of its choosing. 182 * The driver sets up the sequence number to values of its choosing.
187 * uCode does not use this value, but passes it back to the driver 183 * uCode does not use this value, but passes it back to the driver
@@ -192,29 +188,28 @@ struct iwl_cmd_header {
192 * There is one exception: uCode sets bit 15 when it originates 188 * There is one exception: uCode sets bit 15 when it originates
193 * the response/notification, i.e. when the response/notification 189 * the response/notification, i.e. when the response/notification
194 * is not a direct response to a command sent by the driver. For 190 * is not a direct response to a command sent by the driver. For
195 * example, uCode issues REPLY_3945_RX when it sends a received frame 191 * example, uCode issues N_3945_RX when it sends a received frame
196 * to the driver; it is not a direct response to any driver command. 192 * to the driver; it is not a direct response to any driver command.
197 * 193 *
198 * The Linux driver uses the following format: 194 * The Linux driver uses the following format:
199 * 195 *
200 * 0:7 tfd index - position within TX queue 196 * 0:7 tfd idx - position within TX queue
201 * 8:12 TX queue id 197 * 8:12 TX queue id
202 * 13 reserved 198 * 13 reserved
203 * 14 huge - driver sets this to indicate command is in the 199 * 14 huge - driver sets this to indicate command is in the
204 * 'huge' storage at the end of the command buffers 200 * 'huge' storage at the end of the command buffers
205 * 15 unsolicited RX or uCode-originated notification 201 * 15 unsolicited RX or uCode-originated notification
206 */ 202 */
207 __le16 sequence; 203 __le16 sequence;
208 204
209 /* command or response/notification data follows immediately */ 205 /* command or response/notification data follows immediately */
210 u8 data[0]; 206 u8 data[0];
211} __packed; 207} __packed;
212 208
213
214/** 209/**
215 * struct iwl3945_tx_power 210 * struct il3945_tx_power
216 * 211 *
217 * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_SCAN_CMD, REPLY_CHANNEL_SWITCH 212 * Used in C_TX_PWR_TBL, C_SCAN, C_CHANNEL_SWITCH
218 * 213 *
219 * Each entry contains two values: 214 * Each entry contains two values:
220 * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained 215 * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
@@ -223,21 +218,21 @@ struct iwl_cmd_header {
223 * 2) Radio gain. This sets the analog gain of the radio Tx path. 218 * 2) Radio gain. This sets the analog gain of the radio Tx path.
224 * It is a coarser setting, and behaves in a logarithmic (dB) fashion. 219 * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
225 * 220 *
226 * Driver obtains values from struct iwl3945_tx_power power_gain_table[][]. 221 * Driver obtains values from struct il3945_tx_power power_gain_table[][].
227 */ 222 */
228struct iwl3945_tx_power { 223struct il3945_tx_power {
229 u8 tx_gain; /* gain for analog radio */ 224 u8 tx_gain; /* gain for analog radio */
230 u8 dsp_atten; /* gain for DSP */ 225 u8 dsp_atten; /* gain for DSP */
231} __packed; 226} __packed;
232 227
233/** 228/**
234 * struct iwl3945_power_per_rate 229 * struct il3945_power_per_rate
235 * 230 *
236 * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH 231 * Used in C_TX_PWR_TBL, C_CHANNEL_SWITCH
237 */ 232 */
238struct iwl3945_power_per_rate { 233struct il3945_power_per_rate {
239 u8 rate; /* plcp */ 234 u8 rate; /* plcp */
240 struct iwl3945_tx_power tpc; 235 struct il3945_tx_power tpc;
241 u8 reserved; 236 u8 reserved;
242} __packed; 237} __packed;
243 238
@@ -245,10 +240,10 @@ struct iwl3945_power_per_rate {
245 * iwl4965 rate_n_flags bit fields 240 * iwl4965 rate_n_flags bit fields
246 * 241 *
247 * rate_n_flags format is used in following iwl4965 commands: 242 * rate_n_flags format is used in following iwl4965 commands:
248 * REPLY_RX (response only) 243 * N_RX (response only)
249 * REPLY_RX_MPDU (response only) 244 * N_RX_MPDU (response only)
250 * REPLY_TX (both command and response) 245 * C_TX (both command and response)
251 * REPLY_TX_LINK_QUALITY_CMD 246 * C_TX_LINK_QUALITY_CMD
252 * 247 *
253 * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"): 248 * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"):
254 * 2-0: 0) 6 Mbps 249 * 2-0: 0) 6 Mbps
@@ -326,17 +321,17 @@ struct iwl3945_power_per_rate {
326#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK) 321#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK)
327#define RATE_ANT_NUM 3 322#define RATE_ANT_NUM 3
328 323
329#define POWER_TABLE_NUM_ENTRIES 33 324#define POWER_TBL_NUM_ENTRIES 33
330#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32 325#define POWER_TBL_NUM_HT_OFDM_ENTRIES 32
331#define POWER_TABLE_CCK_ENTRY 32 326#define POWER_TBL_CCK_ENTRY 32
332 327
333#define IWL_PWR_NUM_HT_OFDM_ENTRIES 24 328#define IL_PWR_NUM_HT_OFDM_ENTRIES 24
334#define IWL_PWR_CCK_ENTRIES 2 329#define IL_PWR_CCK_ENTRIES 2
335 330
336/** 331/**
337 * union iwl4965_tx_power_dual_stream 332 * union il4965_tx_power_dual_stream
338 * 333 *
339 * Host format used for REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH 334 * Host format used for C_TX_PWR_TBL, C_CHANNEL_SWITCH
340 * Use __le32 version (struct tx_power_dual_stream) when building command. 335 * Use __le32 version (struct tx_power_dual_stream) when building command.
341 * 336 *
342 * Driver provides radio gain and DSP attenuation settings to device in pairs, 337 * Driver provides radio gain and DSP attenuation settings to device in pairs,
@@ -347,9 +342,9 @@ struct iwl3945_power_per_rate {
347 * For MIMO rates, one value may be different from the other, 342 * For MIMO rates, one value may be different from the other,
348 * in order to balance the Tx output between the two transmitters. 343 * in order to balance the Tx output between the two transmitters.
349 * 344 *
350 * See more details in doc for TXPOWER in iwl-4965-hw.h. 345 * See more details in doc for TXPOWER in 4965.h.
351 */ 346 */
352union iwl4965_tx_power_dual_stream { 347union il4965_tx_power_dual_stream {
353 struct { 348 struct {
354 u8 radio_tx_gain[2]; 349 u8 radio_tx_gain[2];
355 u8 dsp_predis_atten[2]; 350 u8 dsp_predis_atten[2];
@@ -360,21 +355,21 @@ union iwl4965_tx_power_dual_stream {
360/** 355/**
361 * struct tx_power_dual_stream 356 * struct tx_power_dual_stream
362 * 357 *
363 * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH 358 * Table entries in C_TX_PWR_TBL, C_CHANNEL_SWITCH
364 * 359 *
365 * Same format as iwl_tx_power_dual_stream, but __le32 360 * Same format as il_tx_power_dual_stream, but __le32
366 */ 361 */
367struct tx_power_dual_stream { 362struct tx_power_dual_stream {
368 __le32 dw; 363 __le32 dw;
369} __packed; 364} __packed;
370 365
371/** 366/**
372 * struct iwl4965_tx_power_db 367 * struct il4965_tx_power_db
373 * 368 *
374 * Entire table within REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH 369 * Entire table within C_TX_PWR_TBL, C_CHANNEL_SWITCH
375 */ 370 */
376struct iwl4965_tx_power_db { 371struct il4965_tx_power_db {
377 struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES]; 372 struct tx_power_dual_stream power_tbl[POWER_TBL_NUM_ENTRIES];
378} __packed; 373} __packed;
379 374
380/****************************************************************************** 375/******************************************************************************
@@ -387,7 +382,7 @@ struct iwl4965_tx_power_db {
387#define INITIALIZE_SUBTYPE (9) 382#define INITIALIZE_SUBTYPE (9)
388 383
389/* 384/*
390 * ("Initialize") REPLY_ALIVE = 0x1 (response only, not a command) 385 * ("Initialize") N_ALIVE = 0x1 (response only, not a command)
391 * 386 *
392 * uCode issues this "initialize alive" notification once the initialization 387 * uCode issues this "initialize alive" notification once the initialization
393 * uCode image has completed its work, and is ready to load the runtime image. 388 * uCode image has completed its work, and is ready to load the runtime image.
@@ -410,7 +405,7 @@ struct iwl4965_tx_power_db {
410 * 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation, 405 * 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation,
411 * for each of 5 frequency ranges. 406 * for each of 5 frequency ranges.
412 */ 407 */
413struct iwl_init_alive_resp { 408struct il_init_alive_resp {
414 u8 ucode_minor; 409 u8 ucode_minor;
415 u8 ucode_major; 410 u8 ucode_major;
416 __le16 reserved1; 411 __le16 reserved1;
@@ -433,9 +428,8 @@ struct iwl_init_alive_resp {
433 * 2 Tx chains */ 428 * 2 Tx chains */
434} __packed; 429} __packed;
435 430
436
437/** 431/**
438 * REPLY_ALIVE = 0x1 (response only, not a command) 432 * N_ALIVE = 0x1 (response only, not a command)
439 * 433 *
440 * uCode issues this "alive" notification once the runtime image is ready 434 * uCode issues this "alive" notification once the runtime image is ready
441 * to receive commands from the driver. This is the *second* "alive" 435 * to receive commands from the driver. This is the *second* "alive"
@@ -454,7 +448,7 @@ struct iwl_init_alive_resp {
454 * __le32 log_size; log capacity (in number of entries) 448 * __le32 log_size; log capacity (in number of entries)
455 * __le32 type; (1) timestamp with each entry, (0) no timestamp 449 * __le32 type; (1) timestamp with each entry, (0) no timestamp
456 * __le32 wraps; # times uCode has wrapped to top of circular buffer 450 * __le32 wraps; # times uCode has wrapped to top of circular buffer
457 * __le32 write_index; next circular buffer entry that uCode would fill 451 * __le32 write_idx; next circular buffer entry that uCode would fill
458 * 452 *
459 * The header is followed by the circular buffer of log entries. Entries 453 * The header is followed by the circular buffer of log entries. Entries
460 * with timestamps have the following format: 454 * with timestamps have the following format:
@@ -511,13 +505,13 @@ struct iwl_init_alive_resp {
511 * The Linux driver can print both logs to the system log when a uCode error 505 * The Linux driver can print both logs to the system log when a uCode error
512 * occurs. 506 * occurs.
513 */ 507 */
514struct iwl_alive_resp { 508struct il_alive_resp {
515 u8 ucode_minor; 509 u8 ucode_minor;
516 u8 ucode_major; 510 u8 ucode_major;
517 __le16 reserved1; 511 __le16 reserved1;
518 u8 sw_rev[8]; 512 u8 sw_rev[8];
519 u8 ver_type; 513 u8 ver_type;
520 u8 ver_subtype; /* not "9" for runtime alive */ 514 u8 ver_subtype; /* not "9" for runtime alive */
521 __le16 reserved2; 515 __le16 reserved2;
522 __le32 log_event_table_ptr; /* SRAM address for event log */ 516 __le32 log_event_table_ptr; /* SRAM address for event log */
523 __le32 error_event_table_ptr; /* SRAM address for error log */ 517 __le32 error_event_table_ptr; /* SRAM address for error log */
@@ -526,9 +520,9 @@ struct iwl_alive_resp {
526} __packed; 520} __packed;
527 521
528/* 522/*
529 * REPLY_ERROR = 0x2 (response only, not a command) 523 * N_ERROR = 0x2 (response only, not a command)
530 */ 524 */
531struct iwl_error_resp { 525struct il_error_resp {
532 __le32 error_type; 526 __le32 error_type;
533 u8 cmd_id; 527 u8 cmd_id;
534 u8 reserved1; 528 u8 reserved1;
@@ -554,7 +548,6 @@ enum {
554 RXON_DEV_TYPE_SNIFFER = 6, 548 RXON_DEV_TYPE_SNIFFER = 6,
555}; 549};
556 550
557
558#define RXON_RX_CHAIN_DRIVER_FORCE_MSK cpu_to_le16(0x1 << 0) 551#define RXON_RX_CHAIN_DRIVER_FORCE_MSK cpu_to_le16(0x1 << 0)
559#define RXON_RX_CHAIN_DRIVER_FORCE_POS (0) 552#define RXON_RX_CHAIN_DRIVER_FORCE_POS (0)
560#define RXON_RX_CHAIN_VALID_MSK cpu_to_le16(0x7 << 1) 553#define RXON_RX_CHAIN_VALID_MSK cpu_to_le16(0x7 << 1)
@@ -593,7 +586,6 @@ enum {
593* (according to ON_AIR deassertion) */ 586* (according to ON_AIR deassertion) */
594#define RXON_FLG_TSF2HOST_MSK cpu_to_le32(1 << 15) 587#define RXON_FLG_TSF2HOST_MSK cpu_to_le32(1 << 15)
595 588
596
597/* HT flags */ 589/* HT flags */
598#define RXON_FLG_CTRL_CHANNEL_LOC_POS (22) 590#define RXON_FLG_CTRL_CHANNEL_LOC_POS (22)
599#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK cpu_to_le32(0x1 << 22) 591#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK cpu_to_le32(0x1 << 22)
@@ -640,7 +632,7 @@ enum {
640#define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6) 632#define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6)
641 633
642/** 634/**
643 * REPLY_RXON = 0x10 (command, has simple generic response) 635 * C_RXON = 0x10 (command, has simple generic response)
644 * 636 *
645 * RXON tunes the radio tuner to a service channel, and sets up a number 637 * RXON tunes the radio tuner to a service channel, and sets up a number
646 * of parameters that are used primarily for Rx, but also for Tx operations. 638 * of parameters that are used primarily for Rx, but also for Tx operations.
@@ -653,11 +645,11 @@ enum {
653 * channel. 645 * channel.
654 * 646 *
655 * NOTE: All RXONs wipe clean the internal txpower table. Driver must 647 * NOTE: All RXONs wipe clean the internal txpower table. Driver must
656 * issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10), 648 * issue a new C_TX_PWR_TBL after each C_RXON (0x10),
657 * regardless of whether RXON_FILTER_ASSOC_MSK is set. 649 * regardless of whether RXON_FILTER_ASSOC_MSK is set.
658 */ 650 */
659 651
660struct iwl3945_rxon_cmd { 652struct il3945_rxon_cmd {
661 u8 node_addr[6]; 653 u8 node_addr[6];
662 __le16 reserved1; 654 __le16 reserved1;
663 u8 bssid_addr[6]; 655 u8 bssid_addr[6];
@@ -676,7 +668,7 @@ struct iwl3945_rxon_cmd {
676 __le16 reserved5; 668 __le16 reserved5;
677} __packed; 669} __packed;
678 670
679struct iwl4965_rxon_cmd { 671struct il4965_rxon_cmd {
680 u8 node_addr[6]; 672 u8 node_addr[6];
681 __le16 reserved1; 673 __le16 reserved1;
682 u8 bssid_addr[6]; 674 u8 bssid_addr[6];
@@ -699,7 +691,7 @@ struct iwl4965_rxon_cmd {
699/* Create a common rxon cmd which will be typecast into the 3945 or 4965 691/* Create a common rxon cmd which will be typecast into the 3945 or 4965
700 * specific rxon cmd, depending on where it is called from. 692 * specific rxon cmd, depending on where it is called from.
701 */ 693 */
702struct iwl_legacy_rxon_cmd { 694struct il_rxon_cmd {
703 u8 node_addr[6]; 695 u8 node_addr[6];
704 __le16 reserved1; 696 __le16 reserved1;
705 u8 bssid_addr[6]; 697 u8 bssid_addr[6];
@@ -721,11 +713,10 @@ struct iwl_legacy_rxon_cmd {
721 u8 reserved5; 713 u8 reserved5;
722} __packed; 714} __packed;
723 715
724
725/* 716/*
726 * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response) 717 * C_RXON_ASSOC = 0x11 (command, has simple generic response)
727 */ 718 */
728struct iwl3945_rxon_assoc_cmd { 719struct il3945_rxon_assoc_cmd {
729 __le32 flags; 720 __le32 flags;
730 __le32 filter_flags; 721 __le32 filter_flags;
731 u8 ofdm_basic_rates; 722 u8 ofdm_basic_rates;
@@ -733,7 +724,7 @@ struct iwl3945_rxon_assoc_cmd {
733 __le16 reserved; 724 __le16 reserved;
734} __packed; 725} __packed;
735 726
736struct iwl4965_rxon_assoc_cmd { 727struct il4965_rxon_assoc_cmd {
737 __le32 flags; 728 __le32 flags;
738 __le32 filter_flags; 729 __le32 filter_flags;
739 u8 ofdm_basic_rates; 730 u8 ofdm_basic_rates;
@@ -744,17 +735,17 @@ struct iwl4965_rxon_assoc_cmd {
744 __le16 reserved; 735 __le16 reserved;
745} __packed; 736} __packed;
746 737
747#define IWL_CONN_MAX_LISTEN_INTERVAL 10 738#define IL_CONN_MAX_LISTEN_INTERVAL 10
748#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */ 739#define IL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */
749#define IWL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */ 740#define IL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */
750 741
751/* 742/*
752 * REPLY_RXON_TIMING = 0x14 (command, has simple generic response) 743 * C_RXON_TIMING = 0x14 (command, has simple generic response)
753 */ 744 */
754struct iwl_rxon_time_cmd { 745struct il_rxon_time_cmd {
755 __le64 timestamp; 746 __le64 timestamp;
756 __le16 beacon_interval; 747 __le16 beacon_interval;
757 __le16 atim_window; 748 __le16 atim_win;
758 __le32 beacon_init_val; 749 __le32 beacon_init_val;
759 __le16 listen_interval; 750 __le16 listen_interval;
760 u8 dtim_period; 751 u8 dtim_period;
@@ -762,32 +753,32 @@ struct iwl_rxon_time_cmd {
762} __packed; 753} __packed;
763 754
764/* 755/*
765 * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response) 756 * C_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
766 */ 757 */
767struct iwl3945_channel_switch_cmd { 758struct il3945_channel_switch_cmd {
768 u8 band; 759 u8 band;
769 u8 expect_beacon; 760 u8 expect_beacon;
770 __le16 channel; 761 __le16 channel;
771 __le32 rxon_flags; 762 __le32 rxon_flags;
772 __le32 rxon_filter_flags; 763 __le32 rxon_filter_flags;
773 __le32 switch_time; 764 __le32 switch_time;
774 struct iwl3945_power_per_rate power[IWL_MAX_RATES]; 765 struct il3945_power_per_rate power[IL_MAX_RATES];
775} __packed; 766} __packed;
776 767
777struct iwl4965_channel_switch_cmd { 768struct il4965_channel_switch_cmd {
778 u8 band; 769 u8 band;
779 u8 expect_beacon; 770 u8 expect_beacon;
780 __le16 channel; 771 __le16 channel;
781 __le32 rxon_flags; 772 __le32 rxon_flags;
782 __le32 rxon_filter_flags; 773 __le32 rxon_filter_flags;
783 __le32 switch_time; 774 __le32 switch_time;
784 struct iwl4965_tx_power_db tx_power; 775 struct il4965_tx_power_db tx_power;
785} __packed; 776} __packed;
786 777
787/* 778/*
788 * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command) 779 * N_CHANNEL_SWITCH = 0x73 (notification only, not a command)
789 */ 780 */
790struct iwl_csa_notification { 781struct il_csa_notification {
791 __le16 band; 782 __le16 band;
792 __le16 channel; 783 __le16 channel;
793 __le32 status; /* 0 - OK, 1 - fail */ 784 __le32 status; /* 0 - OK, 1 - fail */
@@ -800,22 +791,22 @@ struct iwl_csa_notification {
800 *****************************************************************************/ 791 *****************************************************************************/
801 792
802/** 793/**
803 * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM 794 * struct il_ac_qos -- QOS timing params for C_QOS_PARAM
804 * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd 795 * One for each of 4 EDCA access categories in struct il_qosparam_cmd
805 * 796 *
806 * @cw_min: Contention window, start value in numbers of slots. 797 * @cw_min: Contention win, start value in numbers of slots.
807 * Should be a power-of-2, minus 1. Device's default is 0x0f. 798 * Should be a power-of-2, minus 1. Device's default is 0x0f.
808 * @cw_max: Contention window, max value in numbers of slots. 799 * @cw_max: Contention win, max value in numbers of slots.
809 * Should be a power-of-2, minus 1. Device's default is 0x3f. 800 * Should be a power-of-2, minus 1. Device's default is 0x3f.
810 * @aifsn: Number of slots in Arbitration Interframe Space (before 801 * @aifsn: Number of slots in Arbitration Interframe Space (before
811 * performing random backoff timing prior to Tx). Device default 1. 802 * performing random backoff timing prior to Tx). Device default 1.
812 * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0. 803 * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0.
813 * 804 *
814 * Device will automatically increase contention window by (2*CW) + 1 for each 805 * Device will automatically increase contention win by (2*CW) + 1 for each
815 * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW 806 * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW
816 * value, to cap the CW value. 807 * value, to cap the CW value.
817 */ 808 */
818struct iwl_ac_qos { 809struct il_ac_qos {
819 __le16 cw_min; 810 __le16 cw_min;
820 __le16 cw_max; 811 __le16 cw_max;
821 u8 aifsn; 812 u8 aifsn;
@@ -832,14 +823,14 @@ struct iwl_ac_qos {
832#define AC_NUM 4 823#define AC_NUM 4
833 824
834/* 825/*
835 * REPLY_QOS_PARAM = 0x13 (command, has simple generic response) 826 * C_QOS_PARAM = 0x13 (command, has simple generic response)
836 * 827 *
837 * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs 828 * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs
838 * 0: Background, 1: Best Effort, 2: Video, 3: Voice. 829 * 0: Background, 1: Best Effort, 2: Video, 3: Voice.
839 */ 830 */
840struct iwl_qosparam_cmd { 831struct il_qosparam_cmd {
841 __le32 qos_flags; 832 __le32 qos_flags;
842 struct iwl_ac_qos ac[AC_NUM]; 833 struct il_ac_qos ac[AC_NUM];
843} __packed; 834} __packed;
844 835
845/****************************************************************************** 836/******************************************************************************
@@ -852,15 +843,15 @@ struct iwl_qosparam_cmd {
852 */ 843 */
853 844
854/* Special, dedicated locations within device's station table */ 845/* Special, dedicated locations within device's station table */
855#define IWL_AP_ID 0 846#define IL_AP_ID 0
856#define IWL_STA_ID 2 847#define IL_STA_ID 2
857#define IWL3945_BROADCAST_ID 24 848#define IL3945_BROADCAST_ID 24
858#define IWL3945_STATION_COUNT 25 849#define IL3945_STATION_COUNT 25
859#define IWL4965_BROADCAST_ID 31 850#define IL4965_BROADCAST_ID 31
860#define IWL4965_STATION_COUNT 32 851#define IL4965_STATION_COUNT 32
861 852
862#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/ 853#define IL_STATION_COUNT 32 /* MAX(3945,4965) */
863#define IWL_INVALID_STATION 255 854#define IL_INVALID_STATION 255
864 855
865#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2) 856#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
866#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8) 857#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
@@ -901,11 +892,11 @@ struct iwl_qosparam_cmd {
901#define STA_MODIFY_DELBA_TID_MSK 0x10 892#define STA_MODIFY_DELBA_TID_MSK 0x10
902#define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20 893#define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20
903 894
904/* Receiver address (actually, Rx station's index into station table), 895/* Receiver address (actually, Rx station's idx into station table),
905 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ 896 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
906#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) 897#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
907 898
908struct iwl4965_keyinfo { 899struct il4965_keyinfo {
909 __le16 key_flags; 900 __le16 key_flags;
910 u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */ 901 u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */
911 u8 reserved1; 902 u8 reserved1;
@@ -918,12 +909,12 @@ struct iwl4965_keyinfo {
918/** 909/**
919 * struct sta_id_modify 910 * struct sta_id_modify
920 * @addr[ETH_ALEN]: station's MAC address 911 * @addr[ETH_ALEN]: station's MAC address
921 * @sta_id: index of station in uCode's station table 912 * @sta_id: idx of station in uCode's station table
922 * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change 913 * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change
923 * 914 *
924 * Driver selects unused table index when adding new station, 915 * Driver selects unused table idx when adding new station,
925 * or the index to a pre-existing station entry when modifying that station. 916 * or the idx to a pre-existing station entry when modifying that station.
926 * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP). 917 * Some idxes have special purposes (IL_AP_ID, idx 0, is for AP).
927 * 918 *
928 * modify_mask flags select which parameters to modify vs. leave alone. 919 * modify_mask flags select which parameters to modify vs. leave alone.
929 */ 920 */
@@ -936,15 +927,15 @@ struct sta_id_modify {
936} __packed; 927} __packed;
937 928
938/* 929/*
939 * REPLY_ADD_STA = 0x18 (command) 930 * C_ADD_STA = 0x18 (command)
940 * 931 *
941 * The device contains an internal table of per-station information, 932 * The device contains an internal table of per-station information,
942 * with info on security keys, aggregation parameters, and Tx rates for 933 * with info on security keys, aggregation parameters, and Tx rates for
943 * initial Tx attempt and any retries (4965 devices uses 934 * initial Tx attempt and any retries (4965 devices uses
944 * REPLY_TX_LINK_QUALITY_CMD, 935 * C_TX_LINK_QUALITY_CMD,
945 * 3945 uses REPLY_RATE_SCALE to set up rate tables). 936 * 3945 uses C_RATE_SCALE to set up rate tables).
946 * 937 *
947 * REPLY_ADD_STA sets up the table entry for one station, either creating 938 * C_ADD_STA sets up the table entry for one station, either creating
948 * a new entry, or modifying a pre-existing one. 939 * a new entry, or modifying a pre-existing one.
949 * 940 *
950 * NOTE: RXON command (without "associated" bit set) wipes the station table 941 * NOTE: RXON command (without "associated" bit set) wipes the station table
@@ -954,20 +945,20 @@ struct sta_id_modify {
954 * their own txpower/rate setup data). 945 * their own txpower/rate setup data).
955 * 946 *
956 * When getting started on a new channel, driver must set up the 947 * When getting started on a new channel, driver must set up the
957 * IWL_BROADCAST_ID entry (last entry in the table). For a client 948 * IL_BROADCAST_ID entry (last entry in the table). For a client
958 * station in a BSS, once an AP is selected, driver sets up the AP STA 949 * station in a BSS, once an AP is selected, driver sets up the AP STA
959 * in the IWL_AP_ID entry (1st entry in the table). BROADCAST and AP 950 * in the IL_AP_ID entry (1st entry in the table). BROADCAST and AP
960 * are all that are needed for a BSS client station. If the device is 951 * are all that are needed for a BSS client station. If the device is
961 * used as AP, or in an IBSS network, driver must set up station table 952 * used as AP, or in an IBSS network, driver must set up station table
962 * entries for all STAs in network, starting with index IWL_STA_ID. 953 * entries for all STAs in network, starting with idx IL_STA_ID.
963 */ 954 */
964 955
965struct iwl3945_addsta_cmd { 956struct il3945_addsta_cmd {
966 u8 mode; /* 1: modify existing, 0: add new station */ 957 u8 mode; /* 1: modify existing, 0: add new station */
967 u8 reserved[3]; 958 u8 reserved[3];
968 struct sta_id_modify sta; 959 struct sta_id_modify sta;
969 struct iwl4965_keyinfo key; 960 struct il4965_keyinfo key;
970 __le32 station_flags; /* STA_FLG_* */ 961 __le32 station_flags; /* STA_FLG_* */
971 __le32 station_flags_msk; /* STA_FLG_* */ 962 __le32 station_flags_msk; /* STA_FLG_* */
972 963
973 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID) 964 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
@@ -990,12 +981,12 @@ struct iwl3945_addsta_cmd {
990 __le16 add_immediate_ba_ssn; 981 __le16 add_immediate_ba_ssn;
991} __packed; 982} __packed;
992 983
993struct iwl4965_addsta_cmd { 984struct il4965_addsta_cmd {
994 u8 mode; /* 1: modify existing, 0: add new station */ 985 u8 mode; /* 1: modify existing, 0: add new station */
995 u8 reserved[3]; 986 u8 reserved[3];
996 struct sta_id_modify sta; 987 struct sta_id_modify sta;
997 struct iwl4965_keyinfo key; 988 struct il4965_keyinfo key;
998 __le32 station_flags; /* STA_FLG_* */ 989 __le32 station_flags; /* STA_FLG_* */
999 __le32 station_flags_msk; /* STA_FLG_* */ 990 __le32 station_flags_msk; /* STA_FLG_* */
1000 991
1001 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID) 992 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
@@ -1003,7 +994,7 @@ struct iwl4965_addsta_cmd {
1003 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */ 994 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
1004 __le16 tid_disable_tx; 995 __le16 tid_disable_tx;
1005 996
1006 __le16 reserved1; 997 __le16 reserved1;
1007 998
1008 /* TID for which to add block-ack support. 999 /* TID for which to add block-ack support.
1009 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ 1000 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
@@ -1028,12 +1019,12 @@ struct iwl4965_addsta_cmd {
1028} __packed; 1019} __packed;
1029 1020
1030/* Wrapper struct for 3945 and 4965 addsta_cmd structures */ 1021/* Wrapper struct for 3945 and 4965 addsta_cmd structures */
1031struct iwl_legacy_addsta_cmd { 1022struct il_addsta_cmd {
1032 u8 mode; /* 1: modify existing, 0: add new station */ 1023 u8 mode; /* 1: modify existing, 0: add new station */
1033 u8 reserved[3]; 1024 u8 reserved[3];
1034 struct sta_id_modify sta; 1025 struct sta_id_modify sta;
1035 struct iwl4965_keyinfo key; 1026 struct il4965_keyinfo key;
1036 __le32 station_flags; /* STA_FLG_* */ 1027 __le32 station_flags; /* STA_FLG_* */
1037 __le32 station_flags_msk; /* STA_FLG_* */ 1028 __le32 station_flags_msk; /* STA_FLG_* */
1038 1029
1039 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID) 1030 /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
@@ -1041,7 +1032,7 @@ struct iwl_legacy_addsta_cmd {
1041 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */ 1032 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
1042 __le16 tid_disable_tx; 1033 __le16 tid_disable_tx;
1043 1034
1044 __le16 rate_n_flags; /* 3945 only */ 1035 __le16 rate_n_flags; /* 3945 only */
1045 1036
1046 /* TID for which to add block-ack support. 1037 /* TID for which to add block-ack support.
1047 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ 1038 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
@@ -1065,51 +1056,50 @@ struct iwl_legacy_addsta_cmd {
1065 __le16 reserved2; 1056 __le16 reserved2;
1066} __packed; 1057} __packed;
1067 1058
1068
1069#define ADD_STA_SUCCESS_MSK 0x1 1059#define ADD_STA_SUCCESS_MSK 0x1
1070#define ADD_STA_NO_ROOM_IN_TABLE 0x2 1060#define ADD_STA_NO_ROOM_IN_TBL 0x2
1071#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4 1061#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4
1072#define ADD_STA_MODIFY_NON_EXIST_STA 0x8 1062#define ADD_STA_MODIFY_NON_EXIST_STA 0x8
1073/* 1063/*
1074 * REPLY_ADD_STA = 0x18 (response) 1064 * C_ADD_STA = 0x18 (response)
1075 */ 1065 */
1076struct iwl_add_sta_resp { 1066struct il_add_sta_resp {
1077 u8 status; /* ADD_STA_* */ 1067 u8 status; /* ADD_STA_* */
1078} __packed; 1068} __packed;
1079 1069
1080#define REM_STA_SUCCESS_MSK 0x1 1070#define REM_STA_SUCCESS_MSK 0x1
1081/* 1071/*
1082 * REPLY_REM_STA = 0x19 (response) 1072 * C_REM_STA = 0x19 (response)
1083 */ 1073 */
1084struct iwl_rem_sta_resp { 1074struct il_rem_sta_resp {
1085 u8 status; 1075 u8 status;
1086} __packed; 1076} __packed;
1087 1077
1088/* 1078/*
1089 * REPLY_REM_STA = 0x19 (command) 1079 * C_REM_STA = 0x19 (command)
1090 */ 1080 */
1091struct iwl_rem_sta_cmd { 1081struct il_rem_sta_cmd {
1092 u8 num_sta; /* number of removed stations */ 1082 u8 num_sta; /* number of removed stations */
1093 u8 reserved[3]; 1083 u8 reserved[3];
1094 u8 addr[ETH_ALEN]; /* MAC addr of the first station */ 1084 u8 addr[ETH_ALEN]; /* MAC addr of the first station */
1095 u8 reserved2[2]; 1085 u8 reserved2[2];
1096} __packed; 1086} __packed;
1097 1087
1098#define IWL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0)) 1088#define IL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0))
1099#define IWL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1)) 1089#define IL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1))
1100#define IWL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2)) 1090#define IL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2))
1101#define IWL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3)) 1091#define IL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3))
1102#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00) 1092#define IL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00)
1103 1093
1104#define IWL_DROP_SINGLE 0 1094#define IL_DROP_SINGLE 0
1105#define IWL_DROP_SELECTED 1 1095#define IL_DROP_SELECTED 1
1106#define IWL_DROP_ALL 2 1096#define IL_DROP_ALL 2
1107 1097
1108/* 1098/*
1109 * REPLY_WEP_KEY = 0x20 1099 * REPLY_WEP_KEY = 0x20
1110 */ 1100 */
1111struct iwl_wep_key { 1101struct il_wep_key {
1112 u8 key_index; 1102 u8 key_idx;
1113 u8 key_offset; 1103 u8 key_offset;
1114 u8 reserved1[2]; 1104 u8 reserved1[2];
1115 u8 key_size; 1105 u8 key_size;
@@ -1117,12 +1107,12 @@ struct iwl_wep_key {
1117 u8 key[16]; 1107 u8 key[16];
1118} __packed; 1108} __packed;
1119 1109
1120struct iwl_wep_cmd { 1110struct il_wep_cmd {
1121 u8 num_keys; 1111 u8 num_keys;
1122 u8 global_key_type; 1112 u8 global_key_type;
1123 u8 flags; 1113 u8 flags;
1124 u8 reserved; 1114 u8 reserved;
1125 struct iwl_wep_key key[0]; 1115 struct il_wep_key key[0];
1126} __packed; 1116} __packed;
1127 1117
1128#define WEP_KEY_WEP_TYPE 1 1118#define WEP_KEY_WEP_TYPE 1
@@ -1168,8 +1158,7 @@ struct iwl_wep_cmd {
1168#define RX_MPDU_RES_STATUS_TTAK_OK (1 << 7) 1158#define RX_MPDU_RES_STATUS_TTAK_OK (1 << 7)
1169#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800) 1159#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800)
1170 1160
1171 1161struct il3945_rx_frame_stats {
1172struct iwl3945_rx_frame_stats {
1173 u8 phy_count; 1162 u8 phy_count;
1174 u8 id; 1163 u8 id;
1175 u8 rssi; 1164 u8 rssi;
@@ -1179,7 +1168,7 @@ struct iwl3945_rx_frame_stats {
1179 u8 payload[0]; 1168 u8 payload[0];
1180} __packed; 1169} __packed;
1181 1170
1182struct iwl3945_rx_frame_hdr { 1171struct il3945_rx_frame_hdr {
1183 __le16 channel; 1172 __le16 channel;
1184 __le16 phy_flags; 1173 __le16 phy_flags;
1185 u8 reserved1; 1174 u8 reserved1;
@@ -1188,73 +1177,71 @@ struct iwl3945_rx_frame_hdr {
1188 u8 payload[0]; 1177 u8 payload[0];
1189} __packed; 1178} __packed;
1190 1179
1191struct iwl3945_rx_frame_end { 1180struct il3945_rx_frame_end {
1192 __le32 status; 1181 __le32 status;
1193 __le64 timestamp; 1182 __le64 timestamp;
1194 __le32 beacon_timestamp; 1183 __le32 beacon_timestamp;
1195} __packed; 1184} __packed;
1196 1185
1197/* 1186/*
1198 * REPLY_3945_RX = 0x1b (response only, not a command) 1187 * N_3945_RX = 0x1b (response only, not a command)
1199 * 1188 *
1200 * NOTE: DO NOT dereference from casts to this structure 1189 * NOTE: DO NOT dereference from casts to this structure
1201 * It is provided only for calculating minimum data set size. 1190 * It is provided only for calculating minimum data set size.
1202 * The actual offsets of the hdr and end are dynamic based on 1191 * The actual offsets of the hdr and end are dynamic based on
1203 * stats.phy_count 1192 * stats.phy_count
1204 */ 1193 */
1205struct iwl3945_rx_frame { 1194struct il3945_rx_frame {
1206 struct iwl3945_rx_frame_stats stats; 1195 struct il3945_rx_frame_stats stats;
1207 struct iwl3945_rx_frame_hdr hdr; 1196 struct il3945_rx_frame_hdr hdr;
1208 struct iwl3945_rx_frame_end end; 1197 struct il3945_rx_frame_end end;
1209} __packed; 1198} __packed;
1210 1199
1211#define IWL39_RX_FRAME_SIZE (4 + sizeof(struct iwl3945_rx_frame)) 1200#define IL39_RX_FRAME_SIZE (4 + sizeof(struct il3945_rx_frame))
1212 1201
1213/* Fixed (non-configurable) rx data from phy */ 1202/* Fixed (non-configurable) rx data from phy */
1214 1203
1215#define IWL49_RX_RES_PHY_CNT 14 1204#define IL49_RX_RES_PHY_CNT 14
1216#define IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4) 1205#define IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4)
1217#define IWL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70) 1206#define IL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70)
1218#define IWL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */ 1207#define IL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */
1219#define IWL49_AGC_DB_POS (7) 1208#define IL49_AGC_DB_POS (7)
1220struct iwl4965_rx_non_cfg_phy { 1209struct il4965_rx_non_cfg_phy {
1221 __le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */ 1210 __le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */
1222 __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */ 1211 __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */
1223 u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */ 1212 u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */
1224 u8 pad[0]; 1213 u8 pad[0];
1225} __packed; 1214} __packed;
1226 1215
1227
1228/* 1216/*
1229 * REPLY_RX = 0xc3 (response only, not a command) 1217 * N_RX = 0xc3 (response only, not a command)
1230 * Used only for legacy (non 11n) frames. 1218 * Used only for legacy (non 11n) frames.
1231 */ 1219 */
1232struct iwl_rx_phy_res { 1220struct il_rx_phy_res {
1233 u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */ 1221 u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */
1234 u8 cfg_phy_cnt; /* configurable DSP phy data byte count */ 1222 u8 cfg_phy_cnt; /* configurable DSP phy data byte count */
1235 u8 stat_id; /* configurable DSP phy data set ID */ 1223 u8 stat_id; /* configurable DSP phy data set ID */
1236 u8 reserved1; 1224 u8 reserved1;
1237 __le64 timestamp; /* TSF at on air rise */ 1225 __le64 timestamp; /* TSF at on air rise */
1238 __le32 beacon_time_stamp; /* beacon at on-air rise */ 1226 __le32 beacon_time_stamp; /* beacon at on-air rise */
1239 __le16 phy_flags; /* general phy flags: band, modulation, ... */ 1227 __le16 phy_flags; /* general phy flags: band, modulation, ... */
1240 __le16 channel; /* channel number */ 1228 __le16 channel; /* channel number */
1241 u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */ 1229 u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
1242 __le32 rate_n_flags; /* RATE_MCS_* */ 1230 __le32 rate_n_flags; /* RATE_MCS_* */
1243 __le16 byte_count; /* frame's byte-count */ 1231 __le16 byte_count; /* frame's byte-count */
1244 __le16 frame_time; /* frame's time on the air */ 1232 __le16 frame_time; /* frame's time on the air */
1245} __packed; 1233} __packed;
1246 1234
1247struct iwl_rx_mpdu_res_start { 1235struct il_rx_mpdu_res_start {
1248 __le16 byte_count; 1236 __le16 byte_count;
1249 __le16 reserved; 1237 __le16 reserved;
1250} __packed; 1238} __packed;
1251 1239
1252
1253/****************************************************************************** 1240/******************************************************************************
1254 * (5) 1241 * (5)
1255 * Tx Commands & Responses: 1242 * Tx Commands & Responses:
1256 * 1243 *
1257 * Driver must place each REPLY_TX command into one of the prioritized Tx 1244 * Driver must place each C_TX command into one of the prioritized Tx
1258 * queues in host DRAM, shared between driver and device (see comments for 1245 * queues in host DRAM, shared between driver and device (see comments for
1259 * SCD registers and Tx/Rx Queues). When the device's Tx scheduler and uCode 1246 * SCD registers and Tx/Rx Queues). When the device's Tx scheduler and uCode
1260 * are preparing to transmit, the device pulls the Tx command over the PCI 1247 * are preparing to transmit, the device pulls the Tx command over the PCI
@@ -1264,18 +1251,18 @@ struct iwl_rx_mpdu_res_start {
1264 * uCode handles all timing and protocol related to control frames 1251 * uCode handles all timing and protocol related to control frames
1265 * (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler 1252 * (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler
1266 * handle reception of block-acks; uCode updates the host driver via 1253 * handle reception of block-acks; uCode updates the host driver via
1267 * REPLY_COMPRESSED_BA. 1254 * N_COMPRESSED_BA.
1268 * 1255 *
1269 * uCode handles retrying Tx when an ACK is expected but not received. 1256 * uCode handles retrying Tx when an ACK is expected but not received.
1270 * This includes trying lower data rates than the one requested in the Tx 1257 * This includes trying lower data rates than the one requested in the Tx
1271 * command, as set up by the REPLY_RATE_SCALE (for 3945) or 1258 * command, as set up by the C_RATE_SCALE (for 3945) or
1272 * REPLY_TX_LINK_QUALITY_CMD (4965). 1259 * C_TX_LINK_QUALITY_CMD (4965).
1273 * 1260 *
1274 * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD. 1261 * Driver sets up transmit power for various rates via C_TX_PWR_TBL.
1275 * This command must be executed after every RXON command, before Tx can occur. 1262 * This command must be executed after every RXON command, before Tx can occur.
1276 *****************************************************************************/ 1263 *****************************************************************************/
1277 1264
1278/* REPLY_TX Tx flags field */ 1265/* C_TX Tx flags field */
1279 1266
1280/* 1267/*
1281 * 1: Use Request-To-Send protocol before this frame. 1268 * 1: Use Request-To-Send protocol before this frame.
@@ -1296,8 +1283,8 @@ struct iwl_rx_mpdu_res_start {
1296#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3) 1283#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3)
1297 1284
1298/* For 4965 devices: 1285/* For 4965 devices:
1299 * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD). 1286 * 1: Use rate scale table (see C_TX_LINK_QUALITY_CMD).
1300 * Tx command's initial_rate_index indicates first rate to try; 1287 * Tx command's initial_rate_idx indicates first rate to try;
1301 * uCode walks through table for additional Tx attempts. 1288 * uCode walks through table for additional Tx attempts.
1302 * 0: Use Tx rate/MCS from Tx command's rate_n_flags field. 1289 * 0: Use Tx rate/MCS from Tx command's rate_n_flags field.
1303 * This rate will be used for all Tx attempts; it will not be scaled. */ 1290 * This rate will be used for all Tx attempts; it will not be scaled. */
@@ -1322,7 +1309,7 @@ struct iwl_rx_mpdu_res_start {
1322/* 1: uCode overrides sequence control field in MAC header. 1309/* 1: uCode overrides sequence control field in MAC header.
1323 * 0: Driver provides sequence control field in MAC header. 1310 * 0: Driver provides sequence control field in MAC header.
1324 * Set this for management frames, non-QOS data frames, non-unicast frames, 1311 * Set this for management frames, non-QOS data frames, non-unicast frames,
1325 * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */ 1312 * and also in Tx command embedded in C_SCAN for active scans. */
1326#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13) 1313#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13)
1327 1314
1328/* 1: This frame is non-last MPDU; more fragments are coming. 1315/* 1: This frame is non-last MPDU; more fragments are coming.
@@ -1349,7 +1336,6 @@ struct iwl_rx_mpdu_res_start {
1349/* HCCA-AP - disable duration overwriting. */ 1336/* HCCA-AP - disable duration overwriting. */
1350#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25) 1337#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25)
1351 1338
1352
1353/* 1339/*
1354 * TX command security control 1340 * TX command security control
1355 */ 1341 */
@@ -1369,10 +1355,10 @@ struct iwl_rx_mpdu_res_start {
1369#define TKIP_ICV_LEN 4 1355#define TKIP_ICV_LEN 4
1370 1356
1371/* 1357/*
1372 * REPLY_TX = 0x1c (command) 1358 * C_TX = 0x1c (command)
1373 */ 1359 */
1374 1360
1375struct iwl3945_tx_cmd { 1361struct il3945_tx_cmd {
1376 /* 1362 /*
1377 * MPDU byte count: 1363 * MPDU byte count:
1378 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size, 1364 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
@@ -1434,9 +1420,9 @@ struct iwl3945_tx_cmd {
1434} __packed; 1420} __packed;
1435 1421
1436/* 1422/*
1437 * REPLY_TX = 0x1c (response) 1423 * C_TX = 0x1c (response)
1438 */ 1424 */
1439struct iwl3945_tx_resp { 1425struct il3945_tx_resp {
1440 u8 failure_rts; 1426 u8 failure_rts;
1441 u8 failure_frame; 1427 u8 failure_frame;
1442 u8 bt_kill_count; 1428 u8 bt_kill_count;
@@ -1445,19 +1431,18 @@ struct iwl3945_tx_resp {
1445 __le32 status; /* TX status */ 1431 __le32 status; /* TX status */
1446} __packed; 1432} __packed;
1447 1433
1448
1449/* 1434/*
1450 * 4965 uCode updates these Tx attempt count values in host DRAM. 1435 * 4965 uCode updates these Tx attempt count values in host DRAM.
1451 * Used for managing Tx retries when expecting block-acks. 1436 * Used for managing Tx retries when expecting block-acks.
1452 * Driver should set these fields to 0. 1437 * Driver should set these fields to 0.
1453 */ 1438 */
1454struct iwl_dram_scratch { 1439struct il_dram_scratch {
1455 u8 try_cnt; /* Tx attempts */ 1440 u8 try_cnt; /* Tx attempts */
1456 u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */ 1441 u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */
1457 __le16 reserved; 1442 __le16 reserved;
1458} __packed; 1443} __packed;
1459 1444
1460struct iwl_tx_cmd { 1445struct il_tx_cmd {
1461 /* 1446 /*
1462 * MPDU byte count: 1447 * MPDU byte count:
1463 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size, 1448 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
@@ -1481,7 +1466,7 @@ struct iwl_tx_cmd {
1481 1466
1482 /* uCode may modify this field of the Tx command (in host DRAM!). 1467 /* uCode may modify this field of the Tx command (in host DRAM!).
1483 * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */ 1468 * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
1484 struct iwl_dram_scratch scratch; 1469 struct il_dram_scratch scratch;
1485 1470
1486 /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */ 1471 /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
1487 __le32 rate_n_flags; /* RATE_MCS_* */ 1472 __le32 rate_n_flags; /* RATE_MCS_* */
@@ -1493,13 +1478,13 @@ struct iwl_tx_cmd {
1493 u8 sec_ctl; /* TX_CMD_SEC_* */ 1478 u8 sec_ctl; /* TX_CMD_SEC_* */
1494 1479
1495 /* 1480 /*
1496 * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial 1481 * Index into rate table (see C_TX_LINK_QUALITY_CMD) for initial
1497 * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for 1482 * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for
1498 * data frames, this field may be used to selectively reduce initial 1483 * data frames, this field may be used to selectively reduce initial
1499 * rate (via non-0 value) for special frames (e.g. management), while 1484 * rate (via non-0 value) for special frames (e.g. management), while
1500 * still supporting rate scaling for all frames. 1485 * still supporting rate scaling for all frames.
1501 */ 1486 */
1502 u8 initial_rate_index; 1487 u8 initial_rate_idx;
1503 u8 reserved; 1488 u8 reserved;
1504 u8 key[16]; 1489 u8 key[16];
1505 __le16 next_frame_flags; 1490 __le16 next_frame_flags;
@@ -1628,12 +1613,12 @@ enum {
1628}; 1613};
1629 1614
1630enum { 1615enum {
1631 TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */ 1616 TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */
1632 TX_STATUS_DELAY_MSK = 0x00000040, 1617 TX_STATUS_DELAY_MSK = 0x00000040,
1633 TX_STATUS_ABORT_MSK = 0x00000080, 1618 TX_STATUS_ABORT_MSK = 0x00000080,
1634 TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */ 1619 TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */
1635 TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */ 1620 TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */
1636 TX_RESERVED = 0x00780000, /* bits 19:22 */ 1621 TX_RESERVED = 0x00780000, /* bits 19:22 */
1637 TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */ 1622 TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */
1638 TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */ 1623 TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */
1639}; 1624};
@@ -1671,7 +1656,7 @@ enum {
1671#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000 1656#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000
1672 1657
1673/* 1658/*
1674 * REPLY_TX = 0x1c (response) 1659 * C_TX = 0x1c (response)
1675 * 1660 *
1676 * This response may be in one of two slightly different formats, indicated 1661 * This response may be in one of two slightly different formats, indicated
1677 * by the frame_count field: 1662 * by the frame_count field:
@@ -1697,7 +1682,7 @@ struct agg_tx_status {
1697 __le16 sequence; 1682 __le16 sequence;
1698} __packed; 1683} __packed;
1699 1684
1700struct iwl4965_tx_resp { 1685struct il4965_tx_resp {
1701 u8 frame_count; /* 1 no aggregation, >1 aggregation */ 1686 u8 frame_count; /* 1 no aggregation, >1 aggregation */
1702 u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */ 1687 u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */
1703 u8 failure_rts; /* # failures due to unsuccessful RTS */ 1688 u8 failure_rts; /* # failures due to unsuccessful RTS */
@@ -1730,16 +1715,16 @@ struct iwl4965_tx_resp {
1730 */ 1715 */
1731 union { 1716 union {
1732 __le32 status; 1717 __le32 status;
1733 struct agg_tx_status agg_status[0]; /* for each agg frame */ 1718 struct agg_tx_status agg_status[0]; /* for each agg frame */
1734 } u; 1719 } u;
1735} __packed; 1720} __packed;
1736 1721
1737/* 1722/*
1738 * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command) 1723 * N_COMPRESSED_BA = 0xc5 (response only, not a command)
1739 * 1724 *
1740 * Reports Block-Acknowledge from recipient station 1725 * Reports Block-Acknowledge from recipient station
1741 */ 1726 */
1742struct iwl_compressed_ba_resp { 1727struct il_compressed_ba_resp {
1743 __le32 sta_addr_lo32; 1728 __le32 sta_addr_lo32;
1744 __le16 sta_addr_hi16; 1729 __le16 sta_addr_hi16;
1745 __le16 reserved; 1730 __le16 reserved;
@@ -1754,30 +1739,29 @@ struct iwl_compressed_ba_resp {
1754} __packed; 1739} __packed;
1755 1740
1756/* 1741/*
1757 * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response) 1742 * C_TX_PWR_TBL = 0x97 (command, has simple generic response)
1758 * 1743 *
1759 * See details under "TXPOWER" in iwl-4965-hw.h. 1744 * See details under "TXPOWER" in 4965.h.
1760 */ 1745 */
1761 1746
1762struct iwl3945_txpowertable_cmd { 1747struct il3945_txpowertable_cmd {
1763 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */ 1748 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
1764 u8 reserved; 1749 u8 reserved;
1765 __le16 channel; 1750 __le16 channel;
1766 struct iwl3945_power_per_rate power[IWL_MAX_RATES]; 1751 struct il3945_power_per_rate power[IL_MAX_RATES];
1767} __packed; 1752} __packed;
1768 1753
1769struct iwl4965_txpowertable_cmd { 1754struct il4965_txpowertable_cmd {
1770 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */ 1755 u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
1771 u8 reserved; 1756 u8 reserved;
1772 __le16 channel; 1757 __le16 channel;
1773 struct iwl4965_tx_power_db tx_power; 1758 struct il4965_tx_power_db tx_power;
1774} __packed; 1759} __packed;
1775 1760
1776
1777/** 1761/**
1778 * struct iwl3945_rate_scaling_cmd - Rate Scaling Command & Response 1762 * struct il3945_rate_scaling_cmd - Rate Scaling Command & Response
1779 * 1763 *
1780 * REPLY_RATE_SCALE = 0x47 (command, has simple generic response) 1764 * C_RATE_SCALE = 0x47 (command, has simple generic response)
1781 * 1765 *
1782 * NOTE: The table of rates passed to the uCode via the 1766 * NOTE: The table of rates passed to the uCode via the
1783 * RATE_SCALE command sets up the corresponding order of 1767 * RATE_SCALE command sets up the corresponding order of
@@ -1786,22 +1770,21 @@ struct iwl4965_txpowertable_cmd {
1786 * 1770 *
1787 * For example, if you set 9MB (PLCP 0x0f) as the first 1771 * For example, if you set 9MB (PLCP 0x0f) as the first
1788 * rate in the rate table, the bit mask for that rate 1772 * rate in the rate table, the bit mask for that rate
1789 * when passed through ofdm_basic_rates on the REPLY_RXON 1773 * when passed through ofdm_basic_rates on the C_RXON
1790 * command would be bit 0 (1 << 0) 1774 * command would be bit 0 (1 << 0)
1791 */ 1775 */
1792struct iwl3945_rate_scaling_info { 1776struct il3945_rate_scaling_info {
1793 __le16 rate_n_flags; 1777 __le16 rate_n_flags;
1794 u8 try_cnt; 1778 u8 try_cnt;
1795 u8 next_rate_index; 1779 u8 next_rate_idx;
1796} __packed; 1780} __packed;
1797 1781
1798struct iwl3945_rate_scaling_cmd { 1782struct il3945_rate_scaling_cmd {
1799 u8 table_id; 1783 u8 table_id;
1800 u8 reserved[3]; 1784 u8 reserved[3];
1801 struct iwl3945_rate_scaling_info table[IWL_MAX_RATES]; 1785 struct il3945_rate_scaling_info table[IL_MAX_RATES];
1802} __packed; 1786} __packed;
1803 1787
1804
1805/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */ 1788/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */
1806#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1 << 0) 1789#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1 << 0)
1807 1790
@@ -1816,28 +1799,27 @@ struct iwl3945_rate_scaling_cmd {
1816#define LINK_QUAL_ANT_B_MSK (1 << 1) 1799#define LINK_QUAL_ANT_B_MSK (1 << 1)
1817#define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK) 1800#define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK)
1818 1801
1819
1820/** 1802/**
1821 * struct iwl_link_qual_general_params 1803 * struct il_link_qual_general_params
1822 * 1804 *
1823 * Used in REPLY_TX_LINK_QUALITY_CMD 1805 * Used in C_TX_LINK_QUALITY_CMD
1824 */ 1806 */
1825struct iwl_link_qual_general_params { 1807struct il_link_qual_general_params {
1826 u8 flags; 1808 u8 flags;
1827 1809
1828 /* No entries at or above this (driver chosen) index contain MIMO */ 1810 /* No entries at or above this (driver chosen) idx contain MIMO */
1829 u8 mimo_delimiter; 1811 u8 mimo_delimiter;
1830 1812
1831 /* Best single antenna to use for single stream (legacy, SISO). */ 1813 /* Best single antenna to use for single stream (legacy, SISO). */
1832 u8 single_stream_ant_msk; /* LINK_QUAL_ANT_* */ 1814 u8 single_stream_ant_msk; /* LINK_QUAL_ANT_* */
1833 1815
1834 /* Best antennas to use for MIMO (unused for 4965, assumes both). */ 1816 /* Best antennas to use for MIMO (unused for 4965, assumes both). */
1835 u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */ 1817 u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */
1836 1818
1837 /* 1819 /*
1838 * If driver needs to use different initial rates for different 1820 * If driver needs to use different initial rates for different
1839 * EDCA QOS access categories (as implemented by tx fifos 0-3), 1821 * EDCA QOS access categories (as implemented by tx fifos 0-3),
1840 * this table will set that up, by indicating the indexes in the 1822 * this table will set that up, by indicating the idxes in the
1841 * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start. 1823 * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start.
1842 * Otherwise, driver should set all entries to 0. 1824 * Otherwise, driver should set all entries to 0.
1843 * 1825 *
@@ -1845,10 +1827,10 @@ struct iwl_link_qual_general_params {
1845 * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice 1827 * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice
1846 * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3. 1828 * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3.
1847 */ 1829 */
1848 u8 start_rate_index[LINK_QUAL_AC_NUM]; 1830 u8 start_rate_idx[LINK_QUAL_AC_NUM];
1849} __packed; 1831} __packed;
1850 1832
1851#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */ 1833#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
1852#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000) 1834#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
1853#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100) 1835#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100)
1854 1836
@@ -1861,11 +1843,11 @@ struct iwl_link_qual_general_params {
1861#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0) 1843#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
1862 1844
1863/** 1845/**
1864 * struct iwl_link_qual_agg_params 1846 * struct il_link_qual_agg_params
1865 * 1847 *
1866 * Used in REPLY_TX_LINK_QUALITY_CMD 1848 * Used in C_TX_LINK_QUALITY_CMD
1867 */ 1849 */
1868struct iwl_link_qual_agg_params { 1850struct il_link_qual_agg_params {
1869 1851
1870 /* 1852 /*
1871 *Maximum number of uSec in aggregation. 1853 *Maximum number of uSec in aggregation.
@@ -1892,9 +1874,9 @@ struct iwl_link_qual_agg_params {
1892} __packed; 1874} __packed;
1893 1875
1894/* 1876/*
1895 * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response) 1877 * C_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
1896 * 1878 *
1897 * For 4965 devices only; 3945 uses REPLY_RATE_SCALE. 1879 * For 4965 devices only; 3945 uses C_RATE_SCALE.
1898 * 1880 *
1899 * Each station in the 4965 device's internal station table has its own table 1881 * Each station in the 4965 device's internal station table has its own table
1900 * of 16 1882 * of 16
@@ -1903,13 +1885,13 @@ struct iwl_link_qual_agg_params {
1903 * one station. 1885 * one station.
1904 * 1886 *
1905 * NOTE: Station must already be in 4965 device's station table. 1887 * NOTE: Station must already be in 4965 device's station table.
1906 * Use REPLY_ADD_STA. 1888 * Use C_ADD_STA.
1907 * 1889 *
1908 * The rate scaling procedures described below work well. Of course, other 1890 * The rate scaling procedures described below work well. Of course, other
1909 * procedures are possible, and may work better for particular environments. 1891 * procedures are possible, and may work better for particular environments.
1910 * 1892 *
1911 * 1893 *
1912 * FILLING THE RATE TABLE 1894 * FILLING THE RATE TBL
1913 * 1895 *
1914 * Given a particular initial rate and mode, as determined by the rate 1896 * Given a particular initial rate and mode, as determined by the rate
1915 * scaling algorithm described below, the Linux driver uses the following 1897 * scaling algorithm described below, the Linux driver uses the following
@@ -1948,13 +1930,13 @@ struct iwl_link_qual_agg_params {
1948 * speculative mode as the new current active mode. 1930 * speculative mode as the new current active mode.
1949 * 1931 *
1950 * Each history set contains, separately for each possible rate, data for a 1932 * Each history set contains, separately for each possible rate, data for a
1951 * sliding window of the 62 most recent tx attempts at that rate. The data 1933 * sliding win of the 62 most recent tx attempts at that rate. The data
1952 * includes a shifting bitmap of success(1)/failure(0), and sums of successful 1934 * includes a shifting bitmap of success(1)/failure(0), and sums of successful
1953 * and attempted frames, from which the driver can additionally calculate a 1935 * and attempted frames, from which the driver can additionally calculate a
1954 * success ratio (success / attempted) and number of failures 1936 * success ratio (success / attempted) and number of failures
1955 * (attempted - success), and control the size of the window (attempted). 1937 * (attempted - success), and control the size of the win (attempted).
1956 * The driver uses the bit map to remove successes from the success sum, as 1938 * The driver uses the bit map to remove successes from the success sum, as
1957 * the oldest tx attempts fall out of the window. 1939 * the oldest tx attempts fall out of the win.
1958 * 1940 *
1959 * When the 4965 device makes multiple tx attempts for a given frame, each 1941 * When the 4965 device makes multiple tx attempts for a given frame, each
1960 * attempt might be at a different rate, and have different modulation 1942 * attempt might be at a different rate, and have different modulation
@@ -1966,7 +1948,7 @@ struct iwl_link_qual_agg_params {
1966 * 1948 *
1967 * When using block-ack (aggregation), all frames are transmitted at the same 1949 * When using block-ack (aggregation), all frames are transmitted at the same
1968 * rate, since there is no per-attempt acknowledgment from the destination 1950 * rate, since there is no per-attempt acknowledgment from the destination
1969 * station. The Tx response struct iwl_tx_resp indicates the Tx rate in 1951 * station. The Tx response struct il_tx_resp indicates the Tx rate in
1970 * rate_n_flags field. After receiving a block-ack, the driver can update 1952 * rate_n_flags field. After receiving a block-ack, the driver can update
1971 * history for the entire block all at once. 1953 * history for the entire block all at once.
1972 * 1954 *
@@ -2016,8 +1998,8 @@ struct iwl_link_qual_agg_params {
2016 * good performance; higher rate is sure to have poorer success. 1998 * good performance; higher rate is sure to have poorer success.
2017 * 1999 *
2018 * 6) Re-evaluate the rate after each tx frame. If working with block- 2000 * 6) Re-evaluate the rate after each tx frame. If working with block-
2019 * acknowledge, history and statistics may be calculated for the entire 2001 * acknowledge, history and stats may be calculated for the entire
2020 * block (including prior history that fits within the history windows), 2002 * block (including prior history that fits within the history wins),
2021 * before re-evaluation. 2003 * before re-evaluation.
2022 * 2004 *
2023 * FINDING BEST STARTING MODULATION MODE: 2005 * FINDING BEST STARTING MODULATION MODE:
@@ -2079,22 +2061,22 @@ struct iwl_link_qual_agg_params {
2079 * legacy), and then repeat the search process. 2061 * legacy), and then repeat the search process.
2080 * 2062 *
2081 */ 2063 */
2082struct iwl_link_quality_cmd { 2064struct il_link_quality_cmd {
2083 2065
2084 /* Index of destination/recipient station in uCode's station table */ 2066 /* Index of destination/recipient station in uCode's station table */
2085 u8 sta_id; 2067 u8 sta_id;
2086 u8 reserved1; 2068 u8 reserved1;
2087 __le16 control; /* not used */ 2069 __le16 control; /* not used */
2088 struct iwl_link_qual_general_params general_params; 2070 struct il_link_qual_general_params general_params;
2089 struct iwl_link_qual_agg_params agg_params; 2071 struct il_link_qual_agg_params agg_params;
2090 2072
2091 /* 2073 /*
2092 * Rate info; when using rate-scaling, Tx command's initial_rate_index 2074 * Rate info; when using rate-scaling, Tx command's initial_rate_idx
2093 * specifies 1st Tx rate attempted, via index into this table. 2075 * specifies 1st Tx rate attempted, via idx into this table.
2094 * 4965 devices works its way through table when retrying Tx. 2076 * 4965 devices works its way through table when retrying Tx.
2095 */ 2077 */
2096 struct { 2078 struct {
2097 __le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */ 2079 __le32 rate_n_flags; /* RATE_MCS_*, RATE_* */
2098 } rs_table[LINK_QUAL_MAX_RETRY_NUM]; 2080 } rs_table[LINK_QUAL_MAX_RETRY_NUM];
2099 __le32 reserved2; 2081 __le32 reserved2;
2100} __packed; 2082} __packed;
@@ -2117,13 +2099,13 @@ struct iwl_link_quality_cmd {
2117#define BT_MAX_KILL_DEF (0x5) 2099#define BT_MAX_KILL_DEF (0x5)
2118 2100
2119/* 2101/*
2120 * REPLY_BT_CONFIG = 0x9b (command, has simple generic response) 2102 * C_BT_CONFIG = 0x9b (command, has simple generic response)
2121 * 2103 *
2122 * 3945 and 4965 devices support hardware handshake with Bluetooth device on 2104 * 3945 and 4965 devices support hardware handshake with Bluetooth device on
2123 * same platform. Bluetooth device alerts wireless device when it will Tx; 2105 * same platform. Bluetooth device alerts wireless device when it will Tx;
2124 * wireless device can delay or kill its own Tx to accommodate. 2106 * wireless device can delay or kill its own Tx to accommodate.
2125 */ 2107 */
2126struct iwl_bt_cmd { 2108struct il_bt_cmd {
2127 u8 flags; 2109 u8 flags;
2128 u8 lead_time; 2110 u8 lead_time;
2129 u8 max_kill; 2111 u8 max_kill;
@@ -2132,7 +2114,6 @@ struct iwl_bt_cmd {
2132 __le32 kill_cts_mask; 2114 __le32 kill_cts_mask;
2133} __packed; 2115} __packed;
2134 2116
2135
2136/****************************************************************************** 2117/******************************************************************************
2137 * (6) 2118 * (6)
2138 * Spectrum Management (802.11h) Commands, Responses, Notifications: 2119 * Spectrum Management (802.11h) Commands, Responses, Notifications:
@@ -2150,18 +2131,18 @@ struct iwl_bt_cmd {
2150 RXON_FILTER_ASSOC_MSK | \ 2131 RXON_FILTER_ASSOC_MSK | \
2151 RXON_FILTER_BCON_AWARE_MSK) 2132 RXON_FILTER_BCON_AWARE_MSK)
2152 2133
2153struct iwl_measure_channel { 2134struct il_measure_channel {
2154 __le32 duration; /* measurement duration in extended beacon 2135 __le32 duration; /* measurement duration in extended beacon
2155 * format */ 2136 * format */
2156 u8 channel; /* channel to measure */ 2137 u8 channel; /* channel to measure */
2157 u8 type; /* see enum iwl_measure_type */ 2138 u8 type; /* see enum il_measure_type */
2158 __le16 reserved; 2139 __le16 reserved;
2159} __packed; 2140} __packed;
2160 2141
2161/* 2142/*
2162 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command) 2143 * C_SPECTRUM_MEASUREMENT = 0x74 (command)
2163 */ 2144 */
2164struct iwl_spectrum_cmd { 2145struct il_spectrum_cmd {
2165 __le16 len; /* number of bytes starting from token */ 2146 __le16 len; /* number of bytes starting from token */
2166 u8 token; /* token id */ 2147 u8 token; /* token id */
2167 u8 id; /* measurement id -- 0 or 1 */ 2148 u8 id; /* measurement id -- 0 or 1 */
@@ -2174,13 +2155,13 @@ struct iwl_spectrum_cmd {
2174 __le32 filter_flags; /* rxon filter flags */ 2155 __le32 filter_flags; /* rxon filter flags */
2175 __le16 channel_count; /* minimum 1, maximum 10 */ 2156 __le16 channel_count; /* minimum 1, maximum 10 */
2176 __le16 reserved3; 2157 __le16 reserved3;
2177 struct iwl_measure_channel channels[10]; 2158 struct il_measure_channel channels[10];
2178} __packed; 2159} __packed;
2179 2160
2180/* 2161/*
2181 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response) 2162 * C_SPECTRUM_MEASUREMENT = 0x74 (response)
2182 */ 2163 */
2183struct iwl_spectrum_resp { 2164struct il_spectrum_resp {
2184 u8 token; 2165 u8 token;
2185 u8 id; /* id of the prior command replaced, or 0xff */ 2166 u8 id; /* id of the prior command replaced, or 0xff */
2186 __le16 status; /* 0 - command will be handled 2167 __le16 status; /* 0 - command will be handled
@@ -2188,57 +2169,57 @@ struct iwl_spectrum_resp {
2188 * measurement) */ 2169 * measurement) */
2189} __packed; 2170} __packed;
2190 2171
2191enum iwl_measurement_state { 2172enum il_measurement_state {
2192 IWL_MEASUREMENT_START = 0, 2173 IL_MEASUREMENT_START = 0,
2193 IWL_MEASUREMENT_STOP = 1, 2174 IL_MEASUREMENT_STOP = 1,
2194}; 2175};
2195 2176
2196enum iwl_measurement_status { 2177enum il_measurement_status {
2197 IWL_MEASUREMENT_OK = 0, 2178 IL_MEASUREMENT_OK = 0,
2198 IWL_MEASUREMENT_CONCURRENT = 1, 2179 IL_MEASUREMENT_CONCURRENT = 1,
2199 IWL_MEASUREMENT_CSA_CONFLICT = 2, 2180 IL_MEASUREMENT_CSA_CONFLICT = 2,
2200 IWL_MEASUREMENT_TGH_CONFLICT = 3, 2181 IL_MEASUREMENT_TGH_CONFLICT = 3,
2201 /* 4-5 reserved */ 2182 /* 4-5 reserved */
2202 IWL_MEASUREMENT_STOPPED = 6, 2183 IL_MEASUREMENT_STOPPED = 6,
2203 IWL_MEASUREMENT_TIMEOUT = 7, 2184 IL_MEASUREMENT_TIMEOUT = 7,
2204 IWL_MEASUREMENT_PERIODIC_FAILED = 8, 2185 IL_MEASUREMENT_PERIODIC_FAILED = 8,
2205}; 2186};
2206 2187
2207#define NUM_ELEMENTS_IN_HISTOGRAM 8 2188#define NUM_ELEMENTS_IN_HISTOGRAM 8
2208 2189
2209struct iwl_measurement_histogram { 2190struct il_measurement_histogram {
2210 __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */ 2191 __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */
2211 __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */ 2192 __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */
2212} __packed; 2193} __packed;
2213 2194
2214/* clear channel availability counters */ 2195/* clear channel availability counters */
2215struct iwl_measurement_cca_counters { 2196struct il_measurement_cca_counters {
2216 __le32 ofdm; 2197 __le32 ofdm;
2217 __le32 cck; 2198 __le32 cck;
2218} __packed; 2199} __packed;
2219 2200
2220enum iwl_measure_type { 2201enum il_measure_type {
2221 IWL_MEASURE_BASIC = (1 << 0), 2202 IL_MEASURE_BASIC = (1 << 0),
2222 IWL_MEASURE_CHANNEL_LOAD = (1 << 1), 2203 IL_MEASURE_CHANNEL_LOAD = (1 << 1),
2223 IWL_MEASURE_HISTOGRAM_RPI = (1 << 2), 2204 IL_MEASURE_HISTOGRAM_RPI = (1 << 2),
2224 IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3), 2205 IL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
2225 IWL_MEASURE_FRAME = (1 << 4), 2206 IL_MEASURE_FRAME = (1 << 4),
2226 /* bits 5:6 are reserved */ 2207 /* bits 5:6 are reserved */
2227 IWL_MEASURE_IDLE = (1 << 7), 2208 IL_MEASURE_IDLE = (1 << 7),
2228}; 2209};
2229 2210
2230/* 2211/*
2231 * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command) 2212 * N_SPECTRUM_MEASUREMENT = 0x75 (notification only, not a command)
2232 */ 2213 */
2233struct iwl_spectrum_notification { 2214struct il_spectrum_notification {
2234 u8 id; /* measurement id -- 0 or 1 */ 2215 u8 id; /* measurement id -- 0 or 1 */
2235 u8 token; 2216 u8 token;
2236 u8 channel_index; /* index in measurement channel list */ 2217 u8 channel_idx; /* idx in measurement channel list */
2237 u8 state; /* 0 - start, 1 - stop */ 2218 u8 state; /* 0 - start, 1 - stop */
2238 __le32 start_time; /* lower 32-bits of TSF */ 2219 __le32 start_time; /* lower 32-bits of TSF */
2239 u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */ 2220 u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */
2240 u8 channel; 2221 u8 channel;
2241 u8 type; /* see enum iwl_measurement_type */ 2222 u8 type; /* see enum il_measurement_type */
2242 u8 reserved1; 2223 u8 reserved1;
2243 /* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only only 2224 /* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only only
2244 * valid if applicable for measurement type requested. */ 2225 * valid if applicable for measurement type requested. */
@@ -2248,9 +2229,9 @@ struct iwl_spectrum_notification {
2248 u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 - 2229 u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 -
2249 * unidentified */ 2230 * unidentified */
2250 u8 reserved2[3]; 2231 u8 reserved2[3];
2251 struct iwl_measurement_histogram histogram; 2232 struct il_measurement_histogram histogram;
2252 __le32 stop_time; /* lower 32-bits of TSF */ 2233 __le32 stop_time; /* lower 32-bits of TSF */
2253 __le32 status; /* see iwl_measurement_status */ 2234 __le32 status; /* see il_measurement_status */
2254} __packed; 2235} __packed;
2255 2236
2256/****************************************************************************** 2237/******************************************************************************
@@ -2260,10 +2241,10 @@ struct iwl_spectrum_notification {
2260 *****************************************************************************/ 2241 *****************************************************************************/
2261 2242
2262/** 2243/**
2263 * struct iwl_powertable_cmd - Power Table Command 2244 * struct il_powertable_cmd - Power Table Command
2264 * @flags: See below: 2245 * @flags: See below:
2265 * 2246 *
2266 * POWER_TABLE_CMD = 0x77 (command, has simple generic response) 2247 * C_POWER_TBL = 0x77 (command, has simple generic response)
2267 * 2248 *
2268 * PM allow: 2249 * PM allow:
2269 * bit 0 - '0' Driver not allow power management 2250 * bit 0 - '0' Driver not allow power management
@@ -2290,38 +2271,38 @@ struct iwl_spectrum_notification {
2290 * '10' force xtal sleep 2271 * '10' force xtal sleep
2291 * '11' Illegal set 2272 * '11' Illegal set
2292 * 2273 *
2293 * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then 2274 * NOTE: if sleep_interval[SLEEP_INTRVL_TBL_SIZE-1] > DTIM period then
2294 * ucode assume sleep over DTIM is allowed and we don't need to wake up 2275 * ucode assume sleep over DTIM is allowed and we don't need to wake up
2295 * for every DTIM. 2276 * for every DTIM.
2296 */ 2277 */
2297#define IWL_POWER_VEC_SIZE 5 2278#define IL_POWER_VEC_SIZE 5
2298 2279
2299#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0)) 2280#define IL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0))
2300#define IWL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3)) 2281#define IL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3))
2301 2282
2302struct iwl3945_powertable_cmd { 2283struct il3945_powertable_cmd {
2303 __le16 flags; 2284 __le16 flags;
2304 u8 reserved[2]; 2285 u8 reserved[2];
2305 __le32 rx_data_timeout; 2286 __le32 rx_data_timeout;
2306 __le32 tx_data_timeout; 2287 __le32 tx_data_timeout;
2307 __le32 sleep_interval[IWL_POWER_VEC_SIZE]; 2288 __le32 sleep_interval[IL_POWER_VEC_SIZE];
2308} __packed; 2289} __packed;
2309 2290
2310struct iwl_powertable_cmd { 2291struct il_powertable_cmd {
2311 __le16 flags; 2292 __le16 flags;
2312 u8 keep_alive_seconds; /* 3945 reserved */ 2293 u8 keep_alive_seconds; /* 3945 reserved */
2313 u8 debug_flags; /* 3945 reserved */ 2294 u8 debug_flags; /* 3945 reserved */
2314 __le32 rx_data_timeout; 2295 __le32 rx_data_timeout;
2315 __le32 tx_data_timeout; 2296 __le32 tx_data_timeout;
2316 __le32 sleep_interval[IWL_POWER_VEC_SIZE]; 2297 __le32 sleep_interval[IL_POWER_VEC_SIZE];
2317 __le32 keep_alive_beacons; 2298 __le32 keep_alive_beacons;
2318} __packed; 2299} __packed;
2319 2300
2320/* 2301/*
2321 * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command) 2302 * N_PM_SLEEP = 0x7A (notification only, not a command)
2322 * all devices identical. 2303 * all devices identical.
2323 */ 2304 */
2324struct iwl_sleep_notification { 2305struct il_sleep_notification {
2325 u8 pm_sleep_mode; 2306 u8 pm_sleep_mode;
2326 u8 pm_wakeup_src; 2307 u8 pm_wakeup_src;
2327 __le16 reserved; 2308 __le16 reserved;
@@ -2332,23 +2313,23 @@ struct iwl_sleep_notification {
2332 2313
2333/* Sleep states. all devices identical. */ 2314/* Sleep states. all devices identical. */
2334enum { 2315enum {
2335 IWL_PM_NO_SLEEP = 0, 2316 IL_PM_NO_SLEEP = 0,
2336 IWL_PM_SLP_MAC = 1, 2317 IL_PM_SLP_MAC = 1,
2337 IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2, 2318 IL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
2338 IWL_PM_SLP_FULL_MAC_CARD_STATE = 3, 2319 IL_PM_SLP_FULL_MAC_CARD_STATE = 3,
2339 IWL_PM_SLP_PHY = 4, 2320 IL_PM_SLP_PHY = 4,
2340 IWL_PM_SLP_REPENT = 5, 2321 IL_PM_SLP_REPENT = 5,
2341 IWL_PM_WAKEUP_BY_TIMER = 6, 2322 IL_PM_WAKEUP_BY_TIMER = 6,
2342 IWL_PM_WAKEUP_BY_DRIVER = 7, 2323 IL_PM_WAKEUP_BY_DRIVER = 7,
2343 IWL_PM_WAKEUP_BY_RFKILL = 8, 2324 IL_PM_WAKEUP_BY_RFKILL = 8,
2344 /* 3 reserved */ 2325 /* 3 reserved */
2345 IWL_PM_NUM_OF_MODES = 12, 2326 IL_PM_NUM_OF_MODES = 12,
2346}; 2327};
2347 2328
2348/* 2329/*
2349 * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command) 2330 * N_CARD_STATE = 0xa1 (notification only, not a command)
2350 */ 2331 */
2351struct iwl_card_state_notif { 2332struct il_card_state_notif {
2352 __le32 flags; 2333 __le32 flags;
2353} __packed; 2334} __packed;
2354 2335
@@ -2357,11 +2338,11 @@ struct iwl_card_state_notif {
2357#define CT_CARD_DISABLED 0x04 2338#define CT_CARD_DISABLED 0x04
2358#define RXON_CARD_DISABLED 0x10 2339#define RXON_CARD_DISABLED 0x10
2359 2340
2360struct iwl_ct_kill_config { 2341struct il_ct_kill_config {
2361 __le32 reserved; 2342 __le32 reserved;
2362 __le32 critical_temperature_M; 2343 __le32 critical_temperature_M;
2363 __le32 critical_temperature_R; 2344 __le32 critical_temperature_R;
2364} __packed; 2345} __packed;
2365 2346
2366/****************************************************************************** 2347/******************************************************************************
2367 * (8) 2348 * (8)
@@ -2373,7 +2354,7 @@ struct iwl_ct_kill_config {
2373#define SCAN_CHANNEL_TYPE_ACTIVE cpu_to_le32(1) 2354#define SCAN_CHANNEL_TYPE_ACTIVE cpu_to_le32(1)
2374 2355
2375/** 2356/**
2376 * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table 2357 * struct il_scan_channel - entry in C_SCAN channel table
2377 * 2358 *
2378 * One for each channel in the scan list. 2359 * One for each channel in the scan list.
2379 * Each channel can independently select: 2360 * Each channel can independently select:
@@ -2383,7 +2364,7 @@ struct iwl_ct_kill_config {
2383 * quiet_plcp_th, good_CRC_th) 2364 * quiet_plcp_th, good_CRC_th)
2384 * 2365 *
2385 * To avoid uCode errors, make sure the following are true (see comments 2366 * To avoid uCode errors, make sure the following are true (see comments
2386 * under struct iwl_scan_cmd about max_out_time and quiet_time): 2367 * under struct il_scan_cmd about max_out_time and quiet_time):
2387 * 1) If using passive_dwell (i.e. passive_dwell != 0): 2368 * 1) If using passive_dwell (i.e. passive_dwell != 0):
2388 * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0) 2369 * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
2389 * 2) quiet_time <= active_dwell 2370 * 2) quiet_time <= active_dwell
@@ -2391,7 +2372,7 @@ struct iwl_ct_kill_config {
2391 * passive_dwell < max_out_time 2372 * passive_dwell < max_out_time
2392 * active_dwell < max_out_time 2373 * active_dwell < max_out_time
2393 */ 2374 */
2394struct iwl3945_scan_channel { 2375struct il3945_scan_channel {
2395 /* 2376 /*
2396 * type is defined as: 2377 * type is defined as:
2397 * 0:0 1 = active, 0 = passive 2378 * 0:0 1 = active, 0 = passive
@@ -2400,16 +2381,16 @@ struct iwl3945_scan_channel {
2400 * 5:7 reserved 2381 * 5:7 reserved
2401 */ 2382 */
2402 u8 type; 2383 u8 type;
2403 u8 channel; /* band is selected by iwl3945_scan_cmd "flags" field */ 2384 u8 channel; /* band is selected by il3945_scan_cmd "flags" field */
2404 struct iwl3945_tx_power tpc; 2385 struct il3945_tx_power tpc;
2405 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */ 2386 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
2406 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */ 2387 __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
2407} __packed; 2388} __packed;
2408 2389
2409/* set number of direct probes u8 type */ 2390/* set number of direct probes u8 type */
2410#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1)))) 2391#define IL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
2411 2392
2412struct iwl_scan_channel { 2393struct il_scan_channel {
2413 /* 2394 /*
2414 * type is defined as: 2395 * type is defined as:
2415 * 0:0 1 = active, 0 = passive 2396 * 0:0 1 = active, 0 = passive
@@ -2418,7 +2399,7 @@ struct iwl_scan_channel {
2418 * 21:31 reserved 2399 * 21:31 reserved
2419 */ 2400 */
2420 __le32 type; 2401 __le32 type;
2421 __le16 channel; /* band is selected by iwl_scan_cmd "flags" field */ 2402 __le16 channel; /* band is selected by il_scan_cmd "flags" field */
2422 u8 tx_gain; /* gain for analog radio */ 2403 u8 tx_gain; /* gain for analog radio */
2423 u8 dsp_atten; /* gain for DSP */ 2404 u8 dsp_atten; /* gain for DSP */
2424 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */ 2405 __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
@@ -2426,17 +2407,17 @@ struct iwl_scan_channel {
2426} __packed; 2407} __packed;
2427 2408
2428/* set number of direct probes __le32 type */ 2409/* set number of direct probes __le32 type */
2429#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1)))) 2410#define IL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
2430 2411
2431/** 2412/**
2432 * struct iwl_ssid_ie - directed scan network information element 2413 * struct il_ssid_ie - directed scan network information element
2433 * 2414 *
2434 * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in 2415 * Up to 20 of these may appear in C_SCAN (Note: Only 4 are in
2435 * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel; 2416 * 3945 SCAN api), selected by "type" bit field in struct il_scan_channel;
2436 * each channel may select different ssids from among the 20 (4) entries. 2417 * each channel may select different ssids from among the 20 (4) entries.
2437 * SSID IEs get transmitted in reverse order of entry. 2418 * SSID IEs get transmitted in reverse order of entry.
2438 */ 2419 */
2439struct iwl_ssid_ie { 2420struct il_ssid_ie {
2440 u8 id; 2421 u8 id;
2441 u8 len; 2422 u8 len;
2442 u8 ssid[32]; 2423 u8 ssid[32];
@@ -2445,14 +2426,14 @@ struct iwl_ssid_ie {
2445#define PROBE_OPTION_MAX_3945 4 2426#define PROBE_OPTION_MAX_3945 4
2446#define PROBE_OPTION_MAX 20 2427#define PROBE_OPTION_MAX 20
2447#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF) 2428#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
2448#define IWL_GOOD_CRC_TH_DISABLED 0 2429#define IL_GOOD_CRC_TH_DISABLED 0
2449#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1) 2430#define IL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
2450#define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff) 2431#define IL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff)
2451#define IWL_MAX_SCAN_SIZE 1024 2432#define IL_MAX_SCAN_SIZE 1024
2452#define IWL_MAX_CMD_SIZE 4096 2433#define IL_MAX_CMD_SIZE 4096
2453 2434
2454/* 2435/*
2455 * REPLY_SCAN_CMD = 0x80 (command) 2436 * C_SCAN = 0x80 (command)
2456 * 2437 *
2457 * The hardware scan command is very powerful; the driver can set it up to 2438 * The hardware scan command is very powerful; the driver can set it up to
2458 * maintain (relatively) normal network traffic while doing a scan in the 2439 * maintain (relatively) normal network traffic while doing a scan in the
@@ -2501,10 +2482,10 @@ struct iwl_ssid_ie {
2501 * Driver must use separate scan commands for 2.4 vs. 5 GHz bands. 2482 * Driver must use separate scan commands for 2.4 vs. 5 GHz bands.
2502 * 2483 *
2503 * To avoid uCode errors, see timing restrictions described under 2484 * To avoid uCode errors, see timing restrictions described under
2504 * struct iwl_scan_channel. 2485 * struct il_scan_channel.
2505 */ 2486 */
2506 2487
2507struct iwl3945_scan_cmd { 2488struct il3945_scan_cmd {
2508 __le16 len; 2489 __le16 len;
2509 u8 reserved0; 2490 u8 reserved0;
2510 u8 channel_count; /* # channels in channel list */ 2491 u8 channel_count; /* # channels in channel list */
@@ -2525,10 +2506,10 @@ struct iwl3945_scan_cmd {
2525 2506
2526 /* For active scans (set to all-0s for passive scans). 2507 /* For active scans (set to all-0s for passive scans).
2527 * Does not include payload. Must specify Tx rate; no rate scaling. */ 2508 * Does not include payload. Must specify Tx rate; no rate scaling. */
2528 struct iwl3945_tx_cmd tx_cmd; 2509 struct il3945_tx_cmd tx_cmd;
2529 2510
2530 /* For directed active scans (set to all-0s otherwise) */ 2511 /* For directed active scans (set to all-0s otherwise) */
2531 struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX_3945]; 2512 struct il_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];
2532 2513
2533 /* 2514 /*
2534 * Probe request frame, followed by channel list. 2515 * Probe request frame, followed by channel list.
@@ -2538,17 +2519,17 @@ struct iwl3945_scan_cmd {
2538 * Number of channels in list is specified by channel_count. 2519 * Number of channels in list is specified by channel_count.
2539 * Each channel in list is of type: 2520 * Each channel in list is of type:
2540 * 2521 *
2541 * struct iwl3945_scan_channel channels[0]; 2522 * struct il3945_scan_channel channels[0];
2542 * 2523 *
2543 * NOTE: Only one band of channels can be scanned per pass. You 2524 * NOTE: Only one band of channels can be scanned per pass. You
2544 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait 2525 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
2545 * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION) 2526 * for one scan to complete (i.e. receive N_SCAN_COMPLETE)
2546 * before requesting another scan. 2527 * before requesting another scan.
2547 */ 2528 */
2548 u8 data[0]; 2529 u8 data[0];
2549} __packed; 2530} __packed;
2550 2531
2551struct iwl_scan_cmd { 2532struct il_scan_cmd {
2552 __le16 len; 2533 __le16 len;
2553 u8 reserved0; 2534 u8 reserved0;
2554 u8 channel_count; /* # channels in channel list */ 2535 u8 channel_count; /* # channels in channel list */
@@ -2569,10 +2550,10 @@ struct iwl_scan_cmd {
2569 2550
2570 /* For active scans (set to all-0s for passive scans). 2551 /* For active scans (set to all-0s for passive scans).
2571 * Does not include payload. Must specify Tx rate; no rate scaling. */ 2552 * Does not include payload. Must specify Tx rate; no rate scaling. */
2572 struct iwl_tx_cmd tx_cmd; 2553 struct il_tx_cmd tx_cmd;
2573 2554
2574 /* For directed active scans (set to all-0s otherwise) */ 2555 /* For directed active scans (set to all-0s otherwise) */
2575 struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; 2556 struct il_ssid_ie direct_scan[PROBE_OPTION_MAX];
2576 2557
2577 /* 2558 /*
2578 * Probe request frame, followed by channel list. 2559 * Probe request frame, followed by channel list.
@@ -2582,11 +2563,11 @@ struct iwl_scan_cmd {
2582 * Number of channels in list is specified by channel_count. 2563 * Number of channels in list is specified by channel_count.
2583 * Each channel in list is of type: 2564 * Each channel in list is of type:
2584 * 2565 *
2585 * struct iwl_scan_channel channels[0]; 2566 * struct il_scan_channel channels[0];
2586 * 2567 *
2587 * NOTE: Only one band of channels can be scanned per pass. You 2568 * NOTE: Only one band of channels can be scanned per pass. You
2588 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait 2569 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
2589 * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION) 2570 * for one scan to complete (i.e. receive N_SCAN_COMPLETE)
2590 * before requesting another scan. 2571 * before requesting another scan.
2591 */ 2572 */
2592 u8 data[0]; 2573 u8 data[0];
@@ -2598,16 +2579,16 @@ struct iwl_scan_cmd {
2598#define ABORT_STATUS 0x2 2579#define ABORT_STATUS 0x2
2599 2580
2600/* 2581/*
2601 * REPLY_SCAN_CMD = 0x80 (response) 2582 * C_SCAN = 0x80 (response)
2602 */ 2583 */
2603struct iwl_scanreq_notification { 2584struct il_scanreq_notification {
2604 __le32 status; /* 1: okay, 2: cannot fulfill request */ 2585 __le32 status; /* 1: okay, 2: cannot fulfill request */
2605} __packed; 2586} __packed;
2606 2587
2607/* 2588/*
2608 * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command) 2589 * N_SCAN_START = 0x82 (notification only, not a command)
2609 */ 2590 */
2610struct iwl_scanstart_notification { 2591struct il_scanstart_notification {
2611 __le32 tsf_low; 2592 __le32 tsf_low;
2612 __le32 tsf_high; 2593 __le32 tsf_high;
2613 __le32 beacon_timer; 2594 __le32 beacon_timer;
@@ -2620,30 +2601,30 @@ struct iwl_scanstart_notification {
2620#define SCAN_OWNER_STATUS 0x1 2601#define SCAN_OWNER_STATUS 0x1
2621#define MEASURE_OWNER_STATUS 0x2 2602#define MEASURE_OWNER_STATUS 0x2
2622 2603
2623#define IWL_PROBE_STATUS_OK 0 2604#define IL_PROBE_STATUS_OK 0
2624#define IWL_PROBE_STATUS_TX_FAILED BIT(0) 2605#define IL_PROBE_STATUS_TX_FAILED BIT(0)
2625/* error statuses combined with TX_FAILED */ 2606/* error statuses combined with TX_FAILED */
2626#define IWL_PROBE_STATUS_FAIL_TTL BIT(1) 2607#define IL_PROBE_STATUS_FAIL_TTL BIT(1)
2627#define IWL_PROBE_STATUS_FAIL_BT BIT(2) 2608#define IL_PROBE_STATUS_FAIL_BT BIT(2)
2628 2609
2629#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */ 2610#define NUMBER_OF_STATS 1 /* first __le32 is good CRC */
2630/* 2611/*
2631 * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command) 2612 * N_SCAN_RESULTS = 0x83 (notification only, not a command)
2632 */ 2613 */
2633struct iwl_scanresults_notification { 2614struct il_scanresults_notification {
2634 u8 channel; 2615 u8 channel;
2635 u8 band; 2616 u8 band;
2636 u8 probe_status; 2617 u8 probe_status;
2637 u8 num_probe_not_sent; /* not enough time to send */ 2618 u8 num_probe_not_sent; /* not enough time to send */
2638 __le32 tsf_low; 2619 __le32 tsf_low;
2639 __le32 tsf_high; 2620 __le32 tsf_high;
2640 __le32 statistics[NUMBER_OF_STATISTICS]; 2621 __le32 stats[NUMBER_OF_STATS];
2641} __packed; 2622} __packed;
2642 2623
2643/* 2624/*
2644 * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command) 2625 * N_SCAN_COMPLETE = 0x84 (notification only, not a command)
2645 */ 2626 */
2646struct iwl_scancomplete_notification { 2627struct il_scancomplete_notification {
2647 u8 scanned_channels; 2628 u8 scanned_channels;
2648 u8 status; 2629 u8 status;
2649 u8 last_channel; 2630 u8 last_channel;
@@ -2651,50 +2632,49 @@ struct iwl_scancomplete_notification {
2651 __le32 tsf_high; 2632 __le32 tsf_high;
2652} __packed; 2633} __packed;
2653 2634
2654
2655/****************************************************************************** 2635/******************************************************************************
2656 * (9) 2636 * (9)
2657 * IBSS/AP Commands and Notifications: 2637 * IBSS/AP Commands and Notifications:
2658 * 2638 *
2659 *****************************************************************************/ 2639 *****************************************************************************/
2660 2640
2661enum iwl_ibss_manager { 2641enum il_ibss_manager {
2662 IWL_NOT_IBSS_MANAGER = 0, 2642 IL_NOT_IBSS_MANAGER = 0,
2663 IWL_IBSS_MANAGER = 1, 2643 IL_IBSS_MANAGER = 1,
2664}; 2644};
2665 2645
2666/* 2646/*
2667 * BEACON_NOTIFICATION = 0x90 (notification only, not a command) 2647 * N_BEACON = 0x90 (notification only, not a command)
2668 */ 2648 */
2669 2649
2670struct iwl3945_beacon_notif { 2650struct il3945_beacon_notif {
2671 struct iwl3945_tx_resp beacon_notify_hdr; 2651 struct il3945_tx_resp beacon_notify_hdr;
2672 __le32 low_tsf; 2652 __le32 low_tsf;
2673 __le32 high_tsf; 2653 __le32 high_tsf;
2674 __le32 ibss_mgr_status; 2654 __le32 ibss_mgr_status;
2675} __packed; 2655} __packed;
2676 2656
2677struct iwl4965_beacon_notif { 2657struct il4965_beacon_notif {
2678 struct iwl4965_tx_resp beacon_notify_hdr; 2658 struct il4965_tx_resp beacon_notify_hdr;
2679 __le32 low_tsf; 2659 __le32 low_tsf;
2680 __le32 high_tsf; 2660 __le32 high_tsf;
2681 __le32 ibss_mgr_status; 2661 __le32 ibss_mgr_status;
2682} __packed; 2662} __packed;
2683 2663
2684/* 2664/*
2685 * REPLY_TX_BEACON = 0x91 (command, has simple generic response) 2665 * C_TX_BEACON= 0x91 (command, has simple generic response)
2686 */ 2666 */
2687 2667
2688struct iwl3945_tx_beacon_cmd { 2668struct il3945_tx_beacon_cmd {
2689 struct iwl3945_tx_cmd tx; 2669 struct il3945_tx_cmd tx;
2690 __le16 tim_idx; 2670 __le16 tim_idx;
2691 u8 tim_size; 2671 u8 tim_size;
2692 u8 reserved1; 2672 u8 reserved1;
2693 struct ieee80211_hdr frame[0]; /* beacon frame */ 2673 struct ieee80211_hdr frame[0]; /* beacon frame */
2694} __packed; 2674} __packed;
2695 2675
2696struct iwl_tx_beacon_cmd { 2676struct il_tx_beacon_cmd {
2697 struct iwl_tx_cmd tx; 2677 struct il_tx_cmd tx;
2698 __le16 tim_idx; 2678 __le16 tim_idx;
2699 u8 tim_size; 2679 u8 tim_size;
2700 u8 reserved1; 2680 u8 reserved1;
@@ -2707,7 +2687,7 @@ struct iwl_tx_beacon_cmd {
2707 * 2687 *
2708 *****************************************************************************/ 2688 *****************************************************************************/
2709 2689
2710#define IWL_TEMP_CONVERT 260 2690#define IL_TEMP_CONVERT 260
2711 2691
2712#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 2692#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
2713#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 2693#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
@@ -2727,9 +2707,9 @@ struct rate_histogram {
2727 } failed; 2707 } failed;
2728} __packed; 2708} __packed;
2729 2709
2730/* statistics command response */ 2710/* stats command response */
2731 2711
2732struct iwl39_statistics_rx_phy { 2712struct iwl39_stats_rx_phy {
2733 __le32 ina_cnt; 2713 __le32 ina_cnt;
2734 __le32 fina_cnt; 2714 __le32 fina_cnt;
2735 __le32 plcp_err; 2715 __le32 plcp_err;
@@ -2747,7 +2727,7 @@ struct iwl39_statistics_rx_phy {
2747 __le32 sent_cts_cnt; 2727 __le32 sent_cts_cnt;
2748} __packed; 2728} __packed;
2749 2729
2750struct iwl39_statistics_rx_non_phy { 2730struct iwl39_stats_rx_non_phy {
2751 __le32 bogus_cts; /* CTS received when not expecting CTS */ 2731 __le32 bogus_cts; /* CTS received when not expecting CTS */
2752 __le32 bogus_ack; /* ACK received when not expecting ACK */ 2732 __le32 bogus_ack; /* ACK received when not expecting ACK */
2753 __le32 non_bssid_frames; /* number of frames with BSSID that 2733 __le32 non_bssid_frames; /* number of frames with BSSID that
@@ -2758,13 +2738,13 @@ struct iwl39_statistics_rx_non_phy {
2758 * our serving channel */ 2738 * our serving channel */
2759} __packed; 2739} __packed;
2760 2740
2761struct iwl39_statistics_rx { 2741struct iwl39_stats_rx {
2762 struct iwl39_statistics_rx_phy ofdm; 2742 struct iwl39_stats_rx_phy ofdm;
2763 struct iwl39_statistics_rx_phy cck; 2743 struct iwl39_stats_rx_phy cck;
2764 struct iwl39_statistics_rx_non_phy general; 2744 struct iwl39_stats_rx_non_phy general;
2765} __packed; 2745} __packed;
2766 2746
2767struct iwl39_statistics_tx { 2747struct iwl39_stats_tx {
2768 __le32 preamble_cnt; 2748 __le32 preamble_cnt;
2769 __le32 rx_detected_cnt; 2749 __le32 rx_detected_cnt;
2770 __le32 bt_prio_defer_cnt; 2750 __le32 bt_prio_defer_cnt;
@@ -2776,31 +2756,31 @@ struct iwl39_statistics_tx {
2776 __le32 actual_ack_cnt; 2756 __le32 actual_ack_cnt;
2777} __packed; 2757} __packed;
2778 2758
2779struct statistics_dbg { 2759struct stats_dbg {
2780 __le32 burst_check; 2760 __le32 burst_check;
2781 __le32 burst_count; 2761 __le32 burst_count;
2782 __le32 wait_for_silence_timeout_cnt; 2762 __le32 wait_for_silence_timeout_cnt;
2783 __le32 reserved[3]; 2763 __le32 reserved[3];
2784} __packed; 2764} __packed;
2785 2765
2786struct iwl39_statistics_div { 2766struct iwl39_stats_div {
2787 __le32 tx_on_a; 2767 __le32 tx_on_a;
2788 __le32 tx_on_b; 2768 __le32 tx_on_b;
2789 __le32 exec_time; 2769 __le32 exec_time;
2790 __le32 probe_time; 2770 __le32 probe_time;
2791} __packed; 2771} __packed;
2792 2772
2793struct iwl39_statistics_general { 2773struct iwl39_stats_general {
2794 __le32 temperature; 2774 __le32 temperature;
2795 struct statistics_dbg dbg; 2775 struct stats_dbg dbg;
2796 __le32 sleep_time; 2776 __le32 sleep_time;
2797 __le32 slots_out; 2777 __le32 slots_out;
2798 __le32 slots_idle; 2778 __le32 slots_idle;
2799 __le32 ttl_timestamp; 2779 __le32 ttl_timestamp;
2800 struct iwl39_statistics_div div; 2780 struct iwl39_stats_div div;
2801} __packed; 2781} __packed;
2802 2782
2803struct statistics_rx_phy { 2783struct stats_rx_phy {
2804 __le32 ina_cnt; 2784 __le32 ina_cnt;
2805 __le32 fina_cnt; 2785 __le32 fina_cnt;
2806 __le32 plcp_err; 2786 __le32 plcp_err;
@@ -2823,7 +2803,7 @@ struct statistics_rx_phy {
2823 __le32 reserved3; 2803 __le32 reserved3;
2824} __packed; 2804} __packed;
2825 2805
2826struct statistics_rx_ht_phy { 2806struct stats_rx_ht_phy {
2827 __le32 plcp_err; 2807 __le32 plcp_err;
2828 __le32 overrun_err; 2808 __le32 overrun_err;
2829 __le32 early_overrun_err; 2809 __le32 early_overrun_err;
@@ -2838,7 +2818,7 @@ struct statistics_rx_ht_phy {
2838 2818
2839#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1) 2819#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1)
2840 2820
2841struct statistics_rx_non_phy { 2821struct stats_rx_non_phy {
2842 __le32 bogus_cts; /* CTS received when not expecting CTS */ 2822 __le32 bogus_cts; /* CTS received when not expecting CTS */
2843 __le32 bogus_ack; /* ACK received when not expecting ACK */ 2823 __le32 bogus_ack; /* ACK received when not expecting ACK */
2844 __le32 non_bssid_frames; /* number of frames with BSSID that 2824 __le32 non_bssid_frames; /* number of frames with BSSID that
@@ -2852,15 +2832,15 @@ struct statistics_rx_non_phy {
2852 __le32 num_missed_bcon; /* number of missed beacons */ 2832 __le32 num_missed_bcon; /* number of missed beacons */
2853 __le32 adc_rx_saturation_time; /* count in 0.8us units the time the 2833 __le32 adc_rx_saturation_time; /* count in 0.8us units the time the
2854 * ADC was in saturation */ 2834 * ADC was in saturation */
2855 __le32 ina_detection_search_time;/* total time (in 0.8us) searched 2835 __le32 ina_detection_search_time; /* total time (in 0.8us) searched
2856 * for INA */ 2836 * for INA */
2857 __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */ 2837 __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */
2858 __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */ 2838 __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */
2859 __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */ 2839 __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */
2860 __le32 interference_data_flag; /* flag for interference data 2840 __le32 interference_data_flag; /* flag for interference data
2861 * availability. 1 when data is 2841 * availability. 1 when data is
2862 * available. */ 2842 * available. */
2863 __le32 channel_load; /* counts RX Enable time in uSec */ 2843 __le32 channel_load; /* counts RX Enable time in uSec */
2864 __le32 dsp_false_alarms; /* DSP false alarm (both OFDM 2844 __le32 dsp_false_alarms; /* DSP false alarm (both OFDM
2865 * and CCK) counter */ 2845 * and CCK) counter */
2866 __le32 beacon_rssi_a; 2846 __le32 beacon_rssi_a;
@@ -2871,28 +2851,28 @@ struct statistics_rx_non_phy {
2871 __le32 beacon_energy_c; 2851 __le32 beacon_energy_c;
2872} __packed; 2852} __packed;
2873 2853
2874struct statistics_rx { 2854struct stats_rx {
2875 struct statistics_rx_phy ofdm; 2855 struct stats_rx_phy ofdm;
2876 struct statistics_rx_phy cck; 2856 struct stats_rx_phy cck;
2877 struct statistics_rx_non_phy general; 2857 struct stats_rx_non_phy general;
2878 struct statistics_rx_ht_phy ofdm_ht; 2858 struct stats_rx_ht_phy ofdm_ht;
2879} __packed; 2859} __packed;
2880 2860
2881/** 2861/**
2882 * struct statistics_tx_power - current tx power 2862 * struct stats_tx_power - current tx power
2883 * 2863 *
2884 * @ant_a: current tx power on chain a in 1/2 dB step 2864 * @ant_a: current tx power on chain a in 1/2 dB step
2885 * @ant_b: current tx power on chain b in 1/2 dB step 2865 * @ant_b: current tx power on chain b in 1/2 dB step
2886 * @ant_c: current tx power on chain c in 1/2 dB step 2866 * @ant_c: current tx power on chain c in 1/2 dB step
2887 */ 2867 */
2888struct statistics_tx_power { 2868struct stats_tx_power {
2889 u8 ant_a; 2869 u8 ant_a;
2890 u8 ant_b; 2870 u8 ant_b;
2891 u8 ant_c; 2871 u8 ant_c;
2892 u8 reserved; 2872 u8 reserved;
2893} __packed; 2873} __packed;
2894 2874
2895struct statistics_tx_non_phy_agg { 2875struct stats_tx_non_phy_agg {
2896 __le32 ba_timeout; 2876 __le32 ba_timeout;
2897 __le32 ba_reschedule_frames; 2877 __le32 ba_reschedule_frames;
2898 __le32 scd_query_agg_frame_cnt; 2878 __le32 scd_query_agg_frame_cnt;
@@ -2905,7 +2885,7 @@ struct statistics_tx_non_phy_agg {
2905 __le32 rx_ba_rsp_cnt; 2885 __le32 rx_ba_rsp_cnt;
2906} __packed; 2886} __packed;
2907 2887
2908struct statistics_tx { 2888struct stats_tx {
2909 __le32 preamble_cnt; 2889 __le32 preamble_cnt;
2910 __le32 rx_detected_cnt; 2890 __le32 rx_detected_cnt;
2911 __le32 bt_prio_defer_cnt; 2891 __le32 bt_prio_defer_cnt;
@@ -2920,13 +2900,12 @@ struct statistics_tx {
2920 __le32 burst_abort_missing_next_frame_cnt; 2900 __le32 burst_abort_missing_next_frame_cnt;
2921 __le32 cts_timeout_collision; 2901 __le32 cts_timeout_collision;
2922 __le32 ack_or_ba_timeout_collision; 2902 __le32 ack_or_ba_timeout_collision;
2923 struct statistics_tx_non_phy_agg agg; 2903 struct stats_tx_non_phy_agg agg;
2924 2904
2925 __le32 reserved1; 2905 __le32 reserved1;
2926} __packed; 2906} __packed;
2927 2907
2928 2908struct stats_div {
2929struct statistics_div {
2930 __le32 tx_on_a; 2909 __le32 tx_on_a;
2931 __le32 tx_on_b; 2910 __le32 tx_on_b;
2932 __le32 exec_time; 2911 __le32 exec_time;
@@ -2935,14 +2914,14 @@ struct statistics_div {
2935 __le32 reserved2; 2914 __le32 reserved2;
2936} __packed; 2915} __packed;
2937 2916
2938struct statistics_general_common { 2917struct stats_general_common {
2939 __le32 temperature; /* radio temperature */ 2918 __le32 temperature; /* radio temperature */
2940 struct statistics_dbg dbg; 2919 struct stats_dbg dbg;
2941 __le32 sleep_time; 2920 __le32 sleep_time;
2942 __le32 slots_out; 2921 __le32 slots_out;
2943 __le32 slots_idle; 2922 __le32 slots_idle;
2944 __le32 ttl_timestamp; 2923 __le32 ttl_timestamp;
2945 struct statistics_div div; 2924 struct stats_div div;
2946 __le32 rx_enable_counter; 2925 __le32 rx_enable_counter;
2947 /* 2926 /*
2948 * num_of_sos_states: 2927 * num_of_sos_states:
@@ -2952,73 +2931,73 @@ struct statistics_general_common {
2952 __le32 num_of_sos_states; 2931 __le32 num_of_sos_states;
2953} __packed; 2932} __packed;
2954 2933
2955struct statistics_general { 2934struct stats_general {
2956 struct statistics_general_common common; 2935 struct stats_general_common common;
2957 __le32 reserved2; 2936 __le32 reserved2;
2958 __le32 reserved3; 2937 __le32 reserved3;
2959} __packed; 2938} __packed;
2960 2939
2961#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0) 2940#define UCODE_STATS_CLEAR_MSK (0x1 << 0)
2962#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1) 2941#define UCODE_STATS_FREQUENCY_MSK (0x1 << 1)
2963#define UCODE_STATISTICS_NARROW_BAND_MSK (0x1 << 2) 2942#define UCODE_STATS_NARROW_BAND_MSK (0x1 << 2)
2964 2943
2965/* 2944/*
2966 * REPLY_STATISTICS_CMD = 0x9c, 2945 * C_STATS = 0x9c,
2967 * all devices identical. 2946 * all devices identical.
2968 * 2947 *
2969 * This command triggers an immediate response containing uCode statistics. 2948 * This command triggers an immediate response containing uCode stats.
2970 * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below. 2949 * The response is in the same format as N_STATS 0x9d, below.
2971 * 2950 *
2972 * If the CLEAR_STATS configuration flag is set, uCode will clear its 2951 * If the CLEAR_STATS configuration flag is set, uCode will clear its
2973 * internal copy of the statistics (counters) after issuing the response. 2952 * internal copy of the stats (counters) after issuing the response.
2974 * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below). 2953 * This flag does not affect N_STATSs after beacons (see below).
2975 * 2954 *
2976 * If the DISABLE_NOTIF configuration flag is set, uCode will not issue 2955 * If the DISABLE_NOTIF configuration flag is set, uCode will not issue
2977 * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag 2956 * N_STATSs after received beacons (see below). This flag
2978 * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself. 2957 * does not affect the response to the C_STATS 0x9c itself.
2979 */ 2958 */
2980#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */ 2959#define IL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */
2981#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */ 2960#define IL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2) /* see above */
2982struct iwl_statistics_cmd { 2961struct il_stats_cmd {
2983 __le32 configuration_flags; /* IWL_STATS_CONF_* */ 2962 __le32 configuration_flags; /* IL_STATS_CONF_* */
2984} __packed; 2963} __packed;
2985 2964
2986/* 2965/*
2987 * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command) 2966 * N_STATS = 0x9d (notification only, not a command)
2988 * 2967 *
2989 * By default, uCode issues this notification after receiving a beacon 2968 * By default, uCode issues this notification after receiving a beacon
2990 * while associated. To disable this behavior, set DISABLE_NOTIF flag in the 2969 * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
2991 * REPLY_STATISTICS_CMD 0x9c, above. 2970 * C_STATS 0x9c, above.
2992 * 2971 *
2993 * Statistics counters continue to increment beacon after beacon, but are 2972 * Statistics counters continue to increment beacon after beacon, but are
2994 * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD 2973 * cleared when changing channels or when driver issues C_STATS
2995 * 0x9c with CLEAR_STATS bit set (see above). 2974 * 0x9c with CLEAR_STATS bit set (see above).
2996 * 2975 *
2997 * uCode also issues this notification during scans. uCode clears statistics 2976 * uCode also issues this notification during scans. uCode clears stats
2998 * appropriately so that each notification contains statistics for only the 2977 * appropriately so that each notification contains stats for only the
2999 * one channel that has just been scanned. 2978 * one channel that has just been scanned.
3000 */ 2979 */
3001#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2) 2980#define STATS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2)
3002#define STATISTICS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8) 2981#define STATS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8)
3003 2982
3004struct iwl3945_notif_statistics { 2983struct il3945_notif_stats {
3005 __le32 flag; 2984 __le32 flag;
3006 struct iwl39_statistics_rx rx; 2985 struct iwl39_stats_rx rx;
3007 struct iwl39_statistics_tx tx; 2986 struct iwl39_stats_tx tx;
3008 struct iwl39_statistics_general general; 2987 struct iwl39_stats_general general;
3009} __packed; 2988} __packed;
3010 2989
3011struct iwl_notif_statistics { 2990struct il_notif_stats {
3012 __le32 flag; 2991 __le32 flag;
3013 struct statistics_rx rx; 2992 struct stats_rx rx;
3014 struct statistics_tx tx; 2993 struct stats_tx tx;
3015 struct statistics_general general; 2994 struct stats_general general;
3016} __packed; 2995} __packed;
3017 2996
3018/* 2997/*
3019 * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command) 2998 * N_MISSED_BEACONS = 0xa2 (notification only, not a command)
3020 * 2999 *
3021 * uCode send MISSED_BEACONS_NOTIFICATION to driver when detect beacon missed 3000 * uCode send N_MISSED_BEACONS to driver when detect beacon missed
3022 * in regardless of how many missed beacons, which mean when driver receive the 3001 * in regardless of how many missed beacons, which mean when driver receive the
3023 * notification, inside the command, it can find all the beacons information 3002 * notification, inside the command, it can find all the beacons information
3024 * which include number of total missed beacons, number of consecutive missed 3003 * which include number of total missed beacons, number of consecutive missed
@@ -3035,18 +3014,17 @@ struct iwl_notif_statistics {
3035 * 3014 *
3036 */ 3015 */
3037 3016
3038#define IWL_MISSED_BEACON_THRESHOLD_MIN (1) 3017#define IL_MISSED_BEACON_THRESHOLD_MIN (1)
3039#define IWL_MISSED_BEACON_THRESHOLD_DEF (5) 3018#define IL_MISSED_BEACON_THRESHOLD_DEF (5)
3040#define IWL_MISSED_BEACON_THRESHOLD_MAX IWL_MISSED_BEACON_THRESHOLD_DEF 3019#define IL_MISSED_BEACON_THRESHOLD_MAX IL_MISSED_BEACON_THRESHOLD_DEF
3041 3020
3042struct iwl_missed_beacon_notif { 3021struct il_missed_beacon_notif {
3043 __le32 consecutive_missed_beacons; 3022 __le32 consecutive_missed_beacons;
3044 __le32 total_missed_becons; 3023 __le32 total_missed_becons;
3045 __le32 num_expected_beacons; 3024 __le32 num_expected_beacons;
3046 __le32 num_recvd_beacons; 3025 __le32 num_recvd_beacons;
3047} __packed; 3026} __packed;
3048 3027
3049
3050/****************************************************************************** 3028/******************************************************************************
3051 * (11) 3029 * (11)
3052 * Rx Calibration Commands: 3030 * Rx Calibration Commands:
@@ -3062,7 +3040,7 @@ struct iwl_missed_beacon_notif {
3062 *****************************************************************************/ 3040 *****************************************************************************/
3063 3041
3064/** 3042/**
3065 * SENSITIVITY_CMD = 0xa8 (command, has simple generic response) 3043 * C_SENSITIVITY = 0xa8 (command, has simple generic response)
3066 * 3044 *
3067 * This command sets up the Rx signal detector for a sensitivity level that 3045 * This command sets up the Rx signal detector for a sensitivity level that
3068 * is high enough to lock onto all signals within the associated network, 3046 * is high enough to lock onto all signals within the associated network,
@@ -3076,12 +3054,12 @@ struct iwl_missed_beacon_notif {
3076 * time listening, not transmitting). Driver must adjust sensitivity so that 3054 * time listening, not transmitting). Driver must adjust sensitivity so that
3077 * the ratio of actual false alarms to actual Rx time falls within this range. 3055 * the ratio of actual false alarms to actual Rx time falls within this range.
3078 * 3056 *
3079 * While associated, uCode delivers STATISTICS_NOTIFICATIONs after each 3057 * While associated, uCode delivers N_STATSs after each
3080 * received beacon. These provide information to the driver to analyze the 3058 * received beacon. These provide information to the driver to analyze the
3081 * sensitivity. Don't analyze statistics that come in from scanning, or any 3059 * sensitivity. Don't analyze stats that come in from scanning, or any
3082 * other non-associated-network source. Pertinent statistics include: 3060 * other non-associated-network source. Pertinent stats include:
3083 * 3061 *
3084 * From "general" statistics (struct statistics_rx_non_phy): 3062 * From "general" stats (struct stats_rx_non_phy):
3085 * 3063 *
3086 * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level) 3064 * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level)
3087 * Measure of energy of desired signal. Used for establishing a level 3065 * Measure of energy of desired signal. Used for establishing a level
@@ -3094,7 +3072,7 @@ struct iwl_missed_beacon_notif {
3094 * uSecs of actual Rx time during beacon period (varies according to 3072 * uSecs of actual Rx time during beacon period (varies according to
3095 * how much time was spent transmitting). 3073 * how much time was spent transmitting).
3096 * 3074 *
3097 * From "cck" and "ofdm" statistics (struct statistics_rx_phy), separately: 3075 * From "cck" and "ofdm" stats (struct stats_rx_phy), separately:
3098 * 3076 *
3099 * false_alarm_cnt 3077 * false_alarm_cnt
3100 * Signal locks abandoned early (before phy-level header). 3078 * Signal locks abandoned early (before phy-level header).
@@ -3111,15 +3089,15 @@ struct iwl_missed_beacon_notif {
3111 * 3089 *
3112 * Total number of false alarms = false_alarms + plcp_errs 3090 * Total number of false alarms = false_alarms + plcp_errs
3113 * 3091 *
3114 * For OFDM, adjust the following table entries in struct iwl_sensitivity_cmd 3092 * For OFDM, adjust the following table entries in struct il_sensitivity_cmd
3115 * (notice that the start points for OFDM are at or close to settings for 3093 * (notice that the start points for OFDM are at or close to settings for
3116 * maximum sensitivity): 3094 * maximum sensitivity):
3117 * 3095 *
3118 * START / MIN / MAX 3096 * START / MIN / MAX
3119 * HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX 90 / 85 / 120 3097 * HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX 90 / 85 / 120
3120 * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX 170 / 170 / 210 3098 * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX 170 / 170 / 210
3121 * HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX 105 / 105 / 140 3099 * HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX 105 / 105 / 140
3122 * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX 220 / 220 / 270 3100 * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX 220 / 220 / 270
3123 * 3101 *
3124 * If actual rate of OFDM false alarms (+ plcp_errors) is too high 3102 * If actual rate of OFDM false alarms (+ plcp_errors) is too high
3125 * (greater than 50 for each 204.8 msecs listening), reduce sensitivity 3103 * (greater than 50 for each 204.8 msecs listening), reduce sensitivity
@@ -3152,30 +3130,30 @@ struct iwl_missed_beacon_notif {
3152 * Reset this to 0 at the first beacon period that falls within the 3130 * Reset this to 0 at the first beacon period that falls within the
3153 * "good" range (5 to 50 false alarms per 204.8 milliseconds rx). 3131 * "good" range (5 to 50 false alarms per 204.8 milliseconds rx).
3154 * 3132 *
3155 * Then, adjust the following CCK table entries in struct iwl_sensitivity_cmd 3133 * Then, adjust the following CCK table entries in struct il_sensitivity_cmd
3156 * (notice that the start points for CCK are at maximum sensitivity): 3134 * (notice that the start points for CCK are at maximum sensitivity):
3157 * 3135 *
3158 * START / MIN / MAX 3136 * START / MIN / MAX
3159 * HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX 125 / 125 / 200 3137 * HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX 125 / 125 / 200
3160 * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX 200 / 200 / 400 3138 * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX 200 / 200 / 400
3161 * HD_MIN_ENERGY_CCK_DET_INDEX 100 / 0 / 100 3139 * HD_MIN_ENERGY_CCK_DET_IDX 100 / 0 / 100
3162 * 3140 *
3163 * If actual rate of CCK false alarms (+ plcp_errors) is too high 3141 * If actual rate of CCK false alarms (+ plcp_errors) is too high
3164 * (greater than 50 for each 204.8 msecs listening), method for reducing 3142 * (greater than 50 for each 204.8 msecs listening), method for reducing
3165 * sensitivity is: 3143 * sensitivity is:
3166 * 3144 *
3167 * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX, 3145 * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX,
3168 * up to max 400. 3146 * up to max 400.
3169 * 3147 *
3170 * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is < 160, 3148 * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX is < 160,
3171 * sensitivity has been reduced a significant amount; bring it up to 3149 * sensitivity has been reduced a significant amount; bring it up to
3172 * a moderate 161. Otherwise, *add* 3, up to max 200. 3150 * a moderate 161. Otherwise, *add* 3, up to max 200.
3173 * 3151 *
3174 * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is > 160, 3152 * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX is > 160,
3175 * sensitivity has been reduced only a moderate or small amount; 3153 * sensitivity has been reduced only a moderate or small amount;
3176 * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_INDEX, 3154 * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_IDX,
3177 * down to min 0. Otherwise (if gain has been significantly reduced), 3155 * down to min 0. Otherwise (if gain has been significantly reduced),
3178 * don't change the HD_MIN_ENERGY_CCK_DET_INDEX value. 3156 * don't change the HD_MIN_ENERGY_CCK_DET_IDX value.
3179 * 3157 *
3180 * b) Save a snapshot of the "silence reference". 3158 * b) Save a snapshot of the "silence reference".
3181 * 3159 *
@@ -3191,13 +3169,13 @@ struct iwl_missed_beacon_notif {
3191 * 3169 *
3192 * Method for increasing sensitivity: 3170 * Method for increasing sensitivity:
3193 * 3171 *
3194 * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX, 3172 * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX,
3195 * down to min 125. 3173 * down to min 125.
3196 * 3174 *
3197 * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX, 3175 * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX,
3198 * down to min 200. 3176 * down to min 200.
3199 * 3177 *
3200 * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_INDEX, up to max 100. 3178 * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_IDX, up to max 100.
3201 * 3179 *
3202 * If actual rate of CCK false alarms (+ plcp_errors) is within good range 3180 * If actual rate of CCK false alarms (+ plcp_errors) is within good range
3203 * (between 5 and 50 for each 204.8 msecs listening): 3181 * (between 5 and 50 for each 204.8 msecs listening):
@@ -3206,57 +3184,56 @@ struct iwl_missed_beacon_notif {
3206 * 3184 *
3207 * 2) If previous beacon had too many CCK false alarms (+ plcp_errors), 3185 * 2) If previous beacon had too many CCK false alarms (+ plcp_errors),
3208 * give some extra margin to energy threshold by *subtracting* 8 3186 * give some extra margin to energy threshold by *subtracting* 8
3209 * from value in HD_MIN_ENERGY_CCK_DET_INDEX. 3187 * from value in HD_MIN_ENERGY_CCK_DET_IDX.
3210 * 3188 *
3211 * For all cases (too few, too many, good range), make sure that the CCK 3189 * For all cases (too few, too many, good range), make sure that the CCK
3212 * detection threshold (energy) is below the energy level for robust 3190 * detection threshold (energy) is below the energy level for robust
3213 * detection over the past 10 beacon periods, the "Max cck energy". 3191 * detection over the past 10 beacon periods, the "Max cck energy".
3214 * Lower values mean higher energy; this means making sure that the value 3192 * Lower values mean higher energy; this means making sure that the value
3215 * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy". 3193 * in HD_MIN_ENERGY_CCK_DET_IDX is at or *above* "Max cck energy".
3216 * 3194 *
3217 */ 3195 */
3218 3196
3219/* 3197/*
3220 * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd) 3198 * Table entries in C_SENSITIVITY (struct il_sensitivity_cmd)
3221 */ 3199 */
3222#define HD_TABLE_SIZE (11) /* number of entries */ 3200#define HD_TBL_SIZE (11) /* number of entries */
3223#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */ 3201#define HD_MIN_ENERGY_CCK_DET_IDX (0) /* table idxes */
3224#define HD_MIN_ENERGY_OFDM_DET_INDEX (1) 3202#define HD_MIN_ENERGY_OFDM_DET_IDX (1)
3225#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX (2) 3203#define HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX (2)
3226#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX (3) 3204#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX (3)
3227#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX (4) 3205#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX (4)
3228#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX (5) 3206#define HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX (5)
3229#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX (6) 3207#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX (6)
3230#define HD_BARKER_CORR_TH_ADD_MIN_INDEX (7) 3208#define HD_BARKER_CORR_TH_ADD_MIN_IDX (7)
3231#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX (8) 3209#define HD_BARKER_CORR_TH_ADD_MIN_MRC_IDX (8)
3232#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9) 3210#define HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX (9)
3233#define HD_OFDM_ENERGY_TH_IN_INDEX (10) 3211#define HD_OFDM_ENERGY_TH_IN_IDX (10)
3234 3212
3235/* Control field in struct iwl_sensitivity_cmd */ 3213/* Control field in struct il_sensitivity_cmd */
3236#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE cpu_to_le16(0) 3214#define C_SENSITIVITY_CONTROL_DEFAULT_TBL cpu_to_le16(0)
3237#define SENSITIVITY_CMD_CONTROL_WORK_TABLE cpu_to_le16(1) 3215#define C_SENSITIVITY_CONTROL_WORK_TBL cpu_to_le16(1)
3238 3216
3239/** 3217/**
3240 * struct iwl_sensitivity_cmd 3218 * struct il_sensitivity_cmd
3241 * @control: (1) updates working table, (0) updates default table 3219 * @control: (1) updates working table, (0) updates default table
3242 * @table: energy threshold values, use HD_* as index into table 3220 * @table: energy threshold values, use HD_* as idx into table
3243 * 3221 *
3244 * Always use "1" in "control" to update uCode's working table and DSP. 3222 * Always use "1" in "control" to update uCode's working table and DSP.
3245 */ 3223 */
3246struct iwl_sensitivity_cmd { 3224struct il_sensitivity_cmd {
3247 __le16 control; /* always use "1" */ 3225 __le16 control; /* always use "1" */
3248 __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */ 3226 __le16 table[HD_TBL_SIZE]; /* use HD_* as idx */
3249} __packed; 3227} __packed;
3250 3228
3251
3252/** 3229/**
3253 * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response) 3230 * C_PHY_CALIBRATION = 0xb0 (command, has simple generic response)
3254 * 3231 *
3255 * This command sets the relative gains of 4965 device's 3 radio receiver chains. 3232 * This command sets the relative gains of 4965 device's 3 radio receiver chains.
3256 * 3233 *
3257 * After the first association, driver should accumulate signal and noise 3234 * After the first association, driver should accumulate signal and noise
3258 * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20 3235 * stats from the N_STATSs that follow the first 20
3259 * beacons from the associated network (don't collect statistics that come 3236 * beacons from the associated network (don't collect stats that come
3260 * in from scanning, or any other non-network source). 3237 * in from scanning, or any other non-network source).
3261 * 3238 *
3262 * DISCONNECTED ANTENNA: 3239 * DISCONNECTED ANTENNA:
@@ -3264,7 +3241,7 @@ struct iwl_sensitivity_cmd {
3264 * Driver should determine which antennas are actually connected, by comparing 3241 * Driver should determine which antennas are actually connected, by comparing
3265 * average beacon signal levels for the 3 Rx chains. Accumulate (add) the 3242 * average beacon signal levels for the 3 Rx chains. Accumulate (add) the
3266 * following values over 20 beacons, one accumulator for each of the chains 3243 * following values over 20 beacons, one accumulator for each of the chains
3267 * a/b/c, from struct statistics_rx_non_phy: 3244 * a/b/c, from struct stats_rx_non_phy:
3268 * 3245 *
3269 * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB) 3246 * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB)
3270 * 3247 *
@@ -3283,7 +3260,7 @@ struct iwl_sensitivity_cmd {
3283 * to antennas, see above) for gain, by comparing the average signal levels 3260 * to antennas, see above) for gain, by comparing the average signal levels
3284 * detected during the silence after each beacon (background noise). 3261 * detected during the silence after each beacon (background noise).
3285 * Accumulate (add) the following values over 20 beacons, one accumulator for 3262 * Accumulate (add) the following values over 20 beacons, one accumulator for
3286 * each of the chains a/b/c, from struct statistics_rx_non_phy: 3263 * each of the chains a/b/c, from struct stats_rx_non_phy:
3287 * 3264 *
3288 * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB) 3265 * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB)
3289 * 3266 *
@@ -3294,7 +3271,7 @@ struct iwl_sensitivity_cmd {
3294 * (accum_noise[i] - accum_noise[reference]) / 30 3271 * (accum_noise[i] - accum_noise[reference]) / 30
3295 * 3272 *
3296 * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB. 3273 * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB.
3297 * For use in diff_gain_[abc] fields of struct iwl_calibration_cmd, the 3274 * For use in diff_gain_[abc] fields of struct il_calibration_cmd, the
3298 * driver should limit the difference results to a range of 0-3 (0-4.5 dB), 3275 * driver should limit the difference results to a range of 0-3 (0-4.5 dB),
3299 * and set bit 2 to indicate "reduce gain". The value for the reference 3276 * and set bit 2 to indicate "reduce gain". The value for the reference
3300 * (weakest) chain should be "0". 3277 * (weakest) chain should be "0".
@@ -3306,24 +3283,24 @@ struct iwl_sensitivity_cmd {
3306 3283
3307/* Phy calibration command for series */ 3284/* Phy calibration command for series */
3308/* The default calibrate table size if not specified by firmware */ 3285/* The default calibrate table size if not specified by firmware */
3309#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18 3286#define IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
3310enum { 3287enum {
3311 IWL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7, 3288 IL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7,
3312 IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19, 3289 IL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19,
3313}; 3290};
3314 3291
3315#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE (253) 3292#define IL_MAX_PHY_CALIBRATE_TBL_SIZE (253)
3316 3293
3317struct iwl_calib_hdr { 3294struct il_calib_hdr {
3318 u8 op_code; 3295 u8 op_code;
3319 u8 first_group; 3296 u8 first_group;
3320 u8 groups_num; 3297 u8 groups_num;
3321 u8 data_valid; 3298 u8 data_valid;
3322} __packed; 3299} __packed;
3323 3300
3324/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */ 3301/* IL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
3325struct iwl_calib_diff_gain_cmd { 3302struct il_calib_diff_gain_cmd {
3326 struct iwl_calib_hdr hdr; 3303 struct il_calib_hdr hdr;
3327 s8 diff_gain_a; /* see above */ 3304 s8 diff_gain_a; /* see above */
3328 s8 diff_gain_b; 3305 s8 diff_gain_b;
3329 s8 diff_gain_c; 3306 s8 diff_gain_c;
@@ -3338,12 +3315,12 @@ struct iwl_calib_diff_gain_cmd {
3338 3315
3339/* 3316/*
3340 * LEDs Command & Response 3317 * LEDs Command & Response
3341 * REPLY_LEDS_CMD = 0x48 (command, has simple generic response) 3318 * C_LEDS = 0x48 (command, has simple generic response)
3342 * 3319 *
3343 * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field), 3320 * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field),
3344 * this command turns it on or off, or sets up a periodic blinking cycle. 3321 * this command turns it on or off, or sets up a periodic blinking cycle.
3345 */ 3322 */
3346struct iwl_led_cmd { 3323struct il_led_cmd {
3347 __le32 interval; /* "interval" in uSec */ 3324 __le32 interval; /* "interval" in uSec */
3348 u8 id; /* 1: Activity, 2: Link, 3: Tech */ 3325 u8 id; /* 1: Activity, 2: Link, 3: Tech */
3349 u8 off; /* # intervals off while blinking; 3326 u8 off; /* # intervals off while blinking;
@@ -3353,14 +3330,15 @@ struct iwl_led_cmd {
3353 u8 reserved; 3330 u8 reserved;
3354} __packed; 3331} __packed;
3355 3332
3356
3357/****************************************************************************** 3333/******************************************************************************
3358 * (13) 3334 * (13)
3359 * Union of all expected notifications/responses: 3335 * Union of all expected notifications/responses:
3360 * 3336 *
3361 *****************************************************************************/ 3337 *****************************************************************************/
3362 3338
3363struct iwl_rx_packet { 3339#define IL_RX_FRAME_SIZE_MSK 0x00003fff
3340
3341struct il_rx_pkt {
3364 /* 3342 /*
3365 * The first 4 bytes of the RX frame header contain both the RX frame 3343 * The first 4 bytes of the RX frame header contain both the RX frame
3366 * size and some flags. 3344 * size and some flags.
@@ -3372,27 +3350,27 @@ struct iwl_rx_packet {
3372 * 13-00: RX frame size 3350 * 13-00: RX frame size
3373 */ 3351 */
3374 __le32 len_n_flags; 3352 __le32 len_n_flags;
3375 struct iwl_cmd_header hdr; 3353 struct il_cmd_header hdr;
3376 union { 3354 union {
3377 struct iwl3945_rx_frame rx_frame; 3355 struct il3945_rx_frame rx_frame;
3378 struct iwl3945_tx_resp tx_resp; 3356 struct il3945_tx_resp tx_resp;
3379 struct iwl3945_beacon_notif beacon_status; 3357 struct il3945_beacon_notif beacon_status;
3380 3358
3381 struct iwl_alive_resp alive_frame; 3359 struct il_alive_resp alive_frame;
3382 struct iwl_spectrum_notification spectrum_notif; 3360 struct il_spectrum_notification spectrum_notif;
3383 struct iwl_csa_notification csa_notif; 3361 struct il_csa_notification csa_notif;
3384 struct iwl_error_resp err_resp; 3362 struct il_error_resp err_resp;
3385 struct iwl_card_state_notif card_state_notif; 3363 struct il_card_state_notif card_state_notif;
3386 struct iwl_add_sta_resp add_sta; 3364 struct il_add_sta_resp add_sta;
3387 struct iwl_rem_sta_resp rem_sta; 3365 struct il_rem_sta_resp rem_sta;
3388 struct iwl_sleep_notification sleep_notif; 3366 struct il_sleep_notification sleep_notif;
3389 struct iwl_spectrum_resp spectrum; 3367 struct il_spectrum_resp spectrum;
3390 struct iwl_notif_statistics stats; 3368 struct il_notif_stats stats;
3391 struct iwl_compressed_ba_resp compressed_ba; 3369 struct il_compressed_ba_resp compressed_ba;
3392 struct iwl_missed_beacon_notif missed_beacon; 3370 struct il_missed_beacon_notif missed_beacon;
3393 __le32 status; 3371 __le32 status;
3394 u8 raw[0]; 3372 u8 raw[0];
3395 } u; 3373 } u;
3396} __packed; 3374} __packed;
3397 3375
3398#endif /* __iwl_legacy_commands_h__ */ 3376#endif /* __il_commands_h__ */
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
new file mode 100644
index 000000000000..7e2924f332a7
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -0,0 +1,5707 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/etherdevice.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <linux/types.h>
35#include <linux/lockdep.h>
36#include <linux/init.h>
37#include <linux/pci.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/skbuff.h>
41#include <net/mac80211.h>
42
43#include "common.h"
44
45const char *
46il_get_cmd_string(u8 cmd)
47{
48 switch (cmd) {
49 IL_CMD(N_ALIVE);
50 IL_CMD(N_ERROR);
51 IL_CMD(C_RXON);
52 IL_CMD(C_RXON_ASSOC);
53 IL_CMD(C_QOS_PARAM);
54 IL_CMD(C_RXON_TIMING);
55 IL_CMD(C_ADD_STA);
56 IL_CMD(C_REM_STA);
57 IL_CMD(C_WEPKEY);
58 IL_CMD(N_3945_RX);
59 IL_CMD(C_TX);
60 IL_CMD(C_RATE_SCALE);
61 IL_CMD(C_LEDS);
62 IL_CMD(C_TX_LINK_QUALITY_CMD);
63 IL_CMD(C_CHANNEL_SWITCH);
64 IL_CMD(N_CHANNEL_SWITCH);
65 IL_CMD(C_SPECTRUM_MEASUREMENT);
66 IL_CMD(N_SPECTRUM_MEASUREMENT);
67 IL_CMD(C_POWER_TBL);
68 IL_CMD(N_PM_SLEEP);
69 IL_CMD(N_PM_DEBUG_STATS);
70 IL_CMD(C_SCAN);
71 IL_CMD(C_SCAN_ABORT);
72 IL_CMD(N_SCAN_START);
73 IL_CMD(N_SCAN_RESULTS);
74 IL_CMD(N_SCAN_COMPLETE);
75 IL_CMD(N_BEACON);
76 IL_CMD(C_TX_BEACON);
77 IL_CMD(C_TX_PWR_TBL);
78 IL_CMD(C_BT_CONFIG);
79 IL_CMD(C_STATS);
80 IL_CMD(N_STATS);
81 IL_CMD(N_CARD_STATE);
82 IL_CMD(N_MISSED_BEACONS);
83 IL_CMD(C_CT_KILL_CONFIG);
84 IL_CMD(C_SENSITIVITY);
85 IL_CMD(C_PHY_CALIBRATION);
86 IL_CMD(N_RX_PHY);
87 IL_CMD(N_RX_MPDU);
88 IL_CMD(N_RX);
89 IL_CMD(N_COMPRESSED_BA);
90 default:
91 return "UNKNOWN";
92
93 }
94}
95EXPORT_SYMBOL(il_get_cmd_string);
96
97#define HOST_COMPLETE_TIMEOUT (HZ / 2)
98
99static void
100il_generic_cmd_callback(struct il_priv *il, struct il_device_cmd *cmd,
101 struct il_rx_pkt *pkt)
102{
103 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
104 IL_ERR("Bad return from %s (0x%08X)\n",
105 il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
106 return;
107 }
108#ifdef CONFIG_IWLEGACY_DEBUG
109 switch (cmd->hdr.cmd) {
110 case C_TX_LINK_QUALITY_CMD:
111 case C_SENSITIVITY:
112 D_HC_DUMP("back from %s (0x%08X)\n",
113 il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
114 break;
115 default:
116 D_HC("back from %s (0x%08X)\n", il_get_cmd_string(cmd->hdr.cmd),
117 pkt->hdr.flags);
118 }
119#endif
120}
121
122static int
123il_send_cmd_async(struct il_priv *il, struct il_host_cmd *cmd)
124{
125 int ret;
126
127 BUG_ON(!(cmd->flags & CMD_ASYNC));
128
129 /* An asynchronous command can not expect an SKB to be set. */
130 BUG_ON(cmd->flags & CMD_WANT_SKB);
131
132 /* Assign a generic callback if one is not provided */
133 if (!cmd->callback)
134 cmd->callback = il_generic_cmd_callback;
135
136 if (test_bit(S_EXIT_PENDING, &il->status))
137 return -EBUSY;
138
139 ret = il_enqueue_hcmd(il, cmd);
140 if (ret < 0) {
141 IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
142 il_get_cmd_string(cmd->id), ret);
143 return ret;
144 }
145 return 0;
146}
147
148int
149il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd)
150{
151 int cmd_idx;
152 int ret;
153
154 lockdep_assert_held(&il->mutex);
155
156 BUG_ON(cmd->flags & CMD_ASYNC);
157
158 /* A synchronous command can not have a callback set. */
159 BUG_ON(cmd->callback);
160
161 D_INFO("Attempting to send sync command %s\n",
162 il_get_cmd_string(cmd->id));
163
164 set_bit(S_HCMD_ACTIVE, &il->status);
165 D_INFO("Setting HCMD_ACTIVE for command %s\n",
166 il_get_cmd_string(cmd->id));
167
168 cmd_idx = il_enqueue_hcmd(il, cmd);
169 if (cmd_idx < 0) {
170 ret = cmd_idx;
171 IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
172 il_get_cmd_string(cmd->id), ret);
173 goto out;
174 }
175
176 ret = wait_event_timeout(il->wait_command_queue,
177 !test_bit(S_HCMD_ACTIVE, &il->status),
178 HOST_COMPLETE_TIMEOUT);
179 if (!ret) {
180 if (test_bit(S_HCMD_ACTIVE, &il->status)) {
181 IL_ERR("Error sending %s: time out after %dms.\n",
182 il_get_cmd_string(cmd->id),
183 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
184
185 clear_bit(S_HCMD_ACTIVE, &il->status);
186 D_INFO("Clearing HCMD_ACTIVE for command %s\n",
187 il_get_cmd_string(cmd->id));
188 ret = -ETIMEDOUT;
189 goto cancel;
190 }
191 }
192
193 if (test_bit(S_RF_KILL_HW, &il->status)) {
194 IL_ERR("Command %s aborted: RF KILL Switch\n",
195 il_get_cmd_string(cmd->id));
196 ret = -ECANCELED;
197 goto fail;
198 }
199 if (test_bit(S_FW_ERROR, &il->status)) {
200 IL_ERR("Command %s failed: FW Error\n",
201 il_get_cmd_string(cmd->id));
202 ret = -EIO;
203 goto fail;
204 }
205 if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
206 IL_ERR("Error: Response NULL in '%s'\n",
207 il_get_cmd_string(cmd->id));
208 ret = -EIO;
209 goto cancel;
210 }
211
212 ret = 0;
213 goto out;
214
215cancel:
216 if (cmd->flags & CMD_WANT_SKB) {
217 /*
218 * Cancel the CMD_WANT_SKB flag for the cmd in the
219 * TX cmd queue. Otherwise in case the cmd comes
220 * in later, it will possibly set an invalid
221 * address (cmd->meta.source).
222 */
223 il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB;
224 }
225fail:
226 if (cmd->reply_page) {
227 il_free_pages(il, cmd->reply_page);
228 cmd->reply_page = 0;
229 }
230out:
231 return ret;
232}
233EXPORT_SYMBOL(il_send_cmd_sync);
234
235int
236il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd)
237{
238 if (cmd->flags & CMD_ASYNC)
239 return il_send_cmd_async(il, cmd);
240
241 return il_send_cmd_sync(il, cmd);
242}
243EXPORT_SYMBOL(il_send_cmd);
244
245int
246il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data)
247{
248 struct il_host_cmd cmd = {
249 .id = id,
250 .len = len,
251 .data = data,
252 };
253
254 return il_send_cmd_sync(il, &cmd);
255}
256EXPORT_SYMBOL(il_send_cmd_pdu);
257
258int
259il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
260 void (*callback) (struct il_priv *il,
261 struct il_device_cmd *cmd,
262 struct il_rx_pkt *pkt))
263{
264 struct il_host_cmd cmd = {
265 .id = id,
266 .len = len,
267 .data = data,
268 };
269
270 cmd.flags |= CMD_ASYNC;
271 cmd.callback = callback;
272
273 return il_send_cmd_async(il, &cmd);
274}
275EXPORT_SYMBOL(il_send_cmd_pdu_async);
276
277/* default: IL_LED_BLINK(0) using blinking idx table */
278static int led_mode;
279module_param(led_mode, int, S_IRUGO);
280MODULE_PARM_DESC(led_mode,
281 "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking");
282
283/* Throughput OFF time(ms) ON time (ms)
284 * >300 25 25
285 * >200 to 300 40 40
286 * >100 to 200 55 55
287 * >70 to 100 65 65
288 * >50 to 70 75 75
289 * >20 to 50 85 85
290 * >10 to 20 95 95
291 * >5 to 10 110 110
292 * >1 to 5 130 130
293 * >0 to 1 167 167
294 * <=0 SOLID ON
295 */
296static const struct ieee80211_tpt_blink il_blink[] = {
297 {.throughput = 0, .blink_time = 334},
298 {.throughput = 1 * 1024 - 1, .blink_time = 260},
299 {.throughput = 5 * 1024 - 1, .blink_time = 220},
300 {.throughput = 10 * 1024 - 1, .blink_time = 190},
301 {.throughput = 20 * 1024 - 1, .blink_time = 170},
302 {.throughput = 50 * 1024 - 1, .blink_time = 150},
303 {.throughput = 70 * 1024 - 1, .blink_time = 130},
304 {.throughput = 100 * 1024 - 1, .blink_time = 110},
305 {.throughput = 200 * 1024 - 1, .blink_time = 80},
306 {.throughput = 300 * 1024 - 1, .blink_time = 50},
307};
308
309/*
310 * Adjust led blink rate to compensate on a MAC Clock difference on every HW
311 * Led blink rate analysis showed an average deviation of 0% on 3945,
312 * 5% on 4965 HW.
313 * Need to compensate on the led on/off time per HW according to the deviation
314 * to achieve the desired led frequency
315 * The calculation is: (100-averageDeviation)/100 * blinkTime
316 * For code efficiency the calculation will be:
317 * compensation = (100 - averageDeviation) * 64 / 100
318 * NewBlinkTime = (compensation * BlinkTime) / 64
319 */
320static inline u8
321il_blink_compensation(struct il_priv *il, u8 time, u16 compensation)
322{
323 if (!compensation) {
324 IL_ERR("undefined blink compensation: "
325 "use pre-defined blinking time\n");
326 return time;
327 }
328
329 return (u8) ((time * compensation) >> 6);
330}
331
332/* Set led pattern command */
333static int
334il_led_cmd(struct il_priv *il, unsigned long on, unsigned long off)
335{
336 struct il_led_cmd led_cmd = {
337 .id = IL_LED_LINK,
338 .interval = IL_DEF_LED_INTRVL
339 };
340 int ret;
341
342 if (!test_bit(S_READY, &il->status))
343 return -EBUSY;
344
345 if (il->blink_on == on && il->blink_off == off)
346 return 0;
347
348 if (off == 0) {
349 /* led is SOLID_ON */
350 on = IL_LED_SOLID;
351 }
352
353 D_LED("Led blink time compensation=%u\n",
354 il->cfg->base_params->led_compensation);
355 led_cmd.on =
356 il_blink_compensation(il, on,
357 il->cfg->base_params->led_compensation);
358 led_cmd.off =
359 il_blink_compensation(il, off,
360 il->cfg->base_params->led_compensation);
361
362 ret = il->cfg->ops->led->cmd(il, &led_cmd);
363 if (!ret) {
364 il->blink_on = on;
365 il->blink_off = off;
366 }
367 return ret;
368}
369
370static void
371il_led_brightness_set(struct led_classdev *led_cdev,
372 enum led_brightness brightness)
373{
374 struct il_priv *il = container_of(led_cdev, struct il_priv, led);
375 unsigned long on = 0;
376
377 if (brightness > 0)
378 on = IL_LED_SOLID;
379
380 il_led_cmd(il, on, 0);
381}
382
383static int
384il_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
385 unsigned long *delay_off)
386{
387 struct il_priv *il = container_of(led_cdev, struct il_priv, led);
388
389 return il_led_cmd(il, *delay_on, *delay_off);
390}
391
392void
393il_leds_init(struct il_priv *il)
394{
395 int mode = led_mode;
396 int ret;
397
398 if (mode == IL_LED_DEFAULT)
399 mode = il->cfg->led_mode;
400
401 il->led.name =
402 kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy));
403 il->led.brightness_set = il_led_brightness_set;
404 il->led.blink_set = il_led_blink_set;
405 il->led.max_brightness = 1;
406
407 switch (mode) {
408 case IL_LED_DEFAULT:
409 WARN_ON(1);
410 break;
411 case IL_LED_BLINK:
412 il->led.default_trigger =
413 ieee80211_create_tpt_led_trigger(il->hw,
414 IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
415 il_blink,
416 ARRAY_SIZE(il_blink));
417 break;
418 case IL_LED_RF_STATE:
419 il->led.default_trigger = ieee80211_get_radio_led_name(il->hw);
420 break;
421 }
422
423 ret = led_classdev_register(&il->pci_dev->dev, &il->led);
424 if (ret) {
425 kfree(il->led.name);
426 return;
427 }
428
429 il->led_registered = true;
430}
431EXPORT_SYMBOL(il_leds_init);
432
433void
434il_leds_exit(struct il_priv *il)
435{
436 if (!il->led_registered)
437 return;
438
439 led_classdev_unregister(&il->led);
440 kfree(il->led.name);
441}
442EXPORT_SYMBOL(il_leds_exit);
443
444/************************** EEPROM BANDS ****************************
445 *
446 * The il_eeprom_band definitions below provide the mapping from the
447 * EEPROM contents to the specific channel number supported for each
448 * band.
449 *
450 * For example, il_priv->eeprom.band_3_channels[4] from the band_3
451 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
452 * The specific geography and calibration information for that channel
453 * is contained in the eeprom map itself.
454 *
455 * During init, we copy the eeprom information and channel map
456 * information into il->channel_info_24/52 and il->channel_map_24/52
457 *
458 * channel_map_24/52 provides the idx in the channel_info array for a
459 * given channel. We have to have two separate maps as there is channel
460 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
461 * band_2
462 *
463 * A value of 0xff stored in the channel_map indicates that the channel
464 * is not supported by the hardware at all.
465 *
466 * A value of 0xfe in the channel_map indicates that the channel is not
467 * valid for Tx with the current hardware. This means that
468 * while the system can tune and receive on a given channel, it may not
469 * be able to associate or transmit any frames on that
470 * channel. There is no corresponding channel information for that
471 * entry.
472 *
473 *********************************************************************/
474
/*
 * Channel numbers for each EEPROM regulatory band, in EEPROM storage
 * order.  band_1 is deliberately non-static (declared extern elsewhere
 * in the driver).
 */
/* 2.4 GHz */
const u8 il_eeprom_band_1[14] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
};

/* 5.2 GHz bands */
static const u8 il_eeprom_band_2[] = {	/* 4915-5080MHz */
	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
};

static const u8 il_eeprom_band_3[] = {	/* 5170-5320MHz */
	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
};

static const u8 il_eeprom_band_4[] = {	/* 5500-5700MHz */
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
};

static const u8 il_eeprom_band_5[] = {	/* 5725-5825MHz */
	145, 149, 153, 157, 161, 165
};

static const u8 il_eeprom_band_6[] = {	/* 2.4 ht40 channel */
	1, 2, 3, 4, 5, 6, 7
};

static const u8 il_eeprom_band_7[] = {	/* 5.2 ht40 channel */
	36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
};
504
505/******************************************************************************
506 *
507 * EEPROM related functions
508 *
509******************************************************************************/
510
511static int
512il_eeprom_verify_signature(struct il_priv *il)
513{
514 u32 gp = _il_rd(il, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
515 int ret = 0;
516
517 D_EEPROM("EEPROM signature=0x%08x\n", gp);
518 switch (gp) {
519 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
520 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
521 break;
522 default:
523 IL_ERR("bad EEPROM signature," "EEPROM_GP=0x%08x\n", gp);
524 ret = -ENOENT;
525 break;
526 }
527 return ret;
528}
529
530const u8 *
531il_eeprom_query_addr(const struct il_priv *il, size_t offset)
532{
533 BUG_ON(offset >= il->cfg->base_params->eeprom_size);
534 return &il->eeprom[offset];
535}
536EXPORT_SYMBOL(il_eeprom_query_addr);
537
538u16
539il_eeprom_query16(const struct il_priv *il, size_t offset)
540{
541 if (!il->eeprom)
542 return 0;
543 return (u16) il->eeprom[offset] | ((u16) il->eeprom[offset + 1] << 8);
544}
545EXPORT_SYMBOL(il_eeprom_query16);
546
/**
 * il_eeprom_init - read EEPROM contents
 *
 * Load the EEPROM contents from adapter into il->eeprom
 *
 * NOTE: This routine uses the non-debug IO access functions.
 *
 * On success il->eeprom holds the full image (little-endian 16-bit
 * words).  On any failure the buffer is freed again and a negative
 * errno is returned.  The chip is left powered down either way.
 */
int
il_eeprom_init(struct il_priv *il)
{
	__le16 *e;
	u32 gp = _il_rd(il, CSR_EEPROM_GP);
	int sz;
	int ret;
	u16 addr;

	/* allocate eeprom */
	sz = il->cfg->base_params->eeprom_size;
	D_EEPROM("NVM size = %d\n", sz);
	il->eeprom = kzalloc(sz, GFP_KERNEL);
	if (!il->eeprom) {
		ret = -ENOMEM;
		goto alloc_err;
	}
	/* View the byte buffer as an array of 16-bit LE words. */
	e = (__le16 *) il->eeprom;

	/* Power up the device so the EEPROM registers are accessible. */
	il->cfg->ops->lib->apm_ops.init(il);

	ret = il_eeprom_verify_signature(il);
	if (ret < 0) {
		IL_ERR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
		ret = -ENOENT;
		goto err;
	}

	/* Make sure driver (instead of uCode) is allowed to read EEPROM */
	ret = il->cfg->ops->lib->eeprom_ops.acquire_semaphore(il);
	if (ret < 0) {
		IL_ERR("Failed to acquire EEPROM semaphore.\n");
		/* Report a uniform -ENOENT regardless of the op's errno. */
		ret = -ENOENT;
		goto err;
	}

	/* eeprom is an array of 16bit values */
	for (addr = 0; addr < sz; addr += sizeof(u16)) {
		u32 r;

		/* Byte address goes in bits [1..]; kick off the read. */
		_il_wr(il, CSR_EEPROM_REG,
		       CSR_EEPROM_REG_MSK_ADDR & (addr << 1));

		ret =
		    _il_poll_bit(il, CSR_EEPROM_REG,
				 CSR_EEPROM_REG_READ_VALID_MSK,
				 CSR_EEPROM_REG_READ_VALID_MSK,
				 IL_EEPROM_ACCESS_TIMEOUT);
		if (ret < 0) {
			IL_ERR("Time out reading EEPROM[%d]\n", addr);
			goto done;
		}
		/* The 16-bit datum arrives in the register's upper half. */
		r = _il_rd(il, CSR_EEPROM_REG);
		e[addr / 2] = cpu_to_le16(r >> 16);
	}

	D_EEPROM("NVM Type: %s, version: 0x%x\n", "EEPROM",
		 il_eeprom_query16(il, EEPROM_VERSION));

	ret = 0;
done:
	il->cfg->ops->lib->eeprom_ops.release_semaphore(il);

err:
	if (ret)
		il_eeprom_free(il);
	/* Reset chip to save power until we load uCode during "up". */
	il_apm_stop(il);
alloc_err:
	return ret;
}
EXPORT_SYMBOL(il_eeprom_init);
626
/* Release the cached EEPROM image; kfree(NULL) is a safe no-op, and
 * clearing the pointer lets il_eeprom_query16() detect the missing
 * image afterwards. */
void
il_eeprom_free(struct il_priv *il)
{
	kfree(il->eeprom);
	il->eeprom = NULL;
}
EXPORT_SYMBOL(il_eeprom_free);
634
635static void
636il_init_band_reference(const struct il_priv *il, int eep_band,
637 int *eeprom_ch_count,
638 const struct il_eeprom_channel **eeprom_ch_info,
639 const u8 **eeprom_ch_idx)
640{
641 u32 offset =
642 il->cfg->ops->lib->eeprom_ops.regulatory_bands[eep_band - 1];
643 switch (eep_band) {
644 case 1: /* 2.4GHz band */
645 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_1);
646 *eeprom_ch_info =
647 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
648 offset);
649 *eeprom_ch_idx = il_eeprom_band_1;
650 break;
651 case 2: /* 4.9GHz band */
652 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_2);
653 *eeprom_ch_info =
654 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
655 offset);
656 *eeprom_ch_idx = il_eeprom_band_2;
657 break;
658 case 3: /* 5.2GHz band */
659 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_3);
660 *eeprom_ch_info =
661 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
662 offset);
663 *eeprom_ch_idx = il_eeprom_band_3;
664 break;
665 case 4: /* 5.5GHz band */
666 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_4);
667 *eeprom_ch_info =
668 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
669 offset);
670 *eeprom_ch_idx = il_eeprom_band_4;
671 break;
672 case 5: /* 5.7GHz band */
673 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_5);
674 *eeprom_ch_info =
675 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
676 offset);
677 *eeprom_ch_idx = il_eeprom_band_5;
678 break;
679 case 6: /* 2.4GHz ht40 channels */
680 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_6);
681 *eeprom_ch_info =
682 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
683 offset);
684 *eeprom_ch_idx = il_eeprom_band_6;
685 break;
686 case 7: /* 5 GHz ht40 channels */
687 *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_7);
688 *eeprom_ch_info =
689 (struct il_eeprom_channel *)il_eeprom_query_addr(il,
690 offset);
691 *eeprom_ch_idx = il_eeprom_band_7;
692 break;
693 default:
694 BUG();
695 }
696}
697
698#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
699 ? # x " " : "")
/**
 * il_mod_ht40_chan_info - Copy ht40 channel info into driver's il.
 *
 * Does not set up a command, or touch hardware.
 *
 * Records the HT40 EEPROM data for @channel on @band in the driver's
 * channel map and, when the EEPROM marks the channel valid, clears the
 * @clear_ht40_extension_channel bit(s) from ht40_extension_channel.
 *
 * Returns 0 on success, -1 if the channel is not valid in the map.
 */
static int
il_mod_ht40_chan_info(struct il_priv *il, enum ieee80211_band band, u16 channel,
		      const struct il_eeprom_channel *eeprom_ch,
		      u8 clear_ht40_extension_channel)
{
	struct il_channel_info *ch_info;

	/* Cast away const: the lookup returns a const pointer, but this
	 * helper legitimately updates the driver-owned entry. */
	ch_info =
	    (struct il_channel_info *)il_get_channel_info(il, band, channel);

	if (!il_is_channel_valid(ch_info))
		return -1;

	D_EEPROM("HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
		 " Ad-Hoc %ssupported\n", ch_info->channel,
		 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
		 CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE),
		 CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE),
		 CHECK_AND_PRINT(DFS), eeprom_ch->flags,
		 eeprom_ch->max_power_avg,
		 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
		  !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not ");

	ch_info->ht40_eeprom = *eeprom_ch;
	ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
	ch_info->ht40_flags = eeprom_ch->flags;
	if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
		ch_info->ht40_extension_channel &=
		    ~clear_ht40_extension_channel;

	return 0;
}
737
738#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
739 ? # x " " : "")
740
741/**
742 * il_init_channel_map - Set up driver's info for all possible channels
743 */
744int
745il_init_channel_map(struct il_priv *il)
746{
747 int eeprom_ch_count = 0;
748 const u8 *eeprom_ch_idx = NULL;
749 const struct il_eeprom_channel *eeprom_ch_info = NULL;
750 int band, ch;
751 struct il_channel_info *ch_info;
752
753 if (il->channel_count) {
754 D_EEPROM("Channel map already initialized.\n");
755 return 0;
756 }
757
758 D_EEPROM("Initializing regulatory info from EEPROM\n");
759
760 il->channel_count =
761 ARRAY_SIZE(il_eeprom_band_1) + ARRAY_SIZE(il_eeprom_band_2) +
762 ARRAY_SIZE(il_eeprom_band_3) + ARRAY_SIZE(il_eeprom_band_4) +
763 ARRAY_SIZE(il_eeprom_band_5);
764
765 D_EEPROM("Parsing data for %d channels.\n", il->channel_count);
766
767 il->channel_info =
768 kzalloc(sizeof(struct il_channel_info) * il->channel_count,
769 GFP_KERNEL);
770 if (!il->channel_info) {
771 IL_ERR("Could not allocate channel_info\n");
772 il->channel_count = 0;
773 return -ENOMEM;
774 }
775
776 ch_info = il->channel_info;
777
778 /* Loop through the 5 EEPROM bands adding them in order to the
779 * channel map we maintain (that contains additional information than
780 * what just in the EEPROM) */
781 for (band = 1; band <= 5; band++) {
782
783 il_init_band_reference(il, band, &eeprom_ch_count,
784 &eeprom_ch_info, &eeprom_ch_idx);
785
786 /* Loop through each band adding each of the channels */
787 for (ch = 0; ch < eeprom_ch_count; ch++) {
788 ch_info->channel = eeprom_ch_idx[ch];
789 ch_info->band =
790 (band ==
791 1) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
792
793 /* permanently store EEPROM's channel regulatory flags
794 * and max power in channel info database. */
795 ch_info->eeprom = eeprom_ch_info[ch];
796
797 /* Copy the run-time flags so they are there even on
798 * invalid channels */
799 ch_info->flags = eeprom_ch_info[ch].flags;
800 /* First write that ht40 is not enabled, and then enable
801 * one by one */
802 ch_info->ht40_extension_channel =
803 IEEE80211_CHAN_NO_HT40;
804
805 if (!(il_is_channel_valid(ch_info))) {
806 D_EEPROM("Ch. %d Flags %x [%sGHz] - "
807 "No traffic\n", ch_info->channel,
808 ch_info->flags,
809 il_is_channel_a_band(ch_info) ? "5.2" :
810 "2.4");
811 ch_info++;
812 continue;
813 }
814
815 /* Initialize regulatory-based run-time data */
816 ch_info->max_power_avg = ch_info->curr_txpow =
817 eeprom_ch_info[ch].max_power_avg;
818 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
819 ch_info->min_power = 0;
820
821 D_EEPROM("Ch. %d [%sGHz] " "%s%s%s%s%s%s(0x%02x %ddBm):"
822 " Ad-Hoc %ssupported\n", ch_info->channel,
823 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
824 CHECK_AND_PRINT_I(VALID),
825 CHECK_AND_PRINT_I(IBSS),
826 CHECK_AND_PRINT_I(ACTIVE),
827 CHECK_AND_PRINT_I(RADAR),
828 CHECK_AND_PRINT_I(WIDE),
829 CHECK_AND_PRINT_I(DFS),
830 eeprom_ch_info[ch].flags,
831 eeprom_ch_info[ch].max_power_avg,
832 ((eeprom_ch_info[ch].
833 flags & EEPROM_CHANNEL_IBSS) &&
834 !(eeprom_ch_info[ch].
835 flags & EEPROM_CHANNEL_RADAR)) ? "" :
836 "not ");
837
838 ch_info++;
839 }
840 }
841
842 /* Check if we do have HT40 channels */
843 if (il->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
844 EEPROM_REGULATORY_BAND_NO_HT40 &&
845 il->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
846 EEPROM_REGULATORY_BAND_NO_HT40)
847 return 0;
848
849 /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
850 for (band = 6; band <= 7; band++) {
851 enum ieee80211_band ieeeband;
852
853 il_init_band_reference(il, band, &eeprom_ch_count,
854 &eeprom_ch_info, &eeprom_ch_idx);
855
856 /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
857 ieeeband =
858 (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
859
860 /* Loop through each band adding each of the channels */
861 for (ch = 0; ch < eeprom_ch_count; ch++) {
862 /* Set up driver's info for lower half */
863 il_mod_ht40_chan_info(il, ieeeband, eeprom_ch_idx[ch],
864 &eeprom_ch_info[ch],
865 IEEE80211_CHAN_NO_HT40PLUS);
866
867 /* Set up driver's info for upper half */
868 il_mod_ht40_chan_info(il, ieeeband,
869 eeprom_ch_idx[ch] + 4,
870 &eeprom_ch_info[ch],
871 IEEE80211_CHAN_NO_HT40MINUS);
872 }
873 }
874
875 return 0;
876}
877EXPORT_SYMBOL(il_init_channel_map);
878
879/*
880 * il_free_channel_map - undo allocations in il_init_channel_map
881 */
882void
883il_free_channel_map(struct il_priv *il)
884{
885 kfree(il->channel_info);
886 il->channel_count = 0;
887}
888EXPORT_SYMBOL(il_free_channel_map);
889
890/**
891 * il_get_channel_info - Find driver's ilate channel info
892 *
893 * Based on band and channel number.
894 */
895const struct il_channel_info *
896il_get_channel_info(const struct il_priv *il, enum ieee80211_band band,
897 u16 channel)
898{
899 int i;
900
901 switch (band) {
902 case IEEE80211_BAND_5GHZ:
903 for (i = 14; i < il->channel_count; i++) {
904 if (il->channel_info[i].channel == channel)
905 return &il->channel_info[i];
906 }
907 break;
908 case IEEE80211_BAND_2GHZ:
909 if (channel >= 1 && channel <= 14)
910 return &il->channel_info[channel - 1];
911 break;
912 default:
913 BUG();
914 }
915
916 return NULL;
917}
918EXPORT_SYMBOL(il_get_channel_info);
919
/*
 * Setting power level allows the card to go to sleep when not busy.
 *
 * We calculate a sleep command based on the required latency, which
 * we get from mac80211. In order to handle thermal throttling, we can
 * also use pre-defined power levels.
 */

/*
 * This defines the old power levels. They are still used by default
 * (level 1) and for thermal throttle (levels 3 through 5)
 */

/* One pre-defined power level: the command to send plus how many DTIM
 * beacons the device may skip while asleep at that level. */
struct il_power_vec_entry {
	struct il_powertable_cmd cmd;
	u8 no_dtim;		/* number of skip dtim */
};
937
938static void
939il_power_sleep_cam_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
940{
941 memset(cmd, 0, sizeof(*cmd));
942
943 if (il->power_data.pci_pm)
944 cmd->flags |= IL_POWER_PCI_PM_MSK;
945
946 D_POWER("Sleep command for CAM\n");
947}
948
/* Dump the power table command for debugging and submit it to the
 * firmware as C_POWER_TBL.  Returns the command-submission status. */
static int
il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd)
{
	D_POWER("Sending power/sleep command\n");
	D_POWER("Flags value = 0x%08X\n", cmd->flags);
	D_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
	D_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
	D_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
		le32_to_cpu(cmd->sleep_interval[0]),
		le32_to_cpu(cmd->sleep_interval[1]),
		le32_to_cpu(cmd->sleep_interval[2]),
		le32_to_cpu(cmd->sleep_interval[3]),
		le32_to_cpu(cmd->sleep_interval[4]));

	return il_send_cmd_pdu(il, C_POWER_TBL,
			       sizeof(struct il_powertable_cmd), cmd);
}
966
/*
 * il_power_set_mode - apply a power table command to the device
 * @cmd:   power command to apply
 * @force: send even if identical to the last committed command
 *
 * Caller must hold il->mutex.  While a scan is running the command is
 * only cached in sleep_cmd_next (unless @force); the scan-completion
 * path commits it later.  Returns 0 on success (including the deferred
 * and no-change cases) or a negative errno.
 */
int
il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force)
{
	int ret;
	bool update_chains;

	lockdep_assert_held(&il->mutex);

	/* Don't update the RX chain when chain noise calibration is running */
	update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE ||
	    il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE;

	/* No-op when the requested command matches the committed one. */
	if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
		return 0;

	if (!il_is_ready_rf(il))
		return -EIO;

	/* scan complete use sleep_power_next, need to be updated */
	memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
	if (test_bit(S_SCANNING, &il->status) && !force) {
		D_INFO("Defer power set mode while scanning\n");
		return 0;
	}

	/* Set the PMI bit before sending, so it is already in place if the
	 * device goes to sleep as soon as the command is processed. */
	if (cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK)
		set_bit(S_POWER_PMI, &il->status);

	ret = il_set_power(il, cmd);
	if (!ret) {
		if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK))
			clear_bit(S_POWER_PMI, &il->status);

		if (il->cfg->ops->lib->update_chain_flags && update_chains)
			il->cfg->ops->lib->update_chain_flags(il);
		else if (il->cfg->ops->lib->update_chain_flags)
			D_POWER("Cannot update the power, chain noise "
				"calibration running: %d\n",
				il->chain_noise_data.state);

		/* Remember what is now committed in the device. */
		memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd));
	} else
		IL_ERR("set power fail, ret = %d", ret);

	return ret;
}
1013
1014int
1015il_power_update_mode(struct il_priv *il, bool force)
1016{
1017 struct il_powertable_cmd cmd;
1018
1019 il_power_sleep_cam_cmd(il, &cmd);
1020 return il_power_set_mode(il, &cmd, force);
1021}
1022EXPORT_SYMBOL(il_power_update_mode);
1023
1024/* initialize to default */
1025void
1026il_power_initialize(struct il_priv *il)
1027{
1028 u16 lctl = il_pcie_link_ctl(il);
1029
1030 il->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
1031
1032 il->power_data.debug_sleep_level_override = -1;
1033
1034 memset(&il->power_data.sleep_cmd, 0, sizeof(il->power_data.sleep_cmd));
1035}
1036EXPORT_SYMBOL(il_power_initialize);
1037
1038/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
1039 * sending probe req. This should be set long enough to hear probe responses
1040 * from more than one AP. */
1041#define IL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
1042#define IL_ACTIVE_DWELL_TIME_52 (20)
1043
1044#define IL_ACTIVE_DWELL_FACTOR_24GHZ (3)
1045#define IL_ACTIVE_DWELL_FACTOR_52GHZ (2)
1046
1047/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
1048 * Must be set longer than active dwell time.
1049 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
1050#define IL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
1051#define IL_PASSIVE_DWELL_TIME_52 (10)
1052#define IL_PASSIVE_DWELL_BASE (100)
1053#define IL_CHANNEL_TUNE_TIME 5
1054
/* Send C_SCAN_ABORT synchronously and interpret the firmware's reply.
 * Returns 0 when the firmware accepted the abort, -EIO when the device
 * is in no state to abort or the firmware rejected the request, or the
 * error from the command submission itself. */
static int
il_send_scan_abort(struct il_priv *il)
{
	int ret;
	struct il_rx_pkt *pkt;
	struct il_host_cmd cmd = {
		.id = C_SCAN_ABORT,
		.flags = CMD_WANT_SKB,
	};

	/* Exit instantly with error when device is not ready
	 * to receive scan abort command or it does not perform
	 * hardware scan currently */
	if (!test_bit(S_READY, &il->status) ||
	    !test_bit(S_GEO_CONFIGURED, &il->status) ||
	    !test_bit(S_SCAN_HW, &il->status) ||
	    test_bit(S_FW_ERROR, &il->status) ||
	    test_bit(S_EXIT_PENDING, &il->status))
		return -EIO;

	ret = il_send_cmd_sync(il, &cmd);
	if (ret)
		return ret;

	pkt = (struct il_rx_pkt *)cmd.reply_page;
	if (pkt->u.status != CAN_ABORT_STATUS) {
		/* The scan abort will return 1 for success or
		 * 2 for "failure".  A failure condition can be
		 * due to simply not being in an active scan which
		 * can occur if we send the scan abort before the
		 * microcode has notified us that a scan is
		 * completed. */
		D_SCAN("SCAN_ABORT ret %d.\n", pkt->u.status);
		ret = -EIO;
	}

	/* CMD_WANT_SKB handed us the reply page; release it. */
	il_free_pages(il, cmd.reply_page);
	return ret;
}
1094
1095static void
1096il_complete_scan(struct il_priv *il, bool aborted)
1097{
1098 /* check if scan was requested from mac80211 */
1099 if (il->scan_request) {
1100 D_SCAN("Complete scan in mac80211\n");
1101 ieee80211_scan_completed(il->hw, aborted);
1102 }
1103
1104 il->scan_vif = NULL;
1105 il->scan_request = NULL;
1106}
1107
/* Unconditionally tear down all scan state and report an aborted scan
 * to mac80211.  Used when the firmware cannot be trusted to finish the
 * scan itself.  Caller must hold il->mutex. */
void
il_force_scan_end(struct il_priv *il)
{
	lockdep_assert_held(&il->mutex);

	if (!test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Forcing scan end while not scanning\n");
		return;
	}

	D_SCAN("Forcing scan end\n");
	clear_bit(S_SCANNING, &il->status);
	clear_bit(S_SCAN_HW, &il->status);
	clear_bit(S_SCAN_ABORTING, &il->status);
	/* aborted = true: mac80211 learns the scan did not complete. */
	il_complete_scan(il, true);
}
1124
/* Ask the firmware to abort a running scan; on failure fall back to
 * force-ending the scan locally.  S_SCAN_ABORTING makes this
 * single-shot while an abort is already pending.  Caller must hold
 * il->mutex. */
static void
il_do_scan_abort(struct il_priv *il)
{
	int ret;

	lockdep_assert_held(&il->mutex);

	if (!test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Not performing scan to abort\n");
		return;
	}

	if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) {
		D_SCAN("Scan abort in progress\n");
		return;
	}

	ret = il_send_scan_abort(il);
	if (ret) {
		D_SCAN("Send scan abort failed %d\n", ret);
		il_force_scan_end(il);
	} else
		D_SCAN("Successfully send scan abort\n");
}
1149
1150/**
1151 * il_scan_cancel - Cancel any currently executing HW scan
1152 */
1153int
1154il_scan_cancel(struct il_priv *il)
1155{
1156 D_SCAN("Queuing abort scan\n");
1157 queue_work(il->workqueue, &il->abort_scan);
1158 return 0;
1159}
1160EXPORT_SYMBOL(il_scan_cancel);
1161
1162/**
1163 * il_scan_cancel_timeout - Cancel any currently executing HW scan
1164 * @ms: amount of time to wait (in milliseconds) for scan to abort
1165 *
1166 */
1167int
1168il_scan_cancel_timeout(struct il_priv *il, unsigned long ms)
1169{
1170 unsigned long timeout = jiffies + msecs_to_jiffies(ms);
1171
1172 lockdep_assert_held(&il->mutex);
1173
1174 D_SCAN("Scan cancel timeout\n");
1175
1176 il_do_scan_abort(il);
1177
1178 while (time_before_eq(jiffies, timeout)) {
1179 if (!test_bit(S_SCAN_HW, &il->status))
1180 break;
1181 msleep(20);
1182 }
1183
1184 return test_bit(S_SCAN_HW, &il->status);
1185}
1186EXPORT_SYMBOL(il_scan_cancel_timeout);
1187
/* Service response to C_SCAN (0x80) */
static void
il_hdl_scan(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanreq_notification *notif =
	    (struct il_scanreq_notification *)pkt->u.raw;

	/* Debug-only handler: the response carries just a status word. */
	D_SCAN("Scan request status = 0x%x\n", notif->status);
#endif
}
1200
/* Service N_SCAN_START (0x82) */
static void
il_hdl_scan_start(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanstart_notification *notif =
	    (struct il_scanstart_notification *)pkt->u.raw;
	/* Remember the start TSF so il_hdl_scan_results() can compute the
	 * elapsed time per channel. */
	il->scan_start_tsf = le32_to_cpu(notif->tsf_low);
	D_SCAN("Scan start: " "%d [802.11%s] "
	       "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", notif->channel,
	       notif->band ? "bg" : "a", le32_to_cpu(notif->tsf_high),
	       le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer);
}
1214
/* Service N_SCAN_RESULTS (0x83) */
static void
il_hdl_scan_results(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scanresults_notification *notif =
	    (struct il_scanresults_notification *)pkt->u.raw;

	/* Debug-only: elapsed time is measured against the TSF saved by
	 * il_hdl_scan_start(). */
	D_SCAN("Scan ch.res: " "%d [802.11%s] " "(TSF: 0x%08X:%08X) - %d "
	       "elapsed=%lu usec\n", notif->channel, notif->band ? "bg" : "a",
	       le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low),
	       le32_to_cpu(notif->stats[0]),
	       le32_to_cpu(notif->tsf_low) - il->scan_start_tsf);
#endif
}
1231
/* Service N_SCAN_COMPLETE (0x84) */
static void
il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb)
{

#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
#endif

	/* NOTE(review): scan_notif is declared only under
	 * CONFIG_IWLEGACY_DEBUG but referenced below; this relies on
	 * D_SCAN() discarding its arguments at preprocessing time when
	 * debugging is disabled — confirm against the D_SCAN definition. */
	D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
	       scan_notif->scanned_channels, scan_notif->tsf_low,
	       scan_notif->tsf_high, scan_notif->status);

	/* The HW is no longer scanning */
	clear_bit(S_SCAN_HW, &il->status);

	D_SCAN("Scan on %sGHz took %dms\n",
	       (il->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
	       jiffies_to_msecs(jiffies - il->scan_start));

	/* Finish up (mac80211 notification, deferred power settings) in
	 * workqueue context where il->mutex may be taken. */
	queue_work(il->workqueue, &il->scan_completed);
}
1255
1256void
1257il_setup_rx_scan_handlers(struct il_priv *il)
1258{
1259 /* scan handlers */
1260 il->handlers[C_SCAN] = il_hdl_scan;
1261 il->handlers[N_SCAN_START] = il_hdl_scan_start;
1262 il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results;
1263 il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete;
1264}
1265EXPORT_SYMBOL(il_setup_rx_scan_handlers);
1266
1267inline u16
1268il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
1269 u8 n_probes)
1270{
1271 if (band == IEEE80211_BAND_5GHZ)
1272 return IL_ACTIVE_DWELL_TIME_52 +
1273 IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
1274 else
1275 return IL_ACTIVE_DWELL_TIME_24 +
1276 IL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
1277}
1278EXPORT_SYMBOL(il_get_active_dwell_time);
1279
/* Passive-scan dwell time (msec) for @band: a fixed per-band value,
 * clamped when associated so a scan never spends longer than ~98% of a
 * beacon interval (minus 2x channel tune time) off-channel. */
u16
il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
			  struct ieee80211_vif *vif)
{
	struct il_rxon_context *ctx = &il->ctx;
	u16 value;

	u16 passive =
	    (band ==
	     IEEE80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE +
	    IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE +
	    IL_PASSIVE_DWELL_TIME_52;

	if (il_is_any_associated(il)) {
		/*
		 * If we're associated, we clamp the maximum passive
		 * dwell time to be 98% of the smallest beacon interval
		 * (minus 2 * channel tune time)
		 */
		/* Unknown (0) or oversized beacon interval falls back to
		 * the base dwell before the 98% reduction. */
		value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
		if (value > IL_PASSIVE_DWELL_BASE || !value)
			value = IL_PASSIVE_DWELL_BASE;
		value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2;
		passive = min(value, passive);
	}

	return passive;
}
EXPORT_SYMBOL(il_get_passive_dwell_time);
1309
1310void
1311il_init_scan_params(struct il_priv *il)
1312{
1313 u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1;
1314 if (!il->scan_tx_ant[IEEE80211_BAND_5GHZ])
1315 il->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
1316 if (!il->scan_tx_ant[IEEE80211_BAND_2GHZ])
1317 il->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
1318}
1319EXPORT_SYMBOL(il_init_scan_params);
1320
/* Start a hardware scan via the device-specific request_scan op.
 * Refuses when the NIC is not ready, a HW scan is already running, or
 * an abort is pending.  Arms the scan_check watchdog in case the
 * firmware never reports completion.  Caller must hold il->mutex. */
static int
il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&il->mutex);

	if (WARN_ON(!il->cfg->ops->utils->request_scan))
		return -EOPNOTSUPP;

	cancel_delayed_work(&il->scan_check);

	if (!il_is_ready_rf(il)) {
		IL_WARN("Request scan called when driver not ready.\n");
		return -EIO;
	}

	if (test_bit(S_SCAN_HW, &il->status)) {
		D_SCAN("Multiple concurrent scan requests in parallel.\n");
		return -EBUSY;
	}

	if (test_bit(S_SCAN_ABORTING, &il->status)) {
		D_SCAN("Scan request while abort pending.\n");
		return -EBUSY;
	}

	D_SCAN("Starting scan...\n");

	set_bit(S_SCANNING, &il->status);
	il->scan_start = jiffies;

	ret = il->cfg->ops->utils->request_scan(il, vif);
	if (ret) {
		clear_bit(S_SCANNING, &il->status);
		return ret;
	}

	/* Watchdog: fires if the firmware never sends N_SCAN_COMPLETE. */
	queue_delayed_work(il->workqueue, &il->scan_check,
			   IL_SCAN_CHECK_WATCHDOG);

	return 0;
}
1364
/*
 * il_mac_hw_scan - mac80211 hw_scan callback
 *
 * Records the request and kicks off the hardware scan.  scan_band is
 * taken from the first channel because mac80211 requests one band at a
 * time (see comment below).
 *
 * NOTE(review): the "leave" debug print is skipped on the -EAGAIN path
 * (the goto jumps past it) — debug trace only, no functional impact.
 */
int
il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
	       struct cfg80211_scan_request *req)
{
	struct il_priv *il = hw->priv;
	int ret;

	D_MAC80211("enter\n");

	if (req->n_channels == 0)
		return -EINVAL;

	mutex_lock(&il->mutex);

	if (test_bit(S_SCANNING, &il->status)) {
		D_SCAN("Scan already in progress.\n");
		ret = -EAGAIN;
		goto out_unlock;
	}

	/* mac80211 will only ask for one band at a time */
	il->scan_request = req;
	il->scan_vif = vif;
	il->scan_band = req->channels[0]->band;

	ret = il_scan_initiate(il, vif);

	D_MAC80211("leave\n");

out_unlock:
	mutex_unlock(&il->mutex);

	return ret;
}
EXPORT_SYMBOL(il_mac_hw_scan);
1400
1401static void
1402il_bg_scan_check(struct work_struct *data)
1403{
1404 struct il_priv *il =
1405 container_of(data, struct il_priv, scan_check.work);
1406
1407 D_SCAN("Scan check work\n");
1408
1409 /* Since we are here firmware does not finish scan and
1410 * most likely is in bad shape, so we don't bother to
1411 * send abort command, just force scan complete to mac80211 */
1412 mutex_lock(&il->mutex);
1413 il_force_scan_end(il);
1414 mutex_unlock(&il->mutex);
1415}
1416
/**
 * il_fill_probe_req - fill in all required fields and IE for probe request
 *
 * Builds an 802.11 probe request into @frame: a 24-byte management
 * header addressed to broadcast, a zero-length (wildcard) SSID IE, then
 * @ie_len bytes of caller-supplied IEs.  @left is the space available
 * in @frame.
 *
 * Returns the total frame length written, or 0 if @left cannot hold the
 * header plus the SSID IE.
 */

u16
il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
		  const u8 *ta, const u8 *ies, int ie_len, int left)
{
	int len = 0;
	u8 *pos = NULL;

	/* Make sure there is enough space for the probe request,
	 * two mandatory IEs and the data */
	left -= 24;
	if (left < 0)
		return 0;

	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
	memcpy(frame->da, il_bcast_addr, ETH_ALEN);
	memcpy(frame->sa, ta, ETH_ALEN);
	memcpy(frame->bssid, il_bcast_addr, ETH_ALEN);
	frame->seq_ctrl = 0;

	len += 24;

	/* ...next IE... */
	pos = &frame->u.probe_req.variable[0];

	/* fill in our indirect SSID IE */
	left -= 2;
	if (left < 0)
		return 0;
	*pos++ = WLAN_EID_SSID;
	*pos++ = 0;		/* zero length: wildcard SSID */

	len += 2;

	/* Truncating the caller's IEs would be a bug; return what we have. */
	if (WARN_ON(left < ie_len))
		return len;

	if (ies && ie_len) {
		memcpy(pos, ies, ie_len);
		len += ie_len;
	}

	return (u16) len;
}
EXPORT_SYMBOL(il_fill_probe_req);
1464EXPORT_SYMBOL(il_fill_probe_req);
1465
1466static void
1467il_bg_abort_scan(struct work_struct *work)
1468{
1469 struct il_priv *il = container_of(work, struct il_priv, abort_scan);
1470
1471 D_SCAN("Abort scan work\n");
1472
1473 /* We keep scan_check work queued in case when firmware will not
1474 * report back scan completed notification */
1475 mutex_lock(&il->mutex);
1476 il_scan_cancel_timeout(il, 200);
1477 mutex_unlock(&il->mutex);
1478}
1479
/* Work handler run when the scan finished (or was force-ended): notify
 * mac80211 and commit power/TX-power settings deferred while scanning. */
static void
il_bg_scan_completed(struct work_struct *work)
{
	struct il_priv *il = container_of(work, struct il_priv, scan_completed);
	bool aborted;

	D_SCAN("Completed scan.\n");

	/* The watchdog is no longer needed once completion has arrived. */
	cancel_delayed_work(&il->scan_check);

	mutex_lock(&il->mutex);

	aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status);
	if (aborted)
		D_SCAN("Aborted scan completed.\n");

	if (!test_and_clear_bit(S_SCANNING, &il->status)) {
		D_SCAN("Scan already completed.\n");
		goto out_settings;
	}

	il_complete_scan(il, aborted);

out_settings:
	/* Can we still talk to firmware ? */
	if (!il_is_ready_rf(il))
		goto out;

	/*
	 * We do not commit power settings while scan is pending,
	 * do it now if the settings changed.
	 */
	il_power_set_mode(il, &il->power_data.sleep_cmd_next, false);
	il_set_tx_power(il, il->tx_power_next, false);

	il->cfg->ops->utils->post_scan(il);

out:
	mutex_unlock(&il->mutex);
}
1520
1521void
1522il_setup_scan_deferred_work(struct il_priv *il)
1523{
1524 INIT_WORK(&il->scan_completed, il_bg_scan_completed);
1525 INIT_WORK(&il->abort_scan, il_bg_abort_scan);
1526 INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check);
1527}
1528EXPORT_SYMBOL(il_setup_scan_deferred_work);
1529
/* Synchronously cancel all scan work items.  If the watchdog was still
 * pending, the firmware never reported completion, so force the scan to
 * end ourselves (needs il->mutex). */
void
il_cancel_scan_deferred_work(struct il_priv *il)
{
	cancel_work_sync(&il->abort_scan);
	cancel_work_sync(&il->scan_completed);

	if (cancel_delayed_work_sync(&il->scan_check)) {
		mutex_lock(&il->mutex);
		il_force_scan_end(il);
		mutex_unlock(&il->mutex);
	}
}
EXPORT_SYMBOL(il_cancel_scan_deferred_work);
1543
/* il->sta_lock must be held */
/* Mark a driver-added station as also active in uCode; complain when
 * the driver-side bookkeeping disagrees. */
static void
il_sta_ucode_activate(struct il_priv *il, u8 sta_id)
{

	if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE))
		IL_ERR("ACTIVATE a non DRIVER active station id %u addr %pM\n",
		       sta_id, il->stations[sta_id].sta.sta.addr);

	if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) {
		D_ASSOC("STA id %u addr %pM already present"
			" in uCode (according to driver)\n", sta_id,
			il->stations[sta_id].sta.sta.addr);
	} else {
		il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE;
		D_ASSOC("Added STA id %u addr %pM to uCode\n", sta_id,
			il->stations[sta_id].sta.sta.addr);
	}
}
1563
/* Handle the firmware's reply to C_ADD_STA: on success mark the station
 * active in uCode.  Returns 0 on success, -EIO otherwise.
 *
 * NOTE(review): @sync is not referenced in this body — presumably kept
 * for parity with the sync/async call sites; confirm before removing. */
static int
il_process_add_sta_resp(struct il_priv *il, struct il_addsta_cmd *addsta,
			struct il_rx_pkt *pkt, bool sync)
{
	u8 sta_id = addsta->sta.sta_id;
	unsigned long flags;
	int ret = -EIO;

	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from C_ADD_STA (0x%08X)\n", pkt->hdr.flags);
		return ret;
	}

	D_INFO("Processing response for adding station %u\n", sta_id);

	spin_lock_irqsave(&il->sta_lock, flags);

	switch (pkt->u.add_sta.status) {
	case ADD_STA_SUCCESS_MSK:
		D_INFO("C_ADD_STA PASSED\n");
		il_sta_ucode_activate(il, sta_id);
		ret = 0;
		break;
	case ADD_STA_NO_ROOM_IN_TBL:
		IL_ERR("Adding station %d failed, no room in table.\n", sta_id);
		break;
	case ADD_STA_NO_BLOCK_ACK_RESOURCE:
		IL_ERR("Adding station %d failed, no block ack resource.\n",
		       sta_id);
		break;
	case ADD_STA_MODIFY_NON_EXIST_STA:
		IL_ERR("Attempting to modify non-existing station %d\n",
		       sta_id);
		break;
	default:
		D_ASSOC("Received C_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status);
		break;
	}

	D_INFO("%s station id %u addr %pM\n",
	       il->stations[sta_id].sta.mode ==
	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id,
	       il->stations[sta_id].sta.sta.addr);

	/*
	 * XXX: The MAC address in the command buffer is often changed from
	 * the original sent to the device. That is, the MAC address
	 * written to the command buffer often is not the same MAC address
	 * read from the command buffer when the command returns. This
	 * issue has not yet been resolved and this debugging is left to
	 * observe the problem.
	 */
	D_INFO("%s station according to cmd buffer %pM\n",
	       il->stations[sta_id].sta.mode ==
	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr);
	spin_unlock_irqrestore(&il->sta_lock, flags);

	return ret;
}
1623
1624static void
1625il_add_sta_callback(struct il_priv *il, struct il_device_cmd *cmd,
1626 struct il_rx_pkt *pkt)
1627{
1628 struct il_addsta_cmd *addsta = (struct il_addsta_cmd *)cmd->cmd.payload;
1629
1630 il_process_add_sta_resp(il, addsta, pkt, false);
1631
1632}
1633
1634int
1635il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags)
1636{
1637 struct il_rx_pkt *pkt = NULL;
1638 int ret = 0;
1639 u8 data[sizeof(*sta)];
1640 struct il_host_cmd cmd = {
1641 .id = C_ADD_STA,
1642 .flags = flags,
1643 .data = data,
1644 };
1645 u8 sta_id __maybe_unused = sta->sta.sta_id;
1646
1647 D_INFO("Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr,
1648 flags & CMD_ASYNC ? "a" : "");
1649
1650 if (flags & CMD_ASYNC)
1651 cmd.callback = il_add_sta_callback;
1652 else {
1653 cmd.flags |= CMD_WANT_SKB;
1654 might_sleep();
1655 }
1656
1657 cmd.len = il->cfg->ops->utils->build_addsta_hcmd(sta, data);
1658 ret = il_send_cmd(il, &cmd);
1659
1660 if (ret || (flags & CMD_ASYNC))
1661 return ret;
1662
1663 if (ret == 0) {
1664 pkt = (struct il_rx_pkt *)cmd.reply_page;
1665 ret = il_process_add_sta_resp(il, sta, pkt, true);
1666 }
1667 il_free_pages(il, cmd.reply_page);
1668
1669 return ret;
1670}
1671EXPORT_SYMBOL(il_send_add_sta);
1672
1673static void
1674il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta,
1675 struct il_rxon_context *ctx)
1676{
1677 struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
1678 __le32 sta_flags;
1679 u8 mimo_ps_mode;
1680
1681 if (!sta || !sta_ht_inf->ht_supported)
1682 goto done;
1683
1684 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
1685 D_ASSOC("spatial multiplexing power save mode: %s\n",
1686 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? "static" :
1687 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? "dynamic" :
1688 "disabled");
1689
1690 sta_flags = il->stations[idx].sta.station_flags;
1691
1692 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
1693
1694 switch (mimo_ps_mode) {
1695 case WLAN_HT_CAP_SM_PS_STATIC:
1696 sta_flags |= STA_FLG_MIMO_DIS_MSK;
1697 break;
1698 case WLAN_HT_CAP_SM_PS_DYNAMIC:
1699 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
1700 break;
1701 case WLAN_HT_CAP_SM_PS_DISABLED:
1702 break;
1703 default:
1704 IL_WARN("Invalid MIMO PS mode %d\n", mimo_ps_mode);
1705 break;
1706 }
1707
1708 sta_flags |=
1709 cpu_to_le32((u32) sta_ht_inf->
1710 ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
1711
1712 sta_flags |=
1713 cpu_to_le32((u32) sta_ht_inf->
1714 ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
1715
1716 if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
1717 sta_flags |= STA_FLG_HT40_EN_MSK;
1718 else
1719 sta_flags &= ~STA_FLG_HT40_EN_MSK;
1720
1721 il->stations[idx].sta.station_flags = sta_flags;
1722done:
1723 return;
1724}
1725
1726/**
1727 * il_prep_station - Prepare station information for addition
1728 *
1729 * should be called with sta_lock held
1730 */
1731u8
1732il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
1733 const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
1734{
1735 struct il_station_entry *station;
1736 int i;
1737 u8 sta_id = IL_INVALID_STATION;
1738 u16 rate;
1739
1740 if (is_ap)
1741 sta_id = ctx->ap_sta_id;
1742 else if (is_broadcast_ether_addr(addr))
1743 sta_id = ctx->bcast_sta_id;
1744 else
1745 for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
1746 if (!compare_ether_addr
1747 (il->stations[i].sta.sta.addr, addr)) {
1748 sta_id = i;
1749 break;
1750 }
1751
1752 if (!il->stations[i].used &&
1753 sta_id == IL_INVALID_STATION)
1754 sta_id = i;
1755 }
1756
1757 /*
1758 * These two conditions have the same outcome, but keep them
1759 * separate
1760 */
1761 if (unlikely(sta_id == IL_INVALID_STATION))
1762 return sta_id;
1763
1764 /*
1765 * uCode is not able to deal with multiple requests to add a
1766 * station. Keep track if one is in progress so that we do not send
1767 * another.
1768 */
1769 if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
1770 D_INFO("STA %d already in process of being added.\n", sta_id);
1771 return sta_id;
1772 }
1773
1774 if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
1775 (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) &&
1776 !compare_ether_addr(il->stations[sta_id].sta.sta.addr, addr)) {
1777 D_ASSOC("STA %d (%pM) already added, not adding again.\n",
1778 sta_id, addr);
1779 return sta_id;
1780 }
1781
1782 station = &il->stations[sta_id];
1783 station->used = IL_STA_DRIVER_ACTIVE;
1784 D_ASSOC("Add STA to driver ID %d: %pM\n", sta_id, addr);
1785 il->num_stations++;
1786
1787 /* Set up the C_ADD_STA command to send to device */
1788 memset(&station->sta, 0, sizeof(struct il_addsta_cmd));
1789 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
1790 station->sta.mode = 0;
1791 station->sta.sta.sta_id = sta_id;
1792 station->sta.station_flags = ctx->station_flags;
1793 station->ctxid = ctx->ctxid;
1794
1795 if (sta) {
1796 struct il_station_priv_common *sta_priv;
1797
1798 sta_priv = (void *)sta->drv_priv;
1799 sta_priv->ctx = ctx;
1800 }
1801
1802 /*
1803 * OK to call unconditionally, since local stations (IBSS BSSID
1804 * STA and broadcast STA) pass in a NULL sta, and mac80211
1805 * doesn't allow HT IBSS.
1806 */
1807 il_set_ht_add_station(il, sta_id, sta, ctx);
1808
1809 /* 3945 only */
1810 rate = (il->band == IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
1811 /* Turn on both antennas for the station... */
1812 station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
1813
1814 return sta_id;
1815
1816}
1817EXPORT_SYMBOL_GPL(il_prep_station);
1818
#define STA_WAIT_TIMEOUT (HZ/2)

/**
 * il_add_station_common - add a station to driver and device tables
 *
 * Prepares the entry under il->sta_lock, then sends a synchronous
 * C_ADD_STA with the lock dropped (the command is copied to a stack
 * buffer first).  On command failure the driver-side state is rolled
 * back.
 *
 * Returns 0 on success, -EINVAL when no slot could be prepared,
 * -EEXIST when an add is already in progress or the station is fully
 * active.  The chosen station id is stored in *sta_id_r.
 */
int
il_add_station_common(struct il_priv *il, struct il_rxon_context *ctx,
		      const u8 *addr, bool is_ap, struct ieee80211_sta *sta,
		      u8 *sta_id_r)
{
	unsigned long flags_spin;
	int ret = 0;
	u8 sta_id;
	struct il_addsta_cmd sta_cmd;

	*sta_id_r = 0;
	spin_lock_irqsave(&il->sta_lock, flags_spin);
	sta_id = il_prep_station(il, ctx, addr, is_ap, sta);
	if (sta_id == IL_INVALID_STATION) {
		IL_ERR("Unable to prepare station %pM for addition\n", addr);
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
		return -EINVAL;
	}

	/*
	 * uCode is not able to deal with multiple requests to add a
	 * station. Keep track if one is in progress so that we do not send
	 * another.
	 */
	if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
		D_INFO("STA %d already in process of being added.\n", sta_id);
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
		return -EEXIST;
	}

	if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
	    (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
		D_ASSOC("STA %d (%pM) already added, not adding again.\n",
			sta_id, addr);
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
		return -EEXIST;
	}

	il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS;
	/* Snapshot the command so it can be sent without holding the lock. */
	memcpy(&sta_cmd, &il->stations[sta_id].sta,
	       sizeof(struct il_addsta_cmd));
	spin_unlock_irqrestore(&il->sta_lock, flags_spin);

	/* Add station to device's station table */
	ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
	if (ret) {
		/* Roll back driver-side bookkeeping on failure. */
		spin_lock_irqsave(&il->sta_lock, flags_spin);
		IL_ERR("Adding station %pM failed.\n",
		       il->stations[sta_id].sta.sta.addr);
		il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
		il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
	}
	*sta_id_r = sta_id;
	return ret;
}
EXPORT_SYMBOL(il_add_station_common);
1881
1882/**
1883 * il_sta_ucode_deactivate - deactivate ucode status for a station
1884 *
1885 * il->sta_lock must be held
1886 */
1887static void
1888il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id)
1889{
1890 /* Ucode must be active and driver must be non active */
1891 if ((il->stations[sta_id].
1892 used & (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) !=
1893 IL_STA_UCODE_ACTIVE)
1894 IL_ERR("removed non active STA %u\n", sta_id);
1895
1896 il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE;
1897
1898 memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry));
1899 D_ASSOC("Removed STA %u\n", sta_id);
1900}
1901
/*
 * il_send_remove_station - send a synchronous C_REM_STA for @addr
 *
 * On REM_STA_SUCCESS_MSK and !@temporary the station is also
 * deactivated in the driver's table (under il->sta_lock).
 * Returns 0 on success, -EIO on command or status failure, or the
 * il_send_cmd() error.
 */
static int
il_send_remove_station(struct il_priv *il, const u8 * addr, int sta_id,
		       bool temporary)
{
	struct il_rx_pkt *pkt;
	int ret;

	unsigned long flags_spin;
	struct il_rem_sta_cmd rm_sta_cmd;

	struct il_host_cmd cmd = {
		.id = C_REM_STA,
		.len = sizeof(struct il_rem_sta_cmd),
		.flags = CMD_SYNC,
		.data = &rm_sta_cmd,
	};

	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
	rm_sta_cmd.num_sta = 1;
	memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);

	/* We need the reply packet to inspect the removal status. */
	cmd.flags |= CMD_WANT_SKB;

	ret = il_send_cmd(il, &cmd);

	if (ret)
		return ret;

	pkt = (struct il_rx_pkt *)cmd.reply_page;
	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
		IL_ERR("Bad return from C_REM_STA (0x%08X)\n", pkt->hdr.flags);
		ret = -EIO;
	}

	if (!ret) {
		switch (pkt->u.rem_sta.status) {
		case REM_STA_SUCCESS_MSK:
			/* A temporary removal keeps the driver-side state. */
			if (!temporary) {
				spin_lock_irqsave(&il->sta_lock, flags_spin);
				il_sta_ucode_deactivate(il, sta_id);
				spin_unlock_irqrestore(&il->sta_lock,
						       flags_spin);
			}
			D_ASSOC("C_REM_STA PASSED\n");
			break;
		default:
			ret = -EIO;
			IL_ERR("C_REM_STA failed\n");
			break;
		}
	}
	il_free_pages(il, cmd.reply_page);

	return ret;
}
1957
1958/**
1959 * il_remove_station - Remove driver's knowledge of station.
1960 */
1961int
1962il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr)
1963{
1964 unsigned long flags;
1965
1966 if (!il_is_ready(il)) {
1967 D_INFO("Unable to remove station %pM, device not ready.\n",
1968 addr);
1969 /*
1970 * It is typical for stations to be removed when we are
1971 * going down. Return success since device will be down
1972 * soon anyway
1973 */
1974 return 0;
1975 }
1976
1977 D_ASSOC("Removing STA from driver:%d %pM\n", sta_id, addr);
1978
1979 if (WARN_ON(sta_id == IL_INVALID_STATION))
1980 return -EINVAL;
1981
1982 spin_lock_irqsave(&il->sta_lock, flags);
1983
1984 if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) {
1985 D_INFO("Removing %pM but non DRIVER active\n", addr);
1986 goto out_err;
1987 }
1988
1989 if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
1990 D_INFO("Removing %pM but non UCODE active\n", addr);
1991 goto out_err;
1992 }
1993
1994 if (il->stations[sta_id].used & IL_STA_LOCAL) {
1995 kfree(il->stations[sta_id].lq);
1996 il->stations[sta_id].lq = NULL;
1997 }
1998
1999 il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
2000
2001 il->num_stations--;
2002
2003 BUG_ON(il->num_stations < 0);
2004
2005 spin_unlock_irqrestore(&il->sta_lock, flags);
2006
2007 return il_send_remove_station(il, addr, sta_id, false);
2008out_err:
2009 spin_unlock_irqrestore(&il->sta_lock, flags);
2010 return -EINVAL;
2011}
2012EXPORT_SYMBOL_GPL(il_remove_station);
2013
2014/**
2015 * il_clear_ucode_stations - clear ucode station table bits
2016 *
2017 * This function clears all the bits in the driver indicating
2018 * which stations are active in the ucode. Call when something
2019 * other than explicit station management would cause this in
2020 * the ucode, e.g. unassociated RXON.
2021 */
2022void
2023il_clear_ucode_stations(struct il_priv *il, struct il_rxon_context *ctx)
2024{
2025 int i;
2026 unsigned long flags_spin;
2027 bool cleared = false;
2028
2029 D_INFO("Clearing ucode stations in driver\n");
2030
2031 spin_lock_irqsave(&il->sta_lock, flags_spin);
2032 for (i = 0; i < il->hw_params.max_stations; i++) {
2033 if (ctx && ctx->ctxid != il->stations[i].ctxid)
2034 continue;
2035
2036 if (il->stations[i].used & IL_STA_UCODE_ACTIVE) {
2037 D_INFO("Clearing ucode active for station %d\n", i);
2038 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
2039 cleared = true;
2040 }
2041 }
2042 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2043
2044 if (!cleared)
2045 D_INFO("No active stations found to be cleared\n");
2046}
2047EXPORT_SYMBOL(il_clear_ucode_stations);
2048
2049/**
2050 * il_restore_stations() - Restore driver known stations to device
2051 *
2052 * All stations considered active by driver, but not present in ucode, is
2053 * restored.
2054 *
2055 * Function sleeps.
2056 */
2057void
2058il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx)
2059{
2060 struct il_addsta_cmd sta_cmd;
2061 struct il_link_quality_cmd lq;
2062 unsigned long flags_spin;
2063 int i;
2064 bool found = false;
2065 int ret;
2066 bool send_lq;
2067
2068 if (!il_is_ready(il)) {
2069 D_INFO("Not ready yet, not restoring any stations.\n");
2070 return;
2071 }
2072
2073 D_ASSOC("Restoring all known stations ... start.\n");
2074 spin_lock_irqsave(&il->sta_lock, flags_spin);
2075 for (i = 0; i < il->hw_params.max_stations; i++) {
2076 if (ctx->ctxid != il->stations[i].ctxid)
2077 continue;
2078 if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) &&
2079 !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) {
2080 D_ASSOC("Restoring sta %pM\n",
2081 il->stations[i].sta.sta.addr);
2082 il->stations[i].sta.mode = 0;
2083 il->stations[i].used |= IL_STA_UCODE_INPROGRESS;
2084 found = true;
2085 }
2086 }
2087
2088 for (i = 0; i < il->hw_params.max_stations; i++) {
2089 if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) {
2090 memcpy(&sta_cmd, &il->stations[i].sta,
2091 sizeof(struct il_addsta_cmd));
2092 send_lq = false;
2093 if (il->stations[i].lq) {
2094 memcpy(&lq, il->stations[i].lq,
2095 sizeof(struct il_link_quality_cmd));
2096 send_lq = true;
2097 }
2098 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2099 ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
2100 if (ret) {
2101 spin_lock_irqsave(&il->sta_lock, flags_spin);
2102 IL_ERR("Adding station %pM failed.\n",
2103 il->stations[i].sta.sta.addr);
2104 il->stations[i].used &= ~IL_STA_DRIVER_ACTIVE;
2105 il->stations[i].used &=
2106 ~IL_STA_UCODE_INPROGRESS;
2107 spin_unlock_irqrestore(&il->sta_lock,
2108 flags_spin);
2109 }
2110 /*
2111 * Rate scaling has already been initialized, send
2112 * current LQ command
2113 */
2114 if (send_lq)
2115 il_send_lq_cmd(il, ctx, &lq, CMD_SYNC, true);
2116 spin_lock_irqsave(&il->sta_lock, flags_spin);
2117 il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS;
2118 }
2119 }
2120
2121 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2122 if (!found)
2123 D_INFO("Restoring all known stations"
2124 " .... no stations to be restored.\n");
2125 else
2126 D_INFO("Restoring all known stations" " .... complete.\n");
2127}
2128EXPORT_SYMBOL(il_restore_stations);
2129
2130int
2131il_get_free_ucode_key_idx(struct il_priv *il)
2132{
2133 int i;
2134
2135 for (i = 0; i < il->sta_key_max_num; i++)
2136 if (!test_and_set_bit(i, &il->ucode_key_table))
2137 return i;
2138
2139 return WEP_INVALID_OFFSET;
2140}
2141EXPORT_SYMBOL(il_get_free_ucode_key_idx);
2142
2143void
2144il_dealloc_bcast_stations(struct il_priv *il)
2145{
2146 unsigned long flags;
2147 int i;
2148
2149 spin_lock_irqsave(&il->sta_lock, flags);
2150 for (i = 0; i < il->hw_params.max_stations; i++) {
2151 if (!(il->stations[i].used & IL_STA_BCAST))
2152 continue;
2153
2154 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
2155 il->num_stations--;
2156 BUG_ON(il->num_stations < 0);
2157 kfree(il->stations[i].lq);
2158 il->stations[i].lq = NULL;
2159 }
2160 spin_unlock_irqrestore(&il->sta_lock, flags);
2161}
2162EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations);
2163
#ifdef CONFIG_IWLEGACY_DEBUG
/* Dump a link-quality command (station id, antenna masks, per-retry
 * rate table) through the D_RATE debug channel. */
static void
il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
{
	int i;
	D_RATE("lq station id 0x%x\n", lq->sta_id);
	D_RATE("lq ant 0x%X 0x%X\n", lq->general_params.single_stream_ant_msk,
	       lq->general_params.dual_stream_ant_msk);

	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
		D_RATE("lq idx %d 0x%X\n", i, lq->rs_table[i].rate_n_flags);
}
#else
/* No-op stub when debug support is compiled out. */
static inline void
il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
{
}
#endif
2182
2183/**
2184 * il_is_lq_table_valid() - Test one aspect of LQ cmd for validity
2185 *
2186 * It sometimes happens when a HT rate has been in use and we
2187 * loose connectivity with AP then mac80211 will first tell us that the
2188 * current channel is not HT anymore before removing the station. In such a
2189 * scenario the RXON flags will be updated to indicate we are not
2190 * communicating HT anymore, but the LQ command may still contain HT rates.
2191 * Test for this to prevent driver from sending LQ command between the time
2192 * RXON flags are updated and when LQ command is updated.
2193 */
2194static bool
2195il_is_lq_table_valid(struct il_priv *il, struct il_rxon_context *ctx,
2196 struct il_link_quality_cmd *lq)
2197{
2198 int i;
2199
2200 if (ctx->ht.enabled)
2201 return true;
2202
2203 D_INFO("Channel %u is not an HT channel\n", ctx->active.channel);
2204 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
2205 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) {
2206 D_INFO("idx %d of LQ expects HT channel\n", i);
2207 return false;
2208 }
2209 }
2210 return true;
2211}
2212
2213/**
2214 * il_send_lq_cmd() - Send link quality command
2215 * @init: This command is sent as part of station initialization right
2216 * after station has been added.
2217 *
2218 * The link quality command is sent as the last step of station creation.
2219 * This is the special case in which init is set and we call a callback in
2220 * this case to clear the state indicating that station creation is in
2221 * progress.
2222 */
2223int
2224il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx,
2225 struct il_link_quality_cmd *lq, u8 flags, bool init)
2226{
2227 int ret = 0;
2228 unsigned long flags_spin;
2229
2230 struct il_host_cmd cmd = {
2231 .id = C_TX_LINK_QUALITY_CMD,
2232 .len = sizeof(struct il_link_quality_cmd),
2233 .flags = flags,
2234 .data = lq,
2235 };
2236
2237 if (WARN_ON(lq->sta_id == IL_INVALID_STATION))
2238 return -EINVAL;
2239
2240 spin_lock_irqsave(&il->sta_lock, flags_spin);
2241 if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) {
2242 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2243 return -EINVAL;
2244 }
2245 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2246
2247 il_dump_lq_cmd(il, lq);
2248 BUG_ON(init && (cmd.flags & CMD_ASYNC));
2249
2250 if (il_is_lq_table_valid(il, ctx, lq))
2251 ret = il_send_cmd(il, &cmd);
2252 else
2253 ret = -EINVAL;
2254
2255 if (cmd.flags & CMD_ASYNC)
2256 return ret;
2257
2258 if (init) {
2259 D_INFO("init LQ command complete,"
2260 " clearing sta addition status for sta %d\n",
2261 lq->sta_id);
2262 spin_lock_irqsave(&il->sta_lock, flags_spin);
2263 il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
2264 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
2265 }
2266 return ret;
2267}
2268EXPORT_SYMBOL(il_send_lq_cmd);
2269
2270int
2271il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2272 struct ieee80211_sta *sta)
2273{
2274 struct il_priv *il = hw->priv;
2275 struct il_station_priv_common *sta_common = (void *)sta->drv_priv;
2276 int ret;
2277
2278 D_INFO("received request to remove station %pM\n", sta->addr);
2279 mutex_lock(&il->mutex);
2280 D_INFO("proceeding to remove station %pM\n", sta->addr);
2281 ret = il_remove_station(il, sta_common->sta_id, sta->addr);
2282 if (ret)
2283 IL_ERR("Error removing station %pM\n", sta->addr);
2284 mutex_unlock(&il->mutex);
2285 return ret;
2286}
2287EXPORT_SYMBOL(il_mac_sta_remove);
2288
2289/************************** RX-FUNCTIONS ****************************/
2290/*
2291 * Rx theory of operation
2292 *
2293 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
2294 * each of which point to Receive Buffers to be filled by the NIC. These get
2295 * used not only for Rx frames, but for any command response or notification
2296 * from the NIC. The driver and NIC manage the Rx buffers by means
2297 * of idxes into the circular buffer.
2298 *
2299 * Rx Queue Indexes
2300 * The host/firmware share two idx registers for managing the Rx buffers.
2301 *
2302 * The READ idx maps to the first position that the firmware may be writing
2303 * to -- the driver can read up to (but not including) this position and get
2304 * good data.
2305 * The READ idx is managed by the firmware once the card is enabled.
2306 *
2307 * The WRITE idx maps to the last position the driver has read from -- the
2308 * position preceding WRITE is the last slot the firmware can place a packet.
2309 *
2310 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
2311 * WRITE = READ.
2312 *
2313 * During initialization, the host sets up the READ queue position to the first
2314 * IDX position, and WRITE to the last (READ - 1 wrapped)
2315 *
2316 * When the firmware places a packet in a buffer, it will advance the READ idx
2317 * and fire the RX interrupt. The driver can then query the READ idx and
2318 * process as many packets as possible, moving the WRITE idx forward as it
2319 * resets the Rx queue buffers with new memory.
2320 *
2321 * The management in the driver is as follows:
2322 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
2323 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
2324 * to replenish the iwl->rxq->rx_free.
2325 * + In il_rx_replenish (scheduled) if 'processed' != 'read' then the
2326 * iwl->rxq is replenished and the READ IDX is updated (updating the
2327 * 'processed' and 'read' driver idxes as well)
2328 * + A received packet is processed and handed to the kernel network stack,
2329 * detached from the iwl->rxq. The driver 'processed' idx is updated.
2330 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
2331 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
2332 * IDX is not incremented and iwl->status(RX_STALLED) is set. If there
2333 * were enough free buffers and RX_STALLED is set it is cleared.
2334 *
2335 *
2336 * Driver sequence:
2337 *
2338 * il_rx_queue_alloc() Allocates rx_free
2339 * il_rx_replenish() Replenishes rx_free list from rx_used, and calls
2340 * il_rx_queue_restock
2341 * il_rx_queue_restock() Moves available buffers from rx_free into Rx
2342 * queue, updates firmware pointers, and updates
2343 * the WRITE idx. If insufficient rx_free buffers
2344 * are available, schedules il_rx_replenish
2345 *
2346 * -- enable interrupts --
2347 * ISR - il_rx() Detach il_rx_bufs from pool up to the
2348 * READ IDX, detaching the SKB from the pool.
2349 * Moves the packet buffer from queue to rx_used.
2350 * Calls il_rx_queue_restock to refill any empty
2351 * slots.
2352 * ...
2353 *
2354 */
2355
2356/**
2357 * il_rx_queue_space - Return number of free slots available in queue.
2358 */
2359int
2360il_rx_queue_space(const struct il_rx_queue *q)
2361{
2362 int s = q->read - q->write;
2363 if (s <= 0)
2364 s += RX_QUEUE_SIZE;
2365 /* keep some buffer to not confuse full and empty queue */
2366 s -= 2;
2367 if (s < 0)
2368 s = 0;
2369 return s;
2370}
2371EXPORT_SYMBOL(il_rx_queue_space);
2372
2373/**
2374 * il_rx_queue_update_write_ptr - Update the write pointer for the RX queue
2375 */
2376void
2377il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q)
2378{
2379 unsigned long flags;
2380 u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg;
2381 u32 reg;
2382
2383 spin_lock_irqsave(&q->lock, flags);
2384
2385 if (q->need_update == 0)
2386 goto exit_unlock;
2387
2388 /* If power-saving is in use, make sure device is awake */
2389 if (test_bit(S_POWER_PMI, &il->status)) {
2390 reg = _il_rd(il, CSR_UCODE_DRV_GP1);
2391
2392 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
2393 D_INFO("Rx queue requesting wakeup," " GP1 = 0x%x\n",
2394 reg);
2395 il_set_bit(il, CSR_GP_CNTRL,
2396 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2397 goto exit_unlock;
2398 }
2399
2400 q->write_actual = (q->write & ~0x7);
2401 il_wr(il, rx_wrt_ptr_reg, q->write_actual);
2402
2403 /* Else device is assumed to be awake */
2404 } else {
2405 /* Device expects a multiple of 8 */
2406 q->write_actual = (q->write & ~0x7);
2407 il_wr(il, rx_wrt_ptr_reg, q->write_actual);
2408 }
2409
2410 q->need_update = 0;
2411
2412exit_unlock:
2413 spin_unlock_irqrestore(&q->lock, flags);
2414}
2415EXPORT_SYMBOL(il_rx_queue_update_write_ptr);
2416
2417int
2418il_rx_queue_alloc(struct il_priv *il)
2419{
2420 struct il_rx_queue *rxq = &il->rxq;
2421 struct device *dev = &il->pci_dev->dev;
2422 int i;
2423
2424 spin_lock_init(&rxq->lock);
2425 INIT_LIST_HEAD(&rxq->rx_free);
2426 INIT_LIST_HEAD(&rxq->rx_used);
2427
2428 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
2429 rxq->bd =
2430 dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
2431 GFP_KERNEL);
2432 if (!rxq->bd)
2433 goto err_bd;
2434
2435 rxq->rb_stts =
2436 dma_alloc_coherent(dev, sizeof(struct il_rb_status),
2437 &rxq->rb_stts_dma, GFP_KERNEL);
2438 if (!rxq->rb_stts)
2439 goto err_rb;
2440
2441 /* Fill the rx_used queue with _all_ of the Rx buffers */
2442 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
2443 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
2444
2445 /* Set us so that we have processed and used all buffers, but have
2446 * not restocked the Rx queue with fresh buffers */
2447 rxq->read = rxq->write = 0;
2448 rxq->write_actual = 0;
2449 rxq->free_count = 0;
2450 rxq->need_update = 0;
2451 return 0;
2452
2453err_rb:
2454 dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
2455 rxq->bd_dma);
2456err_bd:
2457 return -ENOMEM;
2458}
2459EXPORT_SYMBOL(il_rx_queue_alloc);
2460
2461void
2462il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb)
2463{
2464 struct il_rx_pkt *pkt = rxb_addr(rxb);
2465 struct il_spectrum_notification *report = &(pkt->u.spectrum_notif);
2466
2467 if (!report->state) {
2468 D_11H("Spectrum Measure Notification: Start\n");
2469 return;
2470 }
2471
2472 memcpy(&il->measure_report, report, sizeof(*report));
2473 il->measurement_status |= MEASUREMENT_READY;
2474}
2475EXPORT_SYMBOL(il_hdl_spectrum_measurement);
2476
2477/*
2478 * returns non-zero if packet should be dropped
2479 */
2480int
2481il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
2482 u32 decrypt_res, struct ieee80211_rx_status *stats)
2483{
2484 u16 fc = le16_to_cpu(hdr->frame_control);
2485
2486 /*
2487 * All contexts have the same setting here due to it being
2488 * a module parameter, so OK to check any context.
2489 */
2490 if (il->ctx.active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
2491 return 0;
2492
2493 if (!(fc & IEEE80211_FCTL_PROTECTED))
2494 return 0;
2495
2496 D_RX("decrypt_res:0x%x\n", decrypt_res);
2497 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
2498 case RX_RES_STATUS_SEC_TYPE_TKIP:
2499 /* The uCode has got a bad phase 1 Key, pushes the packet.
2500 * Decryption will be done in SW. */
2501 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2502 RX_RES_STATUS_BAD_KEY_TTAK)
2503 break;
2504
2505 case RX_RES_STATUS_SEC_TYPE_WEP:
2506 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2507 RX_RES_STATUS_BAD_ICV_MIC) {
2508 /* bad ICV, the packet is destroyed since the
2509 * decryption is inplace, drop it */
2510 D_RX("Packet destroyed\n");
2511 return -1;
2512 }
2513 case RX_RES_STATUS_SEC_TYPE_CCMP:
2514 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2515 RX_RES_STATUS_DECRYPT_OK) {
2516 D_RX("hw decrypt successfully!!!\n");
2517 stats->flag |= RX_FLAG_DECRYPTED;
2518 }
2519 break;
2520
2521 default:
2522 break;
2523 }
2524 return 0;
2525}
2526EXPORT_SYMBOL(il_set_decrypted_flag);
2527
2528/**
2529 * il_txq_update_write_ptr - Send new write idx to hardware
2530 */
2531void
2532il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq)
2533{
2534 u32 reg = 0;
2535 int txq_id = txq->q.id;
2536
2537 if (txq->need_update == 0)
2538 return;
2539
2540 /* if we're trying to save power */
2541 if (test_bit(S_POWER_PMI, &il->status)) {
2542 /* wake up nic if it's powered down ...
2543 * uCode will wake up, and interrupt us again, so next
2544 * time we'll skip this part. */
2545 reg = _il_rd(il, CSR_UCODE_DRV_GP1);
2546
2547 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
2548 D_INFO("Tx queue %d requesting wakeup," " GP1 = 0x%x\n",
2549 txq_id, reg);
2550 il_set_bit(il, CSR_GP_CNTRL,
2551 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2552 return;
2553 }
2554
2555 il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
2556
2557 /*
2558 * else not in power-save mode,
2559 * uCode will never sleep when we're
2560 * trying to tx (during RFKILL, we're not trying to tx).
2561 */
2562 } else
2563 _il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
2564 txq->need_update = 0;
2565}
2566EXPORT_SYMBOL(il_txq_update_write_ptr);
2567
2568/**
2569 * il_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
2570 */
2571void
2572il_tx_queue_unmap(struct il_priv *il, int txq_id)
2573{
2574 struct il_tx_queue *txq = &il->txq[txq_id];
2575 struct il_queue *q = &txq->q;
2576
2577 if (q->n_bd == 0)
2578 return;
2579
2580 while (q->write_ptr != q->read_ptr) {
2581 il->cfg->ops->lib->txq_free_tfd(il, txq);
2582 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
2583 }
2584}
2585EXPORT_SYMBOL(il_tx_queue_unmap);
2586
2587/**
2588 * il_tx_queue_free - Deallocate DMA queue.
2589 * @txq: Transmit queue to deallocate.
2590 *
2591 * Empty queue by removing and destroying all BD's.
2592 * Free all buffers.
2593 * 0-fill, but do not free "txq" descriptor structure.
2594 */
2595void
2596il_tx_queue_free(struct il_priv *il, int txq_id)
2597{
2598 struct il_tx_queue *txq = &il->txq[txq_id];
2599 struct device *dev = &il->pci_dev->dev;
2600 int i;
2601
2602 il_tx_queue_unmap(il, txq_id);
2603
2604 /* De-alloc array of command/tx buffers */
2605 for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
2606 kfree(txq->cmd[i]);
2607
2608 /* De-alloc circular buffer of TFDs */
2609 if (txq->q.n_bd)
2610 dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
2611 txq->tfds, txq->q.dma_addr);
2612
2613 /* De-alloc array of per-TFD driver data */
2614 kfree(txq->txb);
2615 txq->txb = NULL;
2616
2617 /* deallocate arrays */
2618 kfree(txq->cmd);
2619 kfree(txq->meta);
2620 txq->cmd = NULL;
2621 txq->meta = NULL;
2622
2623 /* 0-fill queue descriptor structure */
2624 memset(txq, 0, sizeof(*txq));
2625}
2626EXPORT_SYMBOL(il_tx_queue_free);
2627
2628/**
2629 * il_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
2630 */
2631void
2632il_cmd_queue_unmap(struct il_priv *il)
2633{
2634 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
2635 struct il_queue *q = &txq->q;
2636 int i;
2637
2638 if (q->n_bd == 0)
2639 return;
2640
2641 while (q->read_ptr != q->write_ptr) {
2642 i = il_get_cmd_idx(q, q->read_ptr, 0);
2643
2644 if (txq->meta[i].flags & CMD_MAPPED) {
2645 pci_unmap_single(il->pci_dev,
2646 dma_unmap_addr(&txq->meta[i], mapping),
2647 dma_unmap_len(&txq->meta[i], len),
2648 PCI_DMA_BIDIRECTIONAL);
2649 txq->meta[i].flags = 0;
2650 }
2651
2652 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
2653 }
2654
2655 i = q->n_win;
2656 if (txq->meta[i].flags & CMD_MAPPED) {
2657 pci_unmap_single(il->pci_dev,
2658 dma_unmap_addr(&txq->meta[i], mapping),
2659 dma_unmap_len(&txq->meta[i], len),
2660 PCI_DMA_BIDIRECTIONAL);
2661 txq->meta[i].flags = 0;
2662 }
2663}
2664EXPORT_SYMBOL(il_cmd_queue_unmap);
2665
2666/**
2667 * il_cmd_queue_free - Deallocate DMA queue.
2668 * @txq: Transmit queue to deallocate.
2669 *
2670 * Empty queue by removing and destroying all BD's.
2671 * Free all buffers.
2672 * 0-fill, but do not free "txq" descriptor structure.
2673 */
2674void
2675il_cmd_queue_free(struct il_priv *il)
2676{
2677 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
2678 struct device *dev = &il->pci_dev->dev;
2679 int i;
2680
2681 il_cmd_queue_unmap(il);
2682
2683 /* De-alloc array of command/tx buffers */
2684 for (i = 0; i <= TFD_CMD_SLOTS; i++)
2685 kfree(txq->cmd[i]);
2686
2687 /* De-alloc circular buffer of TFDs */
2688 if (txq->q.n_bd)
2689 dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
2690 txq->tfds, txq->q.dma_addr);
2691
2692 /* deallocate arrays */
2693 kfree(txq->cmd);
2694 kfree(txq->meta);
2695 txq->cmd = NULL;
2696 txq->meta = NULL;
2697
2698 /* 0-fill queue descriptor structure */
2699 memset(txq, 0, sizeof(*txq));
2700}
2701EXPORT_SYMBOL(il_cmd_queue_free);
2702
2703/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
2704 * DMA services
2705 *
2706 * Theory of operation
2707 *
2708 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
2709 * of buffer descriptors, each of which points to one or more data buffers for
2710 * the device to read from or fill. Driver and device exchange status of each
2711 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
2712 * entries in each circular buffer, to protect against confusing empty and full
2713 * queue states.
2714 *
2715 * The device reads or writes the data in the queues via the device's several
2716 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
2717 *
 * For Tx queues, there are low mark and high mark limits. If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
2722 *
2723 * See more detailed info in 4965.h.
2724 ***************************************************/
2725
2726int
2727il_queue_space(const struct il_queue *q)
2728{
2729 int s = q->read_ptr - q->write_ptr;
2730
2731 if (q->read_ptr > q->write_ptr)
2732 s -= q->n_bd;
2733
2734 if (s <= 0)
2735 s += q->n_win;
2736 /* keep some reserve to not confuse empty and full situations */
2737 s -= 2;
2738 if (s < 0)
2739 s = 0;
2740 return s;
2741}
2742EXPORT_SYMBOL(il_queue_space);
2743
2744
2745/**
2746 * il_queue_init - Initialize queue's high/low-water and read/write idxes
2747 */
2748static int
2749il_queue_init(struct il_priv *il, struct il_queue *q, int count, int slots_num,
2750 u32 id)
2751{
2752 q->n_bd = count;
2753 q->n_win = slots_num;
2754 q->id = id;
2755
2756 /* count must be power-of-two size, otherwise il_queue_inc_wrap
2757 * and il_queue_dec_wrap are broken. */
2758 BUG_ON(!is_power_of_2(count));
2759
2760 /* slots_num must be power-of-two size, otherwise
2761 * il_get_cmd_idx is broken. */
2762 BUG_ON(!is_power_of_2(slots_num));
2763
2764 q->low_mark = q->n_win / 4;
2765 if (q->low_mark < 4)
2766 q->low_mark = 4;
2767
2768 q->high_mark = q->n_win / 8;
2769 if (q->high_mark < 2)
2770 q->high_mark = 2;
2771
2772 q->write_ptr = q->read_ptr = 0;
2773
2774 return 0;
2775}
2776
2777/**
2778 * il_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
2779 */
2780static int
2781il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id)
2782{
2783 struct device *dev = &il->pci_dev->dev;
2784 size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
2785
2786 /* Driver ilate data, only for Tx (not command) queues,
2787 * not shared with device. */
2788 if (id != il->cmd_queue) {
2789 txq->txb =
2790 kzalloc(sizeof(txq->txb[0]) * TFD_QUEUE_SIZE_MAX,
2791 GFP_KERNEL);
2792 if (!txq->txb) {
2793 IL_ERR("kmalloc for auxiliary BD "
2794 "structures failed\n");
2795 goto error;
2796 }
2797 } else {
2798 txq->txb = NULL;
2799 }
2800
2801 /* Circular buffer of transmit frame descriptors (TFDs),
2802 * shared with device */
2803 txq->tfds =
2804 dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL);
2805 if (!txq->tfds) {
2806 IL_ERR("pci_alloc_consistent(%zd) failed\n", tfd_sz);
2807 goto error;
2808 }
2809 txq->q.id = id;
2810
2811 return 0;
2812
2813error:
2814 kfree(txq->txb);
2815 txq->txb = NULL;
2816
2817 return -ENOMEM;
2818}
2819
2820/**
2821 * il_tx_queue_init - Allocate and initialize one tx/cmd queue
2822 */
2823int
2824il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
2825 u32 txq_id)
2826{
2827 int i, len;
2828 int ret;
2829 int actual_slots = slots_num;
2830
2831 /*
2832 * Alloc buffer array for commands (Tx or other types of commands).
2833 * For the command queue (#4/#9), allocate command space + one big
2834 * command for scan, since scan command is very huge; the system will
2835 * not have two scans at the same time, so only one is needed.
2836 * For normal Tx queues (all other queues), no super-size command
2837 * space is needed.
2838 */
2839 if (txq_id == il->cmd_queue)
2840 actual_slots++;
2841
2842 txq->meta =
2843 kzalloc(sizeof(struct il_cmd_meta) * actual_slots, GFP_KERNEL);
2844 txq->cmd =
2845 kzalloc(sizeof(struct il_device_cmd *) * actual_slots, GFP_KERNEL);
2846
2847 if (!txq->meta || !txq->cmd)
2848 goto out_free_arrays;
2849
2850 len = sizeof(struct il_device_cmd);
2851 for (i = 0; i < actual_slots; i++) {
2852 /* only happens for cmd queue */
2853 if (i == slots_num)
2854 len = IL_MAX_CMD_SIZE;
2855
2856 txq->cmd[i] = kmalloc(len, GFP_KERNEL);
2857 if (!txq->cmd[i])
2858 goto err;
2859 }
2860
2861 /* Alloc driver data array and TFD circular buffer */
2862 ret = il_tx_queue_alloc(il, txq, txq_id);
2863 if (ret)
2864 goto err;
2865
2866 txq->need_update = 0;
2867
2868 /*
2869 * For the default queues 0-3, set up the swq_id
2870 * already -- all others need to get one later
2871 * (if they need one at all).
2872 */
2873 if (txq_id < 4)
2874 il_set_swq_id(txq, txq_id, txq_id);
2875
2876 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
2877 * il_queue_inc_wrap and il_queue_dec_wrap are broken. */
2878 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
2879
2880 /* Initialize queue's high/low-water marks, and head/tail idxes */
2881 il_queue_init(il, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
2882
2883 /* Tell device where to find queue */
2884 il->cfg->ops->lib->txq_init(il, txq);
2885
2886 return 0;
2887err:
2888 for (i = 0; i < actual_slots; i++)
2889 kfree(txq->cmd[i]);
2890out_free_arrays:
2891 kfree(txq->meta);
2892 kfree(txq->cmd);
2893
2894 return -ENOMEM;
2895}
2896EXPORT_SYMBOL(il_tx_queue_init);
2897
2898void
2899il_tx_queue_reset(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
2900 u32 txq_id)
2901{
2902 int actual_slots = slots_num;
2903
2904 if (txq_id == il->cmd_queue)
2905 actual_slots++;
2906
2907 memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots);
2908
2909 txq->need_update = 0;
2910
2911 /* Initialize queue's high/low-water marks, and head/tail idxes */
2912 il_queue_init(il, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
2913
2914 /* Tell device where to find queue */
2915 il->cfg->ops->lib->txq_init(il, txq);
2916}
2917EXPORT_SYMBOL(il_tx_queue_reset);
2918
2919/*************** HOST COMMAND QUEUE FUNCTIONS *****/
2920
2921/**
2922 * il_enqueue_hcmd - enqueue a uCode command
2923 * @il: device ilate data point
2924 * @cmd: a point to the ucode command structure
2925 *
2926 * The function returns < 0 values to indicate the operation is
2927 * failed. On success, it turns the idx (> 0) of command in the
2928 * command queue.
2929 */
2930int
2931il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
2932{
2933 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
2934 struct il_queue *q = &txq->q;
2935 struct il_device_cmd *out_cmd;
2936 struct il_cmd_meta *out_meta;
2937 dma_addr_t phys_addr;
2938 unsigned long flags;
2939 int len;
2940 u32 idx;
2941 u16 fix_size;
2942
2943 cmd->len = il->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
2944 fix_size = (u16) (cmd->len + sizeof(out_cmd->hdr));
2945
2946 /* If any of the command structures end up being larger than
2947 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
2948 * we will need to increase the size of the TFD entries
2949 * Also, check to see if command buffer should not exceed the size
2950 * of device_cmd and max_cmd_size. */
2951 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
2952 !(cmd->flags & CMD_SIZE_HUGE));
2953 BUG_ON(fix_size > IL_MAX_CMD_SIZE);
2954
2955 if (il_is_rfkill(il) || il_is_ctkill(il)) {
2956 IL_WARN("Not sending command - %s KILL\n",
2957 il_is_rfkill(il) ? "RF" : "CT");
2958 return -EIO;
2959 }
2960
2961 spin_lock_irqsave(&il->hcmd_lock, flags);
2962
2963 if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
2964 spin_unlock_irqrestore(&il->hcmd_lock, flags);
2965
2966 IL_ERR("Restarting adapter due to command queue full\n");
2967 queue_work(il->workqueue, &il->restart);
2968 return -ENOSPC;
2969 }
2970
2971 idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
2972 out_cmd = txq->cmd[idx];
2973 out_meta = &txq->meta[idx];
2974
2975 if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
2976 spin_unlock_irqrestore(&il->hcmd_lock, flags);
2977 return -ENOSPC;
2978 }
2979
2980 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
2981 out_meta->flags = cmd->flags | CMD_MAPPED;
2982 if (cmd->flags & CMD_WANT_SKB)
2983 out_meta->source = cmd;
2984 if (cmd->flags & CMD_ASYNC)
2985 out_meta->callback = cmd->callback;
2986
2987 out_cmd->hdr.cmd = cmd->id;
2988 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
2989
2990 /* At this point, the out_cmd now has all of the incoming cmd
2991 * information */
2992
2993 out_cmd->hdr.flags = 0;
2994 out_cmd->hdr.sequence =
2995 cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr));
2996 if (cmd->flags & CMD_SIZE_HUGE)
2997 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
2998 len = sizeof(struct il_device_cmd);
2999 if (idx == TFD_CMD_SLOTS)
3000 len = IL_MAX_CMD_SIZE;
3001
3002#ifdef CONFIG_IWLEGACY_DEBUG
3003 switch (out_cmd->hdr.cmd) {
3004 case C_TX_LINK_QUALITY_CMD:
3005 case C_SENSITIVITY:
3006 D_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, "
3007 "%d bytes at %d[%d]:%d\n",
3008 il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
3009 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
3010 q->write_ptr, idx, il->cmd_queue);
3011 break;
3012 default:
3013 D_HC("Sending command %s (#%x), seq: 0x%04X, "
3014 "%d bytes at %d[%d]:%d\n",
3015 il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
3016 le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr,
3017 idx, il->cmd_queue);
3018 }
3019#endif
3020 txq->need_update = 1;
3021
3022 if (il->cfg->ops->lib->txq_update_byte_cnt_tbl)
3023 /* Set up entry in queue's byte count circular buffer */
3024 il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq, 0);
3025
3026 phys_addr =
3027 pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size,
3028 PCI_DMA_BIDIRECTIONAL);
3029 dma_unmap_addr_set(out_meta, mapping, phys_addr);
3030 dma_unmap_len_set(out_meta, len, fix_size);
3031
3032 il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size,
3033 1, U32_PAD(cmd->len));
3034
3035 /* Increment and update queue's write idx */
3036 q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
3037 il_txq_update_write_ptr(il, txq);
3038
3039 spin_unlock_irqrestore(&il->hcmd_lock, flags);
3040 return idx;
3041}
3042
3043/**
3044 * il_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
3045 *
3046 * When FW advances 'R' idx, all entries between old and new 'R' idx
3047 * need to be reclaimed. As result, some free space forms. If there is
3048 * enough free space (> low mark), wake the stack that feeds us.
3049 */
3050static void
3051il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx)
3052{
3053 struct il_tx_queue *txq = &il->txq[txq_id];
3054 struct il_queue *q = &txq->q;
3055 int nfreed = 0;
3056
3057 if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
3058 IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
3059 "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
3060 q->write_ptr, q->read_ptr);
3061 return;
3062 }
3063
3064 for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
3065 q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
3066
3067 if (nfreed++ > 0) {
3068 IL_ERR("HCMD skipped: idx (%d) %d %d\n", idx,
3069 q->write_ptr, q->read_ptr);
3070 queue_work(il->workqueue, &il->restart);
3071 }
3072
3073 }
3074}
3075
3076/**
3077 * il_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
3078 * @rxb: Rx buffer to reclaim
3079 *
3080 * If an Rx buffer has an async callback associated with it the callback
3081 * will be executed. The attached skb (if present) will only be freed
3082 * if the callback returns 1
3083 */
3084void
3085il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb)
3086{
3087 struct il_rx_pkt *pkt = rxb_addr(rxb);
3088 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3089 int txq_id = SEQ_TO_QUEUE(sequence);
3090 int idx = SEQ_TO_IDX(sequence);
3091 int cmd_idx;
3092 bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
3093 struct il_device_cmd *cmd;
3094 struct il_cmd_meta *meta;
3095 struct il_tx_queue *txq = &il->txq[il->cmd_queue];
3096 unsigned long flags;
3097
3098 /* If a Tx command is being handled and it isn't in the actual
3099 * command queue then there a command routing bug has been introduced
3100 * in the queue management code. */
3101 if (WARN
3102 (txq_id != il->cmd_queue,
3103 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
3104 txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr,
3105 il->txq[il->cmd_queue].q.write_ptr)) {
3106 il_print_hex_error(il, pkt, 32);
3107 return;
3108 }
3109
3110 cmd_idx = il_get_cmd_idx(&txq->q, idx, huge);
3111 cmd = txq->cmd[cmd_idx];
3112 meta = &txq->meta[cmd_idx];
3113
3114 txq->time_stamp = jiffies;
3115
3116 pci_unmap_single(il->pci_dev, dma_unmap_addr(meta, mapping),
3117 dma_unmap_len(meta, len), PCI_DMA_BIDIRECTIONAL);
3118
3119 /* Input error checking is done when commands are added to queue. */
3120 if (meta->flags & CMD_WANT_SKB) {
3121 meta->source->reply_page = (unsigned long)rxb_addr(rxb);
3122 rxb->page = NULL;
3123 } else if (meta->callback)
3124 meta->callback(il, cmd, pkt);
3125
3126 spin_lock_irqsave(&il->hcmd_lock, flags);
3127
3128 il_hcmd_queue_reclaim(il, txq_id, idx, cmd_idx);
3129
3130 if (!(meta->flags & CMD_ASYNC)) {
3131 clear_bit(S_HCMD_ACTIVE, &il->status);
3132 D_INFO("Clearing HCMD_ACTIVE for command %s\n",
3133 il_get_cmd_string(cmd->hdr.cmd));
3134 wake_up(&il->wait_command_queue);
3135 }
3136
3137 /* Mark as unmapped */
3138 meta->flags = 0;
3139
3140 spin_unlock_irqrestore(&il->hcmd_lock, flags);
3141}
3142EXPORT_SYMBOL(il_tx_cmd_complete);
3143
/* Module metadata exported to modinfo. */
MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
MODULE_VERSION(IWLWIFI_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
3148
3149/*
3150 * set bt_coex_active to true, uCode will do kill/defer
3151 * every time the priority line is asserted (BT is sending signals on the
3152 * priority line in the PCIx).
3153 * set bt_coex_active to false, uCode will ignore the BT activity and
3154 * perform the normal operation
3155 *
3156 * User might experience transmit issue on some platform due to WiFi/BT
3157 * co-exist problem. The possible behaviors are:
3158 * Able to scan and finding all the available AP
3159 * Not able to associate with any AP
3160 * On those platforms, WiFi communication can be restored by set
3161 * "bt_coex_active" module parameter to "false"
3162 *
3163 * default: bt_coex_active = true (BT_COEX_ENABLE)
3164 */
/* BT coexistence handling is enabled by default (see block comment above). */
static bool bt_coex_active = true;
module_param(bt_coex_active, bool, S_IRUGO);
MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");

/* Runtime debug-message bitmask shared by the 3945/4965 drivers. */
u32 il_debug_level;
EXPORT_SYMBOL(il_debug_level);

/* The all-ones broadcast MAC address. */
const u8 il_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
EXPORT_SYMBOL(il_bcast_addr);
3174
3175/* This function both allocates and initializes hw and il. */
3176struct ieee80211_hw *
3177il_alloc_all(struct il_cfg *cfg)
3178{
3179 struct il_priv *il;
3180 /* mac80211 allocates memory for this device instance, including
3181 * space for this driver's ilate structure */
3182 struct ieee80211_hw *hw;
3183
3184 hw = ieee80211_alloc_hw(sizeof(struct il_priv),
3185 cfg->ops->ieee80211_ops);
3186 if (hw == NULL) {
3187 pr_err("%s: Can not allocate network device\n", cfg->name);
3188 goto out;
3189 }
3190
3191 il = hw->priv;
3192 il->hw = hw;
3193
3194out:
3195 return hw;
3196}
3197EXPORT_SYMBOL(il_alloc_all);
3198
#define MAX_BIT_RATE_40_MHZ 150	/* Mbps */
#define MAX_BIT_RATE_20_MHZ 72	/* Mbps */
/**
 * il_init_ht_hw_capab - fill mac80211 HT capabilities for one band
 * @il: driver private data
 * @ht_info: capability structure to populate
 * @band: band the capabilities apply to
 *
 * Derives SGI/width/AMSDU capability flags and the Rx/Tx MCS masks
 * from the device's hw_params and module parameters.
 */
static void
il_init_ht_hw_capab(const struct il_priv *il,
		    struct ieee80211_sta_ht_cap *ht_info,
		    enum ieee80211_band band)
{
	u16 max_bit_rate = 0;
	u8 rx_chains_num = il->hw_params.rx_chains_num;
	u8 tx_chains_num = il->hw_params.tx_chains_num;

	ht_info->cap = 0;
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));

	ht_info->ht_supported = true;

	/* Short GI at 20 MHz is always advertised. */
	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
	max_bit_rate = MAX_BIT_RATE_20_MHZ;
	if (il->hw_params.ht40_channel & BIT(band)) {
		ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
		ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
		/* rx_mask[4] bit 0 -- presumably MCS 32 (HT40-only
		 * rate); TODO confirm against 802.11n MCS numbering */
		ht_info->mcs.rx_mask[4] = 0x01;
		max_bit_rate = MAX_BIT_RATE_40_MHZ;
	}

	if (il->cfg->mod_params->amsdu_size_8K)
		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;

	ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
	ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;

	/* One byte of the Rx MCS bitmask per available Rx chain. */
	ht_info->mcs.rx_mask[0] = 0xFF;
	if (rx_chains_num >= 2)
		ht_info->mcs.rx_mask[1] = 0xFF;
	if (rx_chains_num >= 3)
		ht_info->mcs.rx_mask[2] = 0xFF;

	/* Highest supported Rx data rate */
	max_bit_rate *= rx_chains_num;
	WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
	ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);

	/* Tx MCS capabilities */
	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	if (tx_chains_num != rx_chains_num) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |=
		    ((tx_chains_num -
		      1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}
}
3250
3251/**
3252 * il_init_geos - Initialize mac80211's geo/channel info based from eeprom
3253 */
3254int
3255il_init_geos(struct il_priv *il)
3256{
3257 struct il_channel_info *ch;
3258 struct ieee80211_supported_band *sband;
3259 struct ieee80211_channel *channels;
3260 struct ieee80211_channel *geo_ch;
3261 struct ieee80211_rate *rates;
3262 int i = 0;
3263 s8 max_tx_power = 0;
3264
3265 if (il->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
3266 il->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
3267 D_INFO("Geography modes already initialized.\n");
3268 set_bit(S_GEO_CONFIGURED, &il->status);
3269 return 0;
3270 }
3271
3272 channels =
3273 kzalloc(sizeof(struct ieee80211_channel) * il->channel_count,
3274 GFP_KERNEL);
3275 if (!channels)
3276 return -ENOMEM;
3277
3278 rates =
3279 kzalloc((sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY),
3280 GFP_KERNEL);
3281 if (!rates) {
3282 kfree(channels);
3283 return -ENOMEM;
3284 }
3285
3286 /* 5.2GHz channels start after the 2.4GHz channels */
3287 sband = &il->bands[IEEE80211_BAND_5GHZ];
3288 sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)];
3289 /* just OFDM */
3290 sband->bitrates = &rates[IL_FIRST_OFDM_RATE];
3291 sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE;
3292
3293 if (il->cfg->sku & IL_SKU_N)
3294 il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_5GHZ);
3295
3296 sband = &il->bands[IEEE80211_BAND_2GHZ];
3297 sband->channels = channels;
3298 /* OFDM & CCK */
3299 sband->bitrates = rates;
3300 sband->n_bitrates = RATE_COUNT_LEGACY;
3301
3302 if (il->cfg->sku & IL_SKU_N)
3303 il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_2GHZ);
3304
3305 il->ieee_channels = channels;
3306 il->ieee_rates = rates;
3307
3308 for (i = 0; i < il->channel_count; i++) {
3309 ch = &il->channel_info[i];
3310
3311 if (!il_is_channel_valid(ch))
3312 continue;
3313
3314 sband = &il->bands[ch->band];
3315
3316 geo_ch = &sband->channels[sband->n_channels++];
3317
3318 geo_ch->center_freq =
3319 ieee80211_channel_to_frequency(ch->channel, ch->band);
3320 geo_ch->max_power = ch->max_power_avg;
3321 geo_ch->max_antenna_gain = 0xff;
3322 geo_ch->hw_value = ch->channel;
3323
3324 if (il_is_channel_valid(ch)) {
3325 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
3326 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
3327
3328 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
3329 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
3330
3331 if (ch->flags & EEPROM_CHANNEL_RADAR)
3332 geo_ch->flags |= IEEE80211_CHAN_RADAR;
3333
3334 geo_ch->flags |= ch->ht40_extension_channel;
3335
3336 if (ch->max_power_avg > max_tx_power)
3337 max_tx_power = ch->max_power_avg;
3338 } else {
3339 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
3340 }
3341
3342 D_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", ch->channel,
3343 geo_ch->center_freq,
3344 il_is_channel_a_band(ch) ? "5.2" : "2.4",
3345 geo_ch->
3346 flags & IEEE80211_CHAN_DISABLED ? "restricted" : "valid",
3347 geo_ch->flags);
3348 }
3349
3350 il->tx_power_device_lmt = max_tx_power;
3351 il->tx_power_user_lmt = max_tx_power;
3352 il->tx_power_next = max_tx_power;
3353
3354 if (il->bands[IEEE80211_BAND_5GHZ].n_channels == 0 &&
3355 (il->cfg->sku & IL_SKU_A)) {
3356 IL_INFO("Incorrectly detected BG card as ABG. "
3357 "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
3358 il->pci_dev->device, il->pci_dev->subsystem_device);
3359 il->cfg->sku &= ~IL_SKU_A;
3360 }
3361
3362 IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n",
3363 il->bands[IEEE80211_BAND_2GHZ].n_channels,
3364 il->bands[IEEE80211_BAND_5GHZ].n_channels);
3365
3366 set_bit(S_GEO_CONFIGURED, &il->status);
3367
3368 return 0;
3369}
3370EXPORT_SYMBOL(il_init_geos);
3371
3372/*
3373 * il_free_geos - undo allocations in il_init_geos
3374 */
3375void
3376il_free_geos(struct il_priv *il)
3377{
3378 kfree(il->ieee_channels);
3379 kfree(il->ieee_rates);
3380 clear_bit(S_GEO_CONFIGURED, &il->status);
3381}
3382EXPORT_SYMBOL(il_free_geos);
3383
3384static bool
3385il_is_channel_extension(struct il_priv *il, enum ieee80211_band band,
3386 u16 channel, u8 extension_chan_offset)
3387{
3388 const struct il_channel_info *ch_info;
3389
3390 ch_info = il_get_channel_info(il, band, channel);
3391 if (!il_is_channel_valid(ch_info))
3392 return false;
3393
3394 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
3395 return !(ch_info->
3396 ht40_extension_channel & IEEE80211_CHAN_NO_HT40PLUS);
3397 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
3398 return !(ch_info->
3399 ht40_extension_channel & IEEE80211_CHAN_NO_HT40MINUS);
3400
3401 return false;
3402}
3403
3404bool
3405il_is_ht40_tx_allowed(struct il_priv *il, struct il_rxon_context *ctx,
3406 struct ieee80211_sta_ht_cap *ht_cap)
3407{
3408 if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
3409 return false;
3410
3411 /*
3412 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
3413 * the bit will not set if it is pure 40MHz case
3414 */
3415 if (ht_cap && !ht_cap->ht_supported)
3416 return false;
3417
3418#ifdef CONFIG_IWLEGACY_DEBUGFS
3419 if (il->disable_ht40)
3420 return false;
3421#endif
3422
3423 return il_is_channel_extension(il, il->band,
3424 le16_to_cpu(ctx->staging.channel),
3425 ctx->ht.extension_chan_offset);
3426}
3427EXPORT_SYMBOL(il_is_ht40_tx_allowed);
3428
3429static u16
3430il_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
3431{
3432 u16 new_val;
3433 u16 beacon_factor;
3434
3435 /*
3436 * If mac80211 hasn't given us a beacon interval, program
3437 * the default into the device.
3438 */
3439 if (!beacon_val)
3440 return DEFAULT_BEACON_INTERVAL;
3441
3442 /*
3443 * If the beacon interval we obtained from the peer
3444 * is too large, we'll have to wake up more often
3445 * (and in IBSS case, we'll beacon too much)
3446 *
3447 * For example, if max_beacon_val is 4096, and the
3448 * requested beacon interval is 7000, we'll have to
3449 * use 3500 to be able to wake up on the beacons.
3450 *
3451 * This could badly influence beacon detection stats.
3452 */
3453
3454 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
3455 new_val = beacon_val / beacon_factor;
3456
3457 if (!new_val)
3458 new_val = max_beacon_val;
3459
3460 return new_val;
3461}
3462
/**
 * il_send_rxon_timing - build and send the RXON timing command
 * @il: driver private data
 * @ctx: RXON context whose timing is being programmed
 *
 * Fills ctx->timing (timestamp, listen interval, adjusted beacon
 * interval, beacon timer init value, DTIM period) and sends it to the
 * device.  Returns the result of il_send_cmd_pdu().
 */
int
il_send_rxon_timing(struct il_priv *il, struct il_rxon_context *ctx)
{
	u64 tsf;
	s32 interval_tm, rem;
	struct ieee80211_conf *conf = NULL;
	u16 beacon_int;
	struct ieee80211_vif *vif = ctx->vif;

	conf = &il->hw->conf;

	lockdep_assert_held(&il->mutex);

	memset(&ctx->timing, 0, sizeof(struct il_rxon_time_cmd));

	ctx->timing.timestamp = cpu_to_le64(il->timestamp);
	ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);

	/* No vif yet (e.g. during early setup): use 0 so the adjust
	 * helper falls back to the default interval. */
	beacon_int = vif ? vif->bss_conf.beacon_int : 0;

	/*
	 * TODO: For IBSS we need to get atim_win from mac80211,
	 * for now just always use 0
	 */
	ctx->timing.atim_win = 0;

	/* Clamp the interval so it fits the device's maximum. */
	beacon_int =
	    il_adjust_beacon_interval(beacon_int,
				      il->hw_params.max_beacon_itrvl *
				      TIME_UNIT);
	ctx->timing.beacon_interval = cpu_to_le16(beacon_int);

	tsf = il->timestamp;	/* tsf is modified by do_div: copy it */
	interval_tm = beacon_int * TIME_UNIT;
	rem = do_div(tsf, interval_tm);
	/* time until the next beacon boundary */
	ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);

	/* DTIM period defaults to 1 when unknown. */
	ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1;

	D_ASSOC("beacon interval %d beacon timer %d beacon tim %d\n",
		le16_to_cpu(ctx->timing.beacon_interval),
		le32_to_cpu(ctx->timing.beacon_init_val),
		le16_to_cpu(ctx->timing.atim_win));

	return il_send_cmd_pdu(il, ctx->rxon_timing_cmd, sizeof(ctx->timing),
			       &ctx->timing);
}
EXPORT_SYMBOL(il_send_rxon_timing);
3511
3512void
3513il_set_rxon_hwcrypto(struct il_priv *il, struct il_rxon_context *ctx,
3514 int hw_decrypt)
3515{
3516 struct il_rxon_cmd *rxon = &ctx->staging;
3517
3518 if (hw_decrypt)
3519 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
3520 else
3521 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
3522
3523}
3524EXPORT_SYMBOL(il_set_rxon_hwcrypto);
3525
3526/* validate RXON structure is valid */
3527int
3528il_check_rxon_cmd(struct il_priv *il, struct il_rxon_context *ctx)
3529{
3530 struct il_rxon_cmd *rxon = &ctx->staging;
3531 bool error = false;
3532
3533 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
3534 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
3535 IL_WARN("check 2.4G: wrong narrow\n");
3536 error = true;
3537 }
3538 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
3539 IL_WARN("check 2.4G: wrong radar\n");
3540 error = true;
3541 }
3542 } else {
3543 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
3544 IL_WARN("check 5.2G: not short slot!\n");
3545 error = true;
3546 }
3547 if (rxon->flags & RXON_FLG_CCK_MSK) {
3548 IL_WARN("check 5.2G: CCK!\n");
3549 error = true;
3550 }
3551 }
3552 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
3553 IL_WARN("mac/bssid mcast!\n");
3554 error = true;
3555 }
3556
3557 /* make sure basic rates 6Mbps and 1Mbps are supported */
3558 if ((rxon->ofdm_basic_rates & RATE_6M_MASK) == 0 &&
3559 (rxon->cck_basic_rates & RATE_1M_MASK) == 0) {
3560 IL_WARN("neither 1 nor 6 are basic\n");
3561 error = true;
3562 }
3563
3564 if (le16_to_cpu(rxon->assoc_id) > 2007) {
3565 IL_WARN("aid > 2007\n");
3566 error = true;
3567 }
3568
3569 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) ==
3570 (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
3571 IL_WARN("CCK and short slot\n");
3572 error = true;
3573 }
3574
3575 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) ==
3576 (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
3577 IL_WARN("CCK and auto detect");
3578 error = true;
3579 }
3580
3581 if ((rxon->
3582 flags & (RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK)) ==
3583 RXON_FLG_TGG_PROTECT_MSK) {
3584 IL_WARN("TGg but no auto-detect\n");
3585 error = true;
3586 }
3587
3588 if (error)
3589 IL_WARN("Tuning to channel %d\n", le16_to_cpu(rxon->channel));
3590
3591 if (error) {
3592 IL_ERR("Invalid RXON\n");
3593 return -EINVAL;
3594 }
3595 return 0;
3596}
3597EXPORT_SYMBOL(il_check_rxon_cmd);
3598
3599/**
3600 * il_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
3601 * @il: staging_rxon is compared to active_rxon
3602 *
3603 * If the RXON structure is changing enough to require a new tune,
3604 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
3605 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
3606 */
3607int
3608il_full_rxon_required(struct il_priv *il, struct il_rxon_context *ctx)
3609{
3610 const struct il_rxon_cmd *staging = &ctx->staging;
3611 const struct il_rxon_cmd *active = &ctx->active;
3612
3613#define CHK(cond) \
3614 if ((cond)) { \
3615 D_INFO("need full RXON - " #cond "\n"); \
3616 return 1; \
3617 }
3618
3619#define CHK_NEQ(c1, c2) \
3620 if ((c1) != (c2)) { \
3621 D_INFO("need full RXON - " \
3622 #c1 " != " #c2 " - %d != %d\n", \
3623 (c1), (c2)); \
3624 return 1; \
3625 }
3626
3627 /* These items are only settable from the full RXON command */
3628 CHK(!il_is_associated_ctx(ctx));
3629 CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
3630 CHK(compare_ether_addr(staging->node_addr, active->node_addr));
3631 CHK(compare_ether_addr
3632 (staging->wlap_bssid_addr, active->wlap_bssid_addr));
3633 CHK_NEQ(staging->dev_type, active->dev_type);
3634 CHK_NEQ(staging->channel, active->channel);
3635 CHK_NEQ(staging->air_propagation, active->air_propagation);
3636 CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
3637 active->ofdm_ht_single_stream_basic_rates);
3638 CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
3639 active->ofdm_ht_dual_stream_basic_rates);
3640 CHK_NEQ(staging->assoc_id, active->assoc_id);
3641
3642 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
3643 * be updated with the RXON_ASSOC command -- however only some
3644 * flag transitions are allowed using RXON_ASSOC */
3645
3646 /* Check if we are not switching bands */
3647 CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
3648 active->flags & RXON_FLG_BAND_24G_MSK);
3649
3650 /* Check if we are switching association toggle */
3651 CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
3652 active->filter_flags & RXON_FILTER_ASSOC_MSK);
3653
3654#undef CHK
3655#undef CHK_NEQ
3656
3657 return 0;
3658}
3659EXPORT_SYMBOL(il_full_rxon_required);
3660
3661u8
3662il_get_lowest_plcp(struct il_priv *il, struct il_rxon_context *ctx)
3663{
3664 /*
3665 * Assign the lowest rate -- should really get this from
3666 * the beacon skb from mac80211.
3667 */
3668 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
3669 return RATE_1M_PLCP;
3670 else
3671 return RATE_6M_PLCP;
3672}
3673EXPORT_SYMBOL(il_get_lowest_plcp);
3674
/*
 * Translate the current HT configuration (ctx->ht) into RXON staging flags:
 * protection mode, channel mode (legacy/mixed/pure-40) and control-channel
 * location.  Only touches ctx->staging; nothing is sent to the device here.
 */
static void
_il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf,
		struct il_rxon_context *ctx)
{
	struct il_rxon_cmd *rxon = &ctx->staging;

	/* HT disabled: strip every HT-related RXON flag and stop. */
	if (!ctx->ht.enabled) {
		rxon->flags &=
		    ~(RXON_FLG_CHANNEL_MODE_MSK |
		      RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | RXON_FLG_HT40_PROT_MSK
		      | RXON_FLG_HT_PROT_MSK);
		return;
	}

	/* Encode the mac80211 protection mode into the RXON flag field. */
	rxon->flags |=
	    cpu_to_le32(ctx->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);

	/* Set up channel bandwidth:
	 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
	/* clear the HT channel mode before set the mode */
	rxon->flags &=
	    ~(RXON_FLG_CHANNEL_MODE_MSK | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
	if (il_is_ht40_tx_allowed(il, ctx, NULL)) {
		/* pure ht40 */
		if (ctx->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
			/* Note: control channel is opposite of extension channel */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
				    ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			}
		} else {
			/* Note: control channel is opposite of extension channel */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
				    ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
			default:
				/* channel location only valid if in Mixed mode */
				IL_ERR("invalid extension channel offset\n");
				break;
			}
		}
	} else {
		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
	}

	/* Let the hw-specific code refine the RX chain selection, if any. */
	if (il->cfg->ops->hcmd->set_rxon_chain)
		il->cfg->ops->hcmd->set_rxon_chain(il, ctx);

	D_ASSOC("rxon flags 0x%X operation mode :0x%X "
		"extension channel offset 0x%x\n", le32_to_cpu(rxon->flags),
		ctx->ht.protection, ctx->ht.extension_chan_offset);
}
3741
/* Public wrapper: single-context hardware, so apply HT to the only context. */
void
il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
{
	_il_set_rxon_ht(il, ht_conf, &il->ctx);
}
EXPORT_SYMBOL(il_set_rxon_ht);
3748
/* Return valid, unused, channel for a passive scan to reset the RF */
u8
il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band)
{
	const struct il_channel_info *ch_info;
	int i;
	u8 channel = 0;
	u8 min, max;

	/*
	 * min/max are idxs into il->channel_info[], not channel numbers.
	 * NOTE(review): this assumes the first 14 entries of the channel
	 * table are the 2.4 GHz channels -- confirm against eeprom parsing.
	 */
	if (band == IEEE80211_BAND_5GHZ) {
		min = 14;
		max = il->channel_count;
	} else {
		min = 0;
		max = 14;
	}

	for (i = min; i < max; i++) {
		channel = il->channel_info[i].channel;
		/* Skip the channel we are currently tuned to. */
		if (channel == le16_to_cpu(il->ctx.staging.channel))
			continue;

		ch_info = il_get_channel_info(il, band, channel);
		if (il_is_channel_valid(ch_info))
			break;
	}

	/* If no valid channel was found, this is the last one examined. */
	return channel;
}
EXPORT_SYMBOL(il_get_single_channel_number);
3779
3780/**
3781 * il_set_rxon_channel - Set the band and channel values in staging RXON
3782 * @ch: requested channel as a pointer to struct ieee80211_channel
3783
3784 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
3785 * in the staging RXON flag structure based on the ch->band
3786 */
3787int
3788il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch,
3789 struct il_rxon_context *ctx)
3790{
3791 enum ieee80211_band band = ch->band;
3792 u16 channel = ch->hw_value;
3793
3794 if (le16_to_cpu(ctx->staging.channel) == channel && il->band == band)
3795 return 0;
3796
3797 ctx->staging.channel = cpu_to_le16(channel);
3798 if (band == IEEE80211_BAND_5GHZ)
3799 ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
3800 else
3801 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
3802
3803 il->band = band;
3804
3805 D_INFO("Staging channel set to %d [%d]\n", channel, band);
3806
3807 return 0;
3808}
3809EXPORT_SYMBOL(il_set_rxon_channel);
3810
3811void
3812il_set_flags_for_band(struct il_priv *il, struct il_rxon_context *ctx,
3813 enum ieee80211_band band, struct ieee80211_vif *vif)
3814{
3815 if (band == IEEE80211_BAND_5GHZ) {
3816 ctx->staging.flags &=
3817 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
3818 RXON_FLG_CCK_MSK);
3819 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3820 } else {
3821 /* Copied from il_post_associate() */
3822 if (vif && vif->bss_conf.use_short_slot)
3823 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3824 else
3825 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
3826
3827 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
3828 ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
3829 ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
3830 }
3831}
3832EXPORT_SYMBOL(il_set_flags_for_band);
3833
3834/*
3835 * initialize rxon structure with default values from eeprom
3836 */
3837void
3838il_connection_init_rx_config(struct il_priv *il, struct il_rxon_context *ctx)
3839{
3840 const struct il_channel_info *ch_info;
3841
3842 memset(&ctx->staging, 0, sizeof(ctx->staging));
3843
3844 if (!ctx->vif) {
3845 ctx->staging.dev_type = ctx->unused_devtype;
3846 } else
3847 switch (ctx->vif->type) {
3848
3849 case NL80211_IFTYPE_STATION:
3850 ctx->staging.dev_type = ctx->station_devtype;
3851 ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
3852 break;
3853
3854 case NL80211_IFTYPE_ADHOC:
3855 ctx->staging.dev_type = ctx->ibss_devtype;
3856 ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
3857 ctx->staging.filter_flags =
3858 RXON_FILTER_BCON_AWARE_MSK |
3859 RXON_FILTER_ACCEPT_GRP_MSK;
3860 break;
3861
3862 default:
3863 IL_ERR("Unsupported interface type %d\n",
3864 ctx->vif->type);
3865 break;
3866 }
3867
3868#if 0
3869 /* TODO: Figure out when short_preamble would be set and cache from
3870 * that */
3871 if (!hw_to_local(il->hw)->short_preamble)
3872 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
3873 else
3874 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
3875#endif
3876
3877 ch_info =
3878 il_get_channel_info(il, il->band, le16_to_cpu(ctx->active.channel));
3879
3880 if (!ch_info)
3881 ch_info = &il->channel_info[0];
3882
3883 ctx->staging.channel = cpu_to_le16(ch_info->channel);
3884 il->band = ch_info->band;
3885
3886 il_set_flags_for_band(il, ctx, il->band, ctx->vif);
3887
3888 ctx->staging.ofdm_basic_rates =
3889 (IL_OFDM_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
3890 ctx->staging.cck_basic_rates =
3891 (IL_CCK_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
3892
3893 /* clear both MIX and PURE40 mode flag */
3894 ctx->staging.flags &=
3895 ~(RXON_FLG_CHANNEL_MODE_MIXED | RXON_FLG_CHANNEL_MODE_PURE_40);
3896 if (ctx->vif)
3897 memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
3898
3899 ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
3900 ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
3901}
3902EXPORT_SYMBOL(il_connection_init_rx_config);
3903
3904void
3905il_set_rate(struct il_priv *il)
3906{
3907 const struct ieee80211_supported_band *hw = NULL;
3908 struct ieee80211_rate *rate;
3909 int i;
3910
3911 hw = il_get_hw_mode(il, il->band);
3912 if (!hw) {
3913 IL_ERR("Failed to set rate: unable to get hw mode\n");
3914 return;
3915 }
3916
3917 il->active_rate = 0;
3918
3919 for (i = 0; i < hw->n_bitrates; i++) {
3920 rate = &(hw->bitrates[i]);
3921 if (rate->hw_value < RATE_COUNT_LEGACY)
3922 il->active_rate |= (1 << rate->hw_value);
3923 }
3924
3925 D_RATE("Set active_rate = %0x\n", il->active_rate);
3926
3927 il->ctx.staging.cck_basic_rates =
3928 (IL_CCK_BASIC_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
3929
3930 il->ctx.staging.ofdm_basic_rates =
3931 (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
3932}
3933EXPORT_SYMBOL(il_set_rate);
3934
3935void
3936il_chswitch_done(struct il_priv *il, bool is_success)
3937{
3938 struct il_rxon_context *ctx = &il->ctx;
3939
3940 if (test_bit(S_EXIT_PENDING, &il->status))
3941 return;
3942
3943 if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
3944 ieee80211_chswitch_done(ctx->vif, is_success);
3945}
3946EXPORT_SYMBOL(il_chswitch_done);
3947
/* Handle the uCode channel-switch-announcement notification. */
void
il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_csa_notification *csa = &(pkt->u.csa_notif);

	struct il_rxon_context *ctx = &il->ctx;
	struct il_rxon_cmd *rxon = (void *)&ctx->active;

	/* Ignore stray notifications when no switch is in flight. */
	if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
		return;

	/* Success requires status 0 AND landing on the requested channel. */
	if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) {
		rxon->channel = csa->channel;
		ctx->staging.channel = csa->channel;
		D_11H("CSA notif: channel %d\n", le16_to_cpu(csa->channel));
		il_chswitch_done(il, true);
	} else {
		IL_ERR("CSA notif (fail) : channel %d\n",
		       le16_to_cpu(csa->channel));
		il_chswitch_done(il, false);
	}
}
EXPORT_SYMBOL(il_hdl_csa);
3972
3973#ifdef CONFIG_IWLEGACY_DEBUG
/* Dump the staging RXON command to the radio debug log (debug builds only). */
void
il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx)
{
	struct il_rxon_cmd *rxon = &ctx->staging;

	D_RADIO("RX CONFIG:\n");
	il_print_hex_dump(il, IL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	D_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
	D_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
	D_RADIO("u32 filter_flags: 0x%08x\n", le32_to_cpu(rxon->filter_flags));
	D_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
	D_RADIO("u8 ofdm_basic_rates: 0x%02x\n", rxon->ofdm_basic_rates);
	D_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
	D_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
	D_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
	D_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
}
EXPORT_SYMBOL(il_print_rx_config_cmd);
3992#endif
3993/**
3994 * il_irq_handle_error - called for HW or SW error interrupt from card
3995 */
3996void
3997il_irq_handle_error(struct il_priv *il)
3998{
3999 /* Set the FW error flag -- cleared on il_down */
4000 set_bit(S_FW_ERROR, &il->status);
4001
4002 /* Cancel currently queued command. */
4003 clear_bit(S_HCMD_ACTIVE, &il->status);
4004
4005 IL_ERR("Loaded firmware version: %s\n", il->hw->wiphy->fw_version);
4006
4007 il->cfg->ops->lib->dump_nic_error_log(il);
4008 if (il->cfg->ops->lib->dump_fh)
4009 il->cfg->ops->lib->dump_fh(il, NULL, false);
4010#ifdef CONFIG_IWLEGACY_DEBUG
4011 if (il_get_debug_level(il) & IL_DL_FW_ERRORS)
4012 il_print_rx_config_cmd(il, &il->ctx);
4013#endif
4014
4015 wake_up(&il->wait_command_queue);
4016
4017 /* Keep the restart process from trying to send host
4018 * commands by clearing the INIT status bit */
4019 clear_bit(S_READY, &il->status);
4020
4021 if (!test_bit(S_EXIT_PENDING, &il->status)) {
4022 IL_DBG(IL_DL_FW_ERRORS,
4023 "Restarting adapter due to uCode error.\n");
4024
4025 if (il->cfg->mod_params->restart_fw)
4026 queue_work(il->workqueue, &il->restart);
4027 }
4028}
4029EXPORT_SYMBOL(il_irq_handle_error);
4030
4031static int
4032il_apm_stop_master(struct il_priv *il)
4033{
4034 int ret = 0;
4035
4036 /* stop device's busmaster DMA activity */
4037 il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
4038
4039 ret =
4040 _il_poll_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
4041 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
4042 if (ret)
4043 IL_WARN("Master Disable Timed Out, 100 usec\n");
4044
4045 D_INFO("stop master\n");
4046
4047 return ret;
4048}
4049
/*
 * Power the card down: stop DMA, issue a software reset, then drop the
 * "init done" bit so the adapter leaves the powered-up state.
 */
void
il_apm_stop(struct il_priv *il)
{
	D_INFO("Stop card, put in low power state\n");

	/* Stop device's DMA activity */
	il_apm_stop_master(il);

	/* Reset the entire device */
	il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	/* Brief settle time after the reset pulse. */
	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
EXPORT_SYMBOL(il_apm_stop);
4070
4071/*
4072 * Start up NIC's basic functionality after it has been reset
4073 * (e.g. after platform boot, or shutdown via il_apm_stop())
4074 * NOTE: This does not load uCode nor start the embedded processor
4075 */
4076int
4077il_apm_init(struct il_priv *il)
4078{
4079 int ret = 0;
4080 u16 lctl;
4081
4082 D_INFO("Init card's basic functions\n");
4083
4084 /*
4085 * Use "set_bit" below rather than "write", to preserve any hardware
4086 * bits already set by default after reset.
4087 */
4088
4089 /* Disable L0S exit timer (platform NMI Work/Around) */
4090 il_set_bit(il, CSR_GIO_CHICKEN_BITS,
4091 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
4092
4093 /*
4094 * Disable L0s without affecting L1;
4095 * don't wait for ICH L0s (ICH bug W/A)
4096 */
4097 il_set_bit(il, CSR_GIO_CHICKEN_BITS,
4098 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
4099
4100 /* Set FH wait threshold to maximum (HW error during stress W/A) */
4101 il_set_bit(il, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
4102
4103 /*
4104 * Enable HAP INTA (interrupt from management bus) to
4105 * wake device's PCI Express link L1a -> L0s
4106 * NOTE: This is no-op for 3945 (non-existent bit)
4107 */
4108 il_set_bit(il, CSR_HW_IF_CONFIG_REG,
4109 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
4110
4111 /*
4112 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
4113 * Check if BIOS (or OS) enabled L1-ASPM on this device.
4114 * If so (likely), disable L0S, so device moves directly L0->L1;
4115 * costs negligible amount of power savings.
4116 * If not (unlikely), enable L0S, so there is at least some
4117 * power savings, even without L1.
4118 */
4119 if (il->cfg->base_params->set_l0s) {
4120 lctl = il_pcie_link_ctl(il);
4121 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
4122 PCI_CFG_LINK_CTRL_VAL_L1_EN) {
4123 /* L1-ASPM enabled; disable(!) L0S */
4124 il_set_bit(il, CSR_GIO_REG,
4125 CSR_GIO_REG_VAL_L0S_ENABLED);
4126 D_POWER("L1 Enabled; Disabling L0S\n");
4127 } else {
4128 /* L1-ASPM disabled; enable(!) L0S */
4129 il_clear_bit(il, CSR_GIO_REG,
4130 CSR_GIO_REG_VAL_L0S_ENABLED);
4131 D_POWER("L1 Disabled; Enabling L0S\n");
4132 }
4133 }
4134
4135 /* Configure analog phase-lock-loop before activating to D0A */
4136 if (il->cfg->base_params->pll_cfg_val)
4137 il_set_bit(il, CSR_ANA_PLL_CFG,
4138 il->cfg->base_params->pll_cfg_val);
4139
4140 /*
4141 * Set "initialization complete" bit to move adapter from
4142 * D0U* --> D0A* (powered-up active) state.
4143 */
4144 il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
4145
4146 /*
4147 * Wait for clock stabilization; once stabilized, access to
4148 * device-internal resources is supported, e.g. il_wr_prph()
4149 * and accesses to uCode SRAM.
4150 */
4151 ret =
4152 _il_poll_bit(il, CSR_GP_CNTRL,
4153 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
4154 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
4155 if (ret < 0) {
4156 D_INFO("Failed to init the card\n");
4157 goto out;
4158 }
4159
4160 /*
4161 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
4162 * BSM (Boostrap State Machine) is only in 3945 and 4965.
4163 *
4164 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
4165 * do not disable clocks. This preserves any hardware bits already
4166 * set by default in "CLK_CTRL_REG" after reset.
4167 */
4168 if (il->cfg->base_params->use_bsm)
4169 il_wr_prph(il, APMG_CLK_EN_REG,
4170 APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
4171 else
4172 il_wr_prph(il, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
4173 udelay(20);
4174
4175 /* Disable L1-Active */
4176 il_set_bits_prph(il, APMG_PCIDEV_STT_REG,
4177 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
4178
4179out:
4180 return ret;
4181}
4182EXPORT_SYMBOL(il_apm_init);
4183
/*
 * Set the user TX power limit (dBm).  Validates the request against the
 * device limit, defers during scan/RXON changes unless @force, and rolls
 * back on failure.  Returns 0, or a negative errno on validation/IO error.
 * Caller must hold il->mutex.
 */
int
il_set_tx_power(struct il_priv *il, s8 tx_power, bool force)
{
	int ret;
	s8 prev_tx_power;
	bool defer;
	struct il_rxon_context *ctx = &il->ctx;

	lockdep_assert_held(&il->mutex);

	/* No-op if the limit is unchanged (unless forced). */
	if (il->tx_power_user_lmt == tx_power && !force)
		return 0;

	if (!il->cfg->ops->lib->send_tx_power)
		return -EOPNOTSUPP;

	/* 0 dBm mean 1 milliwatt */
	if (tx_power < 0) {
		IL_WARN("Requested user TXPOWER %d below 1 mW.\n", tx_power);
		return -EINVAL;
	}

	if (tx_power > il->tx_power_device_lmt) {
		IL_WARN("Requested user TXPOWER %d above upper limit %d.\n",
			tx_power, il->tx_power_device_lmt);
		return -EINVAL;
	}

	if (!il_is_ready_rf(il))
		return -EIO;

	/* scan complete and commit_rxon use tx_power_next value,
	 * it always need to be updated for newest request */
	il->tx_power_next = tx_power;

	/* do not set tx power when scanning or channel changing */
	defer = test_bit(S_SCANNING, &il->status) ||
	    memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
	if (defer && !force) {
		D_INFO("Deferring tx power set\n");
		return 0;
	}

	prev_tx_power = il->tx_power_user_lmt;
	il->tx_power_user_lmt = tx_power;

	ret = il->cfg->ops->lib->send_tx_power(il);

	/* if fail to set tx_power, restore the orig. tx power */
	if (ret) {
		il->tx_power_user_lmt = prev_tx_power;
		il->tx_power_next = prev_tx_power;
	}
	return ret;
}
EXPORT_SYMBOL(il_set_tx_power);
4240
4241void
4242il_send_bt_config(struct il_priv *il)
4243{
4244 struct il_bt_cmd bt_cmd = {
4245 .lead_time = BT_LEAD_TIME_DEF,
4246 .max_kill = BT_MAX_KILL_DEF,
4247 .kill_ack_mask = 0,
4248 .kill_cts_mask = 0,
4249 };
4250
4251 if (!bt_coex_active)
4252 bt_cmd.flags = BT_COEX_DISABLE;
4253 else
4254 bt_cmd.flags = BT_COEX_ENABLE;
4255
4256 D_INFO("BT coex %s\n",
4257 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
4258
4259 if (il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(struct il_bt_cmd), &bt_cmd))
4260 IL_ERR("failed to send BT Coex Config\n");
4261}
4262EXPORT_SYMBOL(il_send_bt_config);
4263
4264int
4265il_send_stats_request(struct il_priv *il, u8 flags, bool clear)
4266{
4267 struct il_stats_cmd stats_cmd = {
4268 .configuration_flags = clear ? IL_STATS_CONF_CLEAR_STATS : 0,
4269 };
4270
4271 if (flags & CMD_ASYNC)
4272 return il_send_cmd_pdu_async(il, C_STATS, sizeof(struct il_stats_cmd),
4273 &stats_cmd, NULL);
4274 else
4275 return il_send_cmd_pdu(il, C_STATS, sizeof(struct il_stats_cmd),
4276 &stats_cmd);
4277}
4278EXPORT_SYMBOL(il_send_stats_request);
4279
/* Log the uCode sleep notification (debug builds only; otherwise a no-op). */
void
il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb)
{
#ifdef CONFIG_IWLEGACY_DEBUG
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	struct il_sleep_notification *sleep = &(pkt->u.sleep_notif);
	D_RX("sleep mode: %d, src: %d\n",
	     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
}
EXPORT_SYMBOL(il_hdl_pm_sleep);
4291
/* Hex-dump an otherwise unhandled PM debug-statistics notification. */
void
il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);
	/* Payload length is encoded in the low bits of len_n_flags. */
	u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
	D_RADIO("Dumping %d bytes of unhandled notification for %s:\n", len,
		il_get_cmd_string(pkt->hdr.cmd));
	il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len);
}
EXPORT_SYMBOL(il_hdl_pm_debug_stats);
4302
/* Log an error reply received from the uCode for a host command. */
void
il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb)
{
	struct il_rx_pkt *pkt = rxb_addr(rxb);

	IL_ERR("Error Reply type 0x%08X cmd %s (0x%02X) "
	       "seq 0x%04X ser 0x%08X\n",
	       le32_to_cpu(pkt->u.err_resp.error_type),
	       il_get_cmd_string(pkt->u.err_resp.cmd_id),
	       pkt->u.err_resp.cmd_id,
	       le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
	       le32_to_cpu(pkt->u.err_resp.error_info));
}
EXPORT_SYMBOL(il_hdl_error);
4317
/* Reset the accumulated interrupt-source counters. */
void
il_clear_isr_stats(struct il_priv *il)
{
	memset(&il->isr_stats, 0, sizeof(il->isr_stats));
}
4323
/*
 * mac80211 conf_tx callback: store per-AC EDCA parameters in the QoS
 * command staging area.  Out-of-range queues are silently accepted
 * (returns 0) to match mac80211 expectations.
 */
int
il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
	       const struct ieee80211_tx_queue_params *params)
{
	struct il_priv *il = hw->priv;
	unsigned long flags;
	int q;

	D_MAC80211("enter\n");

	if (!il_is_ready_rf(il)) {
		D_MAC80211("leave - RF not ready\n");
		return -EIO;
	}

	if (queue >= AC_NUM) {
		D_MAC80211("leave - queue >= AC_NUM %d\n", queue);
		return 0;
	}

	/* mac80211 queue order is reversed relative to the device's ACs. */
	q = AC_NUM - 1 - queue;

	spin_lock_irqsave(&il->lock, flags);

	il->ctx.qos_data.def_qos_parm.ac[q].cw_min =
	    cpu_to_le16(params->cw_min);
	il->ctx.qos_data.def_qos_parm.ac[q].cw_max =
	    cpu_to_le16(params->cw_max);
	il->ctx.qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
	/* TXOP is given in 32-usec units by the firmware interface. */
	il->ctx.qos_data.def_qos_parm.ac[q].edca_txop =
	    cpu_to_le16((params->txop * 32));

	il->ctx.qos_data.def_qos_parm.ac[q].reserved1 = 0;

	spin_unlock_irqrestore(&il->lock, flags);

	D_MAC80211("leave\n");
	return 0;
}
EXPORT_SYMBOL(il_mac_conf_tx);
4364
/* mac80211 callback: report whether we sent the last IBSS beacon. */
int
il_mac_tx_last_beacon(struct ieee80211_hw *hw)
{
	struct il_priv *il = hw->priv;

	return il->ibss_manager == IL_IBSS_MANAGER;
}
EXPORT_SYMBOL_GPL(il_mac_tx_last_beacon);
4373
/* Rebuild the RXON defaults for @ctx and commit them to the device. */
static int
il_set_mode(struct il_priv *il, struct il_rxon_context *ctx)
{
	il_connection_init_rx_config(il, ctx);

	if (il->cfg->ops->hcmd->set_rxon_chain)
		il->cfg->ops->hcmd->set_rxon_chain(il, ctx);

	return il_commit_rxon(il, ctx);
}
4384
/*
 * Activate @ctx for its attached vif and program the device mode.
 * On failure the context is deactivated again (unless always_active).
 * Caller must hold il->mutex.
 */
static int
il_setup_interface(struct il_priv *il, struct il_rxon_context *ctx)
{
	struct ieee80211_vif *vif = ctx->vif;
	int err;

	lockdep_assert_held(&il->mutex);

	/*
	 * This variable will be correct only when there's just
	 * a single context, but all code using it is for hardware
	 * that supports only one context.
	 */
	il->iw_mode = vif->type;

	ctx->is_active = true;

	err = il_set_mode(il, ctx);
	if (err) {
		if (!ctx->always_active)
			ctx->is_active = false;
		return err;
	}

	return 0;
}
4411
/*
 * mac80211 add_interface callback: attach @vif to the single RXON context
 * after checking readiness, exclusivity and supported interface modes.
 */
int
il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
	int err;
	u32 modes;

	D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);

	mutex_lock(&il->mutex);

	if (!il_is_ready_rf(il)) {
		IL_WARN("Try to add interface when device not ready\n");
		err = -EINVAL;
		goto out;
	}

	/* check if busy context is exclusive */
	if (il->ctx.vif &&
	    (il->ctx.exclusive_interface_modes & BIT(il->ctx.vif->type))) {
		err = -EINVAL;
		goto out;
	}

	/* Reject interface types the context does not support. */
	modes = il->ctx.interface_modes | il->ctx.exclusive_interface_modes;
	if (!(modes & BIT(vif->type))) {
		err = -EOPNOTSUPP;
		goto out;
	}

	vif_priv->ctx = &il->ctx;
	il->ctx.vif = vif;

	/* Undo the attachment if the device cannot be programmed. */
	err = il_setup_interface(il, &il->ctx);
	if (err) {
		il->ctx.vif = NULL;
		il->iw_mode = NL80211_IFTYPE_STATION;
	}

out:
	mutex_unlock(&il->mutex);

	D_MAC80211("leave\n");
	return err;
}
EXPORT_SYMBOL(il_mac_add_interface);
4459
/*
 * Detach @vif from its context: cancel any scan it owns and, unless this
 * is part of an interface-type change, reprogram and deactivate the
 * context.  Caller must hold il->mutex.
 */
static void
il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif,
		      bool mode_change)
{
	struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);

	lockdep_assert_held(&il->mutex);

	/* Abort a scan started on behalf of this vif. */
	if (il->scan_vif == vif) {
		il_scan_cancel_timeout(il, 200);
		il_force_scan_end(il);
	}

	if (!mode_change) {
		il_set_mode(il, ctx);
		if (!ctx->always_active)
			ctx->is_active = false;
	}
}
4479
/* mac80211 remove_interface callback: detach @vif and clear stored BSSID. */
void
il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);

	D_MAC80211("enter\n");

	mutex_lock(&il->mutex);

	WARN_ON(ctx->vif != vif);
	ctx->vif = NULL;

	il_teardown_interface(il, vif, false);

	memset(il->bssid, 0, ETH_ALEN);
	mutex_unlock(&il->mutex);

	D_MAC80211("leave\n");

}
EXPORT_SYMBOL(il_mac_remove_interface);
4502
4503int
4504il_alloc_txq_mem(struct il_priv *il)
4505{
4506 if (!il->txq)
4507 il->txq =
4508 kzalloc(sizeof(struct il_tx_queue) *
4509 il->cfg->base_params->num_of_queues, GFP_KERNEL);
4510 if (!il->txq) {
4511 IL_ERR("Not enough memory for txq\n");
4512 return -ENOMEM;
4513 }
4514 return 0;
4515}
4516EXPORT_SYMBOL(il_alloc_txq_mem);
4517
/* Free the TX queue array (counterpart of il_alloc_txq_mem). */
/* NOTE(review): name suggests allocation; a "free" in the name would be clearer. */
void
il_txq_mem(struct il_priv *il)
{
	kfree(il->txq);
	il->txq = NULL;
}
EXPORT_SYMBOL(il_txq_mem);
4525
4526#ifdef CONFIG_IWLEGACY_DEBUGFS
4527
4528#define IL_TRAFFIC_DUMP_SIZE (IL_TRAFFIC_ENTRY_SIZE * IL_TRAFFIC_ENTRIES)
4529
4530void
4531il_reset_traffic_log(struct il_priv *il)
4532{
4533 il->tx_traffic_idx = 0;
4534 il->rx_traffic_idx = 0;
4535 if (il->tx_traffic)
4536 memset(il->tx_traffic, 0, IL_TRAFFIC_DUMP_SIZE);
4537 if (il->rx_traffic)
4538 memset(il->rx_traffic, 0, IL_TRAFFIC_DUMP_SIZE);
4539}
4540
4541int
4542il_alloc_traffic_mem(struct il_priv *il)
4543{
4544 u32 traffic_size = IL_TRAFFIC_DUMP_SIZE;
4545
4546 if (il_debug_level & IL_DL_TX) {
4547 if (!il->tx_traffic) {
4548 il->tx_traffic = kzalloc(traffic_size, GFP_KERNEL);
4549 if (!il->tx_traffic)
4550 return -ENOMEM;
4551 }
4552 }
4553 if (il_debug_level & IL_DL_RX) {
4554 if (!il->rx_traffic) {
4555 il->rx_traffic = kzalloc(traffic_size, GFP_KERNEL);
4556 if (!il->rx_traffic)
4557 return -ENOMEM;
4558 }
4559 }
4560 il_reset_traffic_log(il);
4561 return 0;
4562}
4563EXPORT_SYMBOL(il_alloc_traffic_mem);
4564
/* Release both traffic-capture buffers (kfree(NULL) is a safe no-op). */
void
il_free_traffic_mem(struct il_priv *il)
{
	kfree(il->tx_traffic);
	il->tx_traffic = NULL;

	kfree(il->rx_traffic);
	il->rx_traffic = NULL;
}
EXPORT_SYMBOL(il_free_traffic_mem);
4575
/*
 * Capture (up to IL_TRAFFIC_ENTRY_SIZE bytes of) a transmitted data frame
 * header into the TX traffic ring buffer.  Management/control frames are
 * skipped; no-op unless IL_DL_TX debugging is active.
 */
void
il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
			 struct ieee80211_hdr *header)
{
	__le16 fc;
	u16 len;

	if (likely(!(il_debug_level & IL_DL_TX)))
		return;

	if (!il->tx_traffic)
		return;

	fc = header->frame_control;
	if (ieee80211_is_data(fc)) {
		/* Clamp the copy to one ring-entry slot. */
		len =
		    (length >
		     IL_TRAFFIC_ENTRY_SIZE) ? IL_TRAFFIC_ENTRY_SIZE : length;
		memcpy((il->tx_traffic +
			(il->tx_traffic_idx * IL_TRAFFIC_ENTRY_SIZE)), header,
		       len);
		il->tx_traffic_idx =
		    (il->tx_traffic_idx + 1) % IL_TRAFFIC_ENTRIES;
	}
}
EXPORT_SYMBOL(il_dbg_log_tx_data_frame);
4602
/*
 * Capture (up to IL_TRAFFIC_ENTRY_SIZE bytes of) a received data frame
 * header into the RX traffic ring buffer.  Mirror of the TX variant;
 * no-op unless IL_DL_RX debugging is active.
 */
void
il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
			 struct ieee80211_hdr *header)
{
	__le16 fc;
	u16 len;

	if (likely(!(il_debug_level & IL_DL_RX)))
		return;

	if (!il->rx_traffic)
		return;

	fc = header->frame_control;
	if (ieee80211_is_data(fc)) {
		/* Clamp the copy to one ring-entry slot. */
		len =
		    (length >
		     IL_TRAFFIC_ENTRY_SIZE) ? IL_TRAFFIC_ENTRY_SIZE : length;
		memcpy((il->rx_traffic +
			(il->rx_traffic_idx * IL_TRAFFIC_ENTRY_SIZE)), header,
		       len);
		il->rx_traffic_idx =
		    (il->rx_traffic_idx + 1) % IL_TRAFFIC_ENTRIES;
	}
}
EXPORT_SYMBOL(il_dbg_log_rx_data_frame);
4629
/* Map a MANAGEMENT_* stats idx to its name (IL_CMD expands to a case). */
const char *
il_get_mgmt_string(int cmd)
{
	switch (cmd) {
		IL_CMD(MANAGEMENT_ASSOC_REQ);
		IL_CMD(MANAGEMENT_ASSOC_RESP);
		IL_CMD(MANAGEMENT_REASSOC_REQ);
		IL_CMD(MANAGEMENT_REASSOC_RESP);
		IL_CMD(MANAGEMENT_PROBE_REQ);
		IL_CMD(MANAGEMENT_PROBE_RESP);
		IL_CMD(MANAGEMENT_BEACON);
		IL_CMD(MANAGEMENT_ATIM);
		IL_CMD(MANAGEMENT_DISASSOC);
		IL_CMD(MANAGEMENT_AUTH);
		IL_CMD(MANAGEMENT_DEAUTH);
		IL_CMD(MANAGEMENT_ACTION);
	default:
		return "UNKNOWN";

	}
}
4651
/* Map a CONTROL_* stats idx to its name (IL_CMD expands to a case). */
const char *
il_get_ctrl_string(int cmd)
{
	switch (cmd) {
		IL_CMD(CONTROL_BACK_REQ);
		IL_CMD(CONTROL_BACK);
		IL_CMD(CONTROL_PSPOLL);
		IL_CMD(CONTROL_RTS);
		IL_CMD(CONTROL_CTS);
		IL_CMD(CONTROL_ACK);
		IL_CMD(CONTROL_CFEND);
		IL_CMD(CONTROL_CFENDACK);
	default:
		return "UNKNOWN";

	}
}
4669
/* Zero both TX and RX traffic statistics counters. */
void
il_clear_traffic_stats(struct il_priv *il)
{
	memset(&il->tx_stats, 0, sizeof(struct traffic_stats));
	memset(&il->rx_stats, 0, sizeof(struct traffic_stats));
}
4676
4677/*
4678 * if CONFIG_IWLEGACY_DEBUGFS defined,
4679 * il_update_stats function will
4680 * record all the MGMT, CTRL and DATA pkt for both TX and Rx pass
4681 * Use debugFs to display the rx/rx_stats
4682 * if CONFIG_IWLEGACY_DEBUGFS not being defined, then no MGMT and CTRL
4683 * information will be recorded, but DATA pkt still will be recorded
4684 * for the reason of il_led.c need to control the led blinking based on
4685 * number of tx and rx data.
4686 *
4687 */
4688void
4689il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
4690{
4691 struct traffic_stats *stats;
4692
4693 if (is_tx)
4694 stats = &il->tx_stats;
4695 else
4696 stats = &il->rx_stats;
4697
4698 if (ieee80211_is_mgmt(fc)) {
4699 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
4700 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
4701 stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
4702 break;
4703 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
4704 stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
4705 break;
4706 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
4707 stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
4708 break;
4709 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
4710 stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
4711 break;
4712 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
4713 stats->mgmt[MANAGEMENT_PROBE_REQ]++;
4714 break;
4715 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
4716 stats->mgmt[MANAGEMENT_PROBE_RESP]++;
4717 break;
4718 case cpu_to_le16(IEEE80211_STYPE_BEACON):
4719 stats->mgmt[MANAGEMENT_BEACON]++;
4720 break;
4721 case cpu_to_le16(IEEE80211_STYPE_ATIM):
4722 stats->mgmt[MANAGEMENT_ATIM]++;
4723 break;
4724 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
4725 stats->mgmt[MANAGEMENT_DISASSOC]++;
4726 break;
4727 case cpu_to_le16(IEEE80211_STYPE_AUTH):
4728 stats->mgmt[MANAGEMENT_AUTH]++;
4729 break;
4730 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
4731 stats->mgmt[MANAGEMENT_DEAUTH]++;
4732 break;
4733 case cpu_to_le16(IEEE80211_STYPE_ACTION):
4734 stats->mgmt[MANAGEMENT_ACTION]++;
4735 break;
4736 }
4737 } else if (ieee80211_is_ctl(fc)) {
4738 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
4739 case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
4740 stats->ctrl[CONTROL_BACK_REQ]++;
4741 break;
4742 case cpu_to_le16(IEEE80211_STYPE_BACK):
4743 stats->ctrl[CONTROL_BACK]++;
4744 break;
4745 case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
4746 stats->ctrl[CONTROL_PSPOLL]++;
4747 break;
4748 case cpu_to_le16(IEEE80211_STYPE_RTS):
4749 stats->ctrl[CONTROL_RTS]++;
4750 break;
4751 case cpu_to_le16(IEEE80211_STYPE_CTS):
4752 stats->ctrl[CONTROL_CTS]++;
4753 break;
4754 case cpu_to_le16(IEEE80211_STYPE_ACK):
4755 stats->ctrl[CONTROL_ACK]++;
4756 break;
4757 case cpu_to_le16(IEEE80211_STYPE_CFEND):
4758 stats->ctrl[CONTROL_CFEND]++;
4759 break;
4760 case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
4761 stats->ctrl[CONTROL_CFENDACK]++;
4762 break;
4763 }
4764 } else {
4765 /* data */
4766 stats->data_cnt++;
4767 stats->data_bytes += len;
4768 }
4769}
4770EXPORT_SYMBOL(il_update_stats);
4771#endif
4772
/*
 * Force a firmware reload.  @external requests (e.g. from debugfs) bypass
 * both the rate limit and the restart_fw module parameter; internal
 * requests are throttled and honor restart_fw.  Returns 0, -EINVAL if
 * shutting down, or -EAGAIN when rate-limited.
 */
int
il_force_reset(struct il_priv *il, bool external)
{
	struct il_force_reset *force_reset;

	if (test_bit(S_EXIT_PENDING, &il->status))
		return -EINVAL;

	force_reset = &il->force_reset;
	force_reset->reset_request_count++;
	if (!external) {
		/* Throttle internal requests within reset_duration jiffies. */
		if (force_reset->last_force_reset_jiffies &&
		    time_after(force_reset->last_force_reset_jiffies +
			       force_reset->reset_duration, jiffies)) {
			D_INFO("force reset rejected\n");
			force_reset->reset_reject_count++;
			return -EAGAIN;
		}
	}
	force_reset->reset_success_count++;
	force_reset->last_force_reset_jiffies = jiffies;

	/*
	 * if the request is from external(ex: debugfs),
	 * then always perform the request in regardless the module
	 * parameter setting
	 * if the request is from internal (uCode error or driver
	 * detect failure), then fw_restart module parameter
	 * need to be check before performing firmware reload
	 */

	if (!external && !il->cfg->mod_params->restart_fw) {
		D_INFO("Cancel firmware reload based on "
		       "module parameter setting\n");
		return 0;
	}

	IL_ERR("On demand firmware reload\n");

	/* Set the FW error flag -- cleared on il_down */
	set_bit(S_FW_ERROR, &il->status);
	wake_up(&il->wait_command_queue);
	/*
	 * Keep the restart process from trying to send host
	 * commands by clearing the INIT status bit
	 */
	clear_bit(S_READY, &il->status);
	queue_work(il->workqueue, &il->restart);

	return 0;
}
4824
/*
 * il_mac_change_interface - mac80211 change_interface callback
 *
 * Switches an existing vif to a new interface type/p2p setting by
 * tearing the interface down and setting it up again under il->mutex.
 * Returns 0 on success, -EBUSY if the vif is gone or RF is not ready,
 * -EOPNOTSUPP if the context cannot support the new type, -EINVAL if
 * either the old or new type is marked exclusive for this context.
 */
int
il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			enum nl80211_iftype newtype, bool newp2p)
{
	struct il_priv *il = hw->priv;
	struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
	u32 modes;
	int err;

	/* Fold the p2p flag into the iftype used for the mode checks. */
	newtype = ieee80211_iftype_p2p(newtype, newp2p);

	mutex_lock(&il->mutex);

	if (!ctx->vif || !il_is_ready_rf(il)) {
		/*
		 * Huh? But wait ... this can maybe happen when
		 * we're in the middle of a firmware restart!
		 */
		err = -EBUSY;
		goto out;
	}

	/* The new type must be supported by this context at all ... */
	modes = ctx->interface_modes | ctx->exclusive_interface_modes;
	if (!(modes & BIT(newtype))) {
		err = -EOPNOTSUPP;
		goto out;
	}

	/* ... and neither the current nor the new type may be exclusive. */
	if ((il->ctx.exclusive_interface_modes & BIT(il->ctx.vif->type)) ||
	    (il->ctx.exclusive_interface_modes & BIT(newtype))) {
		err = -EINVAL;
		goto out;
	}

	/* success */
	il_teardown_interface(il, vif, true);
	vif->type = newtype;
	vif->p2p = newp2p;
	err = il_setup_interface(il, ctx);
	WARN_ON(err);
	/*
	 * We've switched internally, but submitting to the
	 * device may have failed for some reason. Mask this
	 * error, because otherwise mac80211 will not switch
	 * (and set the interface type back) and we'll be
	 * out of sync with it.
	 */
	err = 0;

out:
	mutex_unlock(&il->mutex);
	return err;
}
EXPORT_SYMBOL(il_mac_change_interface);
4879
4880/*
4881 * On every watchdog tick we check (latest) time stamp. If it does not
4882 * change during timeout period and queue is not empty we reset firmware.
4883 */
4884static int
4885il_check_stuck_queue(struct il_priv *il, int cnt)
4886{
4887 struct il_tx_queue *txq = &il->txq[cnt];
4888 struct il_queue *q = &txq->q;
4889 unsigned long timeout;
4890 int ret;
4891
4892 if (q->read_ptr == q->write_ptr) {
4893 txq->time_stamp = jiffies;
4894 return 0;
4895 }
4896
4897 timeout =
4898 txq->time_stamp +
4899 msecs_to_jiffies(il->cfg->base_params->wd_timeout);
4900
4901 if (time_after(jiffies, timeout)) {
4902 IL_ERR("Queue %d stuck for %u ms.\n", q->id,
4903 il->cfg->base_params->wd_timeout);
4904 ret = il_force_reset(il, false);
4905 return (ret == -EAGAIN) ? 0 : 1;
4906 }
4907
4908 return 0;
4909}
4910
4911/*
4912 * Making watchdog tick be a quarter of timeout assure we will
4913 * discover the queue hung between timeout and 1.25*timeout
4914 */
4915#define IL_WD_TICK(timeout) ((timeout) / 4)
4916
4917/*
4918 * Watchdog timer callback, we check each tx queue for stuck, if if hung
4919 * we reset the firmware. If everything is fine just rearm the timer.
4920 */
4921void
4922il_bg_watchdog(unsigned long data)
4923{
4924 struct il_priv *il = (struct il_priv *)data;
4925 int cnt;
4926 unsigned long timeout;
4927
4928 if (test_bit(S_EXIT_PENDING, &il->status))
4929 return;
4930
4931 timeout = il->cfg->base_params->wd_timeout;
4932 if (timeout == 0)
4933 return;
4934
4935 /* monitor and check for stuck cmd queue */
4936 if (il_check_stuck_queue(il, il->cmd_queue))
4937 return;
4938
4939 /* monitor and check for other stuck queues */
4940 if (il_is_any_associated(il)) {
4941 for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
4942 /* skip as we already checked the command queue */
4943 if (cnt == il->cmd_queue)
4944 continue;
4945 if (il_check_stuck_queue(il, cnt))
4946 return;
4947 }
4948 }
4949
4950 mod_timer(&il->watchdog,
4951 jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
4952}
4953EXPORT_SYMBOL(il_bg_watchdog);
4954
4955void
4956il_setup_watchdog(struct il_priv *il)
4957{
4958 unsigned int timeout = il->cfg->base_params->wd_timeout;
4959
4960 if (timeout)
4961 mod_timer(&il->watchdog,
4962 jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
4963 else
4964 del_timer(&il->watchdog);
4965}
4966EXPORT_SYMBOL(il_setup_watchdog);
4967
4968/*
4969 * extended beacon time format
4970 * time in usec will be changed into a 32-bit value in extended:internal format
4971 * the extended part is the beacon counts
4972 * the internal part is the time in usec within one beacon interval
4973 */
4974u32
4975il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval)
4976{
4977 u32 quot;
4978 u32 rem;
4979 u32 interval = beacon_interval * TIME_UNIT;
4980
4981 if (!interval || !usec)
4982 return 0;
4983
4984 quot =
4985 (usec /
4986 interval) & (il_beacon_time_mask_high(il,
4987 il->hw_params.
4988 beacon_time_tsf_bits) >> il->
4989 hw_params.beacon_time_tsf_bits);
4990 rem =
4991 (usec % interval) & il_beacon_time_mask_low(il,
4992 il->hw_params.
4993 beacon_time_tsf_bits);
4994
4995 return (quot << il->hw_params.beacon_time_tsf_bits) + rem;
4996}
4997EXPORT_SYMBOL(il_usecs_to_beacons);
4998
4999/* base is usually what we get from ucode with each received frame,
5000 * the same as HW timer counter counting down
5001 */
5002__le32
5003il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
5004 u32 beacon_interval)
5005{
5006 u32 base_low = base & il_beacon_time_mask_low(il,
5007 il->hw_params.
5008 beacon_time_tsf_bits);
5009 u32 addon_low = addon & il_beacon_time_mask_low(il,
5010 il->hw_params.
5011 beacon_time_tsf_bits);
5012 u32 interval = beacon_interval * TIME_UNIT;
5013 u32 res = (base & il_beacon_time_mask_high(il,
5014 il->hw_params.
5015 beacon_time_tsf_bits)) +
5016 (addon & il_beacon_time_mask_high(il,
5017 il->hw_params.
5018 beacon_time_tsf_bits));
5019
5020 if (base_low > addon_low)
5021 res += base_low - addon_low;
5022 else if (base_low < addon_low) {
5023 res += interval + base_low - addon_low;
5024 res += (1 << il->hw_params.beacon_time_tsf_bits);
5025 } else
5026 res += (1 << il->hw_params.beacon_time_tsf_bits);
5027
5028 return cpu_to_le32(res);
5029}
5030EXPORT_SYMBOL(il_add_beacon_time);
5031
5032#ifdef CONFIG_PM
5033
int
il_pci_suspend(struct device *device)
{
	struct il_priv *il = pci_get_drvdata(to_pci_dev(device));

	/*
	 * On the way into suspend mac80211 has already called
	 * il_mac_stop(), but that path does not know it is suspending
	 * and therefore never calls apm_ops.stop() to stop the DMA
	 * operation. Stop the DMA engine here explicitly.
	 */
	il_apm_stop(il);

	return 0;
}
EXPORT_SYMBOL(il_pci_suspend);
5052
5053int
5054il_pci_resume(struct device *device)
5055{
5056 struct pci_dev *pdev = to_pci_dev(device);
5057 struct il_priv *il = pci_get_drvdata(pdev);
5058 bool hw_rfkill = false;
5059
5060 /*
5061 * We disable the RETRY_TIMEOUT register (0x41) to keep
5062 * PCI Tx retries from interfering with C3 CPU state.
5063 */
5064 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
5065
5066 il_enable_interrupts(il);
5067
5068 if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
5069 hw_rfkill = true;
5070
5071 if (hw_rfkill)
5072 set_bit(S_RF_KILL_HW, &il->status);
5073 else
5074 clear_bit(S_RF_KILL_HW, &il->status);
5075
5076 wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill);
5077
5078 return 0;
5079}
5080EXPORT_SYMBOL(il_pci_resume);
5081
/*
 * Shared PM hooks: every suspend-type transition (suspend, freeze,
 * poweroff) stops the device via il_pci_suspend; every resume-type
 * transition (resume, thaw, restore) re-enables it via il_pci_resume.
 */
const struct dev_pm_ops il_pm_ops = {
	.suspend = il_pci_suspend,
	.resume = il_pci_resume,
	.freeze = il_pci_suspend,
	.thaw = il_pci_resume,
	.poweroff = il_pci_suspend,
	.restore = il_pci_resume,
};
EXPORT_SYMBOL(il_pm_ops);
5091
5092#endif /* CONFIG_PM */
5093
5094static void
5095il_update_qos(struct il_priv *il, struct il_rxon_context *ctx)
5096{
5097 if (test_bit(S_EXIT_PENDING, &il->status))
5098 return;
5099
5100 if (!ctx->is_active)
5101 return;
5102
5103 ctx->qos_data.def_qos_parm.qos_flags = 0;
5104
5105 if (ctx->qos_data.qos_active)
5106 ctx->qos_data.def_qos_parm.qos_flags |=
5107 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
5108
5109 if (ctx->ht.enabled)
5110 ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
5111
5112 D_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
5113 ctx->qos_data.qos_active, ctx->qos_data.def_qos_parm.qos_flags);
5114
5115 il_send_cmd_pdu_async(il, ctx->qos_cmd, sizeof(struct il_qosparam_cmd),
5116 &ctx->qos_data.def_qos_parm, NULL);
5117}
5118
5119/**
5120 * il_mac_config - mac80211 config callback
5121 */
5122int
5123il_mac_config(struct ieee80211_hw *hw, u32 changed)
5124{
5125 struct il_priv *il = hw->priv;
5126 const struct il_channel_info *ch_info;
5127 struct ieee80211_conf *conf = &hw->conf;
5128 struct ieee80211_channel *channel = conf->channel;
5129 struct il_ht_config *ht_conf = &il->current_ht_config;
5130 struct il_rxon_context *ctx = &il->ctx;
5131 unsigned long flags = 0;
5132 int ret = 0;
5133 u16 ch;
5134 int scan_active = 0;
5135 bool ht_changed = false;
5136
5137 if (WARN_ON(!il->cfg->ops->legacy))
5138 return -EOPNOTSUPP;
5139
5140 mutex_lock(&il->mutex);
5141
5142 D_MAC80211("enter to channel %d changed 0x%X\n", channel->hw_value,
5143 changed);
5144
5145 if (unlikely(test_bit(S_SCANNING, &il->status))) {
5146 scan_active = 1;
5147 D_MAC80211("scan active\n");
5148 }
5149
5150 if (changed &
5151 (IEEE80211_CONF_CHANGE_SMPS | IEEE80211_CONF_CHANGE_CHANNEL)) {
5152 /* mac80211 uses static for non-HT which is what we want */
5153 il->current_ht_config.smps = conf->smps_mode;
5154
5155 /*
5156 * Recalculate chain counts.
5157 *
5158 * If monitor mode is enabled then mac80211 will
5159 * set up the SM PS mode to OFF if an HT channel is
5160 * configured.
5161 */
5162 if (il->cfg->ops->hcmd->set_rxon_chain)
5163 il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
5164 }
5165
5166 /* during scanning mac80211 will delay channel setting until
5167 * scan finish with changed = 0
5168 */
5169 if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
5170
5171 if (scan_active)
5172 goto set_ch_out;
5173
5174 ch = channel->hw_value;
5175 ch_info = il_get_channel_info(il, channel->band, ch);
5176 if (!il_is_channel_valid(ch_info)) {
5177 D_MAC80211("leave - invalid channel\n");
5178 ret = -EINVAL;
5179 goto set_ch_out;
5180 }
5181
5182 if (il->iw_mode == NL80211_IFTYPE_ADHOC &&
5183 !il_is_channel_ibss(ch_info)) {
5184 D_MAC80211("leave - not IBSS channel\n");
5185 ret = -EINVAL;
5186 goto set_ch_out;
5187 }
5188
5189 spin_lock_irqsave(&il->lock, flags);
5190
5191 /* Configure HT40 channels */
5192 if (ctx->ht.enabled != conf_is_ht(conf)) {
5193 ctx->ht.enabled = conf_is_ht(conf);
5194 ht_changed = true;
5195 }
5196 if (ctx->ht.enabled) {
5197 if (conf_is_ht40_minus(conf)) {
5198 ctx->ht.extension_chan_offset =
5199 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
5200 ctx->ht.is_40mhz = true;
5201 } else if (conf_is_ht40_plus(conf)) {
5202 ctx->ht.extension_chan_offset =
5203 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
5204 ctx->ht.is_40mhz = true;
5205 } else {
5206 ctx->ht.extension_chan_offset =
5207 IEEE80211_HT_PARAM_CHA_SEC_NONE;
5208 ctx->ht.is_40mhz = false;
5209 }
5210 } else
5211 ctx->ht.is_40mhz = false;
5212
5213 /*
5214 * Default to no protection. Protection mode will
5215 * later be set from BSS config in il_ht_conf
5216 */
5217 ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
5218
5219 /* if we are switching from ht to 2.4 clear flags
5220 * from any ht related info since 2.4 does not
5221 * support ht */
5222 if ((le16_to_cpu(ctx->staging.channel) != ch))
5223 ctx->staging.flags = 0;
5224
5225 il_set_rxon_channel(il, channel, ctx);
5226 il_set_rxon_ht(il, ht_conf);
5227
5228 il_set_flags_for_band(il, ctx, channel->band, ctx->vif);
5229
5230 spin_unlock_irqrestore(&il->lock, flags);
5231
5232 if (il->cfg->ops->legacy->update_bcast_stations)
5233 ret = il->cfg->ops->legacy->update_bcast_stations(il);
5234
5235set_ch_out:
5236 /* The list of supported rates and rate mask can be different
5237 * for each band; since the band may have changed, reset
5238 * the rate mask to what mac80211 lists */
5239 il_set_rate(il);
5240 }
5241
5242 if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) {
5243 ret = il_power_update_mode(il, false);
5244 if (ret)
5245 D_MAC80211("Error setting sleep level\n");
5246 }
5247
5248 if (changed & IEEE80211_CONF_CHANGE_POWER) {
5249 D_MAC80211("TX Power old=%d new=%d\n", il->tx_power_user_lmt,
5250 conf->power_level);
5251
5252 il_set_tx_power(il, conf->power_level, false);
5253 }
5254
5255 if (!il_is_ready(il)) {
5256 D_MAC80211("leave - not ready\n");
5257 goto out;
5258 }
5259
5260 if (scan_active)
5261 goto out;
5262
5263 if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
5264 il_commit_rxon(il, ctx);
5265 else
5266 D_INFO("Not re-sending same RXON configuration.\n");
5267 if (ht_changed)
5268 il_update_qos(il, ctx);
5269
5270out:
5271 D_MAC80211("leave\n");
5272 mutex_unlock(&il->mutex);
5273 return ret;
5274}
5275EXPORT_SYMBOL(il_mac_config);
5276
/*
 * il_mac_reset_tsf - mac80211 reset_tsf callback
 *
 * Drops all association-related state (HT config, cached IBSS beacon
 * skb, TSF timestamp), cancels any scan in progress and re-sends an
 * unassociated RXON so the association process can restart cleanly.
 */
void
il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	unsigned long flags;
	struct il_rxon_context *ctx = &il->ctx;

	if (WARN_ON(!il->cfg->ops->legacy))
		return;

	mutex_lock(&il->mutex);
	D_MAC80211("enter\n");

	spin_lock_irqsave(&il->lock, flags);
	memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));
	spin_unlock_irqrestore(&il->lock, flags);

	spin_lock_irqsave(&il->lock, flags);

	/* new association get rid of ibss beacon skb */
	if (il->beacon_skb)
		dev_kfree_skb(il->beacon_skb);

	il->beacon_skb = NULL;

	il->timestamp = 0;

	spin_unlock_irqrestore(&il->lock, flags);

	/* Give an aborting scan up to 100ms to finish before we proceed. */
	il_scan_cancel_timeout(il, 100);
	if (!il_is_ready_rf(il)) {
		D_MAC80211("leave - not ready\n");
		mutex_unlock(&il->mutex);
		return;
	}

	/* we are restarting association process
	 * clear RXON_FILTER_ASSOC_MSK bit
	 */
	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	il_commit_rxon(il, ctx);

	il_set_rate(il);

	mutex_unlock(&il->mutex);

	D_MAC80211("leave\n");
}
EXPORT_SYMBOL(il_mac_reset_tsf);
5326
5327static void
5328il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif)
5329{
5330 struct il_ht_config *ht_conf = &il->current_ht_config;
5331 struct ieee80211_sta *sta;
5332 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
5333 struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
5334
5335 D_ASSOC("enter:\n");
5336
5337 if (!ctx->ht.enabled)
5338 return;
5339
5340 ctx->ht.protection =
5341 bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
5342 ctx->ht.non_gf_sta_present =
5343 !!(bss_conf->
5344 ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
5345
5346 ht_conf->single_chain_sufficient = false;
5347
5348 switch (vif->type) {
5349 case NL80211_IFTYPE_STATION:
5350 rcu_read_lock();
5351 sta = ieee80211_find_sta(vif, bss_conf->bssid);
5352 if (sta) {
5353 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
5354 int maxstreams;
5355
5356 maxstreams =
5357 (ht_cap->mcs.
5358 tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
5359 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
5360 maxstreams += 1;
5361
5362 if (ht_cap->mcs.rx_mask[1] == 0 &&
5363 ht_cap->mcs.rx_mask[2] == 0)
5364 ht_conf->single_chain_sufficient = true;
5365 if (maxstreams <= 1)
5366 ht_conf->single_chain_sufficient = true;
5367 } else {
5368 /*
5369 * If at all, this can only happen through a race
5370 * when the AP disconnects us while we're still
5371 * setting up the connection, in that case mac80211
5372 * will soon tell us about that.
5373 */
5374 ht_conf->single_chain_sufficient = true;
5375 }
5376 rcu_read_unlock();
5377 break;
5378 case NL80211_IFTYPE_ADHOC:
5379 ht_conf->single_chain_sufficient = true;
5380 break;
5381 default:
5382 break;
5383 }
5384
5385 D_ASSOC("leave\n");
5386}
5387
5388static inline void
5389il_set_no_assoc(struct il_priv *il, struct ieee80211_vif *vif)
5390{
5391 struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
5392
5393 /*
5394 * inform the ucode that there is no longer an
5395 * association and that no more packets should be
5396 * sent
5397 */
5398 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5399 ctx->staging.assoc_id = 0;
5400 il_commit_rxon(il, ctx);
5401}
5402
/*
 * il_beacon_update - refresh the cached beacon template
 *
 * Fetches a new beacon skb from mac80211 for @vif, swaps it in as the
 * cached beacon (freeing any previous one) and records its timestamp,
 * then invokes the HW-specific post_associate hook.
 * Caller must hold il->mutex.
 */
static void
il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct il_priv *il = hw->priv;
	unsigned long flags;
	__le64 timestamp;
	struct sk_buff *skb = ieee80211_beacon_get(hw, vif);

	if (!skb)
		return;

	D_MAC80211("enter\n");

	lockdep_assert_held(&il->mutex);

	if (!il->beacon_ctx) {
		IL_ERR("update beacon but no beacon context!\n");
		dev_kfree_skb(skb);
		return;
	}

	spin_lock_irqsave(&il->lock, flags);

	/* Free the previously cached beacon before installing the new one. */
	if (il->beacon_skb)
		dev_kfree_skb(il->beacon_skb);

	il->beacon_skb = skb;

	/* Record the TSF timestamp carried in the beacon frame. */
	timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
	il->timestamp = le64_to_cpu(timestamp);

	D_MAC80211("leave\n");
	spin_unlock_irqrestore(&il->lock, flags);

	if (!il_is_ready_rf(il)) {
		D_MAC80211("leave - RF not ready\n");
		return;
	}

	il->cfg->ops->legacy->post_associate(il);
}
5444
/*
 * il_mac_bss_info_changed - mac80211 bss_info_changed callback
 *
 * Applies BSS configuration changes (QoS, beaconing, BSSID, ERP,
 * basic rates, HT, association, IBSS membership) to the RXON context
 * and notifies the uCode. The order of the handlers below matters:
 * the BSSID must be staged before a beacon update or association
 * change can invoke post_associate.
 */
void
il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			struct ieee80211_bss_conf *bss_conf, u32 changes)
{
	struct il_priv *il = hw->priv;
	struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
	int ret;

	if (WARN_ON(!il->cfg->ops->legacy))
		return;

	D_MAC80211("changes = 0x%X\n", changes);

	mutex_lock(&il->mutex);

	if (!il_is_alive(il)) {
		mutex_unlock(&il->mutex);
		return;
	}

	if (changes & BSS_CHANGED_QOS) {
		unsigned long flags;

		spin_lock_irqsave(&il->lock, flags);
		ctx->qos_data.qos_active = bss_conf->qos;
		il_update_qos(il, ctx);
		spin_unlock_irqrestore(&il->lock, flags);
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		/*
		 * the add_interface code must make sure we only ever
		 * have a single interface that could be beaconing at
		 * any time.
		 */
		if (vif->bss_conf.enable_beacon)
			il->beacon_ctx = ctx;
		else
			il->beacon_ctx = NULL;
	}

	if (changes & BSS_CHANGED_BSSID) {
		D_MAC80211("BSSID %pM\n", bss_conf->bssid);

		/*
		 * If there is currently a HW scan going on in the
		 * background then we need to cancel it else the RXON
		 * below/in post_associate will fail.
		 */
		if (il_scan_cancel_timeout(il, 100)) {
			IL_WARN("Aborted scan still in progress after 100ms\n");
			D_MAC80211("leaving - scan abort failed.\n");
			mutex_unlock(&il->mutex);
			return;
		}

		/* mac80211 only sets assoc when in STATION mode */
		if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
			memcpy(ctx->staging.bssid_addr, bss_conf->bssid,
			       ETH_ALEN);

			/* currently needed in a few places */
			memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
		} else {
			ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
		}

	}

	/*
	 * This needs to be after setting the BSSID in case
	 * mac80211 decides to do both changes at once because
	 * it will invoke post_associate.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC && (changes & BSS_CHANGED_BEACON))
		il_beacon_update(hw, vif);

	if (changes & BSS_CHANGED_ERP_PREAMBLE) {
		D_MAC80211("ERP_PREAMBLE %d\n", bss_conf->use_short_preamble);
		if (bss_conf->use_short_preamble)
			ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	}

	if (changes & BSS_CHANGED_ERP_CTS_PROT) {
		D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
		/* TGG protection only applies outside the 5 GHz band. */
		if (bss_conf->use_cts_prot && il->band != IEEE80211_BAND_5GHZ)
			ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
		if (bss_conf->use_cts_prot)
			ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
		else
			ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
	}

	if (changes & BSS_CHANGED_BASIC_RATES) {
		/* XXX use this information
		 *
		 * To do that, remove code from il_set_rate() and put something
		 * like this here:
		 *
		 if (A-band)
		 ctx->staging.ofdm_basic_rates =
		 bss_conf->basic_rates;
		 else
		 ctx->staging.ofdm_basic_rates =
		 bss_conf->basic_rates >> 4;
		 ctx->staging.cck_basic_rates =
		 bss_conf->basic_rates & 0xF;
		 */
	}

	if (changes & BSS_CHANGED_HT) {
		il_ht_conf(il, vif);

		if (il->cfg->ops->hcmd->set_rxon_chain)
			il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
	}

	if (changes & BSS_CHANGED_ASSOC) {
		D_MAC80211("ASSOC %d\n", bss_conf->assoc);
		if (bss_conf->assoc) {
			il->timestamp = bss_conf->timestamp;

			if (!il_is_rfkill(il))
				il->cfg->ops->legacy->post_associate(il);
		} else
			il_set_no_assoc(il, vif);
	}

	/* While associated, push any remaining staged changes via RXON. */
	if (changes && il_is_associated_ctx(ctx) && bss_conf->aid) {
		D_MAC80211("Changes (%#x) while associated\n", changes);
		ret = il_send_rxon_assoc(il, ctx);
		if (!ret) {
			/* Sync active_rxon with latest change. */
			memcpy((void *)&ctx->active, &ctx->staging,
			       sizeof(struct il_rxon_cmd));
		}
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		if (vif->bss_conf.enable_beacon) {
			memcpy(ctx->staging.bssid_addr, bss_conf->bssid,
			       ETH_ALEN);
			memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
			il->cfg->ops->legacy->config_ap(il);
		} else
			il_set_no_assoc(il, vif);
	}

	if (changes & BSS_CHANGED_IBSS) {
		ret =
		    il->cfg->ops->legacy->manage_ibss_station(il, vif,
							      bss_conf->
							      ibss_joined);
		if (ret)
			IL_ERR("failed to %s IBSS station %pM\n",
			       bss_conf->ibss_joined ? "add" : "remove",
			       bss_conf->bssid);
	}

	mutex_unlock(&il->mutex);

	D_MAC80211("leave\n");
}
EXPORT_SYMBOL(il_mac_bss_info_changed);
5613
/*
 * il_isr - shared hard-IRQ handler
 *
 * Masks NIC interrupts (without acking them), reads the pending CSR
 * and FH interrupt bits, and schedules the irq tasklet to do the real
 * servicing. Returns IRQ_NONE when the line fired with nothing
 * pending (shared IRQ or spurious), IRQ_HANDLED otherwise.
 */
irqreturn_t
il_isr(int irq, void *data)
{
	struct il_priv *il = data;
	u32 inta, inta_mask;
	u32 inta_fh;
	unsigned long flags;
	if (!il)
		return IRQ_NONE;

	spin_lock_irqsave(&il->lock, flags);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = _il_rd(il, CSR_INT_MASK);	/* just for debug */
	_il_wr(il, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = _il_rd(il, CSR_INT);
	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta && !inta_fh) {
		D_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
		goto none;
	}

	/* All-ones / 0xa5a5a5ax reads indicate the device dropped off
	 * the bus (e.g. surprise removal). */
	if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IL_WARN("HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

	D_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, inta_mask,
	      inta_fh);

	/* Drop the SCD bit so it alone does not trigger the tasklet. */
	inta &= ~CSR_INT_BIT_SCD;

	/* il_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta || inta_fh))
		tasklet_schedule(&il->irq_tasklet);

unplugged:
	spin_unlock_irqrestore(&il->lock, flags);
	return IRQ_HANDLED;

none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only Re-enable if disabled by irq */
	if (test_bit(S_INT_ENABLED, &il->status))
		il_enable_interrupts(il);
	spin_unlock_irqrestore(&il->lock, flags);
	return IRQ_NONE;
}
EXPORT_SYMBOL(il_isr);
5674
5675/*
5676 * il_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this
5677 * function.
5678 */
5679void
5680il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
5681 __le16 fc, __le32 *tx_flags)
5682{
5683 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
5684 *tx_flags |= TX_CMD_FLG_RTS_MSK;
5685 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
5686 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
5687
5688 if (!ieee80211_is_mgmt(fc))
5689 return;
5690
5691 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
5692 case cpu_to_le16(IEEE80211_STYPE_AUTH):
5693 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
5694 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
5695 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
5696 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
5697 *tx_flags |= TX_CMD_FLG_CTS_MSK;
5698 break;
5699 }
5700 } else if (info->control.rates[0].
5701 flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
5702 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
5703 *tx_flags |= TX_CMD_FLG_CTS_MSK;
5704 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
5705 }
5706}
5707EXPORT_SYMBOL(il_tx_cmd_protection);
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
new file mode 100644
index 000000000000..1bc0b02f559c
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -0,0 +1,3424 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26#ifndef __il_core_h__
27#define __il_core_h__
28
29#include <linux/interrupt.h>
30#include <linux/pci.h> /* for struct pci_device_id */
31#include <linux/kernel.h>
32#include <linux/leds.h>
33#include <linux/wait.h>
34#include <net/mac80211.h>
35#include <net/ieee80211_radiotap.h>
36
37#include "commands.h"
38#include "csr.h"
39#include "prph.h"
40
41struct il_host_cmd;
42struct il_cmd;
43struct il_tx_queue;
44
45#define IL_ERR(f, a...) dev_err(&il->pci_dev->dev, f, ## a)
46#define IL_WARN(f, a...) dev_warn(&il->pci_dev->dev, f, ## a)
47#define IL_INFO(f, a...) dev_info(&il->pci_dev->dev, f, ## a)
48
/* Rx ring: 256 entries; MASK and SIZE_LOG must stay in sync with SIZE */
#define RX_QUEUE_SIZE 256
#define RX_QUEUE_MASK 255
#define RX_QUEUE_SIZE_LOG 8

/*
 * RX related structures and functions
 */
#define RX_FREE_BUFFERS 64
#define RX_LOW_WATERMARK 8

/* number of pad bytes needed to round n up to a 4-byte boundary */
#define U32_PAD(n) ((4-(n))&0x3)
60
61/* CT-KILL constants */
62#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
63
64/* Default noise level to report when noise measurement is not available.
65 * This may be because we're:
66 * 1) Not associated (4965, no beacon stats being sent to driver)
67 * 2) Scanning (noise measurement does not apply to associated channel)
68 * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
69 * Use default noise value of -127 ... this is below the range of measurable
70 * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
71 * Also, -127 works better than 0 when averaging frames with/without
72 * noise info (e.g. averaging might be done in app); measured dBm values are
73 * always negative ... using a negative value as the default keeps all
74 * averages within an s8's (used in some apps) range of negative values. */
75#define IL_NOISE_MEAS_NOT_AVAILABLE (-127)
76
77/*
78 * RTS threshold here is total size [2347] minus 4 FCS bytes
79 * Per spec:
80 * a value of 0 means RTS on all data/management packets
81 * a value > max MSDU size means no RTS
82 * else RTS for data/management frames where MPDU is larger
83 * than RTS value.
84 */
85#define DEFAULT_RTS_THRESHOLD 2347U
86#define MIN_RTS_THRESHOLD 0U
87#define MAX_RTS_THRESHOLD 2347U
88#define MAX_MSDU_SIZE 2304U
89#define MAX_MPDU_SIZE 2346U
90#define DEFAULT_BEACON_INTERVAL 100U
91#define DEFAULT_SHORT_RETRY_LIMIT 7U
92#define DEFAULT_LONG_RETRY_LIMIT 4U
93
/* One driver-owned Rx buffer: a DMA-mapped page kept on the rx_free
 * or rx_used list of struct il_rx_queue. */
struct il_rx_buf {
	dma_addr_t page_dma;	/* bus address of the mapped page */
	struct page *page;	/* backing page */
	struct list_head list;	/* link into rx_free / rx_used */
};
99
100#define rxb_addr(r) page_address(r->page)
101
102/* defined below */
103struct il_device_cmd;
104
/* Driver bookkeeping for one entry in the command queue. */
struct il_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct il_host_cmd *source;
	/*
	 * only for ASYNC commands
	 * (which is somewhat stupid -- look at common.c for instance
	 * which duplicates a bunch of code because the callback isn't
	 * invoked for SYNC commands, if it were and its result passed
	 * through it would be simpler...)
	 */
	void (*callback) (struct il_priv *il, struct il_device_cmd *cmd,
			  struct il_rx_pkt *pkt);

	/* The CMD_SIZE_HUGE flag bit indicates that the command
	 * structure is stored at the end of the shared queue memory. */
	u32 flags;

	/* DMA unmap bookkeeping for the command buffer */
	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(len);
};
125
126/*
127 * Generic queue structure
128 *
129 * Contains common data for Rx and Tx queues
130 */
struct il_queue {
	int n_bd;		/* number of BDs in this queue */
	int write_ptr;		/* 1-st empty entry (idx) host_w */
	int read_ptr;		/* last used entry (idx) host_r */
	/* use for monitoring and recovering the stuck queue */
	dma_addr_t dma_addr;	/* physical addr for BD's */
	int n_win;		/* safe queue win */
	u32 id;			/* queue identifier */
	int low_mark;		/* low watermark, resume queue if free
				 * space more than this */
	int high_mark;		/* high watermark, stop queue if free
				 * space less than this */
};
144
145/* One for each TFD */
struct il_tx_info {
	struct sk_buff *skb;		/* frame mapped into this TFD */
	struct il_rxon_context *ctx;	/* context the frame belongs to */
};
150
151/**
152 * struct il_tx_queue - Tx Queue for DMA
153 * @q: generic Rx/Tx queue descriptor
154 * @bd: base of circular buffer of TFDs
155 * @cmd: array of command/TX buffer pointers
156 * @meta: array of meta data for each command/tx buffer
157 * @dma_addr_cmd: physical address of cmd/tx buffer array
158 * @txb: array of per-TFD driver data
159 * @time_stamp: time (in jiffies) of last read_ptr change
160 * @need_update: indicates need to update read/write idx
161 * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
162 *
163 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
164 * descriptors) and required locking structures.
165 */
166#define TFD_TX_CMD_SLOTS 256
167#define TFD_CMD_SLOTS 32
168
struct il_tx_queue {
	struct il_queue q;	/* generic ring bookkeeping */
	void *tfds;		/* circular buffer of TFDs */
	struct il_device_cmd **cmd;	/* one command buffer per slot */
	struct il_cmd_meta *meta;	/* driver meta data per slot */
	struct il_tx_info *txb;	/* per-TFD driver data */
	unsigned long time_stamp;	/* jiffies of last read_ptr change */
	u8 need_update;		/* need to update read/write idx */
	u8 sched_retry;		/* queue is HT AGG enabled */
	u8 active;		/* queue-in-use flag (set by hw-specific code) */
	u8 swq_id;		/* software queue id */
};
181
182/*
183 * EEPROM access time values:
184 *
185 * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
186 * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
187 * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
188 * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
189 */
190#define IL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
191
192#define IL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
193#define IL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
194
195/*
196 * Regulatory channel usage flags in EEPROM struct il4965_eeprom_channel.flags.
197 *
198 * IBSS and/or AP operation is allowed *only* on those channels with
199 * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
200 * RADAR detection is not supported by the 4965 driver, but is a
201 * requirement for establishing a new network for legal operation on channels
202 * requiring RADAR detection or restricting ACTIVE scanning.
203 *
204 * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
205 * It only indicates that 20 MHz channel use is supported; HT40 channel
206 * usage is indicated by a separate set of regulatory flags for each
207 * HT40 channel pair.
208 *
209 * NOTE: Using a channel inappropriately will result in a uCode error!
210 */
211#define IL_NUM_TX_CALIB_GROUPS 5
/* per-channel regulatory usage flags, as stored in il_eeprom_channel.flags */
enum {
	EEPROM_CHANNEL_VALID = (1 << 0),	/* usable for this SKU/geo */
	EEPROM_CHANNEL_IBSS = (1 << 1),	/* usable as an IBSS channel */
	/* Bit 2 Reserved */
	EEPROM_CHANNEL_ACTIVE = (1 << 3),	/* active scanning allowed */
	EEPROM_CHANNEL_RADAR = (1 << 4),	/* radar detection required */
	EEPROM_CHANNEL_WIDE = (1 << 5),	/* 20 MHz channel okay */
	/* Bit 6 Reserved (was Narrow Channel) */
	EEPROM_CHANNEL_DFS = (1 << 7),	/* dynamic freq selection candidate */
};
222
223/* SKU Capabilities */
224/* 3945 only */
225#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
226#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
227
228/* *regulatory* channel data format in eeprom, one for each channel.
229 * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
/* raw 2-byte per-channel regulatory entry, exactly as stored in EEPROM */
struct il_eeprom_channel {
	u8 flags;		/* EEPROM_CHANNEL_* flags copied from EEPROM */
	s8 max_power_avg;	/* max power (dBm) on this chnl, limit 31 */
} __packed;
234
235/* 3945 Specific */
236#define EEPROM_3945_EEPROM_VERSION (0x2f)
237
238/* 4965 has two radio transmitters (and 3 radio receivers) */
239#define EEPROM_TX_POWER_TX_CHAINS (2)
240
241/* 4965 has room for up to 8 sets of txpower calibration data */
242#define EEPROM_TX_POWER_BANDS (8)
243
244/* 4965 factory calibration measures txpower gain settings for
245 * each of 3 target output levels */
246#define EEPROM_TX_POWER_MEASUREMENTS (3)
247
248/* 4965 Specific */
249/* 4965 driver does not work with txpower calibration version < 5 */
250#define EEPROM_4965_TX_POWER_VERSION (5)
251#define EEPROM_4965_EEPROM_VERSION (0x2f)
252#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */
253#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */
254#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */
255#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */
256
257/* 2.4 GHz */
258extern const u8 il_eeprom_band_1[14];
259
260/*
261 * factory calibration data for one txpower level, on one channel,
262 * measured on one of the 2 tx chains (radio transmitter and associated
263 * antenna). EEPROM contains:
264 *
265 * 1) Temperature (degrees Celsius) of device when measurement was made.
266 *
267 * 2) Gain table idx used to achieve the target measurement power.
268 * This refers to the "well-known" gain tables (see 4965.h).
269 *
270 * 3) Actual measured output power, in half-dBm ("34" = 17 dBm).
271 *
272 * 4) RF power amplifier detector level measurement (not used).
273 */
/* one factory txpower measurement (see block comment above for details) */
struct il_eeprom_calib_measure {
	u8 temperature;		/* Device temperature (Celsius) */
	u8 gain_idx;		/* Index into gain table */
	u8 actual_pow;		/* Measured RF output power, half-dBm */
	s8 pa_det;		/* Power amp detector level (not used) */
} __packed;
280
281/*
282 * measurement set for one channel. EEPROM contains:
283 *
284 * 1) Channel number measured
285 *
286 * 2) Measurements for each of 3 power levels for each of 2 radio transmitters
287 * (a.k.a. "tx chains") (6 measurements altogether)
288 */
/* factory measurement set for one channel */
struct il_eeprom_calib_ch_info {
	u8 ch_num;		/* channel number measured */
	/* indexed by [tx chain][power level] */
	struct il_eeprom_calib_measure
		measurements[EEPROM_TX_POWER_TX_CHAINS]
		[EEPROM_TX_POWER_MEASUREMENTS];
} __packed;
295
296/*
297 * txpower subband info.
298 *
299 * For each frequency subband, EEPROM contains the following:
300 *
301 * 1) First and last channels within range of the subband. "0" values
302 * indicate that this sample set is not being used.
303 *
304 * 2) Sample measurement sets for 2 channels close to the range endpoints.
305 */
struct il_eeprom_calib_subband_info {
	u8 ch_from;		/* channel number of lowest channel in subband */
	u8 ch_to;		/* channel number of highest channel in subband */
	/* sample measurement sets near the two subband endpoints */
	struct il_eeprom_calib_ch_info ch1;
	struct il_eeprom_calib_ch_info ch2;
} __packed;
312
313/*
314 * txpower calibration info. EEPROM contains:
315 *
316 * 1) Factory-measured saturation power levels (maximum levels at which
317 * tx power amplifier can output a signal without too much distortion).
318 * There is one level for 2.4 GHz band and one for 5 GHz band. These
319 * values apply to all channels within each of the bands.
320 *
321 * 2) Factory-measured power supply voltage level. This is assumed to be
322 * constant (i.e. same value applies to all channels/bands) while the
323 * factory measurements are being made.
324 *
325 * 3) Up to 8 sets of factory-measured txpower calibration values.
326 * These are for different frequency ranges, since txpower gain
327 * characteristics of the analog radio circuitry vary with frequency.
328 *
329 * Not all sets need to be filled with data;
330 * struct il_eeprom_calib_subband_info contains range of channels
331 * (0 if unused) for each set of data.
332 */
struct il_eeprom_calib_info {
	u8 saturation_power24;	/* half-dBm (e.g. "34" = 17 dBm) */
	u8 saturation_power52;	/* half-dBm */
	__le16 voltage;		/* signed */
	/* up to 8 subband data sets; unused sets have 0 channel range */
	struct il_eeprom_calib_subband_info band_info[EEPROM_TX_POWER_BANDS];
} __packed;
339
340/* General */
341#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
342#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
343#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
344#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
345#define EEPROM_VERSION (2*0x44) /* 2 bytes */
346#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
347#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
348#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
349#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
350#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
351
352/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
353#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
354#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
355#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
356#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
357#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
358#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
359
360#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0
361#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1
362
363/*
364 * Per-channel regulatory data.
365 *
366 * Each channel that *might* be supported by iwl has a fixed location
367 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
368 * txpower (MSB).
369 *
370 * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz)
371 * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
372 *
373 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
374 */
375#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
376#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
377#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
378
379/*
380 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
381 * 5.0 GHz channels 7, 8, 11, 12, 16
382 * (4915-5080MHz) (none of these is ever supported)
383 */
384#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
385#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
386
387/*
388 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
389 * (5170-5320MHz)
390 */
391#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
392#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
393
394/*
395 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
396 * (5500-5700MHz)
397 */
398#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
399#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
400
401/*
402 * 5.7 GHz channels 145, 149, 153, 157, 161, 165
403 * (5725-5825MHz)
404 */
405#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
406#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
407
408/*
409 * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
410 *
411 * The channel listed is the center of the lower 20 MHz half of the channel.
412 * The overall center frequency is actually 2 channels (10 MHz) above that,
413 * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
414 * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
415 * and the overall HT40 channel width centers on channel 3.
416 *
417 * NOTE: The RXON command uses 20 MHz channel numbers to specify the
418 * control channel to which to tune. RXON also specifies whether the
419 * control channel is the upper or lower half of a HT40 channel.
420 *
421 * NOTE: 4965 does not support HT40 channels on 2.4 GHz.
422 */
423#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */
424
425/*
426 * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
427 * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
428 */
429#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */
430
431#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
432
/* per-device EEPROM access hooks and regulatory band layout */
struct il_eeprom_ops {
	const u32 regulatory_bands[7];	/* EEPROM offsets of reg. band data */
	int (*acquire_semaphore) (struct il_priv *il);
	void (*release_semaphore) (struct il_priv *il);
};
438
439int il_eeprom_init(struct il_priv *il);
440void il_eeprom_free(struct il_priv *il);
441const u8 *il_eeprom_query_addr(const struct il_priv *il, size_t offset);
442u16 il_eeprom_query16(const struct il_priv *il, size_t offset);
443int il_init_channel_map(struct il_priv *il);
444void il_free_channel_map(struct il_priv *il);
445const struct il_channel_info *il_get_channel_info(const struct il_priv *il,
446 enum ieee80211_band band,
447 u16 channel);
448
449#define IL_NUM_SCAN_RATES (2)
450
/* 4965 per-channel regulatory (TGD) info */
struct il4965_channel_tgd_info {
	u8 type;
	s8 max_power;		/* max allowed power, dBm -- TODO confirm units */
};
455
/* 4965 per-channel radar (TGH) info */
struct il4965_channel_tgh_info {
	s64 last_radar_time;	/* presumably time radar was last seen here */
};
459
460#define IL4965_MAX_RATE (33)
461
/* 3945: per-band clip powers, one entry per rate */
struct il3945_clip_group {
	/* maximum power level to prevent clipping for each rate, derived by
	 * us from this band's saturation power in EEPROM */
	const s8 clip_powers[IL_MAX_RATES];
};
467
468/* current Tx power values to use, one for each rate for each channel.
469 * requested power is limited by:
470 * -- regulatory EEPROM limits for this channel
471 * -- hardware capabilities (clip-powers)
472 * -- spectrum management
473 * -- user preference (e.g. iwconfig)
474 * when requested power is set, base power idx must also be set. */
struct il3945_channel_power_info {
	struct il3945_tx_power tpc;	/* actual radio and DSP gain settings */
	s8 power_table_idx;	/* actual (compensated) idx into gain table */
	s8 base_power_idx;	/* gain idx for power at factory temp. */
	s8 requested_power;	/* power (dBm) requested for this chnl/rate */
};
481
482/* current scan Tx power values to use, one for each scan rate for each
483 * channel. */
struct il3945_scan_power_info {
	struct il3945_tx_power tpc;	/* actual radio and DSP gain settings */
	s8 power_table_idx;	/* actual (compensated) idx into gain table */
	s8 requested_power;	/* scan pwr (dBm) requested for chnl/rate */
};
489
490/*
491 * One for each channel, holds all channel setup data
492 * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
493 * with one another!
494 */
struct il_channel_info {
	struct il4965_channel_tgd_info tgd;	/* 4965 regulatory info */
	struct il4965_channel_tgh_info tgh;	/* 4965 radar info */
	struct il_eeprom_channel eeprom;	/* EEPROM regulatory limit */
	struct il_eeprom_channel ht40_eeprom;	/* EEPROM regulatory limit for
						 * HT40 channel */

	u8 channel;		/* channel number */
	u8 flags;		/* flags copied from EEPROM */
	s8 max_power_avg;	/* (dBm) regul. eeprom, normal Tx, any rate */
	s8 curr_txpow;		/* (dBm) regulatory/spectrum/user (not h/w) limit */
	s8 min_power;		/* always 0 */
	s8 scan_power;		/* (dBm) regul. eeprom, direct scans, any rate */

	u8 group_idx;		/* 0-4, maps channel to group1/2/3/4/5 */
	u8 band_idx;		/* 0-4, maps channel to band1/2/3/4/5 */
	enum ieee80211_band band;	/* 2.4 or 5 GHz band */

	/* HT40 channel info */
	s8 ht40_max_power_avg;	/* (dBm) regul. eeprom, normal Tx, any rate */
	u8 ht40_flags;		/* flags copied from EEPROM */
	u8 ht40_extension_channel;	/* HT_IE_EXT_CHANNEL_* */

	/* Radio/DSP gain settings for each "normal" data Tx rate.
	 * These include, in addition to RF and DSP gain, a few fields for
	 * remembering/modifying gain settings (idxes). */
	struct il3945_channel_power_info power_info[IL4965_MAX_RATE];

	/* Radio/DSP gain settings for each scan rate, for directed scans. */
	struct il3945_scan_power_info scan_pwr_info[IL_NUM_SCAN_RATES];
};
526
527#define IL_TX_FIFO_BK 0 /* shared */
528#define IL_TX_FIFO_BE 1
529#define IL_TX_FIFO_VI 2 /* shared */
530#define IL_TX_FIFO_VO 3
531#define IL_TX_FIFO_UNUSED -1
532
533/* Minimum number of queues. MAX_NUM is defined in hw specific files.
534 * Set the minimum to accommodate the 4 standard TX queues, 1 command
535 * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
536#define IL_MIN_NUM_QUEUES 10
537
538#define IL_DEFAULT_CMD_QUEUE_NUM 4
539
540#define IEEE80211_DATA_LEN 2304
541#define IEEE80211_4ADDR_LEN 30
542#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
543#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
544
/* scratch buffer large enough for a raw frame or a beacon command */
struct il_frame {
	union {
		struct ieee80211_hdr frame;
		struct il_tx_beacon_cmd beacon;
		u8 raw[IEEE80211_FRAME_LEN];
		u8 cmd[360];
	} u;
	struct list_head list;	/* link into the driver's frame list */
};
554
555#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
556#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
557#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
558
/* flag values for il_host_cmd / il_cmd_meta (see struct il_cmd_meta) */
enum {
	CMD_SYNC = 0,
	CMD_SIZE_NORMAL = 0,
	CMD_NO_SKB = 0,
	CMD_SIZE_HUGE = (1 << 0),	/* use big buffer at end of cmd queue mem */
	CMD_ASYNC = (1 << 1),	/* completion delivered via callback */
	CMD_WANT_SKB = (1 << 2),	/* caller wants the reply page */
	CMD_MAPPED = (1 << 3),	/* command buffer is DMA-mapped */
};
568
569#define DEF_CMD_PAYLOAD_SIZE 320
570
571/**
572 * struct il_device_cmd
573 *
574 * For allocation of the command and tx queues, this establishes the overall
575 * size of the largest command we send to uCode, except for a scan command
576 * (which is relatively huge; space is allocated separately).
577 */
struct il_device_cmd {
	struct il_cmd_header hdr;	/* uCode API */
	/* payload; interpretation depends on the command id in hdr */
	union {
		u32 flags;
		u8 val8;
		u16 val16;
		u32 val32;
		struct il_tx_cmd tx;
		u8 payload[DEF_CMD_PAYLOAD_SIZE];
	} __packed cmd;
} __packed;
589
590#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct il_device_cmd))
591
/* descriptor for one host command submitted by the driver */
struct il_host_cmd {
	const void *data;	/* command payload */
	unsigned long reply_page;	/* reply rx page (when CMD_WANT_SKB) */
	void (*callback) (struct il_priv *il, struct il_device_cmd *cmd,
			  struct il_rx_pkt *pkt);
	u32 flags;		/* CMD_* flags above */
	u16 len;		/* payload length in bytes */
	u8 id;			/* command opcode */
};
601
602#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
603#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
604#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
605
606/**
607 * struct il_rx_queue - Rx queue
608 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
609 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
610 * @read: Shared idx to newest available Rx buffer
611 * @write: Shared idx to oldest written Rx packet
612 * @free_count: Number of pre-allocated buffers in rx_free
613 * @rx_free: list of free SKBs for use
614 * @rx_used: List of Rx buffers with no SKB
615 * @need_update: flag to indicate we need to update read/write idx
616 * @rb_stts: driver's pointer to receive buffer status
617 * @rb_stts_dma: bus address of receive buffer status
618 *
619 * NOTE: rx_free and rx_used are used as a FIFO for il_rx_bufs
620 */
struct il_rx_queue {
	__le32 *bd;
	dma_addr_t bd_dma;
	/* backing store for all Rx buffers, free and in use */
	struct il_rx_buf pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
	/* buffers currently mapped into the bd ring, by ring idx */
	struct il_rx_buf *queue[RX_QUEUE_SIZE];
	u32 read;
	u32 write;
	u32 free_count;
	u32 write_actual;	/* NOTE(review): looks like the write value
				 * last handed to hw -- confirm in rx code */
	struct list_head rx_free;
	struct list_head rx_used;
	int need_update;
	struct il_rb_status *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;	/* protects ring idxs and buffer lists */
};
637
638#define IL_SUPPORTED_RATES_IE_LEN 8
639
640#define MAX_TID_COUNT 9
641
642#define IL_INVALID_RATE 0xFF
643#define IL_INVALID_VALUE -1
644
645/**
646 * struct il_ht_agg -- aggregation status while waiting for block-ack
647 * @txq_id: Tx queue used for Tx attempt
648 * @frame_count: # frames attempted by Tx command
649 * @wait_for_ba: Expect block-ack before next Tx reply
650 * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx win
651 * @bitmap0: Low order bitmap, one bit for each frame pending ACK in Tx win
652 * @bitmap1: High order, one bit for each frame pending ACK in Tx win
653 * @rate_n_flags: Rate at which Tx was attempted
654 *
655 * If C_TX indicates that aggregation was attempted, driver must wait
656 * for block ack (N_COMPRESSED_BA). This struct stores tx reply info
657 * until block ack arrives.
658 */
struct il_ht_agg {
	u16 txq_id;		/* Tx queue used for Tx attempt */
	u16 frame_count;	/* # frames attempted by Tx command */
	u16 wait_for_ba;	/* expect block-ack before next Tx reply */
	u16 start_idx;		/* idx of 1st TFD in Tx win */
	u64 bitmap;		/* one bit per frame pending ACK in Tx win */
	u32 rate_n_flags;	/* rate at which Tx was attempted */
/* values for @state below */
#define IL_AGG_OFF 0
#define IL_AGG_ON 1
#define IL_EMPTYING_HW_QUEUE_ADDBA 2
#define IL_EMPTYING_HW_QUEUE_DELBA 3
	u8 state;
};
672
/* per-TID Tx state for one station */
struct il_tid_data {
	u16 seq_number;		/* 4965 only */
	u16 tfds_in_queue;	/* TFDs currently queued for this TID */
	struct il_ht_agg agg;	/* aggregation (block-ack) state */
};
678
/* key material for one hardware security association */
struct il_hw_key {
	u32 cipher;		/* cipher suite selector */
	int keylen;		/* key length in bytes */
	u8 keyidx;		/* key index */
	u8 key[32];		/* key material */
};
685
/* HT rate support, as one u16 or as per-stream bytes */
union il_ht_rate_supp {
	u16 rates;
	struct {
		u8 siso_rate;	/* single-stream rates */
		u8 mimo_rate;	/* dual-stream rates */
	};
};
693
694#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0)
695#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1)
696#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2)
697#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3)
698#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K
699#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K
700#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K
701
702/*
703 * Maximal MPDU density for TX aggregation
704 * 4 - 2us density
705 * 5 - 4us density
706 * 6 - 8us density
707 * 7 - 16us density
708 */
709#define CFG_HT_MPDU_DENSITY_2USEC (0x4)
710#define CFG_HT_MPDU_DENSITY_4USEC (0x5)
711#define CFG_HT_MPDU_DENSITY_8USEC (0x6)
712#define CFG_HT_MPDU_DENSITY_16USEC (0x7)
713#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
714#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
715#define CFG_HT_MPDU_DENSITY_MIN (0x1)
716
struct il_ht_config {
	bool single_chain_sufficient;	/* one RX chain is enough */
	enum ieee80211_smps_mode smps;	/* current smps mode */
};
721
722/* QoS structures */
/* QoS structures */
struct il_qos_info {
	int qos_active;		/* QoS currently enabled */
	struct il_qosparam_cmd def_qos_parm;	/* params sent to uCode */
};
727
728/*
729 * Structure should be accessed with sta_lock held. When station addition
730 * is in progress (IL_STA_UCODE_INPROGRESS) it is possible to access only
731 * the commands (il_addsta_cmd and il_link_quality_cmd) without
732 * sta_lock held.
733 */
struct il_station_entry {
	struct il_addsta_cmd sta;	/* ADD_STA command for this station */
	struct il_tid_data tid[MAX_TID_COUNT];	/* per-TID Tx state */
	u8 used, ctxid;		/* usage flags / RXON context id */
	struct il_hw_key keyinfo;	/* installed key material */
	struct il_link_quality_cmd *lq;	/* link quality (rate scale) cmd */
};
741
/* common part of the driver's per-station private data */
struct il_station_priv_common {
	struct il_rxon_context *ctx;	/* context the station belongs to */
	u8 sta_id;			/* station table idx */
};
746
/**
 * struct il_vif_priv - driver's private per-interface information
 *
 * When mac80211 allocates a virtual interface, it can allocate
 * space for us to put data into.
 */
struct il_vif_priv {
	struct il_rxon_context *ctx;	/* RXON context of this interface */
	u8 ibss_bssid_sta_id;		/* station id of the IBSS BSSID */
};
757
758/* one for each uCode image (inst/data, boot/init/runtime) */
/* descriptor for one firmware image section held in DMA memory */
struct fw_desc {
	void *v_addr;		/* access by driver */
	dma_addr_t p_addr;	/* access by card's busmaster DMA */
	u32 len;		/* bytes */
};
764
765/* uCode file layout */
struct il_ucode_header {
	__le32 ver;		/* major/minor/API/serial */
	struct {
		__le32 inst_size;	/* bytes of runtime code */
		__le32 data_size;	/* bytes of runtime data */
		__le32 init_size;	/* bytes of init code */
		__le32 init_data_size;	/* bytes of init data */
		__le32 boot_size;	/* bytes of bootstrap code */
		u8 data[0];	/* in same order as sizes */
	} v1;			/* version-1 file layout */
};
777
/* per-peer sequence/fragment tracking for IBSS (4965) */
struct il4965_ibss_seq {
	u8 mac[ETH_ALEN];	/* peer MAC address */
	u16 seq_num;		/* sequence number */
	u16 frag_num;		/* fragment number */
	unsigned long packet_time;	/* jiffies timestamp */
	struct list_head list;
};
785
/* device-specific bounds used by the Rx sensitivity auto-calibration */
struct il_sensitivity_ranges {
	/* CCK energy limits */
	u16 min_nrg_cck;
	u16 max_nrg_cck;

	/* energy-detect thresholds */
	u16 nrg_th_cck;
	u16 nrg_th_ofdm;

	/* OFDM auto-correlation lower bounds */
	u16 auto_corr_min_ofdm;
	u16 auto_corr_min_ofdm_mrc;
	u16 auto_corr_min_ofdm_x1;
	u16 auto_corr_min_ofdm_mrc_x1;

	/* OFDM auto-correlation upper bounds */
	u16 auto_corr_max_ofdm;
	u16 auto_corr_max_ofdm_mrc;
	u16 auto_corr_max_ofdm_x1;
	u16 auto_corr_max_ofdm_mrc_x1;

	/* CCK auto-correlation bounds */
	u16 auto_corr_max_cck;
	u16 auto_corr_max_cck_mrc;
	u16 auto_corr_min_cck;
	u16 auto_corr_min_cck_mrc;

	/* barker correlation / CCA thresholds */
	u16 barker_corr_th_min;
	u16 barker_corr_th_min_mrc;
	u16 nrg_th_cca;
};
812
813#define KELVIN_TO_CELSIUS(x) ((x)-273)
814#define CELSIUS_TO_KELVIN(x) ((x)+273)
815
816/**
817 * struct il_hw_params
818 * @max_txq_num: Max # Tx queues supported
819 * @dma_chnl_num: Number of Tx DMA/FIFO channels
820 * @scd_bc_tbls_size: size of scheduler byte count tables
821 * @tfd_size: TFD size
822 * @tx/rx_chains_num: Number of TX/RX chains
823 * @valid_tx/rx_ant: usable antennas
824 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
825 * @max_rxq_log: Log-base-2 of max_rxq_size
826 * @rx_page_order: Rx buffer page order
827 * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
828 * @max_stations:
829 * @ht40_channel: is 40MHz width possible in band 2.4
830 * BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ)
831 * @sw_crypto: 0 for hw, 1 for sw
832 * @max_xxx_size: for ucode uses
833 * @ct_kill_threshold: temperature threshold
834 * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
835 * @struct il_sensitivity_ranges: range of sensitivity values
836 */
struct il_hw_params {
	u8 max_txq_num;
	u8 dma_chnl_num;
	u16 scd_bc_tbls_size;
	u32 tfd_size;
	u8 tx_chains_num;
	u8 rx_chains_num;
	u8 valid_tx_ant;
	u8 valid_rx_ant;
	u16 max_rxq_size;
	u16 max_rxq_log;
	u32 rx_page_order;
	u32 rx_wrt_ptr_reg;
	u8 max_stations;
	u8 ht40_channel;
	u8 max_beacon_itrvl;	/* in 1024 ms */
	u32 max_inst_size;
	u32 max_data_size;
	u32 max_bsm_size;
	u32 ct_kill_threshold;	/* value in hw-dependent units */
	u16 beacon_time_tsf_bits;	/* # valid tsf bits for beacon time */
	const struct il_sensitivity_ranges *sens;	/* calib limits */
};
860
861/******************************************************************************
862 *
863 * Functions implemented in core module which are forward declared here
864 * for use by iwl-[4-5].c
865 *
866 * NOTE: The implementation of these functions are not hardware specific
867 * which is why they are in the core module files.
868 *
869 * Naming convention --
870 * il_ <-- Is part of iwlwifi
871 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
872 * il4965_bg_ <-- Called from work queue context
873 * il4965_mac_ <-- mac80211 callback
874 *
875 ****************************************************************************/
876extern void il4965_update_chain_flags(struct il_priv *il);
877extern const u8 il_bcast_addr[ETH_ALEN];
878extern int il_queue_space(const struct il_queue *q);
879static inline int
880il_queue_used(const struct il_queue *q, int i)
881{
882 return q->write_ptr >= q->read_ptr ? (i >= q->read_ptr &&
883 i < q->write_ptr) : !(i <
884 q->read_ptr
885 && i >=
886 q->
887 write_ptr);
888}
889
890static inline u8
891il_get_cmd_idx(struct il_queue *q, u32 idx, int is_huge)
892{
893 /*
894 * This is for init calibration result and scan command which
895 * required buffer > TFD_MAX_PAYLOAD_SIZE,
896 * the big buffer at end of command array
897 */
898 if (is_huge)
899 return q->n_win; /* must be power of 2 */
900
901 /* Otherwise, use normal size buffers */
902 return idx & (q->n_win - 1);
903}
904
/* a DMA allocation: CPU pointer, bus address and size */
struct il_dma_ptr {
	dma_addr_t dma;		/* bus address */
	void *addr;		/* CPU virtual address */
	size_t size;		/* allocation size in bytes */
};
910
/* HT operation modes */
#define IL_OPERATION_MODE_AUTO 0
#define IL_OPERATION_MODE_HT_ONLY 1
#define IL_OPERATION_MODE_MIXED 2
#define IL_OPERATION_MODE_20MHZ 3

/* Tx frame CRC and delimiter sizes, in bytes */
#define IL_TX_CRC_SIZE 4
#define IL_TX_DELIMITER_SIZE 4

/* marker for an invalid/unusable voltage reading */
#define TX_POWER_IL_ILLEGAL_VOLTAGE -10000

/* Sensitivity and chain noise calibration */
#define INITIALIZATION_VALUE 0xFFFF
#define IL4965_CAL_NUM_BEACONS 20	/* beacons per 4965 calib period */
#define IL_CAL_NUM_BEACONS 16	/* beacons per calib period (other HW) */
#define MAXIMUM_ALLOWED_PATHLOSS 15

#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3

/* acceptable false-alarm count range, per period, for OFDM and CCK */
#define MAX_FA_OFDM 50
#define MIN_FA_OFDM 5
#define MAX_FA_CCK 50
#define MIN_FA_CCK 5

/* step sizes when adjusting auto-correlation thresholds */
#define AUTO_CORR_STEP_OFDM 1

#define AUTO_CORR_STEP_CCK 3
#define AUTO_CORR_MAX_TH_CCK 160

#define NRG_DIFF 2
#define NRG_STEP_CCK 2
#define NRG_MARGIN 8
#define MAX_NUMBER_CCK_NO_FA 100

#define AUTO_CORR_CCK_MIN_VAL_DEF (125)

/* Rx chain identifiers */
#define CHAIN_A 0
#define CHAIN_B 1
#define CHAIN_C 2
#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
#define ALL_BAND_FILTER 0xFF00
#define IN_BAND_FILTER 0xFF
#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF

#define NRG_NUM_PREV_STAT_L 20	/* history depth of silence RSSI samples */
#define NUM_RX_CHAINS 3
956
/* Verdict of the false-alarm rate check during sensitivity calibration */
enum il4965_false_alarm_state {
	IL_FA_TOO_MANY = 0,
	IL_FA_TOO_FEW = 1,
	IL_FA_GOOD_RANGE = 2,
};

/* Progress of the chain-noise calibration state machine */
enum il4965_chain_noise_state {
	IL_CHAIN_NOISE_ALIVE = 0,	/* must be 0 */
	IL_CHAIN_NOISE_ACCUMULATE,
	IL_CHAIN_NOISE_CALIBRATED,
	IL_CHAIN_NOISE_DONE,
};

/* Whether run-time calibration is performed at all */
enum il4965_calib_enabled_state {
	IL_CALIB_DISABLED = 0,	/* must be 0 */
	IL_CALIB_ENABLED = 1,
};
974
/*
 * enum il_calib
 * defines the order in which results of initial calibrations
 * should be sent to the runtime uCode
 */
enum il_calib {
	IL_CALIB_MAX,
};

/* Opaque calibration results */
struct il_calib_result {
	void *buf;		/* result payload */
	size_t buf_len;		/* payload length in bytes */
};

/* Which uCode image is currently loaded on the device */
enum ucode_type {
	UCODE_NONE = 0,
	UCODE_INIT,
	UCODE_RT
};
995
/* Sensitivity calib data */
struct il_sensitivity_data {
	/* current auto-correlation thresholds */
	u32 auto_corr_ofdm;
	u32 auto_corr_ofdm_mrc;
	u32 auto_corr_ofdm_x1;
	u32 auto_corr_ofdm_mrc_x1;
	u32 auto_corr_cck;
	u32 auto_corr_cck_mrc;

	/* counters remembered from the previous stats notification,
	 * used to compute per-period deltas */
	u32 last_bad_plcp_cnt_ofdm;
	u32 last_fa_cnt_ofdm;
	u32 last_bad_plcp_cnt_cck;
	u32 last_fa_cnt_cck;

	/* energy/silence tracking state */
	u32 nrg_curr_state;
	u32 nrg_prev_state;
	u32 nrg_value[10];
	u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L];	/* silence RSSI history */
	u32 nrg_silence_ref;
	u32 nrg_energy_idx;
	u32 nrg_silence_idx;
	u32 nrg_th_cck;		/* CCK energy threshold */
	s32 nrg_auto_corr_silence_diff;
	u32 num_in_cck_no_fa;
	u32 nrg_th_ofdm;	/* OFDM energy threshold */

	u16 barker_corr_th_min;
	u16 barker_corr_th_min_mrc;
	u16 nrg_th_cca;
};
1026
/* Chain noise (differential Rx gain) calib data */
struct il_chain_noise_data {
	u32 active_chains;	/* bitmask of Rx chains in use */
	/* accumulated noise and signal measurements, one per chain */
	u32 chain_noise_a;
	u32 chain_noise_b;
	u32 chain_noise_c;
	u32 chain_signal_a;
	u32 chain_signal_b;
	u32 chain_signal_c;
	u16 beacon_count;	/* beacons accumulated so far */
	u8 disconn_array[NUM_RX_CHAINS];	/* nonzero = chain judged disconnected */
	u8 delta_gain_code[NUM_RX_CHAINS];	/* per-chain gain correction code */
	u8 radio_write;		/* gain codes already written to the radio */
	u8 state;		/* enum il4965_chain_noise_state */
};
1042
1043#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
1044#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
1045
1046#define IL_TRAFFIC_ENTRIES (256)
1047#define IL_TRAFFIC_ENTRY_SIZE (64)
1048
/* bits of il_priv::measurement_status (spectrum measurement state) */
enum {
	MEASUREMENT_READY = (1 << 0),
	MEASUREMENT_ACTIVE = (1 << 1),
};
1053
/* interrupt stats -- counters of interrupt causes seen by the ISR */
struct isr_stats {
	u32 hw;			/* hardware error interrupts */
	u32 sw;			/* software (uCode) error interrupts */
	u32 err_code;		/* NOTE(review): presumably last error code seen -- verify */
	u32 sch;		/* scheduler interrupts */
	u32 alive;		/* alive notifications */
	u32 rfkill;		/* RF-kill toggle interrupts */
	u32 ctkill;		/* critical-temperature interrupts */
	u32 wakeup;		/* wakeup interrupts */
	u32 rx;			/* Rx interrupts */
	u32 handlers[IL_CN_MAX];	/* per-notification handler invocations */
	u32 tx;			/* Tx interrupts */
	u32 unhandled;		/* interrupts with no matching handler */
};
1069
/* management stats */
/* idxes into traffic_stats.mgmt[]; one per 802.11 management subtype */
enum il_mgmt_stats {
	MANAGEMENT_ASSOC_REQ = 0,
	MANAGEMENT_ASSOC_RESP,
	MANAGEMENT_REASSOC_REQ,
	MANAGEMENT_REASSOC_RESP,
	MANAGEMENT_PROBE_REQ,
	MANAGEMENT_PROBE_RESP,
	MANAGEMENT_BEACON,
	MANAGEMENT_ATIM,
	MANAGEMENT_DISASSOC,
	MANAGEMENT_AUTH,
	MANAGEMENT_DEAUTH,
	MANAGEMENT_ACTION,
	MANAGEMENT_MAX,
};
/* control stats */
/* idxes into traffic_stats.ctrl[]; one per 802.11 control subtype */
enum il_ctrl_stats {
	CONTROL_BACK_REQ = 0,
	CONTROL_BACK,
	CONTROL_PSPOLL,
	CONTROL_RTS,
	CONTROL_CTS,
	CONTROL_ACK,
	CONTROL_CFEND,
	CONTROL_CFENDACK,
	CONTROL_MAX,
};
1098
/* Tx/Rx frame counters; only populated when debugfs support is built */
struct traffic_stats {
#ifdef CONFIG_IWLEGACY_DEBUGFS
	u32 mgmt[MANAGEMENT_MAX];	/* per management subtype */
	u32 ctrl[CONTROL_MAX];	/* per control subtype */
	u32 data_cnt;		/* data frame count */
	u64 data_bytes;		/* data frame bytes */
#endif
};
1107
1108/*
1109 * host interrupt timeout value
1110 * used with setting interrupt coalescing timer
1111 * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
1112 *
1113 * default interrupt coalescing timer is 64 x 32 = 2048 usecs
1114 * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
1115 */
1116#define IL_HOST_INT_TIMEOUT_MAX (0xFF)
1117#define IL_HOST_INT_TIMEOUT_DEF (0x40)
1118#define IL_HOST_INT_TIMEOUT_MIN (0x0)
1119#define IL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
1120#define IL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
1121#define IL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
1122
1123#define IL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
1124
1125/* TX queue watchdog timeouts in mSecs */
1126#define IL_DEF_WD_TIMEOUT (2000)
1127#define IL_LONG_WD_TIMEOUT (10000)
1128#define IL_MAX_WD_TIMEOUT (120000)
1129
/* Bookkeeping for forced firmware-reload requests */
struct il_force_reset {
	int reset_request_count;	/* total requests seen */
	int reset_success_count;	/* requests that were carried out */
	int reset_reject_count;		/* requests that were rejected */
	unsigned long reset_duration;	/* presumably min jiffies between resets -- verify */
	unsigned long last_force_reset_jiffies;	/* time of the last reset */
};
1137
1138/* extend beacon time format bit shifting */
1139/*
1140 * for _3945 devices
1141 * bits 31:24 - extended
1142 * bits 23:0 - interval
1143 */
1144#define IL3945_EXT_BEACON_TIME_POS 24
1145/*
1146 * for _4965 devices
1147 * bits 31:22 - extended
1148 * bits 21:0 - interval
1149 */
1150#define IL4965_EXT_BEACON_TIME_POS 22
1151
/*
 * struct il_rxon_context - state bound to one RXON (radio configuration)
 *
 * iwlegacy hardware has a single context (il_priv::ctx).  It bundles the
 * mac80211 vif, the active/staging RXON commands, QoS data, WEP keys and
 * the HT operating state.
 */
struct il_rxon_context {
	struct ieee80211_vif *vif;

	/* AC-to-FIFO / AC-to-queue mappings for this context */
	const u8 *ac_to_fifo;
	const u8 *ac_to_queue;
	u8 mcast_queue;

	/*
	 * We could use the vif to indicate active, but we
	 * also need it to be active during disabling when
	 * we already removed the vif for type setting.
	 */
	bool always_active, is_active;

	bool ht_need_multiple_chains;

	int ctxid;

	u32 interface_modes, exclusive_interface_modes;
	u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;

	/*
	 * We declare this const so it can only be
	 * changed via explicit cast within the
	 * routines that actually update the physical
	 * hardware.
	 */
	const struct il_rxon_cmd active;
	struct il_rxon_cmd staging;

	struct il_rxon_time_cmd timing;

	struct il_qos_info qos_data;

	u8 bcast_sta_id, ap_sta_id;

	/* host command IDs used for this context */
	u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
	u8 qos_cmd;
	u8 wep_key_cmd;

	struct il_wep_key wep_keys[WEP_KEYS_MAX];
	u8 key_mapping_keys;

	__le32 station_flags;

	/* HT operating state for this context */
	struct {
		bool non_gf_sta_present;
		u8 protection;
		bool enabled, is_40mhz;
		u8 extension_chan_offset;
	} ht;
};
1204
/* Power-management state: current and pending sleep (powertable) commands */
struct il_power_mgr {
	struct il_powertable_cmd sleep_cmd;	/* command currently in effect */
	struct il_powertable_cmd sleep_cmd_next;	/* staged for the next update */
	int debug_sleep_level_override;	/* debug override of the sleep level */
	bool pci_pm;		/* PCI power management in use */
};
1211
/*
 * struct il_priv - per-adapter runtime state
 *
 * One instance exists per NIC.  It is shared by the 3945 and 4965
 * drivers; device-specific state lives in the anonymous _3945/_4965
 * union near the end.  Locking rules are noted on the members below.
 */
struct il_priv {

	/* ieee device used by generic ieee processing code */
	struct ieee80211_hw *hw;
	struct ieee80211_channel *ieee_channels;
	struct ieee80211_rate *ieee_rates;
	struct il_cfg *cfg;

	/* temporary frame storage list */
	struct list_head free_frames;
	int frames_count;

	enum ieee80211_band band;
	int alloc_rxb_page;

	/* dispatch table: one handler per notification/command ID */
	void (*handlers[IL_CN_MAX]) (struct il_priv *il,
				     struct il_rx_buf *rxb);

	struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];

	/* spectrum measurement report caching */
	struct il_spectrum_notification measure_report;
	u8 measurement_status;

	/* ucode beacon time */
	u32 ucode_beacon_time;
	int missed_beacon_threshold;

	/* track IBSS manager (last beacon) status */
	u32 ibss_manager;

	/* force reset */
	struct il_force_reset force_reset;

	/* we allocate array of il_channel_info for NIC's valid channels.
	 *    Access via channel # using indirect idx array */
	struct il_channel_info *channel_info;	/* channel info array */
	u8 channel_count;	/* # of channels */

	/* thermal calibration */
	s32 temperature;	/* degrees Kelvin */
	s32 last_temperature;

	/* init calibration results */
	struct il_calib_result calib_results[IL_CALIB_MAX];

	/* Scan related variables */
	unsigned long scan_start;
	unsigned long scan_start_tsf;
	void *scan_cmd;
	enum ieee80211_band scan_band;
	struct cfg80211_scan_request *scan_request;
	struct ieee80211_vif *scan_vif;
	u8 scan_tx_ant[IEEE80211_NUM_BANDS];
	u8 mgmt_tx_ant;

	/* spinlock */
	spinlock_t lock;	/* protect general shared data */
	spinlock_t hcmd_lock;	/* protect hcmd */
	spinlock_t reg_lock;	/* protect hw register access */
	struct mutex mutex;

	/* basic pci-network driver stuff */
	struct pci_dev *pci_dev;

	/* pci hardware address support */
	void __iomem *hw_base;
	u32 hw_rev;
	u32 hw_wa_rev;
	u8 rev_id;

	/* command queue number */
	u8 cmd_queue;

	/* max number of station keys */
	u8 sta_key_max_num;

	/* EEPROM MAC addresses */
	struct mac_address addresses[1];

	/* uCode images, save to reload in case of failure */
	int fw_idx;		/* firmware we're trying to load */
	u32 ucode_ver;		/* version of ucode, copy of
				   il_ucode.ver */
	struct fw_desc ucode_code;	/* runtime inst */
	struct fw_desc ucode_data;	/* runtime data original */
	struct fw_desc ucode_data_backup;	/* runtime data save/restore */
	struct fw_desc ucode_init;	/* initialization inst */
	struct fw_desc ucode_init_data;	/* initialization data */
	struct fw_desc ucode_boot;	/* bootstrap inst */
	enum ucode_type ucode_type;
	u8 ucode_write_complete;	/* the image write is complete */
	char firmware_name[25];

	struct il_rxon_context ctx;

	__le16 switch_channel;

	/* 1st responses from initialize and runtime uCode images.
	 * _4965's initialize alive response contains some calibration data. */
	struct il_init_alive_resp card_alive_init;
	struct il_alive_resp card_alive;

	u16 active_rate;

	u8 start_calib;
	struct il_sensitivity_data sensitivity_data;
	struct il_chain_noise_data chain_noise_data;
	__le16 sensitivity_tbl[HD_TBL_SIZE];

	struct il_ht_config current_ht_config;

	/* Rate scaling data */
	u8 retry_rate;

	wait_queue_head_t wait_command_queue;

	int activity_timer_active;

	/* Rx and Tx DMA processing queues */
	struct il_rx_queue rxq;
	struct il_tx_queue *txq;
	unsigned long txq_ctx_active_msk;
	struct il_dma_ptr kw;	/* keep warm address */
	struct il_dma_ptr scd_bc_tbls;

	u32 scd_base_addr;	/* scheduler sram base address */

	unsigned long status;

	/* counts mgmt, ctl, and data packets */
	struct traffic_stats tx_stats;
	struct traffic_stats rx_stats;

	/* counts interrupts */
	struct isr_stats isr_stats;

	struct il_power_mgr power_data;

	/* context information */
	u8 bssid[ETH_ALEN];	/* used only on 3945 but filled by core */

	/* station table variables */

	/* Note: if lock and sta_lock are needed, lock must be acquired first */
	spinlock_t sta_lock;
	int num_stations;
	struct il_station_entry stations[IL_STATION_COUNT];
	unsigned long ucode_key_table;

	/* queue refcounts */
#define IL_MAX_HW_QUEUES	32
	unsigned long queue_stopped[BITS_TO_LONGS(IL_MAX_HW_QUEUES)];
	/* for each AC */
	atomic_t queue_stop_count[4];

	/* Indication if ieee80211_ops->open has been called */
	u8 is_open;

	u8 mac80211_registered;

	/* eeprom -- this is in the card's little endian byte order */
	u8 *eeprom;
	struct il_eeprom_calib_info *calib_info;

	enum nl80211_iftype iw_mode;

	/* Last Rx'd beacon timestamp */
	u64 timestamp;

	/* device-specific state; exactly one member is compiled in/used */
	union {
#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
		struct {
			void *shared_virt;
			dma_addr_t shared_phys;

			struct delayed_work thermal_periodic;
			struct delayed_work rfkill_poll;

			struct il3945_notif_stats stats;
#ifdef CONFIG_IWLEGACY_DEBUGFS
			struct il3945_notif_stats accum_stats;
			struct il3945_notif_stats delta_stats;
			struct il3945_notif_stats max_delta;
#endif

			u32 sta_supp_rates;
			int last_rx_rssi;	/* From Rx packet stats */

			/* Rx'd packet timing information */
			u32 last_beacon_time;
			u64 last_tsf;

			/*
			 * each calibration channel group in the
			 * EEPROM has a derived clip setting for
			 * each rate.
			 */
			const struct il3945_clip_group clip_groups[5];

		} _3945;
#endif
#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
		struct {
			struct il_rx_phy_res last_phy_res;
			bool last_phy_res_valid;

			struct completion firmware_loading_complete;

			/*
			 * chain noise reset and gain commands are the
			 * two extra calibration commands follows the standard
			 * phy calibration commands
			 */
			u8 phy_calib_chain_noise_reset_cmd;
			u8 phy_calib_chain_noise_gain_cmd;

			struct il_notif_stats stats;
#ifdef CONFIG_IWLEGACY_DEBUGFS
			struct il_notif_stats accum_stats;
			struct il_notif_stats delta_stats;
			struct il_notif_stats max_delta;
#endif

		} _4965;
#endif
	};

	struct il_hw_params hw_params;

	u32 inta_mask;

	struct workqueue_struct *workqueue;

	struct work_struct restart;
	struct work_struct scan_completed;
	struct work_struct rx_replenish;
	struct work_struct abort_scan;

	struct il_rxon_context *beacon_ctx;
	struct sk_buff *beacon_skb;

	struct work_struct tx_flush;

	struct tasklet_struct irq_tasklet;

	struct delayed_work init_alive_start;
	struct delayed_work alive_start;
	struct delayed_work scan_check;

	/* TX Power */
	s8 tx_power_user_lmt;
	s8 tx_power_device_lmt;
	s8 tx_power_next;

#ifdef CONFIG_IWLEGACY_DEBUG
	/* debugging info */
	u32 debug_level;	/* per device debugging will override global
				   il_debug_level if set */
#endif				/* CONFIG_IWLEGACY_DEBUG */
#ifdef CONFIG_IWLEGACY_DEBUGFS
	/* debugfs */
	u16 tx_traffic_idx;
	u16 rx_traffic_idx;
	u8 *tx_traffic;
	u8 *rx_traffic;
	struct dentry *debugfs_dir;
	u32 dbgfs_sram_offset, dbgfs_sram_len;
	bool disable_ht40;
#endif				/* CONFIG_IWLEGACY_DEBUGFS */

	struct work_struct txpower_work;
	u32 disable_sens_cal;
	u32 disable_chain_noise_cal;
	u32 disable_tx_power_cal;
	struct work_struct run_time_calib_work;
	struct timer_list stats_periodic;
	struct timer_list watchdog;
	bool hw_ready;

	struct led_classdev led;
	unsigned long blink_on, blink_off;
	bool led_registered;
};				/*il_priv */
1496
/* Mark TX queue 'txq_id' active in the per-device queue bitmask. */
static inline void
il_txq_ctx_activate(struct il_priv *il, int txq_id)
{
	set_bit(txq_id, &il->txq_ctx_active_msk);
}
1502
/* Mark TX queue 'txq_id' inactive in the per-device queue bitmask. */
static inline void
il_txq_ctx_deactivate(struct il_priv *il, int txq_id)
{
	clear_bit(txq_id, &il->txq_ctx_active_msk);
}
1508
1509static inline struct ieee80211_hdr *
1510il_tx_queue_get_hdr(struct il_priv *il, int txq_id, int idx)
1511{
1512 if (il->txq[txq_id].txb[idx].skb)
1513 return (struct ieee80211_hdr *)il->txq[txq_id].txb[idx].skb->
1514 data;
1515 return NULL;
1516}
1517
/* Recover the RXON context pointer stored in the vif's driver-private area. */
static inline struct il_rxon_context *
il_rxon_ctx_from_vif(struct ieee80211_vif *vif)
{
	struct il_vif_priv *vif_priv = (void *)vif->drv_priv;

	return vif_priv->ctx;
}
1525
/* There is only one context (il->ctx) on iwlegacy hardware, so this
 * "loop" executes its body exactly once with _ctx == &il->ctx. */
#define for_each_context(il, _ctx) \
	for (_ctx = &il->ctx; _ctx == &il->ctx; _ctx++)
1528
1529static inline int
1530il_is_associated(struct il_priv *il)
1531{
1532 return (il->ctx.active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
1533}
1534
/* With a single RXON context, "any associated" reduces to that context. */
static inline int
il_is_any_associated(struct il_priv *il)
{
	return il_is_associated(il);
}
1540
1541static inline int
1542il_is_associated_ctx(struct il_rxon_context *ctx)
1543{
1544 return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
1545}
1546
1547static inline int
1548il_is_channel_valid(const struct il_channel_info *ch_info)
1549{
1550 if (ch_info == NULL)
1551 return 0;
1552 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
1553}
1554
1555static inline int
1556il_is_channel_radar(const struct il_channel_info *ch_info)
1557{
1558 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
1559}
1560
/* True when the channel belongs to the 5 GHz (802.11a) band. */
static inline u8
il_is_channel_a_band(const struct il_channel_info *ch_info)
{
	return ch_info->band == IEEE80211_BAND_5GHZ;
}
1566
1567static inline int
1568il_is_channel_passive(const struct il_channel_info *ch)
1569{
1570 return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
1571}
1572
1573static inline int
1574il_is_channel_ibss(const struct il_channel_info *ch)
1575{
1576 return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0;
1577}
1578
/* Release an Rx buffer page (struct page * form) and decrement the
 * count of outstanding Rx page allocations. */
static inline void
__il_free_pages(struct il_priv *il, struct page *page)
{
	__free_pages(page, il->hw_params.rx_page_order);
	il->alloc_rxb_page--;
}
1585
/* Release an Rx buffer page (virtual-address form) and decrement the
 * count of outstanding Rx page allocations. */
static inline void
il_free_pages(struct il_priv *il, unsigned long page)
{
	free_pages(page, il->hw_params.rx_page_order);
	il->alloc_rxb_page--;
}
1592
1593#define IWLWIFI_VERSION "in-tree:"
1594#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
1595#define DRV_AUTHOR "<ilw@linux.intel.com>"
1596
1597#define IL_PCI_DEVICE(dev, subdev, cfg) \
1598 .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
1599 .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
1600 .driver_data = (kernel_ulong_t)&(cfg)
1601
1602#define TIME_UNIT 1024
1603
1604#define IL_SKU_G 0x1
1605#define IL_SKU_A 0x2
1606#define IL_SKU_N 0x8
1607
1608#define IL_CMD(x) case x: return #x
1609
1610/* Size of one Rx buffer in host DRAM */
1611#define IL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
1612#define IL_RX_BUF_SIZE_4K (4 * 1024)
1613#define IL_RX_BUF_SIZE_8K (8 * 1024)
1614
/* Device-specific host-command operations for RXON handling */
struct il_hcmd_ops {
	int (*rxon_assoc) (struct il_priv *il, struct il_rxon_context *ctx);
	int (*commit_rxon) (struct il_priv *il, struct il_rxon_context *ctx);
	void (*set_rxon_chain) (struct il_priv *il,
				struct il_rxon_context *ctx);
};
1621
/* Device-specific helpers for sizing/building host commands and scanning */
struct il_hcmd_utils_ops {
	u16(*get_hcmd_size) (u8 cmd_id, u16 len);
	u16(*build_addsta_hcmd) (const struct il_addsta_cmd *cmd, u8 *data);
	int (*request_scan) (struct il_priv *il, struct ieee80211_vif *vif);
	void (*post_scan) (struct il_priv *il);
};
1628
/* Device-specific power-management (APM) init/config hooks */
struct il_apm_ops {
	int (*init) (struct il_priv *il);
	void (*config) (struct il_priv *il);
};
1633
#ifdef CONFIG_IWLEGACY_DEBUGFS
/* Device-specific read handlers backing the debugfs statistics files */
struct il_debugfs_ops {
	ssize_t(*rx_stats_read) (struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos);
	ssize_t(*tx_stats_read) (struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos);
	ssize_t(*general_stats_read) (struct file *file,
				      char __user *user_buf, size_t count,
				      loff_t *ppos);
};
#endif
1645
/* Device-specific temperature measurement hook */
struct il_temp_ops {
	void (*temperature) (struct il_priv *il);
};
1649
/* Core device-specific library ops: Tx path, uCode load, eeprom, power */
struct il_lib_ops {
	/* set hw dependent parameters */
	int (*set_hw_params) (struct il_priv *il);
	/* Handling TX */
	void (*txq_update_byte_cnt_tbl) (struct il_priv *il,
					 struct il_tx_queue *txq,
					 u16 byte_cnt);
	int (*txq_attach_buf_to_tfd) (struct il_priv *il,
				      struct il_tx_queue *txq, dma_addr_t addr,
				      u16 len, u8 reset, u8 pad);
	void (*txq_free_tfd) (struct il_priv *il, struct il_tx_queue *txq);
	int (*txq_init) (struct il_priv *il, struct il_tx_queue *txq);
	/* setup Rx handler */
	void (*handler_setup) (struct il_priv *il);
	/* alive notification after init uCode load */
	void (*init_alive_start) (struct il_priv *il);
	/* check validity of rtc data address */
	int (*is_valid_rtc_data_addr) (u32 addr);
	/* 1st ucode load */
	int (*load_ucode) (struct il_priv *il);

	/* error/FH register dumping for diagnostics */
	void (*dump_nic_error_log) (struct il_priv *il);
	int (*dump_fh) (struct il_priv *il, char **buf, bool display);
	int (*set_channel_switch) (struct il_priv *il,
				   struct ieee80211_channel_switch *ch_switch);
	/* power management */
	struct il_apm_ops apm_ops;

	/* power */
	int (*send_tx_power) (struct il_priv *il);
	void (*update_chain_flags) (struct il_priv *il);

	/* eeprom operations */
	struct il_eeprom_ops eeprom_ops;

	/* temperature */
	struct il_temp_ops temp_ops;

#ifdef CONFIG_IWLEGACY_DEBUGFS
	struct il_debugfs_ops debugfs_ops;
#endif

};
1693
/* Device-specific LED command hook */
struct il_led_ops {
	int (*cmd) (struct il_priv *il, struct il_led_cmd *led_cmd);
};
1697
/* Hooks called from the common mac80211 glue code */
struct il_legacy_ops {
	void (*post_associate) (struct il_priv *il);
	void (*config_ap) (struct il_priv *il);
	/* station management */
	int (*update_bcast_stations) (struct il_priv *il);
	int (*manage_ibss_station) (struct il_priv *il,
				    struct ieee80211_vif *vif, bool add);
};
1706
/* Bundle of all per-device operation tables (referenced from il_cfg) */
struct il_ops {
	const struct il_lib_ops *lib;
	const struct il_hcmd_ops *hcmd;
	const struct il_hcmd_utils_ops *utils;
	const struct il_led_ops *led;
	const struct il_nic_ops *nic;
	const struct il_legacy_ops *legacy;
	const struct ieee80211_ops *ieee80211_ops;
};
1716
/* Module parameters, settable on the modprobe command line */
struct il_mod_params {
	int sw_crypto;		/* def: 0 = using hardware encryption */
	int disable_hw_scan;	/* def: 0 = use h/w scan */
	int num_of_queues;	/* def: HW dependent */
	int disable_11n;	/* def: 0 = 11n capabilities enabled */
	int amsdu_size_8K;	/* def: 1 = enable 8K amsdu size */
	int antenna;		/* def: 0 = both antennas (use diversity) */
	int restart_fw;		/* def: 1 = restart firmware */
};
1726
/*
 * @eeprom_size: size of the device EEPROM in bytes
 * @num_of_queues: number of TX queues (HW dependent)
 * @num_of_ampdu_queues: number of AMPDU queues (HW dependent)
 * @pll_cfg_val: PLL configuration value, used by il_apm_init()
 * @set_l0s: enable the PCIe L0s power state
 * @use_bsm: load uCode via the bootstrap state machine
 * @led_compensation: compensate on the led on/off time per HW according
 *	to the deviation to achieve the desired led frequency.
 *	The detail algorithm is described in common.c
 * @chain_noise_num_beacons: number of beacons used to compute chain noise
 * @wd_timeout: TX queues watchdog timeout
 * @temperature_kelvin: temperature report by uCode in kelvin
 * @ucode_tracing: support ucode continuous tracing
 * @sensitivity_calib_by_driver: driver has the capability to perform
 *	sensitivity calibration operation
 * @chain_noise_calib_by_driver: driver has the capability to perform
 *	chain noise calibration operation
 */
struct il_base_params {
	int eeprom_size;
	int num_of_queues;	/* def: HW dependent */
	int num_of_ampdu_queues;	/* def: HW dependent */
	/* for il_apm_init() */
	u32 pll_cfg_val;
	bool set_l0s;
	bool use_bsm;

	u16 led_compensation;
	int chain_noise_num_beacons;
	unsigned int wd_timeout;
	bool temperature_kelvin;
	const bool ucode_tracing;
	const bool sensitivity_calib_by_driver;
	const bool chain_noise_calib_by_driver;
};
1757
#define IL_LED_SOLID 11
#define IL_DEF_LED_INTRVL cpu_to_le32(1000)

#define IL_LED_ACTIVITY (0<<1)
#define IL_LED_LINK (1<<1)

/*
 * LED mode
 *    IL_LED_DEFAULT:  use device default
 *    IL_LED_RF_STATE: turn LED on/off based on RF state
 *		LED ON = RF ON
 *		LED OFF = RF OFF
 *    IL_LED_BLINK:    adjust led blink rate based on blink table
 */
enum il_led_mode {
	IL_LED_DEFAULT,
	IL_LED_RF_STATE,
	IL_LED_BLINK,
};
1777
1778void il_leds_init(struct il_priv *il);
1779void il_leds_exit(struct il_priv *il);
1780
/**
 * struct il_cfg
 * @name: official device name, used in log messages
 * @fw_name_pre: Firmware filename prefix. The api version and extension
 *	(.ucode) will be added to filename before loading from disk. The
 *	filename is constructed as fw_name_pre<api>.ucode.
 * @ucode_api_max: Highest version of uCode API supported by driver.
 * @ucode_api_min: Lowest version of uCode API supported by driver.
 * @valid_tx_ant: bitmask of usable TX antennas/chains
 * @valid_rx_ant: bitmask of usable RX antennas/chains
 * @sku: IL_SKU_* capability bits for this device
 * @eeprom_ver: expected EEPROM version
 * @eeprom_calib_ver: expected EEPROM calibration version
 * @ops: per-device operation tables
 * @mod_params: module parameters, settable from the modprobe cmd line
 * @base_params: params not likely to change within a device family
 * @scan_rx_antennas: available antenna for scan operation
 * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
 *
 * We enable the driver to be backward compatible wrt API version. The
 * driver specifies which APIs it supports (with @ucode_api_max being the
 * highest and @ucode_api_min the lowest). Firmware will only be loaded if
 * it has a supported API version. The firmware's API version will be
 * stored in @il_priv, enabling the driver to make runtime changes based
 * on firmware version used.
 *
 * For example,
 * if (IL_UCODE_API(il->ucode_ver) >= 2) {
 *	Driver interacts with Firmware API version >= 2.
 * } else {
 *	Driver interacts with Firmware API version 1.
 * }
 *
 * The ideal usage of this infrastructure is to treat a new ucode API
 * release as a new hardware revision. That is, through utilizing the
 * il_hcmd_utils_ops etc. we accommodate different command structures
 * and flows between hardware versions as well as their API
 * versions.
 *
 */
struct il_cfg {
	/* params specific to an individual device within a device family */
	const char *name;
	const char *fw_name_pre;
	const unsigned int ucode_api_max;
	const unsigned int ucode_api_min;
	u8 valid_tx_ant;
	u8 valid_rx_ant;
	unsigned int sku;
	u16 eeprom_ver;
	u16 eeprom_calib_ver;
	const struct il_ops *ops;
	/* module based parameters which can be set from modprobe cmd */
	const struct il_mod_params *mod_params;
	/* params not likely to change within a device family */
	struct il_base_params *base_params;
	/* params likely to change within a device family */
	u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
	enum il_led_mode led_mode;
};
1832
1833/***************************
1834 * L i b *
1835 ***************************/
1836
1837struct ieee80211_hw *il_alloc_all(struct il_cfg *cfg);
1838int il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1839 u16 queue, const struct ieee80211_tx_queue_params *params);
1840int il_mac_tx_last_beacon(struct ieee80211_hw *hw);
1841
1842void il_set_rxon_hwcrypto(struct il_priv *il, struct il_rxon_context *ctx,
1843 int hw_decrypt);
1844int il_check_rxon_cmd(struct il_priv *il, struct il_rxon_context *ctx);
1845int il_full_rxon_required(struct il_priv *il, struct il_rxon_context *ctx);
1846int il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch,
1847 struct il_rxon_context *ctx);
1848void il_set_flags_for_band(struct il_priv *il, struct il_rxon_context *ctx,
1849 enum ieee80211_band band, struct ieee80211_vif *vif);
1850u8 il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band);
1851void il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf);
1852bool il_is_ht40_tx_allowed(struct il_priv *il, struct il_rxon_context *ctx,
1853 struct ieee80211_sta_ht_cap *ht_cap);
1854void il_connection_init_rx_config(struct il_priv *il,
1855 struct il_rxon_context *ctx);
1856void il_set_rate(struct il_priv *il);
1857int il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
1858 u32 decrypt_res, struct ieee80211_rx_status *stats);
1859void il_irq_handle_error(struct il_priv *il);
1860int il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
1861void il_mac_remove_interface(struct ieee80211_hw *hw,
1862 struct ieee80211_vif *vif);
1863int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1864 enum nl80211_iftype newtype, bool newp2p);
1865int il_alloc_txq_mem(struct il_priv *il);
1866void il_txq_mem(struct il_priv *il);
1867
#ifdef CONFIG_IWLEGACY_DEBUGFS
/* Traffic-log helpers; real implementations only exist with debugfs. */
int il_alloc_traffic_mem(struct il_priv *il);
void il_free_traffic_mem(struct il_priv *il);
void il_reset_traffic_log(struct il_priv *il);
void il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
			      struct ieee80211_hdr *header);
void il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
			      struct ieee80211_hdr *header);
const char *il_get_mgmt_string(int cmd);
const char *il_get_ctrl_string(int cmd);
void il_clear_traffic_stats(struct il_priv *il);
void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len);
#else
/* No-op stand-ins so callers need no #ifdefs of their own. */
static inline int
il_alloc_traffic_mem(struct il_priv *il)
{
	return 0;
}

static inline void
il_free_traffic_mem(struct il_priv *il)
{
}

static inline void
il_reset_traffic_log(struct il_priv *il)
{
}

static inline void
il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
			 struct ieee80211_hdr *header)
{
}

static inline void
il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
			 struct ieee80211_hdr *header)
{
}

static inline void
il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
{
}
#endif
1914/*****************************************************
1915 * RX handlers.
1916 * **************************************************/
1917void il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb);
1918void il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb);
1919void il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb);
1920
1921/*****************************************************
1922* RX
1923******************************************************/
1924void il_cmd_queue_unmap(struct il_priv *il);
1925void il_cmd_queue_free(struct il_priv *il);
1926int il_rx_queue_alloc(struct il_priv *il);
1927void il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q);
1928int il_rx_queue_space(const struct il_rx_queue *q);
1929void il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb);
1930/* Handlers */
1931void il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb);
1932void il_recover_from_stats(struct il_priv *il, struct il_rx_pkt *pkt);
1933void il_chswitch_done(struct il_priv *il, bool is_success);
1934void il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb);
1935
1936/* TX helpers */
1937
1938/*****************************************************
1939* TX
1940******************************************************/
1941void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
1942int il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
1943 u32 txq_id);
1944void il_tx_queue_reset(struct il_priv *il, struct il_tx_queue *txq,
1945 int slots_num, u32 txq_id);
1946void il_tx_queue_unmap(struct il_priv *il, int txq_id);
1947void il_tx_queue_free(struct il_priv *il, int txq_id);
1948void il_setup_watchdog(struct il_priv *il);
1949/*****************************************************
1950 * TX power
1951 ****************************************************/
1952int il_set_tx_power(struct il_priv *il, s8 tx_power, bool force);
1953
1954/*******************************************************************************
1955 * Rate
1956 ******************************************************************************/
1957
1958u8 il_get_lowest_plcp(struct il_priv *il, struct il_rxon_context *ctx);
1959
1960/*******************************************************************************
1961 * Scanning
1962 ******************************************************************************/
1963void il_init_scan_params(struct il_priv *il);
1964int il_scan_cancel(struct il_priv *il);
1965int il_scan_cancel_timeout(struct il_priv *il, unsigned long ms);
1966void il_force_scan_end(struct il_priv *il);
1967int il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1968 struct cfg80211_scan_request *req);
1969void il_internal_short_hw_scan(struct il_priv *il);
1970int il_force_reset(struct il_priv *il, bool external);
1971u16 il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
1972 const u8 *ta, const u8 *ie, int ie_len, int left);
1973void il_setup_rx_scan_handlers(struct il_priv *il);
1974u16 il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
1975 u8 n_probes);
1976u16 il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
1977 struct ieee80211_vif *vif);
1978void il_setup_scan_deferred_work(struct il_priv *il);
1979void il_cancel_scan_deferred_work(struct il_priv *il);
1980
1981/* For faster active scanning, scan will move to the next channel if fewer than
1982 * PLCP_QUIET_THRESH packets are heard on this channel within
1983 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
1984 * time if it's a quiet channel (nothing responded to our probe, and there's
1985 * no other traffic).
1986 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
1987#define IL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
1988#define IL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
1989
1990#define IL_SCAN_CHECK_WATCHDOG (HZ * 7)
1991
1992/*****************************************************
1993 * S e n d i n g H o s t C o m m a n d s *
1994 *****************************************************/
1995
1996const char *il_get_cmd_string(u8 cmd);
1997int __must_check il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd);
1998int il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd);
1999int __must_check il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len,
2000 const void *data);
2001int il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
2002 void (*callback) (struct il_priv *il,
2003 struct il_device_cmd *cmd,
2004 struct il_rx_pkt *pkt));
2005
2006int il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd);
2007
2008/*****************************************************
2009 * PCI *
2010 *****************************************************/
2011
/*
 * Read the PCIe Link Control register (L0s/L1 ASPM state) of our device.
 *
 * NOTE(review): pci_pcie_cap() returns 0 when the device has no PCIe
 * capability, which would make this read config offset PCI_EXP_LNKCTL
 * itself.  Presumably all iwlegacy devices are PCIe -- confirm before
 * reusing this pattern elsewhere.
 */
static inline u16
il_pcie_link_ctl(struct il_priv *il)
{
	int pos;
	u16 pci_lnk_ctl;
	pos = pci_pcie_cap(il->pci_dev);
	pci_read_config_word(il->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
	return pci_lnk_ctl;
}
2021
2022void il_bg_watchdog(unsigned long data);
2023u32 il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval);
2024__le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
2025 u32 beacon_interval);
2026
2027#ifdef CONFIG_PM
2028int il_pci_suspend(struct device *device);
2029int il_pci_resume(struct device *device);
2030extern const struct dev_pm_ops il_pm_ops;
2031
2032#define IL_LEGACY_PM_OPS (&il_pm_ops)
2033
2034#else /* !CONFIG_PM */
2035
2036#define IL_LEGACY_PM_OPS NULL
2037
2038#endif /* !CONFIG_PM */
2039
2040/*****************************************************
2041* Error Handling Debugging
2042******************************************************/
2043void il4965_dump_nic_error_log(struct il_priv *il);
2044#ifdef CONFIG_IWLEGACY_DEBUG
2045void il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx);
2046#else
2047static inline void
2048il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx)
2049{
2050}
2051#endif
2052
2053void il_clear_isr_stats(struct il_priv *il);
2054
2055/*****************************************************
2056* GEOS
2057******************************************************/
2058int il_init_geos(struct il_priv *il);
2059void il_free_geos(struct il_priv *il);
2060
2061/*************** DRIVER STATUS FUNCTIONS *****/
2062
2063#define S_HCMD_ACTIVE 0 /* host command in progress */
2064/* 1 is unused (used to be S_HCMD_SYNC_ACTIVE) */
2065#define S_INT_ENABLED 2
2066#define S_RF_KILL_HW 3
2067#define S_CT_KILL 4
2068#define S_INIT 5
2069#define S_ALIVE 6
2070#define S_READY 7
2071#define S_TEMPERATURE 8
2072#define S_GEO_CONFIGURED 9
2073#define S_EXIT_PENDING 10
2074#define S_STATS 12
2075#define S_SCANNING 13
2076#define S_SCAN_ABORTING 14
2077#define S_SCAN_HW 15
2078#define S_POWER_PMI 16
2079#define S_FW_ERROR 17
2080#define S_CHANNEL_SWITCH_PENDING 18
2081
2082static inline int
2083il_is_ready(struct il_priv *il)
2084{
2085 /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
2086 * set but EXIT_PENDING is not */
2087 return test_bit(S_READY, &il->status) &&
2088 test_bit(S_GEO_CONFIGURED, &il->status) &&
2089 !test_bit(S_EXIT_PENDING, &il->status);
2090}
2091
/* True once the uCode has reported ALIVE (S_ALIVE set). */
static inline int
il_is_alive(struct il_priv *il)
{
	return test_bit(S_ALIVE, &il->status);
}
2097
/* True while the S_INIT status bit is set. */
static inline int
il_is_init(struct il_priv *il)
{
	return test_bit(S_INIT, &il->status);
}
2103
/* True when the hardware RF-kill switch is reported active. */
static inline int
il_is_rfkill_hw(struct il_priv *il)
{
	return test_bit(S_RF_KILL_HW, &il->status);
}
2109
/* RF-kill state: only the hardware switch is tracked on these devices. */
static inline int
il_is_rfkill(struct il_priv *il)
{
	return il_is_rfkill_hw(il);
}
2115
/* True while thermal (CT) kill is in effect (S_CT_KILL set). */
static inline int
il_is_ctkill(struct il_priv *il)
{
	return test_bit(S_CT_KILL, &il->status);
}
2121
static inline int
il_is_ready_rf(struct il_priv *il)
{
	/* RF-killed hardware is never usable regardless of driver state. */
	return il_is_rfkill(il) ? 0 : il_is_ready(il);
}
2131
2132extern void il_send_bt_config(struct il_priv *il);
2133extern int il_send_stats_request(struct il_priv *il, u8 flags, bool clear);
2134void il_apm_stop(struct il_priv *il);
2135int il_apm_init(struct il_priv *il);
2136
2137int il_send_rxon_timing(struct il_priv *il, struct il_rxon_context *ctx);
/* Send the RXON_ASSOC command through the device-specific hcmd op. */
static inline int
il_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
{
	return il->cfg->ops->hcmd->rxon_assoc(il, ctx);
}
2143
/* Commit the staged RXON config through the device-specific hcmd op. */
static inline int
il_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
{
	return il->cfg->ops->hcmd->commit_rxon(il, ctx);
}
2149
/* Look up the ieee80211_supported_band registered with mac80211 for
 * @band; NULL when the band is not populated. */
static inline const struct ieee80211_supported_band *
il_get_hw_mode(struct il_priv *il, enum ieee80211_band band)
{
	return il->hw->wiphy->bands[band];
}
2155
2156/* mac80211 handlers */
2157int il_mac_config(struct ieee80211_hw *hw, u32 changed);
2158void il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
2159void il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2160 struct ieee80211_bss_conf *bss_conf, u32 changes);
2161void il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
2162 __le16 fc, __le32 *tx_flags);
2163
2164irqreturn_t il_isr(int irq, void *data);
2165
2166#include <linux/io.h>
2167
/* Raw byte write to a device (BAR) register; no locking, no NIC wakeup. */
static inline void
_il_write8(struct il_priv *il, u32 ofs, u8 val)
{
	iowrite8(val, il->hw_base + ofs);
}
/* Public alias of the raw byte accessor. */
#define il_write8(il, ofs, val) _il_write8(il, ofs, val)
2174
/* Raw 32-bit write to a device register; no locking, no NIC wakeup. */
static inline void
_il_wr(struct il_priv *il, u32 ofs, u32 val)
{
	iowrite32(val, il->hw_base + ofs);
}
2180
/* Raw 32-bit read of a device register; no locking, no NIC wakeup. */
static inline u32
_il_rd(struct il_priv *il, u32 ofs)
{
	return ioread32(il->hw_base + ofs);
}
2186
#define IL_POLL_INTERVAL 10	/* microseconds */
/*
 * Busy-wait until (reg & @mask) == (@bits & @mask), sampling every
 * IL_POLL_INTERVAL microseconds.  Returns the elapsed time in usecs on
 * success or -ETIMEDOUT after @timeout usecs.  Uses the raw accessor,
 * so the caller is responsible for any locking/NIC access needed.
 */
static inline int
_il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout)
{
	int t = 0;

	do {
		if ((_il_rd(il, addr) & mask) == (bits & mask))
			return t;
		udelay(IL_POLL_INTERVAL);
		t += IL_POLL_INTERVAL;
	} while (t < timeout);

	return -ETIMEDOUT;
}
2202
/* Raw read-modify-write OR of @mask into @reg; caller handles locking. */
static inline void
_il_set_bit(struct il_priv *il, u32 reg, u32 mask)
{
	_il_wr(il, reg, _il_rd(il, reg) | mask);
}
2208
2209static inline void
2210il_set_bit(struct il_priv *p, u32 r, u32 m)
2211{
2212 unsigned long reg_flags;
2213
2214 spin_lock_irqsave(&p->reg_lock, reg_flags);
2215 _il_set_bit(p, r, m);
2216 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
2217}
2218
/* Raw read-modify-write clear of @mask in @reg; caller handles locking. */
static inline void
_il_clear_bit(struct il_priv *il, u32 reg, u32 mask)
{
	_il_wr(il, reg, _il_rd(il, reg) & ~mask);
}
2224
2225static inline void
2226il_clear_bit(struct il_priv *p, u32 r, u32 m)
2227{
2228 unsigned long reg_flags;
2229
2230 spin_lock_irqsave(&p->reg_lock, reg_flags);
2231 _il_clear_bit(p, r, m);
2232 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
2233}
2234
/*
 * Wake the NIC (assert MAC_ACCESS_REQ) and wait for its clocks to come
 * up so host register accesses are valid.  Returns 0 on success; on
 * timeout logs the error, forces an NMI via CSR_RESET, and returns -EIO.
 * Raw helper: callers in this file hold reg_lock and pair it with
 * _il_release_nic_access().
 */
static inline int
_il_grab_nic_access(struct il_priv *il)
{
	int ret;
	u32 val;

	/* this bit wakes up the NIC */
	_il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
	 * to/from host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored.  We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
	 *
	 */
	ret =
	    _il_poll_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			  CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (ret < 0) {
		val = _il_rd(il, CSR_GP_CNTRL);
		IL_ERR("MAC is in deep sleep!.  CSR_GP_CNTRL = 0x%08X\n", val);
		_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		return -EIO;
	}

	return 0;
}
2274
/* Drop the MAC_ACCESS_REQ bit taken by _il_grab_nic_access(), letting
 * the NIC go back to sleep. */
static inline void
_il_release_nic_access(struct il_priv *il)
{
	_il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
2280
/*
 * Locked register read: wake the NIC, read, release, under reg_lock.
 * NOTE(review): unlike il_wr(), the _il_grab_nic_access() return value
 * is ignored here, so a failed wakeup still performs the read.
 */
static inline u32
il_rd(struct il_priv *il, u32 reg)
{
	u32 value;
	unsigned long reg_flags;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	_il_grab_nic_access(il);
	value = _il_rd(il, reg);
	_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
	return value;

}
2295
/* Locked register write: wake the NIC, write, release, under reg_lock.
 * The write is silently skipped if the NIC cannot be woken. */
static inline void
il_wr(struct il_priv *il, u32 reg, u32 value)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	if (!_il_grab_nic_access(il)) {
		_il_wr(il, reg, value);
		_il_release_nic_access(il);
	}
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
}
2308
2309static inline void
2310il_write_reg_buf(struct il_priv *il, u32 reg, u32 len, u32 * values)
2311{
2312 u32 count = sizeof(u32);
2313
2314 if (il != NULL && values != NULL) {
2315 for (; 0 < len; len -= count, reg += count, values++)
2316 il_wr(il, reg, *values);
2317 }
2318}
2319
/*
 * Like _il_poll_bit() but waits for all bits in @mask to be set, and
 * each sample goes through the locked il_rd() (NIC wakeup per read).
 * Returns elapsed usecs on success, -ETIMEDOUT otherwise.
 */
static inline int
il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout)
{
	int t = 0;

	do {
		if ((il_rd(il, addr) & mask) == mask)
			return t;
		udelay(IL_POLL_INTERVAL);
		t += IL_POLL_INTERVAL;
	} while (t < timeout);

	return -ETIMEDOUT;
}
2334
/* Raw indirect read of a periphery (PRPH) register: program the address
 * port (the 3 << 24 control bits follow the HBUS conventions -- see
 * csr.h), then read the data port.  rmb() orders the two accesses. */
static inline u32
_il_rd_prph(struct il_priv *il, u32 reg)
{
	_il_wr(il, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
	rmb();
	return _il_rd(il, HBUS_TARG_PRPH_RDAT);
}
2342
/* Locked PRPH read under reg_lock with NIC wakeup.
 * NOTE(review): _il_grab_nic_access() result is ignored, as in il_rd(). */
static inline u32
il_rd_prph(struct il_priv *il, u32 reg)
{
	unsigned long reg_flags;
	u32 val;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	_il_grab_nic_access(il);
	val = _il_rd_prph(il, reg);
	_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
	return val;
}
2356
/* Raw indirect write of a PRPH register.  Note the write path masks the
 * address to 16 bits while the read path (_il_rd_prph) does not --
 * presumably PRPH addresses always fit in 16 bits; wmb() orders the
 * address write before the data write. */
static inline void
_il_wr_prph(struct il_priv *il, u32 addr, u32 val)
{
	_il_wr(il, HBUS_TARG_PRPH_WADDR, ((addr & 0x0000FFFF) | (3 << 24)));
	wmb();
	_il_wr(il, HBUS_TARG_PRPH_WDAT, val);
}
2364
2365static inline void
2366il_wr_prph(struct il_priv *il, u32 addr, u32 val)
2367{
2368 unsigned long reg_flags;
2369
2370 spin_lock_irqsave(&il->reg_lock, reg_flags);
2371 if (!_il_grab_nic_access(il)) {
2372 _il_wr_prph(il, addr, val);
2373 _il_release_nic_access(il);
2374 }
2375 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
2376}
2377
/* Raw OR of @mask into PRPH register @reg. */
#define _il_set_bits_prph(il, reg, mask) \
_il_wr_prph(il, reg, (_il_rd_prph(il, reg) | mask))

/* Locked read-modify-write OR on a PRPH register.
 * NOTE(review): _il_grab_nic_access() result is ignored here. */
static inline void
il_set_bits_prph(struct il_priv *il, u32 reg, u32 mask)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	_il_grab_nic_access(il);
	_il_set_bits_prph(il, reg, mask);
	_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
}
2392
/* Raw masked update of a PRPH register: keep (old & mask), OR in @bits. */
#define _il_set_bits_mask_prph(il, reg, bits, mask) \
_il_wr_prph(il, reg, \
	     ((_il_rd_prph(il, reg) & mask) | bits))

/* Locked masked update on a PRPH register.
 * NOTE(review): _il_grab_nic_access() result is ignored here. */
static inline void
il_set_bits_mask_prph(struct il_priv *il, u32 reg, u32 bits, u32 mask)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	_il_grab_nic_access(il);
	_il_set_bits_mask_prph(il, reg, bits, mask);
	_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
}
2408
2409static inline void
2410il_clear_bits_prph(struct il_priv *il, u32 reg, u32 mask)
2411{
2412 unsigned long reg_flags;
2413 u32 val;
2414
2415 spin_lock_irqsave(&il->reg_lock, reg_flags);
2416 _il_grab_nic_access(il);
2417 val = _il_rd_prph(il, reg);
2418 _il_wr_prph(il, reg, (val & ~mask));
2419 _il_release_nic_access(il);
2420 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
2421}
2422
/* Read one 32-bit word of device (target) SRAM at @addr, under reg_lock
 * with NIC wakeup.  NOTE(review): grab result is ignored, as in il_rd(). */
static inline u32
il_read_targ_mem(struct il_priv *il, u32 addr)
{
	unsigned long reg_flags;
	u32 value;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	_il_grab_nic_access(il);

	_il_wr(il, HBUS_TARG_MEM_RADDR, addr);
	rmb();
	value = _il_rd(il, HBUS_TARG_MEM_RDAT);

	_il_release_nic_access(il);
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
	return value;
}
2440
/* Write one 32-bit word of device (target) SRAM at @addr; skipped
 * entirely if the NIC cannot be woken. */
static inline void
il_write_targ_mem(struct il_priv *il, u32 addr, u32 val)
{
	unsigned long reg_flags;

	spin_lock_irqsave(&il->reg_lock, reg_flags);
	if (!_il_grab_nic_access(il)) {
		_il_wr(il, HBUS_TARG_MEM_WADDR, addr);
		wmb();
		_il_wr(il, HBUS_TARG_MEM_WDAT, val);
		_il_release_nic_access(il);
	}
	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
}
2455
2456static inline void
2457il_write_targ_mem_buf(struct il_priv *il, u32 addr, u32 len, u32 * values)
2458{
2459 unsigned long reg_flags;
2460
2461 spin_lock_irqsave(&il->reg_lock, reg_flags);
2462 if (!_il_grab_nic_access(il)) {
2463 _il_wr(il, HBUS_TARG_MEM_WADDR, addr);
2464 wmb();
2465 for (; 0 < len; len -= sizeof(u32), values++)
2466 _il_wr(il, HBUS_TARG_MEM_WDAT, *values);
2467
2468 _il_release_nic_access(il);
2469 }
2470 spin_unlock_irqrestore(&il->reg_lock, reg_flags);
2471}
2472
2473#define HW_KEY_DYNAMIC 0
2474#define HW_KEY_DEFAULT 1
2475
2476#define IL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
2477#define IL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
2478#define IL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
2479 being activated */
2480#define IL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
2481 (this is for the IBSS BSSID stations) */
2482#define IL_STA_BCAST BIT(4) /* this station is the special bcast station */
2483
2484void il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx);
2485void il_clear_ucode_stations(struct il_priv *il, struct il_rxon_context *ctx);
2486void il_dealloc_bcast_stations(struct il_priv *il);
2487int il_get_free_ucode_key_idx(struct il_priv *il);
2488int il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags);
2489int il_add_station_common(struct il_priv *il, struct il_rxon_context *ctx,
2490 const u8 *addr, bool is_ap,
2491 struct ieee80211_sta *sta, u8 *sta_id_r);
2492int il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr);
2493int il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2494 struct ieee80211_sta *sta);
2495
2496u8 il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
2497 const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
2498
2499int il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx,
2500 struct il_link_quality_cmd *lq, u8 flags, bool init);
2501
2502/**
2503 * il_clear_driver_stations - clear knowledge of all stations from driver
2504 * @il: iwl il struct
2505 *
2506 * This is called during il_down() to make sure that in the case
2507 * we're coming there from a hardware restart mac80211 will be
2508 * able to reconfigure stations -- if we're getting there in the
2509 * normal down flow then the stations will already be cleared.
2510 */
static inline void
il_clear_driver_stations(struct il_priv *il)
{
	unsigned long flags;
	struct il_rxon_context *ctx = &il->ctx;

	spin_lock_irqsave(&il->sta_lock, flags);
	memset(il->stations, 0, sizeof(il->stations));
	il->num_stations = 0;

	/* also forget which uCode key-table slots were handed out */
	il->ucode_key_table = 0;

	/*
	 * Remove all key information that is not stored as part
	 * of station information since mac80211 may not have had
	 * a chance to remove all the keys. When device is
	 * reconfigured by mac80211 after an error all keys will
	 * be reconfigured.
	 */
	memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
	ctx->key_mapping_keys = 0;

	spin_unlock_irqrestore(&il->sta_lock, flags);
}
2535
/* Return the driver station id stored in mac80211's per-sta private
 * area, or IL_INVALID_STATION (with a WARN) for a NULL @sta. */
static inline int
il_sta_id(struct ieee80211_sta *sta)
{
	if (WARN_ON(!sta))
		return IL_INVALID_STATION;

	return ((struct il_station_priv_common *)sta->drv_priv)->sta_id;
}
2544
2545/**
2546 * il_sta_id_or_broadcast - return sta_id or broadcast sta
2547 * @il: iwl il
2548 * @context: the current context
2549 * @sta: mac80211 station
2550 *
2551 * In certain circumstances mac80211 passes a station pointer
2552 * that may be %NULL, for example during TX or key setup. In
2553 * that case, we need to use the broadcast station, so this
2554 * inline wraps that pattern.
2555 */
static inline int
il_sta_id_or_broadcast(struct il_priv *il, struct il_rxon_context *context,
		       struct ieee80211_sta *sta)
{
	int sta_id;

	/* TX and key-setup paths may legitimately hand us no station. */
	if (!sta)
		return context->bcast_sta_id;

	sta_id = il_sta_id(sta);

	/*
	 * mac80211 should not be passing a partially
	 * initialised station!
	 */
	WARN_ON(sta_id == IL_INVALID_STATION);

	return sta_id;
}
2575
2576/**
2577 * il_queue_inc_wrap - increment queue idx, wrap back to beginning
2578 * @idx -- current idx
2579 * @n_bd -- total number of entries in queue (must be power of 2)
2580 */
static inline int
il_queue_inc_wrap(int idx, int n_bd)
{
	/* n_bd is a power of 2, so the AND implements the wrap-around. */
	idx += 1;
	return idx & (n_bd - 1);
}
2586
2587/**
2588 * il_queue_dec_wrap - decrement queue idx, wrap back to end
2589 * @idx -- current idx
2590 * @n_bd -- total number of entries in queue (must be power of 2)
2591 */
static inline int
il_queue_dec_wrap(int idx, int n_bd)
{
	/* n_bd is a power of 2, so the AND wraps -1 back to n_bd - 1. */
	idx -= 1;
	return idx & (n_bd - 1);
}
2597
2598/* TODO: Move fw_desc functions to iwl-pci.ko */
/* Free a firmware-image DMA buffer and reset the descriptor fields so
 * a repeated call is harmless. */
static inline void
il_free_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
{
	if (desc->v_addr)
		dma_free_coherent(&pci_dev->dev, desc->len, desc->v_addr,
				  desc->p_addr);
	desc->v_addr = NULL;
	desc->len = 0;
}
2608
/* Allocate a coherent DMA buffer of desc->len bytes for a firmware
 * image.  Returns 0 on success, -EINVAL for a zero length, -ENOMEM on
 * allocation failure. */
static inline int
il_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
{
	if (!desc->len) {
		desc->v_addr = NULL;
		return -EINVAL;
	}

	desc->v_addr =
	    dma_alloc_coherent(&pci_dev->dev, desc->len, &desc->p_addr,
			       GFP_KERNEL);
	return (desc->v_addr != NULL) ? 0 : -ENOMEM;
}
2622
2623/*
2624 * we have 8 bits used like this:
2625 *
2626 * 7 6 5 4 3 2 1 0
2627 * | | | | | | | |
2628 * | | | | | | +-+-------- AC queue (0-3)
2629 * | | | | | |
2630 * | +-+-+-+-+------------ HW queue ID
2631 * |
2632 * +---------------------- unused
2633 */
/* Pack the AC (low 2 bits) and HW queue id (next 5 bits) into swq_id,
 * per the bit layout diagrammed above. */
static inline void
il_set_swq_id(struct il_tx_queue *txq, u8 ac, u8 hwq)
{
	BUG_ON(ac > 3);		/* only have 2 bits */
	BUG_ON(hwq > 31);	/* only use 5 bits */

	txq->swq_id = (hwq << 2) | ac;
}
2642
/* Unpack swq_id and wake the mac80211 AC queue once the per-AC stop
 * refcount (queue_stop_count) drops to zero for this HW queue. */
static inline void
il_wake_queue(struct il_priv *il, struct il_tx_queue *txq)
{
	u8 queue = txq->swq_id;
	u8 ac = queue & 3;
	u8 hwq = (queue >> 2) & 0x1f;

	if (test_and_clear_bit(hwq, il->queue_stopped))
		if (atomic_dec_return(&il->queue_stop_count[ac]) <= 0)
			ieee80211_wake_queue(il->hw, ac);
}
2654
/* Unpack swq_id and stop the mac80211 AC queue, bumping the per-AC
 * stop refcount; the bitmap keeps each HW queue counted only once. */
static inline void
il_stop_queue(struct il_priv *il, struct il_tx_queue *txq)
{
	u8 queue = txq->swq_id;
	u8 ac = queue & 3;
	u8 hwq = (queue >> 2) & 0x1f;

	if (!test_and_set_bit(hwq, il->queue_stopped))
		if (atomic_inc_return(&il->queue_stop_count[ac]) > 0)
			ieee80211_stop_queue(il->hw, ac);
}
2666
2667#ifdef ieee80211_stop_queue
2668#undef ieee80211_stop_queue
2669#endif
2670
2671#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
2672
2673#ifdef ieee80211_wake_queue
2674#undef ieee80211_wake_queue
2675#endif
2676
2677#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
2678
/* Mask all device interrupt sources and ack anything already pending. */
static inline void
il_disable_interrupts(struct il_priv *il)
{
	clear_bit(S_INT_ENABLED, &il->status);

	/* disable interrupts from uCode/NIC to host */
	_il_wr(il, CSR_INT_MASK, 0x00000000);

	/* acknowledge/clear/reset any interrupts still pending
	 * from uCode or flow handler (Rx/Tx DMA) */
	_il_wr(il, CSR_INT, 0xffffffff);
	_il_wr(il, CSR_FH_INT_STATUS, 0xffffffff);
}
2692
/* Leave only the RF-kill interrupt enabled. */
static inline void
il_enable_rfkill_int(struct il_priv *il)
{
	_il_wr(il, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
}
2698
/* Mark interrupts enabled and restore the driver's full mask. */
static inline void
il_enable_interrupts(struct il_priv *il)
{
	set_bit(S_INT_ENABLED, &il->status);
	_il_wr(il, CSR_INT_MASK, il->inta_mask);
}
2705
2706/**
2707 * il_beacon_time_mask_low - mask of lower 32 bit of beacon time
2708 * @il -- pointer to il_priv data structure
2709 * @tsf_bits -- number of bits need to shift for masking)
2710 */
2711static inline u32
2712il_beacon_time_mask_low(struct il_priv *il, u16 tsf_bits)
2713{
2714 return (1 << tsf_bits) - 1;
2715}
2716
2717/**
2718 * il_beacon_time_mask_high - mask of higher 32 bit of beacon time
2719 * @il -- pointer to il_priv data structure
2720 * @tsf_bits -- number of bits need to shift for masking)
2721 */
2722static inline u32
2723il_beacon_time_mask_high(struct il_priv *il, u16 tsf_bits)
2724{
2725 return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
2726}
2727
2728/**
2729 * struct il_rb_status - reseve buffer status host memory mapped FH registers
2730 *
2731 * @closed_rb_num [0:11] - Indicates the idx of the RB which was closed
2732 * @closed_fr_num [0:11] - Indicates the idx of the RX Frame which was closed
2733 * @finished_rb_num [0:11] - Indicates the idx of the current RB
2734 * in which the last frame was written to
2735 * @finished_fr_num [0:11] - Indicates the idx of the RX Frame
2736 * which was transferred
2737 */
2738struct il_rb_status {
2739 __le16 closed_rb_num;
2740 __le16 closed_fr_num;
2741 __le16 finished_rb_num;
2742 __le16 finished_fr_nam;
2743 __le32 __unused; /* 3945 only */
2744} __packed;
2745
2746#define TFD_QUEUE_SIZE_MAX (256)
2747#define TFD_QUEUE_SIZE_BC_DUP (64)
2748#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
2749#define IL_TX_DMA_MASK DMA_BIT_MASK(36)
2750#define IL_NUM_OF_TBS 20
2751
/* Return bits [35:32] of a DMA address (0 when dma_addr_t is 32-bit),
 * as stored in a TFD buffer descriptor's hi_n_len field. */
static inline u8
il_get_dma_hi_addr(dma_addr_t addr)
{
	/* Two 16-bit shifts instead of ">> 32": a single 32-bit shift
	 * would be undefined when dma_addr_t is only 32 bits wide. */
	return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
}
2757
2758/**
2759 * struct il_tfd_tb transmit buffer descriptor within transmit frame descriptor
2760 *
2761 * This structure contains dma address and length of transmission address
2762 *
2763 * @lo: low [31:0] portion of the dma address of TX buffer every even is
2764 * unaligned on 16 bit boundary
2765 * @hi_n_len: 0-3 [35:32] portion of dma
2766 * 4-15 length of the tx buffer
2767 */
2768struct il_tfd_tb {
2769 __le32 lo;
2770 __le16 hi_n_len;
2771} __packed;
2772
2773/**
2774 * struct il_tfd
2775 *
2776 * Transmit Frame Descriptor (TFD)
2777 *
2778 * @ __reserved1[3] reserved
2779 * @ num_tbs 0-4 number of active tbs
2780 * 5 reserved
2781 * 6-7 padding (not used)
2782 * @ tbs[20] transmit frame buffer descriptors
2783 * @ __pad padding
2784 *
2785 * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
2786 * Both driver and device share these circular buffers, each of which must be
2787 * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
2788 *
2789 * Driver must indicate the physical address of the base of each
2790 * circular buffer via the FH49_MEM_CBBC_QUEUE registers.
2791 *
2792 * Each TFD contains pointer/size information for up to 20 data buffers
2793 * in host DRAM. These buffers collectively contain the (one) frame described
2794 * by the TFD. Each buffer must be a single contiguous block of memory within
2795 * itself, but buffers may be scattered in host DRAM. Each buffer has max size
2796 * of (4K - 4). The concatenates all of a TFD's buffers into a single
2797 * Tx frame, up to 8 KBytes in size.
2798 *
2799 * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
2800 */
2801struct il_tfd {
2802 u8 __reserved1[3];
2803 u8 num_tbs;
2804 struct il_tfd_tb tbs[IL_NUM_OF_TBS];
2805 __le32 __pad;
2806} __packed;
2807/* PCI registers */
2808#define PCI_CFG_RETRY_TIMEOUT 0x041
2809
2810/* PCI register values */
2811#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
2812#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
2813
/* Per-rate lookup entry (4965): uCode PLCP codes for legacy/SISO/MIMO2,
 * the IEEE MAC-header rate value, and prev/next links walked by the
 * rate-scaling algorithm. */
struct il_rate_info {
	u8 plcp;		/* uCode API:  RATE_6M_PLCP, etc. */
	u8 plcp_siso;		/* uCode API:  RATE_SISO_6M_PLCP, etc. */
	u8 plcp_mimo2;		/* uCode API:  RATE_MIMO2_6M_PLCP, etc. */
	u8 ieee;		/* MAC header:  RATE_6M_IEEE, etc. */
	u8 prev_ieee;		/* previous rate in IEEE speeds */
	u8 next_ieee;		/* next rate in IEEE speeds */
	u8 prev_rs;		/* previous rate used in rs algo */
	u8 next_rs;		/* next rate used in rs algo */
	u8 prev_rs_tgg;		/* previous rate used in TGG rs algo */
	u8 next_rs_tgg;		/* next rate used in TGG rs algo */
};
2826
/* 3945 variant of il_rate_info: no HT PLCP codes, but carries idx
 * links for the rate-scale table command. */
struct il3945_rate_info {
	u8 plcp;		/* uCode API:  RATE_6M_PLCP, etc. */
	u8 ieee;		/* MAC header:  RATE_6M_IEEE, etc. */
	u8 prev_ieee;		/* previous rate in IEEE speeds */
	u8 next_ieee;		/* next rate in IEEE speeds */
	u8 prev_rs;		/* previous rate used in rs algo */
	u8 next_rs;		/* next rate used in rs algo */
	u8 prev_rs_tgg;		/* previous rate used in TGG rs algo */
	u8 next_rs_tgg;		/* next rate used in TGG rs algo */
	u8 table_rs_idx;	/* idx in rate scale table cmd */
	u8 prev_table_rs;	/* prev in rate table cmd */
};
2839
2840/*
2841 * These serve as idxes into
2842 * struct il_rate_info il_rates[RATE_COUNT];
2843 */
enum {
	RATE_1M_IDX = 0,	/* CCK rates first */
	RATE_2M_IDX,
	RATE_5M_IDX,		/* 5.5 Mbps (RATE_5M_IEEE == 11) */
	RATE_11M_IDX,
	RATE_6M_IDX,		/* OFDM rates from here (IL_FIRST_OFDM_RATE) */
	RATE_9M_IDX,
	RATE_12M_IDX,
	RATE_18M_IDX,
	RATE_24M_IDX,
	RATE_36M_IDX,
	RATE_48M_IDX,
	RATE_54M_IDX,
	RATE_60M_IDX,
	RATE_COUNT,
	RATE_COUNT_LEGACY = RATE_COUNT - 1,	/* Excluding 60M */
	RATE_COUNT_3945 = RATE_COUNT - 1,
	RATE_INVM_IDX = RATE_COUNT,
	RATE_INVALID = RATE_COUNT,
};
2864
/* Alternate rate ordering (OFDM first, then CCK) -- apparently the
 * idxing used by the rate-scale table command (cf. table_rs_idx in
 * struct il3945_rate_info); confirm against the rs code. */
enum {
	RATE_6M_IDX_TBL = 0,
	RATE_9M_IDX_TBL,
	RATE_12M_IDX_TBL,
	RATE_18M_IDX_TBL,
	RATE_24M_IDX_TBL,
	RATE_36M_IDX_TBL,
	RATE_48M_IDX_TBL,
	RATE_54M_IDX_TBL,
	RATE_1M_IDX_TBL,
	RATE_2M_IDX_TBL,
	RATE_5M_IDX_TBL,
	RATE_11M_IDX_TBL,
	RATE_INVM_IDX_TBL = RATE_INVM_IDX - 1,
};
2880
/* Boundaries of the CCK and OFDM ranges within the rate idx enum above
 * (3945 has no 60M entry, hence its own last-OFDM marker). */
enum {
	IL_FIRST_OFDM_RATE = RATE_6M_IDX,
	IL39_LAST_OFDM_RATE = RATE_54M_IDX,
	IL_LAST_OFDM_RATE = RATE_60M_IDX,
	IL_FIRST_CCK_RATE = RATE_1M_IDX,
	IL_LAST_CCK_RATE = RATE_11M_IDX,
};
2888
2889/* #define vs. enum to keep from defaulting to 'large integer' */
2890#define RATE_6M_MASK (1 << RATE_6M_IDX)
2891#define RATE_9M_MASK (1 << RATE_9M_IDX)
2892#define RATE_12M_MASK (1 << RATE_12M_IDX)
2893#define RATE_18M_MASK (1 << RATE_18M_IDX)
2894#define RATE_24M_MASK (1 << RATE_24M_IDX)
2895#define RATE_36M_MASK (1 << RATE_36M_IDX)
2896#define RATE_48M_MASK (1 << RATE_48M_IDX)
2897#define RATE_54M_MASK (1 << RATE_54M_IDX)
2898#define RATE_60M_MASK (1 << RATE_60M_IDX)
2899#define RATE_1M_MASK (1 << RATE_1M_IDX)
2900#define RATE_2M_MASK (1 << RATE_2M_IDX)
2901#define RATE_5M_MASK (1 << RATE_5M_IDX)
2902#define RATE_11M_MASK (1 << RATE_11M_IDX)
2903
2904/* uCode API values for legacy bit rates, both OFDM and CCK */
enum {
	RATE_6M_PLCP = 13,
	RATE_9M_PLCP = 15,
	RATE_12M_PLCP = 5,
	RATE_18M_PLCP = 7,
	RATE_24M_PLCP = 9,
	RATE_36M_PLCP = 11,
	RATE_48M_PLCP = 1,
	RATE_54M_PLCP = 3,
	RATE_60M_PLCP = 3,	/*FIXME:RS:should be removed */
	/* CCK values encode the rate in units of 0.1 Mbps: */
	RATE_1M_PLCP = 10,
	RATE_2M_PLCP = 20,
	RATE_5M_PLCP = 55,
	RATE_11M_PLCP = 110,
	/*FIXME:RS:add RATE_LEGACY_INVM_PLCP = 0, */
};
2921
2922/* uCode API values for OFDM high-throughput (HT) bit rates */
enum {
	RATE_SISO_6M_PLCP = 0,
	RATE_SISO_12M_PLCP = 1,
	RATE_SISO_18M_PLCP = 2,
	RATE_SISO_24M_PLCP = 3,
	RATE_SISO_36M_PLCP = 4,
	RATE_SISO_48M_PLCP = 5,
	RATE_SISO_54M_PLCP = 6,
	RATE_SISO_60M_PLCP = 7,
	/* MIMO2 codes are the corresponding SISO code with bit 3 set: */
	RATE_MIMO2_6M_PLCP = 0x8,
	RATE_MIMO2_12M_PLCP = 0x9,
	RATE_MIMO2_18M_PLCP = 0xa,
	RATE_MIMO2_24M_PLCP = 0xb,
	RATE_MIMO2_36M_PLCP = 0xc,
	RATE_MIMO2_48M_PLCP = 0xd,
	RATE_MIMO2_54M_PLCP = 0xe,
	RATE_MIMO2_60M_PLCP = 0xf,
	RATE_SISO_INVM_PLCP,
	RATE_MIMO2_INVM_PLCP = RATE_SISO_INVM_PLCP,
};
2943
2944/* MAC header values for bit rates */
/* Values are the rate in units of 500 kbps (e.g. 6 Mbps -> 12). */
enum {
	RATE_6M_IEEE = 12,
	RATE_9M_IEEE = 18,
	RATE_12M_IEEE = 24,
	RATE_18M_IEEE = 36,
	RATE_24M_IEEE = 48,
	RATE_36M_IEEE = 72,
	RATE_48M_IEEE = 96,
	RATE_54M_IEEE = 108,
	RATE_60M_IEEE = 120,
	RATE_1M_IEEE = 2,
	RATE_2M_IEEE = 4,
	RATE_5M_IEEE = 11,
	RATE_11M_IEEE = 22,
};
2960
/* Bitmasks over the CCK (802.11b) rates.  The *_BASIC_* variant covers only
 * the mandatory rates used for management/control traffic. */
#define IL_CCK_BASIC_RATES_MASK		\
	(RATE_1M_MASK |			\
	 RATE_2M_MASK)

#define IL_CCK_RATES_MASK		\
	(IL_CCK_BASIC_RATES_MASK |	\
	 RATE_5M_MASK |			\
	 RATE_11M_MASK)

/* Bitmasks over the OFDM (802.11a/g) rates */
#define IL_OFDM_BASIC_RATES_MASK	\
	(RATE_6M_MASK |			\
	 RATE_12M_MASK |		\
	 RATE_24M_MASK)

#define IL_OFDM_RATES_MASK		\
	(IL_OFDM_BASIC_RATES_MASK |	\
	 RATE_9M_MASK |			\
	 RATE_18M_MASK |		\
	 RATE_36M_MASK |		\
	 RATE_48M_MASK |		\
	 RATE_54M_MASK)

#define IL_BASIC_RATES_MASK		\
	(IL_OFDM_BASIC_RATES_MASK |	\
	 IL_CCK_BASIC_RATES_MASK)

/* Mask with one bit set per supported rate (4965 and 3945 rate tables) */
#define RATES_MASK ((1 << RATE_COUNT) - 1)
#define RATES_MASK_3945 ((1 << RATE_COUNT_3945) - 1)

#define IL_INVALID_VALUE    -1

/* RSSI bounds, in dBm */
#define IL_MIN_RSSI_VAL                 -100
#define IL_MAX_RSSI_VAL                    0

/* These values specify how many Tx frame attempts before
 * searching for a new modulation mode */
#define IL_LEGACY_FAILURE_LIMIT	160
#define IL_LEGACY_SUCCESS_LIMIT	480
#define IL_LEGACY_TBL_COUNT	160

#define IL_NONE_LEGACY_FAILURE_LIMIT	400
#define IL_NONE_LEGACY_SUCCESS_LIMIT	4500
#define IL_NONE_LEGACY_TBL_COUNT	1500

/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
#define IL_RS_GOOD_RATIO		12800	/* 100% */
#define RATE_SCALE_SWITCH		10880	/*  85% */
#define RATE_HIGH_TH		10880	/*  85% */
#define RATE_INCREASE_TH		6400	/*  50% */
#define RATE_DECREASE_TH		1920	/*  15% */

/* possible actions when in legacy mode */
#define IL_LEGACY_SWITCH_ANTENNA1      0
#define IL_LEGACY_SWITCH_ANTENNA2      1
#define IL_LEGACY_SWITCH_SISO          2
#define IL_LEGACY_SWITCH_MIMO2_AB      3
#define IL_LEGACY_SWITCH_MIMO2_AC      4
#define IL_LEGACY_SWITCH_MIMO2_BC      5

/* possible actions when in siso mode */
#define IL_SISO_SWITCH_ANTENNA1        0
#define IL_SISO_SWITCH_ANTENNA2        1
#define IL_SISO_SWITCH_MIMO2_AB        2
#define IL_SISO_SWITCH_MIMO2_AC        3
#define IL_SISO_SWITCH_MIMO2_BC        4
#define IL_SISO_SWITCH_GI              5

/* possible actions when in mimo mode */
#define IL_MIMO2_SWITCH_ANTENNA1       0
#define IL_MIMO2_SWITCH_ANTENNA2       1
#define IL_MIMO2_SWITCH_SISO_A         2
#define IL_MIMO2_SWITCH_SISO_B         3
#define IL_MIMO2_SWITCH_SISO_C         4
#define IL_MIMO2_SWITCH_GI             5

/* highest action number in any of the switch tables above */
#define IL_MAX_SEARCH IL_MIMO2_SWITCH_GI

#define IL_ACTION_LIMIT		3	/* # possible actions */

#define LQ_SIZE		2	/* 2 mode tables:  "Active" and "Search" */

/* load per tid defines for A-MPDU activation */
#define IL_AGG_TPT_THREHOLD	0	/* NOTE: misspelling of "THRESHOLD" kept for API stability */
#define IL_AGG_LOAD_THRESHOLD	10
#define IL_AGG_ALL_TID		0xff
#define TID_QUEUE_CELL_SPACING	50	/*mS */
#define TID_QUEUE_MAX_SIZE	20
#define TID_ROUND_VALUE		5	/* mS */
#define TID_MAX_LOAD_COUNT	8

/* time span (mS) covered by the whole circular traffic-load buffer */
#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
/* elapsed time y - x, tolerating one wrap-around of the counter */
#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))

/* per-rate info table, defined in the rate-scaling code */
extern const struct il_rate_info il_rates[RATE_COUNT];
3055
/* Modulation class of a link-quality table.  Values are ordered: legacy
 * (non-HT) types first, then the high-throughput types; do not reorder --
 * the is_legacy()/is_siso()/is_mimo2() helpers depend on these values. */
enum il_table_type {
	LQ_NONE,
	LQ_G,			/* legacy types */
	LQ_A,
	LQ_SISO,		/* high-throughput types */
	LQ_MIMO2,
	LQ_MAX,
};
3064
/* Predicates classifying an enum il_table_type value */
#define is_legacy(tbl) ((tbl) == LQ_G || (tbl) == LQ_A)
#define is_siso(tbl) ((tbl) == LQ_SISO)
#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
#define is_mimo(tbl) (is_mimo2(tbl))	/* only 2x2 MIMO supported */
#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
#define is_a_band(tbl) ((tbl) == LQ_A)
#define is_g_and(tbl) ((tbl) == LQ_G)

/* Antenna bitmasks: one bit per chain A/B/C */
#define	ANT_NONE	0x0
#define	ANT_A		BIT(0)
#define	ANT_B		BIT(1)
#define	ANT_AB		(ANT_A | ANT_B)
#define ANT_C		BIT(2)
#define	ANT_AC		(ANT_A | ANT_C)
#define ANT_BC		(ANT_B | ANT_C)
#define ANT_ABC		(ANT_AB | ANT_C)

/* buffer size for the debugfs MCS display strings below */
#define IL_MAX_MCS_DISPLAY_SIZE	12
3083
/* Human-readable rate description (used by debugfs output) */
struct il_rate_mcs_info {
	char mbps[IL_MAX_MCS_DISPLAY_SIZE];	/* rate in Mbps, as text */
	char mcs[IL_MAX_MCS_DISPLAY_SIZE];	/* MCS idx, as text */
};
3088
/**
 * struct il_rate_scale_data -- tx success history for one rate
 */
struct il_rate_scale_data {
	u64 data;		/* bitmap of successful frames */
	s32 success_counter;	/* number of frames successful */
	s32 success_ratio;	/* per-cent * 128  */
	s32 counter;		/* number of frames attempted */
	s32 average_tpt;	/* success ratio * expected throughput */
	unsigned long stamp;	/* jiffies of last window update */
};
3100
/**
 * struct il_scale_tbl_info -- tx params and success history for all rates
 *
 * There are two of these in struct il_lq_sta,
 * one for "active", and one for "search".
 */
struct il_scale_tbl_info {
	enum il_table_type lq_type;
	u8 ant_type;		/* ANT_[ABC] bitmask of chains in use */
	u8 is_SGI;		/* 1 = short guard interval */
	u8 is_ht40;		/* 1 = 40 MHz channel width */
	u8 is_dup;		/* 1 = duplicated data streams */
	u8 action;		/* change modulation; IL_[LEGACY/SISO/MIMO]_SWITCH_* */
	u8 max_search;		/* maximum number of tables we can search */
	s32 *expected_tpt;	/* throughput metrics; expected_tpt_G, etc. */
	u32 current_rate;	/* rate_n_flags, uCode API format */
	struct il_rate_scale_data win[RATE_COUNT];	/* rate histories */
};
3119
/* Circular buffer of per-TID packet counts, used to decide whether the
 * traffic load justifies enabling A-MPDU aggregation. */
struct il_traffic_load {
	unsigned long time_stamp;	/* age of the oldest stats */
	u32 packet_count[TID_QUEUE_MAX_SIZE];	/* packet count in this time
						 * slice */
	u32 total;		/* total num of packets during the
				 * last TID_MAX_TIME_DIFF */
	u8 queue_count;		/* number of queues that has
				 * been used since the last cleanup */
	u8 head;		/* start of the circular buffer */
};
3130
/**
 * struct il_lq_sta -- driver's rate scaling private structure
 *
 * Pointer to this gets passed back and forth between driver and mac80211.
 */
struct il_lq_sta {
	u8 active_tbl;		/* idx of active table, range 0-1 */
	u8 enable_counter;	/* indicates HT mode */
	u8 stay_in_tbl;		/* 1: disallow, 0: allow search for new mode */
	u8 search_better_tbl;	/* 1: currently trying alternate mode */
	s32 last_tpt;		/* throughput of the mode we may return to */

	/* The following determine when to search for a new mode */
	u32 table_count_limit;
	u32 max_failure_limit;	/* # failed frames before new search */
	u32 max_success_limit;	/* # successful frames before new search */
	u32 table_count;
	u32 total_failed;	/* total failed frames, any/all rates */
	u32 total_success;	/* total successful frames, any/all rates */
	u64 flush_timer;	/* time staying in mode before new search */

	u8 action_counter;	/* # mode-switch actions tried */
	u8 is_green;		/* HT greenfield preamble in use */
	u8 is_dup;		/* duplicated data streams */
	enum ieee80211_band band;

	/* The following are bitmaps of rates; RATE_6M_MASK, etc. */
	u32 supp_rates;
	u16 active_legacy_rate;
	u16 active_siso_rate;
	u16 active_mimo2_rate;
	s8 max_rate_idx;	/* Max rate set by user */
	u8 missed_rate_counter;	/* consecutive tx at other than requested rate */

	struct il_link_quality_cmd lq;	/* uCode link-quality command image */
	struct il_scale_tbl_info lq_info[LQ_SIZE];	/* "active", "search" */
	struct il_traffic_load load[TID_MAX_LOAD_COUNT];	/* per-TID load */
	u8 tx_agg_tid_en;	/* bitmask: TIDs allowed to aggregate */
#ifdef CONFIG_MAC80211_DEBUGFS
	struct dentry *rs_sta_dbgfs_scale_table_file;
	struct dentry *rs_sta_dbgfs_stats_table_file;
	struct dentry *rs_sta_dbgfs_rate_scale_data_file;
	struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
	u32 dbg_fixed_rate;	/* nonzero: force this rate_n_flags value */
#endif
	struct il_priv *drv;	/* back-pointer to the owning device */

	/* used to be in sta_info */
	int last_txrate_idx;
	/* last tx rate_n_flags */
	u32 last_rate_n_flags;
	/* packets destined for this STA are aggregated */
	u8 is_agg;
};
3185
/*
 * il_station_priv: Driver's private station information
 *
 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
 * in the structure for use by driver. This structure is placed in that
 * space.
 *
 * The common struct MUST be first because it is shared between
 * 3945 and 4965!
 */
struct il_station_priv {
	struct il_station_priv_common common;	/* MUST stay first (see above) */
	struct il_lq_sta lq_sta;	/* rate-scaling state for this STA */
	atomic_t pending_frames;	/* frames queued to HW, not yet reported */
	bool client;		/* true if peer is a client of our AP */
	bool asleep;		/* true if peer is in power-save mode */
};
3203
3204static inline u8
3205il4965_num_of_ant(u8 m)
3206{
3207 return !!(m & ANT_A) + !!(m & ANT_B) + !!(m & ANT_C);
3208}
3209
3210static inline u8
3211il4965_first_antenna(u8 mask)
3212{
3213 if (mask & ANT_A)
3214 return ANT_A;
3215 if (mask & ANT_B)
3216 return ANT_B;
3217 return ANT_C;
3218}
3219
/**
 * il3945_rate_scale_init - Initialize the rate scale table based on assoc info
 *
 * The specific throughput table used is based on the type of network
 * the station is associated with, including A, B, G, and G w/ TGG protection
 */
extern void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);

/* Initialize station's rate scaling information after adding station */
extern void il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
				u8 sta_id);
extern void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
				u8 sta_id);

/**
 * il_rate_control_register - Register the rate control algorithm callbacks
 *
 * Since the rate control algorithm is hardware specific, there is no need
 * or reason to place it as a stand alone module.  The driver can call
 * il_rate_control_register in order to register the rate control callbacks
 * with the mac80211 subsystem.  This should be performed prior to calling
 * ieee80211_register_hw
 *
 */
extern int il4965_rate_control_register(void);
extern int il3945_rate_control_register(void);

/**
 * il_rate_control_unregister - Unregister the rate control callbacks
 *
 * This should be called after calling ieee80211_unregister_hw, but before
 * the driver is unloaded.
 */
extern void il4965_rate_control_unregister(void);
extern void il3945_rate_control_unregister(void);

/* Power management: recompute/apply power mode, and set initial defaults */
extern int il_power_update_mode(struct il_priv *il, bool force);
extern void il_power_initialize(struct il_priv *il);
3258
3259extern u32 il_debug_level;
3260
3261#ifdef CONFIG_IWLEGACY_DEBUG
3262/*
3263 * il_get_debug_level: Return active debug level for device
3264 *
3265 * Using sysfs it is possible to set per device debug level. This debug
3266 * level will be used if set, otherwise the global debug level which can be
3267 * set via module parameter is used.
3268 */
3269static inline u32
3270il_get_debug_level(struct il_priv *il)
3271{
3272 if (il->debug_level)
3273 return il->debug_level;
3274 else
3275 return il_debug_level;
3276}
3277#else
3278static inline u32
3279il_get_debug_level(struct il_priv *il)
3280{
3281 return il_debug_level;
3282}
3283#endif
3284
/* Unconditionally hex-dump @len bytes at @p at KERN_ERR level. */
#define il_print_hex_error(il, p, len)					\
do {									\
	print_hex_dump(KERN_ERR, "iwl data: ",				\
		       DUMP_PREFIX_OFFSET, 16, 1, p, len, 1);		\
} while (0)

#ifdef CONFIG_IWLEGACY_DEBUG
/* Print fmt only when @level is enabled in the device's debug mask;
 * prefix marks interrupt ('I') vs user ('U') context. */
#define IL_DBG(level, fmt, args...)					\
do {									\
	if (il_get_debug_level(il) & level)				\
		dev_printk(KERN_ERR, &il->hw->wiphy->dev,		\
			   "%c %s " fmt, in_interrupt() ? 'I' : 'U',	\
			   __func__ , ## args);				\
} while (0)

/* Hex-dump @len bytes at @p when @level is enabled in the debug mask. */
#define il_print_hex_dump(il, level, p, len)				\
do {									\
	if (il_get_debug_level(il) & level)				\
		print_hex_dump(KERN_DEBUG, "iwl data: ",		\
			       DUMP_PREFIX_OFFSET, 16, 1, p, len, 1);	\
} while (0)

#else
/* Debugging disabled: both helpers compile away to nothing. */
#define IL_DBG(level, fmt, args...)
static inline void
il_print_hex_dump(struct il_priv *il, int level, const void *p, u32 len)
{
}
#endif /* CONFIG_IWLEGACY_DEBUG */
3314
#ifdef CONFIG_IWLEGACY_DEBUGFS
/* Create / tear down the per-device debugfs directory tree. */
int il_dbgfs_register(struct il_priv *il, const char *name);
void il_dbgfs_unregister(struct il_priv *il);
#else
/* debugfs disabled: registration is a successful no-op. */
static inline int
il_dbgfs_register(struct il_priv *il, const char *name)
{
	return 0;
}

static inline void
il_dbgfs_unregister(struct il_priv *il)
{
}
#endif /* CONFIG_IWLEGACY_DEBUGFS */
3330
3331/*
3332 * To use the debug system:
3333 *
3334 * If you are defining a new debug classification, simply add it to the #define
3335 * list here in the form of
3336 *
3337 * #define IL_DL_xxxx VALUE
3338 *
3339 * where xxxx should be the name of the classification (for example, WEP).
3340 *
3341 * You then need to either add a IL_xxxx_DEBUG() macro definition for your
3342 * classification, or use IL_DBG(IL_DL_xxxx, ...) whenever you want
3343 * to send output to that classification.
3344 *
3345 * The active debug levels can be accessed via files
3346 *
3347 * /sys/module/iwl4965/parameters/debug
3348 * /sys/module/iwl3945/parameters/debug
3349 * /sys/class/net/wlan0/device/debug_level
3350 *
3351 * when CONFIG_IWLEGACY_DEBUG=y.
3352 */
3353
/*
 * Debug-level bit assignments for il_debug_level / IL_DBG().
 * One bit per message classification; bit 27 is currently unused.
 */
/* 0x0000000F - 0x00000001 */
#define IL_DL_INFO		(1 << 0)
#define IL_DL_MAC80211		(1 << 1)
#define IL_DL_HCMD		(1 << 2)
#define IL_DL_STATE		(1 << 3)
/* 0x000000F0 - 0x00000010 */
#define IL_DL_MACDUMP		(1 << 4)
#define IL_DL_HCMD_DUMP		(1 << 5)
#define IL_DL_EEPROM		(1 << 6)
#define IL_DL_RADIO		(1 << 7)
/* 0x00000F00 - 0x00000100 */
#define IL_DL_POWER		(1 << 8)
#define IL_DL_TEMP		(1 << 9)
#define IL_DL_NOTIF		(1 << 10)
#define IL_DL_SCAN		(1 << 11)
/* 0x0000F000 - 0x00001000 */
#define IL_DL_ASSOC		(1 << 12)
#define IL_DL_DROP		(1 << 13)
#define IL_DL_TXPOWER		(1 << 14)
#define IL_DL_AP		(1 << 15)
/* 0x000F0000 - 0x00010000 */
#define IL_DL_FW		(1 << 16)
#define IL_DL_RF_KILL		(1 << 17)
#define IL_DL_FW_ERRORS		(1 << 18)
#define IL_DL_LED		(1 << 19)
/* 0x00F00000 - 0x00100000 */
#define IL_DL_RATE		(1 << 20)
#define IL_DL_CALIB		(1 << 21)
#define IL_DL_WEP		(1 << 22)
#define IL_DL_TX		(1 << 23)
/* 0x0F000000 - 0x01000000 */
#define IL_DL_RX		(1 << 24)
#define IL_DL_ISR		(1 << 25)
#define IL_DL_HT		(1 << 26)
/* 0xF0000000 - 0x10000000 */
#define IL_DL_11H		(1 << 28)
#define IL_DL_STATS		(1 << 29)
#define IL_DL_TX_REPLY		(1 << 30)
/* use 1U: left-shifting a signed 1 into bit 31 is undefined behavior in C */
#define IL_DL_QOS		(1U << 31)
3393
/* Convenience wrappers: one per-classification printer built on IL_DBG(). */
#define D_INFO(f, a...)		IL_DBG(IL_DL_INFO, f, ## a)
#define D_MAC80211(f, a...)	IL_DBG(IL_DL_MAC80211, f, ## a)
#define D_MACDUMP(f, a...)	IL_DBG(IL_DL_MACDUMP, f, ## a)
#define D_TEMP(f, a...)		IL_DBG(IL_DL_TEMP, f, ## a)
#define D_SCAN(f, a...)		IL_DBG(IL_DL_SCAN, f, ## a)
#define D_RX(f, a...)		IL_DBG(IL_DL_RX, f, ## a)
#define D_TX(f, a...)		IL_DBG(IL_DL_TX, f, ## a)
#define D_ISR(f, a...)		IL_DBG(IL_DL_ISR, f, ## a)
#define D_LED(f, a...)		IL_DBG(IL_DL_LED, f, ## a)
#define D_WEP(f, a...)		IL_DBG(IL_DL_WEP, f, ## a)
#define D_HC(f, a...)		IL_DBG(IL_DL_HCMD, f, ## a)
#define D_HC_DUMP(f, a...)	IL_DBG(IL_DL_HCMD_DUMP, f, ## a)
#define D_EEPROM(f, a...)	IL_DBG(IL_DL_EEPROM, f, ## a)
#define D_CALIB(f, a...)	IL_DBG(IL_DL_CALIB, f, ## a)
#define D_FW(f, a...)		IL_DBG(IL_DL_FW, f, ## a)
#define D_RF_KILL(f, a...)	IL_DBG(IL_DL_RF_KILL, f, ## a)
#define D_DROP(f, a...)		IL_DBG(IL_DL_DROP, f, ## a)
#define D_AP(f, a...)		IL_DBG(IL_DL_AP, f, ## a)
#define D_TXPOWER(f, a...)	IL_DBG(IL_DL_TXPOWER, f, ## a)
#define D_RATE(f, a...)		IL_DBG(IL_DL_RATE, f, ## a)
#define D_NOTIF(f, a...)	IL_DBG(IL_DL_NOTIF, f, ## a)
#define D_ASSOC(f, a...)	IL_DBG(IL_DL_ASSOC, f, ## a)
#define D_HT(f, a...)		IL_DBG(IL_DL_HT, f, ## a)
#define D_STATS(f, a...)	IL_DBG(IL_DL_STATS, f, ## a)
#define D_TX_REPLY(f, a...)	IL_DBG(IL_DL_TX_REPLY, f, ## a)
#define D_QOS(f, a...)		IL_DBG(IL_DL_QOS, f, ## a)
#define D_RADIO(f, a...)	IL_DBG(IL_DL_RADIO, f, ## a)
#define D_POWER(f, a...)	IL_DBG(IL_DL_POWER, f, ## a)
#define D_11H(f, a...)		IL_DBG(IL_DL_11H, f, ## a)
3423
3424#endif /* __il_core_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-csr.h b/drivers/net/wireless/iwlegacy/csr.h
index 668a9616c269..9138e15004fa 100644
--- a/drivers/net/wireless/iwlegacy/iwl-csr.h
+++ b/drivers/net/wireless/iwlegacy/csr.h
@@ -60,8 +60,8 @@
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63#ifndef __iwl_legacy_csr_h__ 63#ifndef __il_csr_h__
64#define __iwl_legacy_csr_h__ 64#define __il_csr_h__
65/* 65/*
66 * CSR (control and status registers) 66 * CSR (control and status registers)
67 * 67 *
@@ -70,9 +70,9 @@
70 * low power states due to driver-invoked device resets 70 * low power states due to driver-invoked device resets
71 * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes. 71 * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes.
72 * 72 *
73 * Use iwl_write32() and iwl_read32() family to access these registers; 73 * Use _il_wr() and _il_rd() family to access these registers;
74 * these provide simple PCI bus access, without waking up the MAC. 74 * these provide simple PCI bus access, without waking up the MAC.
75 * Do not use iwl_legacy_write_direct32() family for these registers; 75 * Do not use il_wr() family for these registers;
76 * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ. 76 * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ.
77 * The MAC (uCode processor, etc.) does not need to be powered up for accessing 77 * The MAC (uCode processor, etc.) does not need to be powered up for accessing
78 * the CSR registers. 78 * the CSR registers.
@@ -82,16 +82,16 @@
82 */ 82 */
83#define CSR_BASE (0x000) 83#define CSR_BASE (0x000)
84 84
85#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */ 85#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */
86#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */ 86#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */
87#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */ 87#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */
88#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */ 88#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */
89#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/ 89#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack */
90#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */ 90#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */
91#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/ 91#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc */
92#define CSR_GP_CNTRL (CSR_BASE+0x024) 92#define CSR_GP_CNTRL (CSR_BASE+0x024)
93 93
94/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */ 94/* 2nd byte of CSR_INT_COALESCING, not accessible via _il_wr()! */
95#define CSR_INT_PERIODIC_REG (CSR_BASE+0x005) 95#define CSR_INT_PERIODIC_REG (CSR_BASE+0x005)
96 96
97/* 97/*
@@ -166,26 +166,26 @@
166 166
167#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000) 167#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
168#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000) 168#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
169#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */ 169#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
170#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */ 170#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
171#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */ 171#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
172 172
173#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/ 173#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int */
174#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/ 174#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec */
175 175
176/* interrupt flags in INTA, set by uCode or hardware (e.g. dma), 176/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
177 * acknowledged (reset) by host writing "1" to flagged bits. */ 177 * acknowledged (reset) by host writing "1" to flagged bits. */
178#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */ 178#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
179#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */ 179#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */
180#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */ 180#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */
181#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */ 181#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
182#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */ 182#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
183#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */ 183#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
184#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */ 184#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
185#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */ 185#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
186#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */ 186#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */
187#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */ 187#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
188#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */ 188#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
189 189
190#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \ 190#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \
191 CSR_INT_BIT_HW_ERR | \ 191 CSR_INT_BIT_HW_ERR | \
@@ -197,21 +197,20 @@
197 CSR_INT_BIT_ALIVE) 197 CSR_INT_BIT_ALIVE)
198 198
199/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */ 199/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
200#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */ 200#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */
201#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */ 201#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */
202#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */ 202#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */
203#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */ 203#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */
204#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */ 204#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */
205#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */ 205#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */
206#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */ 206#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */
207#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */ 207#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */
208 208
209#define CSR39_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \ 209#define CSR39_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
210 CSR39_FH_INT_BIT_RX_CHNL2 | \ 210 CSR39_FH_INT_BIT_RX_CHNL2 | \
211 CSR_FH_INT_BIT_RX_CHNL1 | \ 211 CSR_FH_INT_BIT_RX_CHNL1 | \
212 CSR_FH_INT_BIT_RX_CHNL0) 212 CSR_FH_INT_BIT_RX_CHNL0)
213 213
214
215#define CSR39_FH_INT_TX_MASK (CSR39_FH_INT_BIT_TX_CHNL6 | \ 214#define CSR39_FH_INT_TX_MASK (CSR39_FH_INT_BIT_TX_CHNL6 | \
216 CSR_FH_INT_BIT_TX_CHNL1 | \ 215 CSR_FH_INT_BIT_TX_CHNL1 | \
217 CSR_FH_INT_BIT_TX_CHNL0) 216 CSR_FH_INT_BIT_TX_CHNL0)
@@ -285,7 +284,6 @@
285#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000) 284#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000)
286#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000) 285#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
287 286
288
289/* EEPROM REG */ 287/* EEPROM REG */
290#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) 288#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
291#define CSR_EEPROM_REG_BIT_CMD (0x00000002) 289#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
@@ -293,19 +291,18 @@
293#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000) 291#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000)
294 292
295/* EEPROM GP */ 293/* EEPROM GP */
296#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */ 294#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */
297#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180) 295#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)
298#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002) 296#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002)
299#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004) 297#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004)
300 298
301/* GP REG */ 299/* GP REG */
302#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */ 300#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */
303#define CSR_GP_REG_NO_POWER_SAVE (0x00000000) 301#define CSR_GP_REG_NO_POWER_SAVE (0x00000000)
304#define CSR_GP_REG_MAC_POWER_SAVE (0x01000000) 302#define CSR_GP_REG_MAC_POWER_SAVE (0x01000000)
305#define CSR_GP_REG_PHY_POWER_SAVE (0x02000000) 303#define CSR_GP_REG_PHY_POWER_SAVE (0x02000000)
306#define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000) 304#define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000)
307 305
308
309/* CSR GIO */ 306/* CSR GIO */
310#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002) 307#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002)
311 308
@@ -357,7 +354,7 @@
357/* HPET MEM debug */ 354/* HPET MEM debug */
358#define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000) 355#define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000)
359 356
360/* DRAM INT TABLE */ 357/* DRAM INT TBL */
361#define CSR_DRAM_INT_TBL_ENABLE (1 << 31) 358#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
362#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27) 359#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
363 360
@@ -368,13 +365,13 @@
368 * to indirectly access device's internal memory or registers that 365 * to indirectly access device's internal memory or registers that
369 * may be powered-down. 366 * may be powered-down.
370 * 367 *
371 * Use iwl_legacy_write_direct32()/iwl_legacy_read_direct32() family 368 * Use il_wr()/il_rd() family
372 * for these registers; 369 * for these registers;
373 * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ 370 * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
374 * to make sure the MAC (uCode processor, etc.) is powered up for accessing 371 * to make sure the MAC (uCode processor, etc.) is powered up for accessing
375 * internal resources. 372 * internal resources.
376 * 373 *
377 * Do not use iwl_write32()/iwl_read32() family to access these registers; 374 * Do not use _il_wr()/_il_rd() family to access these registers;
378 * these provide only simple PCI bus access, without waking up the MAC. 375 * these provide only simple PCI bus access, without waking up the MAC.
379 */ 376 */
380#define HBUS_BASE (0x400) 377#define HBUS_BASE (0x400)
@@ -411,12 +408,12 @@
411#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050) 408#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050)
412 409
413/* 410/*
414 * Per-Tx-queue write pointer (index, really!) 411 * Per-Tx-queue write pointer (idx, really!)
415 * Indicates index to next TFD that driver will fill (1 past latest filled). 412 * Indicates idx to next TFD that driver will fill (1 past latest filled).
416 * Bit usage: 413 * Bit usage:
417 * 0-7: queue write index 414 * 0-7: queue write idx
418 * 11-8: queue selector 415 * 11-8: queue selector
419 */ 416 */
420#define HBUS_TARG_WRPTR (HBUS_BASE+0x060) 417#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
421 418
422#endif /* !__iwl_legacy_csr_h__ */ 419#endif /* !__il_csr_h__ */
diff --git a/drivers/net/wireless/iwlegacy/debug.c b/drivers/net/wireless/iwlegacy/debug.c
new file mode 100644
index 000000000000..928bdbb00085
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/debug.c
@@ -0,0 +1,1410 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#include <linux/ieee80211.h>
29#include <net/mac80211.h>
30
31#include "common.h"
32
/*
 * Helpers for creating debugfs entries.  All of them bail out to a local
 * "err" label on failure, so they may only be used inside a function that
 * declares such a label (see the mk_debugfs routine).
 */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, il,		\
				 &il_dbgfs_##name##_ops))		\
		goto err;						\
} while (0)

#define DEBUGFS_ADD_BOOL(name, parent, ptr) do {			\
	struct dentry *__tmp;						\
	__tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR,		\
				    parent, ptr);			\
	if (IS_ERR(__tmp) || !__tmp)					\
		goto err;						\
} while (0)

#define DEBUGFS_ADD_X32(name, parent, ptr) do {				\
	struct dentry *__tmp;						\
	__tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR,		\
				   parent, ptr);			\
	if (IS_ERR(__tmp) || !__tmp)					\
		goto err;						\
} while (0)

/* Forward declarations for the read/write handlers generated below. */
#define DEBUGFS_READ_FUNC(name)						\
static ssize_t il_dbgfs_##name##_read(struct file *file,		\
				      char __user *user_buf,		\
				      size_t count, loff_t *ppos);

#define DEBUGFS_WRITE_FUNC(name)					\
static ssize_t il_dbgfs_##name##_write(struct file *file,		\
				       const char __user *user_buf,	\
				       size_t count, loff_t *ppos);
66
/*
 * Generic open: stash the il_priv pointer (stored in inode->i_private at
 * file-creation time) so the read/write handlers can retrieve it via
 * file->private_data.
 */
static int
il_dbgfs_open_file_generic(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
73
/* Generate read-only, write-only and read/write file_operations structs
 * for a given debugfs entry name, wired to the il_dbgfs_<name>_read/_write
 * handlers declared by the macros above. */
#define DEBUGFS_READ_FILE_OPS(name)					\
	DEBUGFS_READ_FUNC(name);					\
static const struct file_operations il_dbgfs_##name##_ops = {		\
	.read = il_dbgfs_##name##_read,					\
	.open = il_dbgfs_open_file_generic,				\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations il_dbgfs_##name##_ops = {		\
	.write = il_dbgfs_##name##_write,				\
	.open = il_dbgfs_open_file_generic,				\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
	DEBUGFS_READ_FUNC(name);					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations il_dbgfs_##name##_ops = {		\
	.write = il_dbgfs_##name##_write,				\
	.read = il_dbgfs_##name##_read,					\
	.open = il_dbgfs_open_file_generic,				\
	.llseek = generic_file_llseek,					\
};
99
100static ssize_t
101il_dbgfs_tx_stats_read(struct file *file, char __user *user_buf, size_t count,
102 loff_t *ppos)
103{
104
105 struct il_priv *il = file->private_data;
106 char *buf;
107 int pos = 0;
108
109 int cnt;
110 ssize_t ret;
111 const size_t bufsz =
112 100 + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
113 buf = kzalloc(bufsz, GFP_KERNEL);
114 if (!buf)
115 return -ENOMEM;
116 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
117 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
118 pos +=
119 scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
120 il_get_mgmt_string(cnt), il->tx_stats.mgmt[cnt]);
121 }
122 pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
123 for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
124 pos +=
125 scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
126 il_get_ctrl_string(cnt), il->tx_stats.ctrl[cnt]);
127 }
128 pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
129 pos +=
130 scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
131 il->tx_stats.data_cnt);
132 pos +=
133 scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
134 il->tx_stats.data_bytes);
135 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
136 kfree(buf);
137 return ret;
138}
139
140static ssize_t
141il_dbgfs_clear_traffic_stats_write(struct file *file,
142 const char __user *user_buf, size_t count,
143 loff_t *ppos)
144{
145 struct il_priv *il = file->private_data;
146 u32 clear_flag;
147 char buf[8];
148 int buf_size;
149
150 memset(buf, 0, sizeof(buf));
151 buf_size = min(count, sizeof(buf) - 1);
152 if (copy_from_user(buf, user_buf, buf_size))
153 return -EFAULT;
154 if (sscanf(buf, "%x", &clear_flag) != 1)
155 return -EFAULT;
156 il_clear_traffic_stats(il);
157
158 return count;
159}
160
161static ssize_t
162il_dbgfs_rx_stats_read(struct file *file, char __user *user_buf, size_t count,
163 loff_t *ppos)
164{
165
166 struct il_priv *il = file->private_data;
167 char *buf;
168 int pos = 0;
169 int cnt;
170 ssize_t ret;
171 const size_t bufsz =
172 100 + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
173 buf = kzalloc(bufsz, GFP_KERNEL);
174 if (!buf)
175 return -ENOMEM;
176
177 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
178 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
179 pos +=
180 scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
181 il_get_mgmt_string(cnt), il->rx_stats.mgmt[cnt]);
182 }
183 pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
184 for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
185 pos +=
186 scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
187 il_get_ctrl_string(cnt), il->rx_stats.ctrl[cnt]);
188 }
189 pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
190 pos +=
191 scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
192 il->rx_stats.data_cnt);
193 pos +=
194 scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
195 il->rx_stats.data_bytes);
196
197 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
198 kfree(buf);
199 return ret;
200}
201
/*
 * Masks used by il_dbgfs_sram_read() to keep only the valid low-order
 * bytes of the final, partially-filled word of an SRAM dump.
 *
 * Note: the previous definitions carried a stray trailing ';', which only
 * compiled by accident (the use site became an empty extra statement).
 */
#define BYTE1_MASK 0x000000ff
#define BYTE2_MASK 0x0000ffff
#define BYTE3_MASK 0x00ffffff
/*
 * Dump a region of device SRAM as hex words.  The region is selected via
 * il_dbgfs_sram_write() (offset,len pair); if none has been set, default
 * to the whole data segment of the currently loaded uCode image.
 */
static ssize_t
il_dbgfs_sram_read(struct file *file, char __user *user_buf, size_t count,
		   loff_t *ppos)
{
	u32 val;
	char *buf;
	ssize_t ret;
	int i;
	int pos = 0;
	struct il_priv *il = file->private_data;
	size_t bufsz;

	/* default is to dump the entire data segment */
	if (!il->dbgfs_sram_offset && !il->dbgfs_sram_len) {
		il->dbgfs_sram_offset = 0x800000;
		if (il->ucode_type == UCODE_INIT)
			il->dbgfs_sram_len = il->ucode_init_data.len;
		else
			il->dbgfs_sram_len = il->ucode_data.len;
	}
	/* ~10 output chars per dumped word, plus room for the header */
	bufsz = 30 + il->dbgfs_sram_len * sizeof(char) * 10;
	buf = kmalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
			 il->dbgfs_sram_len);
	pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
			 il->dbgfs_sram_offset);
	/* i counts the bytes still to dump; each read covers 4 bytes */
	for (i = il->dbgfs_sram_len; i > 0; i -= 4) {
		val = il_read_targ_mem(il, il->dbgfs_sram_offset +
				       il->dbgfs_sram_len - i);
		if (i < 4) {
			/* final partial word: mask off bytes beyond the
			 * requested length */
			switch (i) {
			case 1:
				val &= BYTE1_MASK;
				break;
			case 2:
				val &= BYTE2_MASK;
				break;
			case 3:
				val &= BYTE3_MASK;
				break;
			}
		}
		/* line break every 16 bytes (4 words) */
		if (!(i % 16))
			pos += scnprintf(buf + pos, bufsz - pos, "\n");
		pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
263
264static ssize_t
265il_dbgfs_sram_write(struct file *file, const char __user *user_buf,
266 size_t count, loff_t *ppos)
267{
268 struct il_priv *il = file->private_data;
269 char buf[64];
270 int buf_size;
271 u32 offset, len;
272
273 memset(buf, 0, sizeof(buf));
274 buf_size = min(count, sizeof(buf) - 1);
275 if (copy_from_user(buf, user_buf, buf_size))
276 return -EFAULT;
277
278 if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
279 il->dbgfs_sram_offset = offset;
280 il->dbgfs_sram_len = len;
281 } else {
282 il->dbgfs_sram_offset = 0;
283 il->dbgfs_sram_len = 0;
284 }
285
286 return count;
287}
288
/*
 * List every station entry currently in use, including per-TID Tx
 * aggregation state (sequence numbers, queue ids, BA bitmap, etc.).
 */
static ssize_t
il_dbgfs_stations_read(struct file *file, char __user *user_buf, size_t count,
		       loff_t *ppos)
{
	struct il_priv *il = file->private_data;
	struct il_station_entry *station;
	int max_sta = il->hw_params.max_stations;
	char *buf;
	int i, j, pos = 0;
	ssize_t ret;
	/* Add 30 for initial string */
	const size_t bufsz = 30 + sizeof(char) * 500 * (il->num_stations);

	buf = kmalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
			 il->num_stations);

	for (i = 0; i < max_sta; i++) {
		station = &il->stations[i];
		/* skip unoccupied station-table slots */
		if (!station->used)
			continue;
		pos += scnprintf(buf + pos, bufsz - pos,
				 "station %d - addr: %pM, flags: %#x\n", i,
				 station->sta.sta.addr,
				 station->sta.station_flags_msk);
		pos += scnprintf(buf + pos, bufsz - pos,
				 "TID\tseq_num\ttxq_id\tframes\ttfds\t");
		pos += scnprintf(buf + pos, bufsz - pos,
				 "start_idx\tbitmap\t\t\trate_n_flags\n");

		/* one row of aggregation state per TID */
		for (j = 0; j < MAX_TID_COUNT; j++) {
			pos += scnprintf(buf + pos, bufsz - pos,
					 "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
					 j, station->tid[j].seq_number,
					 station->tid[j].agg.txq_id,
					 station->tid[j].agg.frame_count,
					 station->tid[j].tfds_in_queue,
					 station->tid[j].agg.start_idx,
					 station->tid[j].agg.bitmap,
					 station->tid[j].agg.rate_n_flags);

			if (station->tid[j].agg.wait_for_ba)
				pos += scnprintf(buf + pos, bufsz - pos,
						 " - waitforba");
			pos += scnprintf(buf + pos, bufsz - pos, "\n");
		}

		pos += scnprintf(buf + pos, bufsz - pos, "\n");
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
352
/*
 * Hex-dump the cached EEPROM/NVM image, 16 bytes per line, preceded by
 * the EEPROM version word.
 */
static ssize_t
il_dbgfs_nvm_read(struct file *file, char __user *user_buf, size_t count,
		  loff_t *ppos)
{
	ssize_t ret;
	struct il_priv *il = file->private_data;
	int pos = 0, ofs = 0, buf_size = 0;
	const u8 *ptr;
	char *buf;
	u16 eeprom_ver;
	size_t eeprom_len = il->cfg->base_params->eeprom_size;
	buf_size = 4 * eeprom_len + 256;

	/* the dump loop below assumes whole 16-byte lines */
	if (eeprom_len % 16) {
		IL_ERR("NVM size is not multiple of 16.\n");
		return -ENODATA;
	}

	ptr = il->eeprom;
	if (!ptr) {
		IL_ERR("Invalid EEPROM memory\n");
		return -ENOMEM;
	}

	/* 4 characters for byte 0xYY */
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf) {
		IL_ERR("Can not allocate Buffer\n");
		return -ENOMEM;
	}
	eeprom_ver = il_eeprom_query16(il, EEPROM_VERSION);
	pos += scnprintf(buf + pos, buf_size - pos, "EEPROM " "version: 0x%x\n",
			 eeprom_ver);
	for (ofs = 0; ofs < eeprom_len; ofs += 16) {
		pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
		hex_dump_to_buffer(ptr + ofs, 16, 16, 2, buf + pos,
				   buf_size - pos, 0);
		/* hex_dump_to_buffer() does not advance pos itself */
		pos += strlen(buf + pos);
		if (buf_size - pos > 0)
			buf[pos++] = '\n';
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
400
401static ssize_t
402il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count,
403 loff_t *ppos)
404{
405 struct il_priv *il = file->private_data;
406 struct ieee80211_channel *channels = NULL;
407 const struct ieee80211_supported_band *supp_band = NULL;
408 int pos = 0, i, bufsz = PAGE_SIZE;
409 char *buf;
410 ssize_t ret;
411
412 if (!test_bit(S_GEO_CONFIGURED, &il->status))
413 return -EAGAIN;
414
415 buf = kzalloc(bufsz, GFP_KERNEL);
416 if (!buf) {
417 IL_ERR("Can not allocate Buffer\n");
418 return -ENOMEM;
419 }
420
421 supp_band = il_get_hw_mode(il, IEEE80211_BAND_2GHZ);
422 if (supp_band) {
423 channels = supp_band->channels;
424
425 pos +=
426 scnprintf(buf + pos, bufsz - pos,
427 "Displaying %d channels in 2.4GHz band 802.11bg):\n",
428 supp_band->n_channels);
429
430 for (i = 0; i < supp_band->n_channels; i++)
431 pos +=
432 scnprintf(buf + pos, bufsz - pos,
433 "%d: %ddBm: BSS%s%s, %s.\n",
434 channels[i].hw_value,
435 channels[i].max_power,
436 channels[i].
437 flags & IEEE80211_CHAN_RADAR ?
438 " (IEEE 802.11h required)" : "",
439 ((channels[i].
440 flags & IEEE80211_CHAN_NO_IBSS) ||
441 (channels[i].
442 flags & IEEE80211_CHAN_RADAR)) ? "" :
443 ", IBSS",
444 channels[i].
445 flags & IEEE80211_CHAN_PASSIVE_SCAN ?
446 "passive only" : "active/passive");
447 }
448 supp_band = il_get_hw_mode(il, IEEE80211_BAND_5GHZ);
449 if (supp_band) {
450 channels = supp_band->channels;
451
452 pos +=
453 scnprintf(buf + pos, bufsz - pos,
454 "Displaying %d channels in 5.2GHz band (802.11a)\n",
455 supp_band->n_channels);
456
457 for (i = 0; i < supp_band->n_channels; i++)
458 pos +=
459 scnprintf(buf + pos, bufsz - pos,
460 "%d: %ddBm: BSS%s%s, %s.\n",
461 channels[i].hw_value,
462 channels[i].max_power,
463 channels[i].
464 flags & IEEE80211_CHAN_RADAR ?
465 " (IEEE 802.11h required)" : "",
466 ((channels[i].
467 flags & IEEE80211_CHAN_NO_IBSS) ||
468 (channels[i].
469 flags & IEEE80211_CHAN_RADAR)) ? "" :
470 ", IBSS",
471 channels[i].
472 flags & IEEE80211_CHAN_PASSIVE_SCAN ?
473 "passive only" : "active/passive");
474 }
475 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
476 kfree(buf);
477 return ret;
478}
479
/*
 * Dump the driver status bitmap, one S_* flag per line.
 */
static ssize_t
il_dbgfs_status_read(struct file *file, char __user *user_buf, size_t count,
		     loff_t *ppos)
{

	struct il_priv *il = file->private_data;
	char buf[512];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "S_HCMD_ACTIVE:\t %d\n",
			 test_bit(S_HCMD_ACTIVE, &il->status));
	pos += scnprintf(buf + pos, bufsz - pos, "S_INT_ENABLED:\t %d\n",
			 test_bit(S_INT_ENABLED, &il->status));
	pos += scnprintf(buf + pos, bufsz - pos, "S_RF_KILL_HW:\t %d\n",
			 test_bit(S_RF_KILL_HW, &il->status));
	pos += scnprintf(buf + pos, bufsz - pos, "S_CT_KILL:\t\t %d\n",
			 test_bit(S_CT_KILL, &il->status));
	pos += scnprintf(buf + pos, bufsz - pos, "S_INIT:\t\t %d\n",
			 test_bit(S_INIT, &il->status));
	pos += scnprintf(buf + pos, bufsz - pos, "S_ALIVE:\t\t %d\n",
			 test_bit(S_ALIVE, &il->status));
	pos += scnprintf(buf + pos, bufsz - pos, "S_READY:\t\t %d\n",
			 test_bit(S_READY, &il->status));
	pos += scnprintf(buf + pos, bufsz - pos, "S_TEMPERATURE:\t %d\n",
			 test_bit(S_TEMPERATURE, &il->status));
	pos += scnprintf(buf + pos, bufsz - pos, "S_GEO_CONFIGURED:\t %d\n",
			 test_bit(S_GEO_CONFIGURED, &il->status));
	pos += scnprintf(buf + pos, bufsz - pos, "S_EXIT_PENDING:\t %d\n",
			 test_bit(S_EXIT_PENDING, &il->status));
	pos += scnprintf(buf + pos, bufsz - pos, "S_STATS:\t %d\n",
			 test_bit(S_STATS, &il->status));
	pos += scnprintf(buf + pos, bufsz - pos, "S_SCANNING:\t %d\n",
			 test_bit(S_SCANNING, &il->status));
	pos += scnprintf(buf + pos, bufsz - pos, "S_SCAN_ABORTING:\t %d\n",
			 test_bit(S_SCAN_ABORTING, &il->status));
	pos += scnprintf(buf + pos, bufsz - pos, "S_SCAN_HW:\t\t %d\n",
			 test_bit(S_SCAN_HW, &il->status));
	pos += scnprintf(buf + pos, bufsz - pos, "S_POWER_PMI:\t %d\n",
			 test_bit(S_POWER_PMI, &il->status));
	pos += scnprintf(buf + pos, bufsz - pos, "S_FW_ERROR:\t %d\n",
			 test_bit(S_FW_ERROR, &il->status));
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
540
/*
 * Report accumulated interrupt statistics: HW/SW error counts, the last
 * firmware restart code, the various interrupt causes, and per-command
 * Rx handler hit counts.
 */
static ssize_t
il_dbgfs_interrupt_read(struct file *file, char __user *user_buf, size_t count,
			loff_t *ppos)
{

	struct il_priv *il = file->private_data;
	int pos = 0;
	int cnt = 0;
	char *buf;
	int bufsz = 24 * 64;	/* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IL_ERR("Can not allocate Buffer\n");
		return -ENOMEM;
	}

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
			 il->isr_stats.hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
			 il->isr_stats.sw);
	if (il->isr_stats.sw || il->isr_stats.hw) {
		/* err_code is only meaningful after at least one error */
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\tLast Restarting Code: 0x%X\n",
				 il->isr_stats.err_code);
	}
#ifdef CONFIG_IWLEGACY_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
			 il->isr_stats.sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
			 il->isr_stats.alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
			 "HW RF KILL switch toggled:\t %u\n",
			 il->isr_stats.rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
			 il->isr_stats.ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
			 il->isr_stats.wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Rx command responses:\t\t %u\n",
			 il->isr_stats.rx);
	/* per-command handler counts; only non-zero entries are shown */
	for (cnt = 0; cnt < IL_CN_MAX; cnt++) {
		if (il->isr_stats.handlers[cnt] > 0)
			pos += scnprintf(buf + pos, bufsz - pos,
					 "\tRx handler[%36s]:\t\t %u\n",
					 il_get_cmd_string(cnt),
					 il->isr_stats.handlers[cnt]);
	}

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
			 il->isr_stats.tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
			 il->isr_stats.unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
619
620static ssize_t
621il_dbgfs_interrupt_write(struct file *file, const char __user *user_buf,
622 size_t count, loff_t *ppos)
623{
624 struct il_priv *il = file->private_data;
625 char buf[8];
626 int buf_size;
627 u32 reset_flag;
628
629 memset(buf, 0, sizeof(buf));
630 buf_size = min(count, sizeof(buf) - 1);
631 if (copy_from_user(buf, user_buf, buf_size))
632 return -EFAULT;
633 if (sscanf(buf, "%x", &reset_flag) != 1)
634 return -EFAULT;
635 if (reset_flag == 0)
636 il_clear_isr_stats(il);
637
638 return count;
639}
640
641static ssize_t
642il_dbgfs_qos_read(struct file *file, char __user *user_buf, size_t count,
643 loff_t *ppos)
644{
645 struct il_priv *il = file->private_data;
646 struct il_rxon_context *ctx = &il->ctx;
647 int pos = 0, i;
648 char buf[256];
649 const size_t bufsz = sizeof(buf);
650
651 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n", ctx->ctxid);
652 for (i = 0; i < AC_NUM; i++) {
653 pos +=
654 scnprintf(buf + pos, bufsz - pos,
655 "\tcw_min\tcw_max\taifsn\ttxop\n");
656 pos +=
657 scnprintf(buf + pos, bufsz - pos,
658 "AC[%d]\t%u\t%u\t%u\t%u\n", i,
659 ctx->qos_data.def_qos_parm.ac[i].cw_min,
660 ctx->qos_data.def_qos_parm.ac[i].cw_max,
661 ctx->qos_data.def_qos_parm.ac[i].aifsn,
662 ctx->qos_data.def_qos_parm.ac[i].edca_txop);
663 }
664
665 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
666}
667
668static ssize_t
669il_dbgfs_disable_ht40_write(struct file *file, const char __user *user_buf,
670 size_t count, loff_t *ppos)
671{
672 struct il_priv *il = file->private_data;
673 char buf[8];
674 int buf_size;
675 int ht40;
676
677 memset(buf, 0, sizeof(buf));
678 buf_size = min(count, sizeof(buf) - 1);
679 if (copy_from_user(buf, user_buf, buf_size))
680 return -EFAULT;
681 if (sscanf(buf, "%d", &ht40) != 1)
682 return -EFAULT;
683 if (!il_is_any_associated(il))
684 il->disable_ht40 = ht40 ? true : false;
685 else {
686 IL_ERR("Sta associated with AP - "
687 "Change to 40MHz channel support is not allowed\n");
688 return -EINVAL;
689 }
690
691 return count;
692}
693
694static ssize_t
695il_dbgfs_disable_ht40_read(struct file *file, char __user *user_buf,
696 size_t count, loff_t *ppos)
697{
698 struct il_priv *il = file->private_data;
699 char buf[100];
700 int pos = 0;
701 const size_t bufsz = sizeof(buf);
702
703 pos +=
704 scnprintf(buf + pos, bufsz - pos, "11n 40MHz Mode: %s\n",
705 il->disable_ht40 ? "Disabled" : "Enabled");
706 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
707}
708
/* Instantiate the file_operations for each debugfs entry defined above. */
DEBUGFS_READ_WRITE_FILE_OPS(sram);
DEBUGFS_READ_FILE_OPS(nvm);
DEBUGFS_READ_FILE_OPS(stations);
DEBUGFS_READ_FILE_OPS(channels);
DEBUGFS_READ_FILE_OPS(status);
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(qos);
DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
717
/*
 * Dump Tx/Rx queue pointers and, when traffic logging is enabled
 * (IL_DL_TX / IL_DL_RX debug bits set and log buffers allocated), the
 * raw captured frame data as a hex dump.
 */
static ssize_t
il_dbgfs_traffic_log_read(struct file *file, char __user *user_buf,
			  size_t count, loff_t *ppos)
{
	struct il_priv *il = file->private_data;
	int pos = 0, ofs = 0;
	int cnt = 0, entry;
	struct il_tx_queue *txq;
	struct il_queue *q;
	struct il_rx_queue *rxq = &il->rxq;
	char *buf;
	int bufsz =
	    ((IL_TRAFFIC_ENTRIES * IL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
	    (il->cfg->base_params->num_of_queues * 32 * 8) + 400;
	const u8 *ptr;
	ssize_t ret;

	/* Tx queues may not be allocated yet (device not started) */
	if (!il->txq) {
		IL_ERR("txq not ready\n");
		return -EAGAIN;
	}
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IL_ERR("Can not allocate buffer\n");
		return -ENOMEM;
	}
	pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
	for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
		txq = &il->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				 "q[%d]: read_ptr: %u, write_ptr: %u\n", cnt,
				 q->read_ptr, q->write_ptr);
	}
	/* hex-dump the Tx traffic log, 16 bytes per line */
	if (il->tx_traffic && (il_debug_level & IL_DL_TX)) {
		ptr = il->tx_traffic;
		pos += scnprintf(buf + pos, bufsz - pos,
				 "Tx Traffic idx: %u\n",
				 il->tx_traffic_idx);
		for (cnt = 0, ofs = 0; cnt < IL_TRAFFIC_ENTRIES; cnt++) {
			for (entry = 0; entry < IL_TRAFFIC_ENTRY_SIZE / 16;
			     entry++, ofs += 16) {
				pos += scnprintf(buf + pos, bufsz - pos,
						 "0x%.4x ", ofs);
				hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
						   buf + pos, bufsz - pos, 0);
				/* hex_dump_to_buffer() doesn't advance pos */
				pos += strlen(buf + pos);
				if (bufsz - pos > 0)
					buf[pos++] = '\n';
			}
		}
	}

	pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
	pos += scnprintf(buf + pos, bufsz - pos, "read: %u, write: %u\n",
			 rxq->read, rxq->write);

	/* same dump for the Rx traffic log */
	if (il->rx_traffic && (il_debug_level & IL_DL_RX)) {
		ptr = il->rx_traffic;
		pos += scnprintf(buf + pos, bufsz - pos,
				 "Rx Traffic idx: %u\n",
				 il->rx_traffic_idx);
		for (cnt = 0, ofs = 0; cnt < IL_TRAFFIC_ENTRIES; cnt++) {
			for (entry = 0; entry < IL_TRAFFIC_ENTRY_SIZE / 16;
			     entry++, ofs += 16) {
				pos += scnprintf(buf + pos, bufsz - pos,
						 "0x%.4x ", ofs);
				hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
						   buf + pos, bufsz - pos, 0);
				pos += strlen(buf + pos);
				if (bufsz - pos > 0)
					buf[pos++] = '\n';
			}
		}
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
802
803static ssize_t
804il_dbgfs_traffic_log_write(struct file *file, const char __user *user_buf,
805 size_t count, loff_t *ppos)
806{
807 struct il_priv *il = file->private_data;
808 char buf[8];
809 int buf_size;
810 int traffic_log;
811
812 memset(buf, 0, sizeof(buf));
813 buf_size = min(count, sizeof(buf) - 1);
814 if (copy_from_user(buf, user_buf, buf_size))
815 return -EFAULT;
816 if (sscanf(buf, "%d", &traffic_log) != 1)
817 return -EFAULT;
818 if (traffic_log == 0)
819 il_reset_traffic_log(il);
820
821 return count;
822}
823
/*
 * Per-Tx-queue state: read/write pointers, stopped flag and the mac80211
 * software queue mapping; the first four queues (the ACs) also report
 * their stop counts.
 */
static ssize_t
il_dbgfs_tx_queue_read(struct file *file, char __user *user_buf, size_t count,
		       loff_t *ppos)
{

	struct il_priv *il = file->private_data;
	struct il_tx_queue *txq;
	struct il_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	const size_t bufsz =
	    sizeof(char) * 64 * il->cfg->base_params->num_of_queues;

	/* Tx queues may not be allocated yet (device not started) */
	if (!il->txq) {
		IL_ERR("txq not ready\n");
		return -EAGAIN;
	}
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
		txq = &il->txq[cnt];
		q = &txq->q;
		/* swq_id encodes ac in bits 0-1 and hw queue in bits 2-6 */
		pos += scnprintf(buf + pos, bufsz - pos,
				 "hwq %.2d: read=%u write=%u stop=%d"
				 " swq_id=%#.2x (ac %d/hwq %d)\n", cnt,
				 q->read_ptr, q->write_ptr,
				 !!test_bit(cnt, il->queue_stopped),
				 txq->swq_id, txq->swq_id & 3,
				 (txq->swq_id >> 2) & 0x1f);
		if (cnt >= 4)
			continue;
		/* for the ACs, display the stop count too */
		pos += scnprintf(buf + pos, bufsz - pos,
				 " stop-count: %d\n",
				 atomic_read(&il->queue_stop_count[cnt]));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
870
871static ssize_t
872il_dbgfs_rx_queue_read(struct file *file, char __user *user_buf, size_t count,
873 loff_t *ppos)
874{
875
876 struct il_priv *il = file->private_data;
877 struct il_rx_queue *rxq = &il->rxq;
878 char buf[256];
879 int pos = 0;
880 const size_t bufsz = sizeof(buf);
881
882 pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", rxq->read);
883 pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", rxq->write);
884 pos +=
885 scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
886 rxq->free_count);
887 if (rxq->rb_stts) {
888 pos +=
889 scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
890 le16_to_cpu(rxq->rb_stts->
891 closed_rb_num) & 0x0FFF);
892 } else {
893 pos +=
894 scnprintf(buf + pos, bufsz - pos,
895 "closed_rb_num: Not Allocated\n");
896 }
897 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
898}
899
/* Delegate to the device-specific (3945/4965) uCode Rx stats dump. */
static ssize_t
il_dbgfs_ucode_rx_stats_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct il_priv *il = file->private_data;
	return il->cfg->ops->lib->debugfs_ops.rx_stats_read(file, user_buf,
							    count, ppos);
}
908
/* Delegate to the device-specific (3945/4965) uCode Tx stats dump. */
static ssize_t
il_dbgfs_ucode_tx_stats_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct il_priv *il = file->private_data;
	return il->cfg->ops->lib->debugfs_ops.tx_stats_read(file, user_buf,
							    count, ppos);
}
917
/* Delegate to the device-specific (3945/4965) uCode general stats dump. */
static ssize_t
il_dbgfs_ucode_general_stats_read(struct file *file, char __user *user_buf,
				  size_t count, loff_t *ppos)
{
	struct il_priv *il = file->private_data;
	return il->cfg->ops->lib->debugfs_ops.general_stats_read(file, user_buf,
								 count, ppos);
}
926
/*
 * Dump the runtime Rx sensitivity calibration state (auto-correlation
 * thresholds, false-alarm counters and energy/silence measurements).
 */
static ssize_t
il_dbgfs_sensitivity_read(struct file *file, char __user *user_buf,
			  size_t count, loff_t *ppos)
{

	struct il_priv *il = file->private_data;
	int pos = 0;
	int cnt = 0;
	char *buf;
	int bufsz = sizeof(struct il_sensitivity_data) * 4 + 100;
	ssize_t ret;
	struct il_sensitivity_data *data;

	data = &il->sensitivity_data;
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IL_ERR("Can not allocate Buffer\n");
		return -ENOMEM;
	}

	pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
			 data->auto_corr_ofdm);
	pos += scnprintf(buf + pos, bufsz - pos,
			 "auto_corr_ofdm_mrc:\t\t %u\n",
			 data->auto_corr_ofdm_mrc);
	pos += scnprintf(buf + pos, bufsz - pos,
			 "auto_corr_ofdm_x1:\t\t %u\n",
			 data->auto_corr_ofdm_x1);
	pos += scnprintf(buf + pos, bufsz - pos,
			 "auto_corr_ofdm_mrc_x1:\t\t %u\n",
			 data->auto_corr_ofdm_mrc_x1);
	pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
			 data->auto_corr_cck);
	pos += scnprintf(buf + pos, bufsz - pos,
			 "auto_corr_cck_mrc:\t\t %u\n",
			 data->auto_corr_cck_mrc);
	pos += scnprintf(buf + pos, bufsz - pos,
			 "last_bad_plcp_cnt_ofdm:\t\t %u\n",
			 data->last_bad_plcp_cnt_ofdm);
	pos += scnprintf(buf + pos, bufsz - pos,
			 "last_fa_cnt_ofdm:\t\t %u\n",
			 data->last_fa_cnt_ofdm);
	pos += scnprintf(buf + pos, bufsz - pos,
			 "last_bad_plcp_cnt_cck:\t\t %u\n",
			 data->last_bad_plcp_cnt_cck);
	pos += scnprintf(buf + pos, bufsz - pos,
			 "last_fa_cnt_cck:\t\t %u\n",
			 data->last_fa_cnt_cck);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
			 data->nrg_curr_state);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
			 data->nrg_prev_state);
	/* recent energy samples */
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
	for (cnt = 0; cnt < 10; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos, " %u",
				 data->nrg_value[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");
	/* recent silence-period RSSI samples */
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
	for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos, " %u",
				 data->nrg_silence_rssi[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
			 data->nrg_silence_ref);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
			 data->nrg_energy_idx);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
			 data->nrg_silence_idx);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
			 data->nrg_th_cck);
	pos += scnprintf(buf + pos, bufsz - pos,
			 "nrg_auto_corr_silence_diff:\t %u\n",
			 data->nrg_auto_corr_silence_diff);
	pos += scnprintf(buf + pos, bufsz - pos,
			 "num_in_cck_no_fa:\t\t %u\n",
			 data->num_in_cck_no_fa);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
			 data->nrg_th_ofdm);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
1025
/*
 * Dump the chain-noise (Rx chain balancing) calibration state: per-chain
 * noise and signal measurements, disconnected-antenna detection and the
 * delta gain codes written to the radio.
 */
static ssize_t
il_dbgfs_chain_noise_read(struct file *file, char __user *user_buf,
			  size_t count, loff_t *ppos)
{

	struct il_priv *il = file->private_data;
	int pos = 0;
	int cnt = 0;
	char *buf;
	int bufsz = sizeof(struct il_chain_noise_data) * 4 + 100;
	ssize_t ret;
	struct il_chain_noise_data *data;

	data = &il->chain_noise_data;
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IL_ERR("Can not allocate Buffer\n");
		return -ENOMEM;
	}

	pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
			 data->active_chains);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
			 data->chain_noise_a);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
			 data->chain_noise_b);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
			 data->chain_noise_c);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
			 data->chain_signal_a);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
			 data->chain_signal_b);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
			 data->chain_signal_c);
	pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
			 data->beacon_count);

	/* which Rx chains were detected as disconnected */
	pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
	for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos, " %u",
				 data->disconn_array[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");
	/* per-chain gain adjustments sent to the radio */
	pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
	for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos, " %u",
				 data->delta_gain_code[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");
	pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
			 data->radio_write);
	pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
			 data->state);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
1096
1097static ssize_t
1098il_dbgfs_power_save_status_read(struct file *file, char __user *user_buf,
1099 size_t count, loff_t *ppos)
1100{
1101 struct il_priv *il = file->private_data;
1102 char buf[60];
1103 int pos = 0;
1104 const size_t bufsz = sizeof(buf);
1105 u32 pwrsave_status;
1106
1107 pwrsave_status =
1108 _il_rd(il, CSR_GP_CNTRL) & CSR_GP_REG_POWER_SAVE_STATUS_MSK;
1109
1110 pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
1111 pos +=
1112 scnprintf(buf + pos, bufsz - pos, "%s\n",
1113 (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
1114 (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
1115 (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
1116 "error");
1117
1118 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1119}
1120
1121static ssize_t
1122il_dbgfs_clear_ucode_stats_write(struct file *file,
1123 const char __user *user_buf, size_t count,
1124 loff_t *ppos)
1125{
1126 struct il_priv *il = file->private_data;
1127 char buf[8];
1128 int buf_size;
1129 int clear;
1130
1131 memset(buf, 0, sizeof(buf));
1132 buf_size = min(count, sizeof(buf) - 1);
1133 if (copy_from_user(buf, user_buf, buf_size))
1134 return -EFAULT;
1135 if (sscanf(buf, "%d", &clear) != 1)
1136 return -EFAULT;
1137
1138 /* make request to uCode to retrieve stats information */
1139 mutex_lock(&il->mutex);
1140 il_send_stats_request(il, CMD_SYNC, true);
1141 mutex_unlock(&il->mutex);
1142
1143 return count;
1144}
1145
1146static ssize_t
1147il_dbgfs_rxon_flags_read(struct file *file, char __user *user_buf,
1148 size_t count, loff_t *ppos)
1149{
1150
1151 struct il_priv *il = file->private_data;
1152 int len = 0;
1153 char buf[20];
1154
1155 len = sprintf(buf, "0x%04X\n", le32_to_cpu(il->ctx.active.flags));
1156 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1157}
1158
1159static ssize_t
1160il_dbgfs_rxon_filter_flags_read(struct file *file, char __user *user_buf,
1161 size_t count, loff_t *ppos)
1162{
1163
1164 struct il_priv *il = file->private_data;
1165 int len = 0;
1166 char buf[20];
1167
1168 len =
1169 sprintf(buf, "0x%04X\n", le32_to_cpu(il->ctx.active.filter_flags));
1170 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1171}
1172
1173static ssize_t
1174il_dbgfs_fh_reg_read(struct file *file, char __user *user_buf, size_t count,
1175 loff_t *ppos)
1176{
1177 struct il_priv *il = file->private_data;
1178 char *buf;
1179 int pos = 0;
1180 ssize_t ret = -EFAULT;
1181
1182 if (il->cfg->ops->lib->dump_fh) {
1183 ret = pos = il->cfg->ops->lib->dump_fh(il, &buf, true);
1184 if (buf) {
1185 ret =
1186 simple_read_from_buffer(user_buf, count, ppos, buf,
1187 pos);
1188 kfree(buf);
1189 }
1190 }
1191
1192 return ret;
1193}
1194
1195static ssize_t
1196il_dbgfs_missed_beacon_read(struct file *file, char __user *user_buf,
1197 size_t count, loff_t *ppos)
1198{
1199
1200 struct il_priv *il = file->private_data;
1201 int pos = 0;
1202 char buf[12];
1203 const size_t bufsz = sizeof(buf);
1204
1205 pos +=
1206 scnprintf(buf + pos, bufsz - pos, "%d\n",
1207 il->missed_beacon_threshold);
1208
1209 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1210}
1211
1212static ssize_t
1213il_dbgfs_missed_beacon_write(struct file *file, const char __user *user_buf,
1214 size_t count, loff_t *ppos)
1215{
1216 struct il_priv *il = file->private_data;
1217 char buf[8];
1218 int buf_size;
1219 int missed;
1220
1221 memset(buf, 0, sizeof(buf));
1222 buf_size = min(count, sizeof(buf) - 1);
1223 if (copy_from_user(buf, user_buf, buf_size))
1224 return -EFAULT;
1225 if (sscanf(buf, "%d", &missed) != 1)
1226 return -EINVAL;
1227
1228 if (missed < IL_MISSED_BEACON_THRESHOLD_MIN ||
1229 missed > IL_MISSED_BEACON_THRESHOLD_MAX)
1230 il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
1231 else
1232 il->missed_beacon_threshold = missed;
1233
1234 return count;
1235}
1236
1237static ssize_t
1238il_dbgfs_force_reset_read(struct file *file, char __user *user_buf,
1239 size_t count, loff_t *ppos)
1240{
1241
1242 struct il_priv *il = file->private_data;
1243 int pos = 0;
1244 char buf[300];
1245 const size_t bufsz = sizeof(buf);
1246 struct il_force_reset *force_reset;
1247
1248 force_reset = &il->force_reset;
1249
1250 pos +=
1251 scnprintf(buf + pos, bufsz - pos, "\tnumber of reset request: %d\n",
1252 force_reset->reset_request_count);
1253 pos +=
1254 scnprintf(buf + pos, bufsz - pos,
1255 "\tnumber of reset request success: %d\n",
1256 force_reset->reset_success_count);
1257 pos +=
1258 scnprintf(buf + pos, bufsz - pos,
1259 "\tnumber of reset request reject: %d\n",
1260 force_reset->reset_reject_count);
1261 pos +=
1262 scnprintf(buf + pos, bufsz - pos, "\treset duration: %lu\n",
1263 force_reset->reset_duration);
1264
1265 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1266}
1267
1268static ssize_t
1269il_dbgfs_force_reset_write(struct file *file, const char __user *user_buf,
1270 size_t count, loff_t *ppos)
1271{
1272
1273 int ret;
1274 struct il_priv *il = file->private_data;
1275
1276 ret = il_force_reset(il, true);
1277
1278 return ret ? ret : count;
1279}
1280
1281static ssize_t
1282il_dbgfs_wd_timeout_write(struct file *file, const char __user *user_buf,
1283 size_t count, loff_t *ppos)
1284{
1285
1286 struct il_priv *il = file->private_data;
1287 char buf[8];
1288 int buf_size;
1289 int timeout;
1290
1291 memset(buf, 0, sizeof(buf));
1292 buf_size = min(count, sizeof(buf) - 1);
1293 if (copy_from_user(buf, user_buf, buf_size))
1294 return -EFAULT;
1295 if (sscanf(buf, "%d", &timeout) != 1)
1296 return -EINVAL;
1297 if (timeout < 0 || timeout > IL_MAX_WD_TIMEOUT)
1298 timeout = IL_DEF_WD_TIMEOUT;
1299
1300 il->cfg->base_params->wd_timeout = timeout;
1301 il_setup_watchdog(il);
1302 return count;
1303}
1304
/*
 * Instantiate the file_operations structures for each debugfs file.
 * DEBUGFS_READ_FILE_OPS / DEBUGFS_WRITE_FILE_OPS / DEBUGFS_READ_WRITE_FILE_OPS
 * expand to a struct file_operations named il_dbgfs_<name>_ops wired to the
 * il_dbgfs_<name>_read / il_dbgfs_<name>_write handlers defined above.
 */
DEBUGFS_READ_FILE_OPS(rx_stats);
DEBUGFS_READ_FILE_OPS(tx_stats);
DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
DEBUGFS_READ_FILE_OPS(ucode_general_stats);
DEBUGFS_READ_FILE_OPS(sensitivity);
DEBUGFS_READ_FILE_OPS(chain_noise);
DEBUGFS_READ_FILE_OPS(power_save_status);
DEBUGFS_WRITE_FILE_OPS(clear_ucode_stats);
DEBUGFS_WRITE_FILE_OPS(clear_traffic_stats);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
DEBUGFS_READ_FILE_OPS(rxon_flags);
DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
DEBUGFS_WRITE_FILE_OPS(wd_timeout);
1324
1325/*
1326 * Create the debugfs files and directories
1327 *
1328 */
1329int
1330il_dbgfs_register(struct il_priv *il, const char *name)
1331{
1332 struct dentry *phyd = il->hw->wiphy->debugfsdir;
1333 struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
1334
1335 dir_drv = debugfs_create_dir(name, phyd);
1336 if (!dir_drv)
1337 return -ENOMEM;
1338
1339 il->debugfs_dir = dir_drv;
1340
1341 dir_data = debugfs_create_dir("data", dir_drv);
1342 if (!dir_data)
1343 goto err;
1344 dir_rf = debugfs_create_dir("rf", dir_drv);
1345 if (!dir_rf)
1346 goto err;
1347 dir_debug = debugfs_create_dir("debug", dir_drv);
1348 if (!dir_debug)
1349 goto err;
1350
1351 DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
1352 DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
1353 DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
1354 DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
1355 DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
1356 DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
1357 DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
1358 DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
1359 DEBUGFS_ADD_FILE(rx_stats, dir_debug, S_IRUSR);
1360 DEBUGFS_ADD_FILE(tx_stats, dir_debug, S_IRUSR);
1361 DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
1362 DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
1363 DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
1364 DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
1365 DEBUGFS_ADD_FILE(clear_ucode_stats, dir_debug, S_IWUSR);
1366 DEBUGFS_ADD_FILE(clear_traffic_stats, dir_debug, S_IWUSR);
1367 DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
1368 DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
1369 DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
1370 DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
1371 DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
1372 DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
1373
1374 if (il->cfg->base_params->sensitivity_calib_by_driver)
1375 DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
1376 if (il->cfg->base_params->chain_noise_calib_by_driver)
1377 DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
1378 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
1379 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
1380 DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
1381 if (il->cfg->base_params->sensitivity_calib_by_driver)
1382 DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
1383 &il->disable_sens_cal);
1384 if (il->cfg->base_params->chain_noise_calib_by_driver)
1385 DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
1386 &il->disable_chain_noise_cal);
1387 DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf, &il->disable_tx_power_cal);
1388 return 0;
1389
1390err:
1391 IL_ERR("Can't create the debugfs directory\n");
1392 il_dbgfs_unregister(il);
1393 return -ENOMEM;
1394}
1395EXPORT_SYMBOL(il_dbgfs_register);
1396
/*
 * Remove the debugfs files and directories created by il_dbgfs_register().
 */
void
il_dbgfs_unregister(struct il_priv *il)
{
	/* Nothing to do if debugfs was never (successfully) registered. */
	if (!il->debugfs_dir)
		return;

	/* Removes the root directory and every file/subdir beneath it,
	 * then clears the pointer so a second call is a no-op. */
	debugfs_remove_recursive(il->debugfs_dir);
	il->debugfs_dir = NULL;
}
EXPORT_SYMBOL(il_dbgfs_unregister);
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
deleted file mode 100644
index cfabb38793ab..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
+++ /dev/null
@@ -1,523 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include "iwl-3945-debugfs.h"
30
31
32static int iwl3945_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
33{
34 int p = 0;
35
36 p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
37 le32_to_cpu(priv->_3945.statistics.flag));
38 if (le32_to_cpu(priv->_3945.statistics.flag) &
39 UCODE_STATISTICS_CLEAR_MSK)
40 p += scnprintf(buf + p, bufsz - p,
41 "\tStatistics have been cleared\n");
42 p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
43 (le32_to_cpu(priv->_3945.statistics.flag) &
44 UCODE_STATISTICS_FREQUENCY_MSK)
45 ? "2.4 GHz" : "5.2 GHz");
46 p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
47 (le32_to_cpu(priv->_3945.statistics.flag) &
48 UCODE_STATISTICS_NARROW_BAND_MSK)
49 ? "enabled" : "disabled");
50 return p;
51}
52
53ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
54 char __user *user_buf,
55 size_t count, loff_t *ppos)
56{
57 struct iwl_priv *priv = file->private_data;
58 int pos = 0;
59 char *buf;
60 int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 +
61 sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400;
62 ssize_t ret;
63 struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm,
64 *max_ofdm;
65 struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
66 struct iwl39_statistics_rx_non_phy *general, *accum_general;
67 struct iwl39_statistics_rx_non_phy *delta_general, *max_general;
68
69 if (!iwl_legacy_is_alive(priv))
70 return -EAGAIN;
71
72 buf = kzalloc(bufsz, GFP_KERNEL);
73 if (!buf) {
74 IWL_ERR(priv, "Can not allocate Buffer\n");
75 return -ENOMEM;
76 }
77
78 /*
79 * The statistic information display here is based on
80 * the last statistics notification from uCode
81 * might not reflect the current uCode activity
82 */
83 ofdm = &priv->_3945.statistics.rx.ofdm;
84 cck = &priv->_3945.statistics.rx.cck;
85 general = &priv->_3945.statistics.rx.general;
86 accum_ofdm = &priv->_3945.accum_statistics.rx.ofdm;
87 accum_cck = &priv->_3945.accum_statistics.rx.cck;
88 accum_general = &priv->_3945.accum_statistics.rx.general;
89 delta_ofdm = &priv->_3945.delta_statistics.rx.ofdm;
90 delta_cck = &priv->_3945.delta_statistics.rx.cck;
91 delta_general = &priv->_3945.delta_statistics.rx.general;
92 max_ofdm = &priv->_3945.max_delta.rx.ofdm;
93 max_cck = &priv->_3945.max_delta.rx.cck;
94 max_general = &priv->_3945.max_delta.rx.general;
95
96 pos += iwl3945_statistics_flag(priv, buf, bufsz);
97 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
98 "acumulative delta max\n",
99 "Statistics_Rx - OFDM:");
100 pos += scnprintf(buf + pos, bufsz - pos,
101 " %-30s %10u %10u %10u %10u\n",
102 "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
103 accum_ofdm->ina_cnt,
104 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
105 pos += scnprintf(buf + pos, bufsz - pos,
106 " %-30s %10u %10u %10u %10u\n",
107 "fina_cnt:",
108 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
109 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
110 pos += scnprintf(buf + pos, bufsz - pos,
111 " %-30s %10u %10u %10u %10u\n", "plcp_err:",
112 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
113 delta_ofdm->plcp_err, max_ofdm->plcp_err);
114 pos += scnprintf(buf + pos, bufsz - pos,
115 " %-30s %10u %10u %10u %10u\n", "crc32_err:",
116 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
117 delta_ofdm->crc32_err, max_ofdm->crc32_err);
118 pos += scnprintf(buf + pos, bufsz - pos,
119 " %-30s %10u %10u %10u %10u\n", "overrun_err:",
120 le32_to_cpu(ofdm->overrun_err),
121 accum_ofdm->overrun_err, delta_ofdm->overrun_err,
122 max_ofdm->overrun_err);
123 pos += scnprintf(buf + pos, bufsz - pos,
124 " %-30s %10u %10u %10u %10u\n",
125 "early_overrun_err:",
126 le32_to_cpu(ofdm->early_overrun_err),
127 accum_ofdm->early_overrun_err,
128 delta_ofdm->early_overrun_err,
129 max_ofdm->early_overrun_err);
130 pos += scnprintf(buf + pos, bufsz - pos,
131 " %-30s %10u %10u %10u %10u\n",
132 "crc32_good:", le32_to_cpu(ofdm->crc32_good),
133 accum_ofdm->crc32_good, delta_ofdm->crc32_good,
134 max_ofdm->crc32_good);
135 pos += scnprintf(buf + pos, bufsz - pos,
136 " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
137 le32_to_cpu(ofdm->false_alarm_cnt),
138 accum_ofdm->false_alarm_cnt,
139 delta_ofdm->false_alarm_cnt,
140 max_ofdm->false_alarm_cnt);
141 pos += scnprintf(buf + pos, bufsz - pos,
142 " %-30s %10u %10u %10u %10u\n",
143 "fina_sync_err_cnt:",
144 le32_to_cpu(ofdm->fina_sync_err_cnt),
145 accum_ofdm->fina_sync_err_cnt,
146 delta_ofdm->fina_sync_err_cnt,
147 max_ofdm->fina_sync_err_cnt);
148 pos += scnprintf(buf + pos, bufsz - pos,
149 " %-30s %10u %10u %10u %10u\n",
150 "sfd_timeout:",
151 le32_to_cpu(ofdm->sfd_timeout),
152 accum_ofdm->sfd_timeout,
153 delta_ofdm->sfd_timeout,
154 max_ofdm->sfd_timeout);
155 pos += scnprintf(buf + pos, bufsz - pos,
156 " %-30s %10u %10u %10u %10u\n",
157 "fina_timeout:",
158 le32_to_cpu(ofdm->fina_timeout),
159 accum_ofdm->fina_timeout,
160 delta_ofdm->fina_timeout,
161 max_ofdm->fina_timeout);
162 pos += scnprintf(buf + pos, bufsz - pos,
163 " %-30s %10u %10u %10u %10u\n",
164 "unresponded_rts:",
165 le32_to_cpu(ofdm->unresponded_rts),
166 accum_ofdm->unresponded_rts,
167 delta_ofdm->unresponded_rts,
168 max_ofdm->unresponded_rts);
169 pos += scnprintf(buf + pos, bufsz - pos,
170 " %-30s %10u %10u %10u %10u\n",
171 "rxe_frame_lmt_ovrun:",
172 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
173 accum_ofdm->rxe_frame_limit_overrun,
174 delta_ofdm->rxe_frame_limit_overrun,
175 max_ofdm->rxe_frame_limit_overrun);
176 pos += scnprintf(buf + pos, bufsz - pos,
177 " %-30s %10u %10u %10u %10u\n",
178 "sent_ack_cnt:",
179 le32_to_cpu(ofdm->sent_ack_cnt),
180 accum_ofdm->sent_ack_cnt,
181 delta_ofdm->sent_ack_cnt,
182 max_ofdm->sent_ack_cnt);
183 pos += scnprintf(buf + pos, bufsz - pos,
184 " %-30s %10u %10u %10u %10u\n",
185 "sent_cts_cnt:",
186 le32_to_cpu(ofdm->sent_cts_cnt),
187 accum_ofdm->sent_cts_cnt,
188 delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
189
190 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
191 "acumulative delta max\n",
192 "Statistics_Rx - CCK:");
193 pos += scnprintf(buf + pos, bufsz - pos,
194 " %-30s %10u %10u %10u %10u\n",
195 "ina_cnt:",
196 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
197 delta_cck->ina_cnt, max_cck->ina_cnt);
198 pos += scnprintf(buf + pos, bufsz - pos,
199 " %-30s %10u %10u %10u %10u\n",
200 "fina_cnt:",
201 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
202 delta_cck->fina_cnt, max_cck->fina_cnt);
203 pos += scnprintf(buf + pos, bufsz - pos,
204 " %-30s %10u %10u %10u %10u\n",
205 "plcp_err:",
206 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
207 delta_cck->plcp_err, max_cck->plcp_err);
208 pos += scnprintf(buf + pos, bufsz - pos,
209 " %-30s %10u %10u %10u %10u\n",
210 "crc32_err:",
211 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
212 delta_cck->crc32_err, max_cck->crc32_err);
213 pos += scnprintf(buf + pos, bufsz - pos,
214 " %-30s %10u %10u %10u %10u\n",
215 "overrun_err:",
216 le32_to_cpu(cck->overrun_err),
217 accum_cck->overrun_err,
218 delta_cck->overrun_err, max_cck->overrun_err);
219 pos += scnprintf(buf + pos, bufsz - pos,
220 " %-30s %10u %10u %10u %10u\n",
221 "early_overrun_err:",
222 le32_to_cpu(cck->early_overrun_err),
223 accum_cck->early_overrun_err,
224 delta_cck->early_overrun_err,
225 max_cck->early_overrun_err);
226 pos += scnprintf(buf + pos, bufsz - pos,
227 " %-30s %10u %10u %10u %10u\n",
228 "crc32_good:",
229 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
230 delta_cck->crc32_good,
231 max_cck->crc32_good);
232 pos += scnprintf(buf + pos, bufsz - pos,
233 " %-30s %10u %10u %10u %10u\n",
234 "false_alarm_cnt:",
235 le32_to_cpu(cck->false_alarm_cnt),
236 accum_cck->false_alarm_cnt,
237 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
238 pos += scnprintf(buf + pos, bufsz - pos,
239 " %-30s %10u %10u %10u %10u\n",
240 "fina_sync_err_cnt:",
241 le32_to_cpu(cck->fina_sync_err_cnt),
242 accum_cck->fina_sync_err_cnt,
243 delta_cck->fina_sync_err_cnt,
244 max_cck->fina_sync_err_cnt);
245 pos += scnprintf(buf + pos, bufsz - pos,
246 " %-30s %10u %10u %10u %10u\n",
247 "sfd_timeout:",
248 le32_to_cpu(cck->sfd_timeout),
249 accum_cck->sfd_timeout,
250 delta_cck->sfd_timeout, max_cck->sfd_timeout);
251 pos += scnprintf(buf + pos, bufsz - pos,
252 " %-30s %10u %10u %10u %10u\n",
253 "fina_timeout:",
254 le32_to_cpu(cck->fina_timeout),
255 accum_cck->fina_timeout,
256 delta_cck->fina_timeout, max_cck->fina_timeout);
257 pos += scnprintf(buf + pos, bufsz - pos,
258 " %-30s %10u %10u %10u %10u\n",
259 "unresponded_rts:",
260 le32_to_cpu(cck->unresponded_rts),
261 accum_cck->unresponded_rts,
262 delta_cck->unresponded_rts,
263 max_cck->unresponded_rts);
264 pos += scnprintf(buf + pos, bufsz - pos,
265 " %-30s %10u %10u %10u %10u\n",
266 "rxe_frame_lmt_ovrun:",
267 le32_to_cpu(cck->rxe_frame_limit_overrun),
268 accum_cck->rxe_frame_limit_overrun,
269 delta_cck->rxe_frame_limit_overrun,
270 max_cck->rxe_frame_limit_overrun);
271 pos += scnprintf(buf + pos, bufsz - pos,
272 " %-30s %10u %10u %10u %10u\n",
273 "sent_ack_cnt:",
274 le32_to_cpu(cck->sent_ack_cnt),
275 accum_cck->sent_ack_cnt,
276 delta_cck->sent_ack_cnt,
277 max_cck->sent_ack_cnt);
278 pos += scnprintf(buf + pos, bufsz - pos,
279 " %-30s %10u %10u %10u %10u\n",
280 "sent_cts_cnt:",
281 le32_to_cpu(cck->sent_cts_cnt),
282 accum_cck->sent_cts_cnt,
283 delta_cck->sent_cts_cnt,
284 max_cck->sent_cts_cnt);
285
286 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
287 "acumulative delta max\n",
288 "Statistics_Rx - GENERAL:");
289 pos += scnprintf(buf + pos, bufsz - pos,
290 " %-30s %10u %10u %10u %10u\n",
291 "bogus_cts:",
292 le32_to_cpu(general->bogus_cts),
293 accum_general->bogus_cts,
294 delta_general->bogus_cts, max_general->bogus_cts);
295 pos += scnprintf(buf + pos, bufsz - pos,
296 " %-30s %10u %10u %10u %10u\n",
297 "bogus_ack:",
298 le32_to_cpu(general->bogus_ack),
299 accum_general->bogus_ack,
300 delta_general->bogus_ack, max_general->bogus_ack);
301 pos += scnprintf(buf + pos, bufsz - pos,
302 " %-30s %10u %10u %10u %10u\n",
303 "non_bssid_frames:",
304 le32_to_cpu(general->non_bssid_frames),
305 accum_general->non_bssid_frames,
306 delta_general->non_bssid_frames,
307 max_general->non_bssid_frames);
308 pos += scnprintf(buf + pos, bufsz - pos,
309 " %-30s %10u %10u %10u %10u\n",
310 "filtered_frames:",
311 le32_to_cpu(general->filtered_frames),
312 accum_general->filtered_frames,
313 delta_general->filtered_frames,
314 max_general->filtered_frames);
315 pos += scnprintf(buf + pos, bufsz - pos,
316 " %-30s %10u %10u %10u %10u\n",
317 "non_channel_beacons:",
318 le32_to_cpu(general->non_channel_beacons),
319 accum_general->non_channel_beacons,
320 delta_general->non_channel_beacons,
321 max_general->non_channel_beacons);
322
323 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
324 kfree(buf);
325 return ret;
326}
327
328ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
329 char __user *user_buf,
330 size_t count, loff_t *ppos)
331{
332 struct iwl_priv *priv = file->private_data;
333 int pos = 0;
334 char *buf;
335 int bufsz = (sizeof(struct iwl39_statistics_tx) * 48) + 250;
336 ssize_t ret;
337 struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
338
339 if (!iwl_legacy_is_alive(priv))
340 return -EAGAIN;
341
342 buf = kzalloc(bufsz, GFP_KERNEL);
343 if (!buf) {
344 IWL_ERR(priv, "Can not allocate Buffer\n");
345 return -ENOMEM;
346 }
347
348 /*
349 * The statistic information display here is based on
350 * the last statistics notification from uCode
351 * might not reflect the current uCode activity
352 */
353 tx = &priv->_3945.statistics.tx;
354 accum_tx = &priv->_3945.accum_statistics.tx;
355 delta_tx = &priv->_3945.delta_statistics.tx;
356 max_tx = &priv->_3945.max_delta.tx;
357 pos += iwl3945_statistics_flag(priv, buf, bufsz);
358 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
359 "acumulative delta max\n",
360 "Statistics_Tx:");
361 pos += scnprintf(buf + pos, bufsz - pos,
362 " %-30s %10u %10u %10u %10u\n",
363 "preamble:",
364 le32_to_cpu(tx->preamble_cnt),
365 accum_tx->preamble_cnt,
366 delta_tx->preamble_cnt, max_tx->preamble_cnt);
367 pos += scnprintf(buf + pos, bufsz - pos,
368 " %-30s %10u %10u %10u %10u\n",
369 "rx_detected_cnt:",
370 le32_to_cpu(tx->rx_detected_cnt),
371 accum_tx->rx_detected_cnt,
372 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
373 pos += scnprintf(buf + pos, bufsz - pos,
374 " %-30s %10u %10u %10u %10u\n",
375 "bt_prio_defer_cnt:",
376 le32_to_cpu(tx->bt_prio_defer_cnt),
377 accum_tx->bt_prio_defer_cnt,
378 delta_tx->bt_prio_defer_cnt,
379 max_tx->bt_prio_defer_cnt);
380 pos += scnprintf(buf + pos, bufsz - pos,
381 " %-30s %10u %10u %10u %10u\n",
382 "bt_prio_kill_cnt:",
383 le32_to_cpu(tx->bt_prio_kill_cnt),
384 accum_tx->bt_prio_kill_cnt,
385 delta_tx->bt_prio_kill_cnt,
386 max_tx->bt_prio_kill_cnt);
387 pos += scnprintf(buf + pos, bufsz - pos,
388 " %-30s %10u %10u %10u %10u\n",
389 "few_bytes_cnt:",
390 le32_to_cpu(tx->few_bytes_cnt),
391 accum_tx->few_bytes_cnt,
392 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
393 pos += scnprintf(buf + pos, bufsz - pos,
394 " %-30s %10u %10u %10u %10u\n",
395 "cts_timeout:",
396 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
397 delta_tx->cts_timeout, max_tx->cts_timeout);
398 pos += scnprintf(buf + pos, bufsz - pos,
399 " %-30s %10u %10u %10u %10u\n",
400 "ack_timeout:",
401 le32_to_cpu(tx->ack_timeout),
402 accum_tx->ack_timeout,
403 delta_tx->ack_timeout, max_tx->ack_timeout);
404 pos += scnprintf(buf + pos, bufsz - pos,
405 " %-30s %10u %10u %10u %10u\n",
406 "expected_ack_cnt:",
407 le32_to_cpu(tx->expected_ack_cnt),
408 accum_tx->expected_ack_cnt,
409 delta_tx->expected_ack_cnt,
410 max_tx->expected_ack_cnt);
411 pos += scnprintf(buf + pos, bufsz - pos,
412 " %-30s %10u %10u %10u %10u\n",
413 "actual_ack_cnt:",
414 le32_to_cpu(tx->actual_ack_cnt),
415 accum_tx->actual_ack_cnt,
416 delta_tx->actual_ack_cnt,
417 max_tx->actual_ack_cnt);
418
419 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
420 kfree(buf);
421 return ret;
422}
423
424ssize_t iwl3945_ucode_general_stats_read(struct file *file,
425 char __user *user_buf,
426 size_t count, loff_t *ppos)
427{
428 struct iwl_priv *priv = file->private_data;
429 int pos = 0;
430 char *buf;
431 int bufsz = sizeof(struct iwl39_statistics_general) * 10 + 300;
432 ssize_t ret;
433 struct iwl39_statistics_general *general, *accum_general;
434 struct iwl39_statistics_general *delta_general, *max_general;
435 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
436 struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div;
437
438 if (!iwl_legacy_is_alive(priv))
439 return -EAGAIN;
440
441 buf = kzalloc(bufsz, GFP_KERNEL);
442 if (!buf) {
443 IWL_ERR(priv, "Can not allocate Buffer\n");
444 return -ENOMEM;
445 }
446
447 /*
448 * The statistic information display here is based on
449 * the last statistics notification from uCode
450 * might not reflect the current uCode activity
451 */
452 general = &priv->_3945.statistics.general;
453 dbg = &priv->_3945.statistics.general.dbg;
454 div = &priv->_3945.statistics.general.div;
455 accum_general = &priv->_3945.accum_statistics.general;
456 delta_general = &priv->_3945.delta_statistics.general;
457 max_general = &priv->_3945.max_delta.general;
458 accum_dbg = &priv->_3945.accum_statistics.general.dbg;
459 delta_dbg = &priv->_3945.delta_statistics.general.dbg;
460 max_dbg = &priv->_3945.max_delta.general.dbg;
461 accum_div = &priv->_3945.accum_statistics.general.div;
462 delta_div = &priv->_3945.delta_statistics.general.div;
463 max_div = &priv->_3945.max_delta.general.div;
464 pos += iwl3945_statistics_flag(priv, buf, bufsz);
465 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
466 "acumulative delta max\n",
467 "Statistics_General:");
468 pos += scnprintf(buf + pos, bufsz - pos,
469 " %-30s %10u %10u %10u %10u\n",
470 "burst_check:",
471 le32_to_cpu(dbg->burst_check),
472 accum_dbg->burst_check,
473 delta_dbg->burst_check, max_dbg->burst_check);
474 pos += scnprintf(buf + pos, bufsz - pos,
475 " %-30s %10u %10u %10u %10u\n",
476 "burst_count:",
477 le32_to_cpu(dbg->burst_count),
478 accum_dbg->burst_count,
479 delta_dbg->burst_count, max_dbg->burst_count);
480 pos += scnprintf(buf + pos, bufsz - pos,
481 " %-30s %10u %10u %10u %10u\n",
482 "sleep_time:",
483 le32_to_cpu(general->sleep_time),
484 accum_general->sleep_time,
485 delta_general->sleep_time, max_general->sleep_time);
486 pos += scnprintf(buf + pos, bufsz - pos,
487 " %-30s %10u %10u %10u %10u\n",
488 "slots_out:",
489 le32_to_cpu(general->slots_out),
490 accum_general->slots_out,
491 delta_general->slots_out, max_general->slots_out);
492 pos += scnprintf(buf + pos, bufsz - pos,
493 " %-30s %10u %10u %10u %10u\n",
494 "slots_idle:",
495 le32_to_cpu(general->slots_idle),
496 accum_general->slots_idle,
497 delta_general->slots_idle, max_general->slots_idle);
498 pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
499 le32_to_cpu(general->ttl_timestamp));
500 pos += scnprintf(buf + pos, bufsz - pos,
501 " %-30s %10u %10u %10u %10u\n",
502 "tx_on_a:",
503 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
504 delta_div->tx_on_a, max_div->tx_on_a);
505 pos += scnprintf(buf + pos, bufsz - pos,
506 " %-30s %10u %10u %10u %10u\n",
507 "tx_on_b:",
508 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
509 delta_div->tx_on_b, max_div->tx_on_b);
510 pos += scnprintf(buf + pos, bufsz - pos,
511 " %-30s %10u %10u %10u %10u\n",
512 "exec_time:",
513 le32_to_cpu(div->exec_time), accum_div->exec_time,
514 delta_div->exec_time, max_div->exec_time);
515 pos += scnprintf(buf + pos, bufsz - pos,
516 " %-30s %10u %10u %10u %10u\n",
517 "probe_time:",
518 le32_to_cpu(div->probe_time), accum_div->probe_time,
519 delta_div->probe_time, max_div->probe_time);
520 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
521 kfree(buf);
522 return ret;
523}
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
deleted file mode 100644
index 8fef4b32b447..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
+++ /dev/null
@@ -1,60 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include "iwl-dev.h"
30#include "iwl-core.h"
31#include "iwl-debug.h"
32
33#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
34ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
35 size_t count, loff_t *ppos);
36ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
37 size_t count, loff_t *ppos);
38ssize_t iwl3945_ucode_general_stats_read(struct file *file,
39 char __user *user_buf, size_t count,
40 loff_t *ppos);
41#else
42static ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
43 char __user *user_buf, size_t count,
44 loff_t *ppos)
45{
46 return 0;
47}
48static ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
49 char __user *user_buf, size_t count,
50 loff_t *ppos)
51{
52 return 0;
53}
54static ssize_t iwl3945_ucode_general_stats_read(struct file *file,
55 char __user *user_buf,
56 size_t count, loff_t *ppos)
57{
58 return 0;
59}
60#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-fh.h b/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
deleted file mode 100644
index 836c9919f82e..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
+++ /dev/null
@@ -1,187 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_3945_fh_h__
64#define __iwl_3945_fh_h__
65
66/************************************/
67/* iwl3945 Flow Handler Definitions */
68/************************************/
69
70/**
71 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
72 * Addresses are offsets from device's PCI hardware base address.
73 */
74#define FH39_MEM_LOWER_BOUND (0x0800)
75#define FH39_MEM_UPPER_BOUND (0x1000)
76
77#define FH39_CBCC_TABLE (FH39_MEM_LOWER_BOUND + 0x140)
78#define FH39_TFDB_TABLE (FH39_MEM_LOWER_BOUND + 0x180)
79#define FH39_RCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x400)
80#define FH39_RSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x4c0)
81#define FH39_TCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x500)
82#define FH39_TSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x680)
83
84/* TFDB (Transmit Frame Buffer Descriptor) */
85#define FH39_TFDB(_ch, buf) (FH39_TFDB_TABLE + \
86 ((_ch) * 2 + (buf)) * 0x28)
87#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch) (FH39_TFDB_TABLE + 0x50 * (_ch))
88
89/* CBCC channel is [0,2] */
90#define FH39_CBCC(_ch) (FH39_CBCC_TABLE + (_ch) * 0x8)
91#define FH39_CBCC_CTRL(_ch) (FH39_CBCC(_ch) + 0x00)
92#define FH39_CBCC_BASE(_ch) (FH39_CBCC(_ch) + 0x04)
93
94/* RCSR channel is [0,2] */
95#define FH39_RCSR(_ch) (FH39_RCSR_TABLE + (_ch) * 0x40)
96#define FH39_RCSR_CONFIG(_ch) (FH39_RCSR(_ch) + 0x00)
97#define FH39_RCSR_RBD_BASE(_ch) (FH39_RCSR(_ch) + 0x04)
98#define FH39_RCSR_WPTR(_ch) (FH39_RCSR(_ch) + 0x20)
99#define FH39_RCSR_RPTR_ADDR(_ch) (FH39_RCSR(_ch) + 0x24)
100
101#define FH39_RSCSR_CHNL0_WPTR (FH39_RCSR_WPTR(0))
102
103/* RSSR */
104#define FH39_RSSR_CTRL (FH39_RSSR_TABLE + 0x000)
105#define FH39_RSSR_STATUS (FH39_RSSR_TABLE + 0x004)
106
107/* TCSR */
108#define FH39_TCSR(_ch) (FH39_TCSR_TABLE + (_ch) * 0x20)
109#define FH39_TCSR_CONFIG(_ch) (FH39_TCSR(_ch) + 0x00)
110#define FH39_TCSR_CREDIT(_ch) (FH39_TCSR(_ch) + 0x04)
111#define FH39_TCSR_BUFF_STTS(_ch) (FH39_TCSR(_ch) + 0x08)
112
113/* TSSR */
114#define FH39_TSSR_CBB_BASE (FH39_TSSR_TABLE + 0x000)
115#define FH39_TSSR_MSG_CONFIG (FH39_TSSR_TABLE + 0x008)
116#define FH39_TSSR_TX_STATUS (FH39_TSSR_TABLE + 0x010)
117
118
119/* DBM */
120
121#define FH39_SRVC_CHNL (6)
122
123#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20)
124#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH (4)
125
126#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000)
127
128#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000)
129
130#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000)
131
132#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000)
133
134#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000)
135
136#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000)
137
138#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
139#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001)
140
141#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000)
142#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008)
143
144#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
145
146#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
147
148#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
149#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
150
151#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000)
152
153#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001)
154
155#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000)
156#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000)
157
158#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400)
159
160#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100)
161#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080)
162
163#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020)
164#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005)
165
166#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) (BIT(_ch) << 24)
167#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch) (BIT(_ch) << 16)
168
169#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \
170 (FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \
171 FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch))
172
173#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
174
175struct iwl3945_tfd_tb {
176 __le32 addr;
177 __le32 len;
178} __packed;
179
180struct iwl3945_tfd {
181 __le32 control_flags;
182 struct iwl3945_tfd_tb tbs[4];
183 u8 __pad[28];
184} __packed;
185
186
187#endif /* __iwl_3945_fh_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
deleted file mode 100644
index 5c3a68d3af12..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
+++ /dev/null
@@ -1,291 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63/*
64 * Please use this file (iwl-3945-hw.h) only for hardware-related definitions.
65 * Please use iwl-commands.h for uCode API definitions.
66 * Please use iwl-3945.h for driver implementation definitions.
67 */
68
69#ifndef __iwl_3945_hw__
70#define __iwl_3945_hw__
71
72#include "iwl-eeprom.h"
73
74/* RSSI to dBm */
75#define IWL39_RSSI_OFFSET 95
76
77/*
78 * EEPROM related constants, enums, and structures.
79 */
80#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7)
81
82/*
83 * Mapping of a Tx power level, at factory calibration temperature,
84 * to a radio/DSP gain table index.
85 * One for each of 5 "sample" power levels in each band.
86 * v_det is measured at the factory, using the 3945's built-in power amplifier
87 * (PA) output voltage detector. This same detector is used during Tx of
88 * long packets in normal operation to provide feedback as to proper output
89 * level.
90 * Data copied from EEPROM.
91 * DO NOT ALTER THIS STRUCTURE!!!
92 */
93struct iwl3945_eeprom_txpower_sample {
94 u8 gain_index; /* index into power (gain) setup table ... */
95 s8 power; /* ... for this pwr level for this chnl group */
96 u16 v_det; /* PA output voltage */
97} __packed;
98
99/*
100 * Mappings of Tx power levels -> nominal radio/DSP gain table indexes.
101 * One for each channel group (a.k.a. "band") (1 for BG, 4 for A).
102 * Tx power setup code interpolates between the 5 "sample" power levels
103 * to determine the nominal setup for a requested power level.
104 * Data copied from EEPROM.
105 * DO NOT ALTER THIS STRUCTURE!!!
106 */
107struct iwl3945_eeprom_txpower_group {
108 struct iwl3945_eeprom_txpower_sample samples[5]; /* 5 power levels */
109 s32 a, b, c, d, e; /* coefficients for voltage->power
110 * formula (signed) */
111 s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on
112 * frequency (signed) */
113 s8 saturation_power; /* highest power possible by h/w in this
114 * band */
115 u8 group_channel; /* "representative" channel # in this band */
116 s16 temperature; /* h/w temperature at factory calib this band
117 * (signed) */
118} __packed;
119
120/*
121 * Temperature-based Tx-power compensation data, not band-specific.
122 * These coefficients are use to modify a/b/c/d/e coeffs based on
123 * difference between current temperature and factory calib temperature.
124 * Data copied from EEPROM.
125 */
126struct iwl3945_eeprom_temperature_corr {
127 u32 Ta;
128 u32 Tb;
129 u32 Tc;
130 u32 Td;
131 u32 Te;
132} __packed;
133
134/*
135 * EEPROM map
136 */
137struct iwl3945_eeprom {
138 u8 reserved0[16];
139 u16 device_id; /* abs.ofs: 16 */
140 u8 reserved1[2];
141 u16 pmc; /* abs.ofs: 20 */
142 u8 reserved2[20];
143 u8 mac_address[6]; /* abs.ofs: 42 */
144 u8 reserved3[58];
145 u16 board_revision; /* abs.ofs: 106 */
146 u8 reserved4[11];
147 u8 board_pba_number[9]; /* abs.ofs: 119 */
148 u8 reserved5[8];
149 u16 version; /* abs.ofs: 136 */
150 u8 sku_cap; /* abs.ofs: 138 */
151 u8 leds_mode; /* abs.ofs: 139 */
152 u16 oem_mode;
153 u16 wowlan_mode; /* abs.ofs: 142 */
154 u16 leds_time_interval; /* abs.ofs: 144 */
155 u8 leds_off_time; /* abs.ofs: 146 */
156 u8 leds_on_time; /* abs.ofs: 147 */
157 u8 almgor_m_version; /* abs.ofs: 148 */
158 u8 antenna_switch_type; /* abs.ofs: 149 */
159 u8 reserved6[42];
160 u8 sku_id[4]; /* abs.ofs: 192 */
161
162/*
163 * Per-channel regulatory data.
164 *
165 * Each channel that *might* be supported by 3945 has a fixed location
166 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
167 * txpower (MSB).
168 *
169 * Entries immediately below are for 20 MHz channel width.
170 *
171 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
172 */
173 u16 band_1_count; /* abs.ofs: 196 */
174 struct iwl_eeprom_channel band_1_channels[14]; /* abs.ofs: 198 */
175
176/*
177 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
178 * 5.0 GHz channels 7, 8, 11, 12, 16
179 * (4915-5080MHz) (none of these is ever supported)
180 */
181 u16 band_2_count; /* abs.ofs: 226 */
182 struct iwl_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */
183
184/*
185 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
186 * (5170-5320MHz)
187 */
188 u16 band_3_count; /* abs.ofs: 254 */
189 struct iwl_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */
190
191/*
192 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
193 * (5500-5700MHz)
194 */
195 u16 band_4_count; /* abs.ofs: 280 */
196 struct iwl_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */
197
198/*
199 * 5.7 GHz channels 145, 149, 153, 157, 161, 165
200 * (5725-5825MHz)
201 */
202 u16 band_5_count; /* abs.ofs: 304 */
203 struct iwl_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */
204
205 u8 reserved9[194];
206
207/*
208 * 3945 Txpower calibration data.
209 */
210#define IWL_NUM_TX_CALIB_GROUPS 5
211 struct iwl3945_eeprom_txpower_group groups[IWL_NUM_TX_CALIB_GROUPS];
212/* abs.ofs: 512 */
213 struct iwl3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */
214 u8 reserved16[172]; /* fill out to full 1024 byte block */
215} __packed;
216
217#define IWL3945_EEPROM_IMG_SIZE 1024
218
219/* End of EEPROM */
220
221#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40) /* bit 6 */
222#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */
223
224/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
225#define IWL39_NUM_QUEUES 5
226#define IWL39_CMD_QUEUE_NUM 4
227
228#define IWL_DEFAULT_TX_RETRY 15
229
230/*********************************************/
231
232#define RFD_SIZE 4
233#define NUM_TFD_CHUNKS 4
234
235#define RX_QUEUE_SIZE 256
236#define RX_QUEUE_MASK 255
237#define RX_QUEUE_SIZE_LOG 8
238
239#define U32_PAD(n) ((4-(n))&0x3)
240
241#define TFD_CTL_COUNT_SET(n) (n << 24)
242#define TFD_CTL_COUNT_GET(ctl) ((ctl >> 24) & 7)
243#define TFD_CTL_PAD_SET(n) (n << 28)
244#define TFD_CTL_PAD_GET(ctl) (ctl >> 28)
245
246/* Sizes and addresses for instruction and data memory (SRAM) in
247 * 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
248#define IWL39_RTC_INST_LOWER_BOUND (0x000000)
249#define IWL39_RTC_INST_UPPER_BOUND (0x014000)
250
251#define IWL39_RTC_DATA_LOWER_BOUND (0x800000)
252#define IWL39_RTC_DATA_UPPER_BOUND (0x808000)
253
254#define IWL39_RTC_INST_SIZE (IWL39_RTC_INST_UPPER_BOUND - \
255 IWL39_RTC_INST_LOWER_BOUND)
256#define IWL39_RTC_DATA_SIZE (IWL39_RTC_DATA_UPPER_BOUND - \
257 IWL39_RTC_DATA_LOWER_BOUND)
258
259#define IWL39_MAX_INST_SIZE IWL39_RTC_INST_SIZE
260#define IWL39_MAX_DATA_SIZE IWL39_RTC_DATA_SIZE
261
262/* Size of uCode instruction memory in bootstrap state machine */
263#define IWL39_MAX_BSM_SIZE IWL39_RTC_INST_SIZE
264
265static inline int iwl3945_hw_valid_rtc_data_addr(u32 addr)
266{
267 return (addr >= IWL39_RTC_DATA_LOWER_BOUND) &&
268 (addr < IWL39_RTC_DATA_UPPER_BOUND);
269}
270
271/* Base physical address of iwl3945_shared is provided to FH_TSSR_CBB_BASE
272 * and &iwl3945_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */
273struct iwl3945_shared {
274 __le32 tx_base_ptr[8];
275} __packed;
276
277static inline u8 iwl3945_hw_get_rate(__le16 rate_n_flags)
278{
279 return le16_to_cpu(rate_n_flags) & 0xFF;
280}
281
282static inline u16 iwl3945_hw_get_rate_n_flags(__le16 rate_n_flags)
283{
284 return le16_to_cpu(rate_n_flags);
285}
286
287static inline __le16 iwl3945_hw_set_rate_n_flags(u8 rate, u16 flags)
288{
289 return cpu_to_le16((u16)rate|flags);
290}
291#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-led.c b/drivers/net/wireless/iwlegacy/iwl-3945-led.c
deleted file mode 100644
index 7a7f0f38c8ab..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-led.c
+++ /dev/null
@@ -1,63 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/skbuff.h>
34#include <linux/netdevice.h>
35#include <net/mac80211.h>
36#include <linux/etherdevice.h>
37#include <asm/unaligned.h>
38
39#include "iwl-commands.h"
40#include "iwl-3945.h"
41#include "iwl-core.h"
42#include "iwl-dev.h"
43#include "iwl-3945-led.h"
44
45
46/* Send led command */
47static int iwl3945_send_led_cmd(struct iwl_priv *priv,
48 struct iwl_led_cmd *led_cmd)
49{
50 struct iwl_host_cmd cmd = {
51 .id = REPLY_LEDS_CMD,
52 .len = sizeof(struct iwl_led_cmd),
53 .data = led_cmd,
54 .flags = CMD_ASYNC,
55 .callback = NULL,
56 };
57
58 return iwl_legacy_send_cmd(priv, &cmd);
59}
60
61const struct iwl_led_ops iwl3945_led_ops = {
62 .cmd = iwl3945_send_led_cmd,
63};
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-led.h b/drivers/net/wireless/iwlegacy/iwl-3945-led.h
deleted file mode 100644
index 96716276eb0d..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-led.h
+++ /dev/null
@@ -1,32 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_3945_led_h__
28#define __iwl_3945_led_h__
29
30extern const struct iwl_led_ops iwl3945_led_ops;
31
32#endif /* __iwl_3945_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
deleted file mode 100644
index 8faeaf2dddec..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
+++ /dev/null
@@ -1,996 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/init.h>
29#include <linux/skbuff.h>
30#include <linux/slab.h>
31#include <net/mac80211.h>
32
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/delay.h>
36
37#include <linux/workqueue.h>
38
39#include "iwl-commands.h"
40#include "iwl-3945.h"
41#include "iwl-sta.h"
42
43#define RS_NAME "iwl-3945-rs"
44
45static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT_3945] = {
46 7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
47};
48
49static s32 iwl3945_expected_tpt_g_prot[IWL_RATE_COUNT_3945] = {
50 7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125
51};
52
53static s32 iwl3945_expected_tpt_a[IWL_RATE_COUNT_3945] = {
54 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186
55};
56
57static s32 iwl3945_expected_tpt_b[IWL_RATE_COUNT_3945] = {
58 7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0
59};
60
61struct iwl3945_tpt_entry {
62 s8 min_rssi;
63 u8 index;
64};
65
66static struct iwl3945_tpt_entry iwl3945_tpt_table_a[] = {
67 {-60, IWL_RATE_54M_INDEX},
68 {-64, IWL_RATE_48M_INDEX},
69 {-72, IWL_RATE_36M_INDEX},
70 {-80, IWL_RATE_24M_INDEX},
71 {-84, IWL_RATE_18M_INDEX},
72 {-85, IWL_RATE_12M_INDEX},
73 {-87, IWL_RATE_9M_INDEX},
74 {-89, IWL_RATE_6M_INDEX}
75};
76
77static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = {
78 {-60, IWL_RATE_54M_INDEX},
79 {-64, IWL_RATE_48M_INDEX},
80 {-68, IWL_RATE_36M_INDEX},
81 {-80, IWL_RATE_24M_INDEX},
82 {-84, IWL_RATE_18M_INDEX},
83 {-85, IWL_RATE_12M_INDEX},
84 {-86, IWL_RATE_11M_INDEX},
85 {-88, IWL_RATE_5M_INDEX},
86 {-90, IWL_RATE_2M_INDEX},
87 {-92, IWL_RATE_1M_INDEX}
88};
89
90#define IWL_RATE_MAX_WINDOW 62
91#define IWL_RATE_FLUSH (3*HZ)
92#define IWL_RATE_WIN_FLUSH (HZ/2)
93#define IWL39_RATE_HIGH_TH 11520
94#define IWL_SUCCESS_UP_TH 8960
95#define IWL_SUCCESS_DOWN_TH 10880
96#define IWL_RATE_MIN_FAILURE_TH 6
97#define IWL_RATE_MIN_SUCCESS_TH 8
98#define IWL_RATE_DECREASE_TH 1920
99#define IWL_RATE_RETRY_TH 15
100
101static u8 iwl3945_get_rate_index_by_rssi(s32 rssi, enum ieee80211_band band)
102{
103 u32 index = 0;
104 u32 table_size = 0;
105 struct iwl3945_tpt_entry *tpt_table = NULL;
106
107 if ((rssi < IWL_MIN_RSSI_VAL) || (rssi > IWL_MAX_RSSI_VAL))
108 rssi = IWL_MIN_RSSI_VAL;
109
110 switch (band) {
111 case IEEE80211_BAND_2GHZ:
112 tpt_table = iwl3945_tpt_table_g;
113 table_size = ARRAY_SIZE(iwl3945_tpt_table_g);
114 break;
115
116 case IEEE80211_BAND_5GHZ:
117 tpt_table = iwl3945_tpt_table_a;
118 table_size = ARRAY_SIZE(iwl3945_tpt_table_a);
119 break;
120
121 default:
122 BUG();
123 break;
124 }
125
126 while ((index < table_size) && (rssi < tpt_table[index].min_rssi))
127 index++;
128
129 index = min(index, (table_size - 1));
130
131 return tpt_table[index].index;
132}
133
134static void iwl3945_clear_window(struct iwl3945_rate_scale_data *window)
135{
136 window->data = 0;
137 window->success_counter = 0;
138 window->success_ratio = -1;
139 window->counter = 0;
140 window->average_tpt = IWL_INVALID_VALUE;
141 window->stamp = 0;
142}
143
144/**
145 * iwl3945_rate_scale_flush_windows - flush out the rate scale windows
146 *
147 * Returns the number of windows that have gathered data but were
148 * not flushed. If there were any that were not flushed, then
149 * reschedule the rate flushing routine.
150 */
151static int iwl3945_rate_scale_flush_windows(struct iwl3945_rs_sta *rs_sta)
152{
153 int unflushed = 0;
154 int i;
155 unsigned long flags;
156 struct iwl_priv *priv __maybe_unused = rs_sta->priv;
157
158 /*
159 * For each rate, if we have collected data on that rate
160 * and it has been more than IWL_RATE_WIN_FLUSH
161 * since we flushed, clear out the gathered statistics
162 */
163 for (i = 0; i < IWL_RATE_COUNT_3945; i++) {
164 if (!rs_sta->win[i].counter)
165 continue;
166
167 spin_lock_irqsave(&rs_sta->lock, flags);
168 if (time_after(jiffies, rs_sta->win[i].stamp +
169 IWL_RATE_WIN_FLUSH)) {
170 IWL_DEBUG_RATE(priv, "flushing %d samples of rate "
171 "index %d\n",
172 rs_sta->win[i].counter, i);
173 iwl3945_clear_window(&rs_sta->win[i]);
174 } else
175 unflushed++;
176 spin_unlock_irqrestore(&rs_sta->lock, flags);
177 }
178
179 return unflushed;
180}
181
182#define IWL_RATE_FLUSH_MAX 5000 /* msec */
183#define IWL_RATE_FLUSH_MIN 50 /* msec */
184#define IWL_AVERAGE_PACKETS 1500
185
186static void iwl3945_bg_rate_scale_flush(unsigned long data)
187{
188 struct iwl3945_rs_sta *rs_sta = (void *)data;
189 struct iwl_priv *priv __maybe_unused = rs_sta->priv;
190 int unflushed = 0;
191 unsigned long flags;
192 u32 packet_count, duration, pps;
193
194 IWL_DEBUG_RATE(priv, "enter\n");
195
196 unflushed = iwl3945_rate_scale_flush_windows(rs_sta);
197
198 spin_lock_irqsave(&rs_sta->lock, flags);
199
200 /* Number of packets Rx'd since last time this timer ran */
201 packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1;
202
203 rs_sta->last_tx_packets = rs_sta->tx_packets + 1;
204
205 if (unflushed) {
206 duration =
207 jiffies_to_msecs(jiffies - rs_sta->last_partial_flush);
208
209 IWL_DEBUG_RATE(priv, "Tx'd %d packets in %dms\n",
210 packet_count, duration);
211
212 /* Determine packets per second */
213 if (duration)
214 pps = (packet_count * 1000) / duration;
215 else
216 pps = 0;
217
218 if (pps) {
219 duration = (IWL_AVERAGE_PACKETS * 1000) / pps;
220 if (duration < IWL_RATE_FLUSH_MIN)
221 duration = IWL_RATE_FLUSH_MIN;
222 else if (duration > IWL_RATE_FLUSH_MAX)
223 duration = IWL_RATE_FLUSH_MAX;
224 } else
225 duration = IWL_RATE_FLUSH_MAX;
226
227 rs_sta->flush_time = msecs_to_jiffies(duration);
228
229 IWL_DEBUG_RATE(priv, "new flush period: %d msec ave %d\n",
230 duration, packet_count);
231
232 mod_timer(&rs_sta->rate_scale_flush, jiffies +
233 rs_sta->flush_time);
234
235 rs_sta->last_partial_flush = jiffies;
236 } else {
237 rs_sta->flush_time = IWL_RATE_FLUSH;
238 rs_sta->flush_pending = 0;
239 }
240 /* If there weren't any unflushed entries, we don't schedule the timer
241 * to run again */
242
243 rs_sta->last_flush = jiffies;
244
245 spin_unlock_irqrestore(&rs_sta->lock, flags);
246
247 IWL_DEBUG_RATE(priv, "leave\n");
248}
249
250/**
251 * iwl3945_collect_tx_data - Update the success/failure sliding window
252 *
253 * We keep a sliding window of the last 64 packets transmitted
254 * at this rate. window->data contains the bitmask of successful
255 * packets.
256 */
257static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta,
258 struct iwl3945_rate_scale_data *window,
259 int success, int retries, int index)
260{
261 unsigned long flags;
262 s32 fail_count;
263 struct iwl_priv *priv __maybe_unused = rs_sta->priv;
264
265 if (!retries) {
266 IWL_DEBUG_RATE(priv, "leave: retries == 0 -- should be at least 1\n");
267 return;
268 }
269
270 spin_lock_irqsave(&rs_sta->lock, flags);
271
272 /*
273 * Keep track of only the latest 62 tx frame attempts in this rate's
274 * history window; anything older isn't really relevant any more.
275 * If we have filled up the sliding window, drop the oldest attempt;
276 * if the oldest attempt (highest bit in bitmap) shows "success",
277 * subtract "1" from the success counter (this is the main reason
278 * we keep these bitmaps!).
279 * */
280 while (retries > 0) {
281 if (window->counter >= IWL_RATE_MAX_WINDOW) {
282
283 /* remove earliest */
284 window->counter = IWL_RATE_MAX_WINDOW - 1;
285
286 if (window->data & (1ULL << (IWL_RATE_MAX_WINDOW - 1))) {
287 window->data &= ~(1ULL << (IWL_RATE_MAX_WINDOW - 1));
288 window->success_counter--;
289 }
290 }
291
292 /* Increment frames-attempted counter */
293 window->counter++;
294
295 /* Shift bitmap by one frame (throw away oldest history),
296 * OR in "1", and increment "success" if this
297 * frame was successful. */
298 window->data <<= 1;
299 if (success > 0) {
300 window->success_counter++;
301 window->data |= 0x1;
302 success--;
303 }
304
305 retries--;
306 }
307
308 /* Calculate current success ratio, avoid divide-by-0! */
309 if (window->counter > 0)
310 window->success_ratio = 128 * (100 * window->success_counter)
311 / window->counter;
312 else
313 window->success_ratio = IWL_INVALID_VALUE;
314
315 fail_count = window->counter - window->success_counter;
316
317 /* Calculate average throughput, if we have enough history. */
318 if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
319 (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
320 window->average_tpt = ((window->success_ratio *
321 rs_sta->expected_tpt[index] + 64) / 128);
322 else
323 window->average_tpt = IWL_INVALID_VALUE;
324
325 /* Tag this window as having been updated */
326 window->stamp = jiffies;
327
328 spin_unlock_irqrestore(&rs_sta->lock, flags);
329
330}
331
332/*
333 * Called after adding a new station to initialize rate scaling
334 */
void iwl3945_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id)
{
	struct ieee80211_hw *hw = priv->hw;
	struct ieee80211_conf *conf = &priv->hw->conf;
	struct iwl3945_sta_priv *psta;
	struct iwl3945_rs_sta *rs_sta;
	struct ieee80211_supported_band *sband;
	int i;

	IWL_DEBUG_INFO(priv, "enter\n");
	/* The broadcast "station" has no mac80211 sta entry behind it, so
	 * there is no per-station rate-scale state to set up; still clear
	 * the in-progress flag below. */
	if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
		goto out;

	psta = (struct iwl3945_sta_priv *) sta->drv_priv;
	rs_sta = &psta->rs_sta;
	sband = hw->wiphy->bands[conf->channel->band];

	rs_sta->priv = priv;

	/* No RSSI-based start rate yet; iwl3945_rate_scale_init() fills
	 * this in once the station is fully added. */
	rs_sta->start_rate = IWL_RATE_INVALID;

	/* default to just 802.11b */
	rs_sta->expected_tpt = iwl3945_expected_tpt_b;

	rs_sta->last_partial_flush = jiffies;
	rs_sta->last_flush = jiffies;
	rs_sta->flush_time = IWL_RATE_FLUSH;
	rs_sta->last_tx_packets = 0;

	rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
	rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush;

	/* Reset the success/failure history window of every rate */
	for (i = 0; i < IWL_RATE_COUNT_3945; i++)
		iwl3945_clear_window(&rs_sta->win[i]);

	/* TODO: what is a good starting rate for STA? About middle? Maybe not
	 * the lowest or the highest rate.. Could consider using RSSI from
	 * previous packets? Need to have IEEE 802.1X auth succeed immediately
	 * after assoc.. */

	/* Start from the highest bitrate the peer supports */
	for (i = sband->n_bitrates - 1; i >= 0; i--) {
		if (sta->supp_rates[sband->band] & (1 << i)) {
			rs_sta->last_txrate_idx = i;
			break;
		}
	}

	priv->_3945.sta_supp_rates = sta->supp_rates[sband->band];
	/* For 5 GHz band it start at IWL_FIRST_OFDM_RATE */
	if (sband->band == IEEE80211_BAND_5GHZ) {
		rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
		priv->_3945.sta_supp_rates = priv->_3945.sta_supp_rates <<
						IWL_FIRST_OFDM_RATE;
	}

out:
	/* Station add is complete as far as rate scaling is concerned */
	priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;

	IWL_DEBUG_INFO(priv, "leave\n");
}
395
/* Per-device rate-control context: we just reuse the driver's priv. */
static void *iwl3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
{
	return hw->priv;
}
400
401/* rate scale requires free function to be implemented */
/*
 * Nothing to release: iwl3945_rs_alloc() returned hw->priv, which is
 * owned by the driver, not by the rate-control core.  mac80211 still
 * requires the callback to exist.
 */
static void iwl3945_rs_free(void *priv)
{
}
406
407static void *iwl3945_rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
408{
409 struct iwl3945_rs_sta *rs_sta;
410 struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
411 struct iwl_priv *priv __maybe_unused = iwl_priv;
412
413 IWL_DEBUG_RATE(priv, "enter\n");
414
415 rs_sta = &psta->rs_sta;
416
417 spin_lock_init(&rs_sta->lock);
418 init_timer(&rs_sta->rate_scale_flush);
419
420 IWL_DEBUG_RATE(priv, "leave\n");
421
422 return rs_sta;
423}
424
/* Tear down per-station rate-control state: only the flush timer needs
 * stopping; the state itself lives in sta->drv_priv and is freed by
 * mac80211. */
static void iwl3945_rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
			 void *priv_sta)
{
	struct iwl3945_rs_sta *rs_sta = priv_sta;

	/*
	 * Be careful not to use any members of iwl3945_rs_sta (like trying
	 * to use iwl_priv to print out debugging) since it may not be fully
	 * initialized at this point.
	 */
	del_timer_sync(&rs_sta->rate_scale_flush);
}
437
438
439/**
440 * iwl3945_rs_tx_status - Update rate control values based on Tx results
441 *
442 * NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by
443 * the hardware for each rate.
444 */
445static void iwl3945_rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband,
446 struct ieee80211_sta *sta, void *priv_sta,
447 struct sk_buff *skb)
448{
449 s8 retries = 0, current_count;
450 int scale_rate_index, first_index, last_index;
451 unsigned long flags;
452 struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
453 struct iwl3945_rs_sta *rs_sta = priv_sta;
454 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
455
456 IWL_DEBUG_RATE(priv, "enter\n");
457
458 retries = info->status.rates[0].count;
459 /* Sanity Check for retries */
460 if (retries > IWL_RATE_RETRY_TH)
461 retries = IWL_RATE_RETRY_TH;
462
463 first_index = sband->bitrates[info->status.rates[0].idx].hw_value;
464 if ((first_index < 0) || (first_index >= IWL_RATE_COUNT_3945)) {
465 IWL_DEBUG_RATE(priv, "leave: Rate out of bounds: %d\n", first_index);
466 return;
467 }
468
469 if (!priv_sta) {
470 IWL_DEBUG_RATE(priv, "leave: No STA priv data to update!\n");
471 return;
472 }
473
474 /* Treat uninitialized rate scaling data same as non-existing. */
475 if (!rs_sta->priv) {
476 IWL_DEBUG_RATE(priv, "leave: STA priv data uninitialized!\n");
477 return;
478 }
479
480
481 rs_sta->tx_packets++;
482
483 scale_rate_index = first_index;
484 last_index = first_index;
485
486 /*
487 * Update the window for each rate. We determine which rates
488 * were Tx'd based on the total number of retries vs. the number
489 * of retries configured for each rate -- currently set to the
490 * priv value 'retry_rate' vs. rate specific
491 *
492 * On exit from this while loop last_index indicates the rate
493 * at which the frame was finally transmitted (or failed if no
494 * ACK)
495 */
496 while (retries > 1) {
497 if ((retries - 1) < priv->retry_rate) {
498 current_count = (retries - 1);
499 last_index = scale_rate_index;
500 } else {
501 current_count = priv->retry_rate;
502 last_index = iwl3945_rs_next_rate(priv,
503 scale_rate_index);
504 }
505
506 /* Update this rate accounting for as many retries
507 * as was used for it (per current_count) */
508 iwl3945_collect_tx_data(rs_sta,
509 &rs_sta->win[scale_rate_index],
510 0, current_count, scale_rate_index);
511 IWL_DEBUG_RATE(priv, "Update rate %d for %d retries.\n",
512 scale_rate_index, current_count);
513
514 retries -= current_count;
515
516 scale_rate_index = last_index;
517 }
518
519
520 /* Update the last index window with success/failure based on ACK */
521 IWL_DEBUG_RATE(priv, "Update rate %d with %s.\n",
522 last_index,
523 (info->flags & IEEE80211_TX_STAT_ACK) ?
524 "success" : "failure");
525 iwl3945_collect_tx_data(rs_sta,
526 &rs_sta->win[last_index],
527 info->flags & IEEE80211_TX_STAT_ACK, 1, last_index);
528
529 /* We updated the rate scale window -- if its been more than
530 * flush_time since the last run, schedule the flush
531 * again */
532 spin_lock_irqsave(&rs_sta->lock, flags);
533
534 if (!rs_sta->flush_pending &&
535 time_after(jiffies, rs_sta->last_flush +
536 rs_sta->flush_time)) {
537
538 rs_sta->last_partial_flush = jiffies;
539 rs_sta->flush_pending = 1;
540 mod_timer(&rs_sta->rate_scale_flush,
541 jiffies + rs_sta->flush_time);
542 }
543
544 spin_unlock_irqrestore(&rs_sta->lock, flags);
545
546 IWL_DEBUG_RATE(priv, "leave\n");
547}
548
549static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
550 u8 index, u16 rate_mask, enum ieee80211_band band)
551{
552 u8 high = IWL_RATE_INVALID;
553 u8 low = IWL_RATE_INVALID;
554 struct iwl_priv *priv __maybe_unused = rs_sta->priv;
555
556 /* 802.11A walks to the next literal adjacent rate in
557 * the rate table */
558 if (unlikely(band == IEEE80211_BAND_5GHZ)) {
559 int i;
560 u32 mask;
561
562 /* Find the previous rate that is in the rate mask */
563 i = index - 1;
564 for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
565 if (rate_mask & mask) {
566 low = i;
567 break;
568 }
569 }
570
571 /* Find the next rate that is in the rate mask */
572 i = index + 1;
573 for (mask = (1 << i); i < IWL_RATE_COUNT_3945;
574 i++, mask <<= 1) {
575 if (rate_mask & mask) {
576 high = i;
577 break;
578 }
579 }
580
581 return (high << 8) | low;
582 }
583
584 low = index;
585 while (low != IWL_RATE_INVALID) {
586 if (rs_sta->tgg)
587 low = iwl3945_rates[low].prev_rs_tgg;
588 else
589 low = iwl3945_rates[low].prev_rs;
590 if (low == IWL_RATE_INVALID)
591 break;
592 if (rate_mask & (1 << low))
593 break;
594 IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
595 }
596
597 high = index;
598 while (high != IWL_RATE_INVALID) {
599 if (rs_sta->tgg)
600 high = iwl3945_rates[high].next_rs_tgg;
601 else
602 high = iwl3945_rates[high].next_rs;
603 if (high == IWL_RATE_INVALID)
604 break;
605 if (rate_mask & (1 << high))
606 break;
607 IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
608 }
609
610 return (high << 8) | low;
611}
612
613/**
614 * iwl3945_rs_get_rate - find the rate for the requested packet
615 *
616 * Returns the ieee80211_rate structure allocated by the driver.
617 *
618 * The rate control algorithm has no internal mapping between hw_mode's
619 * rate ordering and the rate ordering used by the rate control algorithm.
620 *
621 * The rate control algorithm uses a single table of rates that goes across
622 * the entire A/B/G spectrum vs. being limited to just one particular
623 * hw_mode.
624 *
625 * As such, we can't convert the index obtained below into the hw_mode's
626 * rate table and must reference the driver allocated rate table
627 *
628 */
static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
			void *priv_sta, struct ieee80211_tx_rate_control *txrc)
{
	struct ieee80211_supported_band *sband = txrc->sband;
	struct sk_buff *skb = txrc->skb;
	u8 low = IWL_RATE_INVALID;
	u8 high = IWL_RATE_INVALID;
	u16 high_low;
	int index;
	struct iwl3945_rs_sta *rs_sta = priv_sta;
	struct iwl3945_rate_scale_data *window = NULL;
	int current_tpt = IWL_INVALID_VALUE;
	int low_tpt = IWL_INVALID_VALUE;
	int high_tpt = IWL_INVALID_VALUE;
	u32 fail_count;
	s8 scale_action = 0;
	unsigned long flags;
	u16 rate_mask;
	s8 max_rate_idx = -1;
	struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	IWL_DEBUG_RATE(priv, "enter\n");

	/* Treat uninitialized rate scaling data same as non-existing. */
	if (rs_sta && !rs_sta->priv) {
		IWL_DEBUG_RATE(priv, "Rate scaling information not initialized yet.\n");
		priv_sta = NULL;
	}

	/* Let mac80211 pick a basic rate where appropriate (management
	 * frames, no usable STA state); returns true if it did so. */
	if (rate_control_send_low(sta, priv_sta, txrc))
		return;

	rate_mask = sta->supp_rates[sband->band];

	/* get user max rate if set */
	max_rate_idx = txrc->max_rate_idx;
	if ((sband->band == IEEE80211_BAND_5GHZ) && (max_rate_idx != -1))
		max_rate_idx += IWL_FIRST_OFDM_RATE;
	if ((max_rate_idx < 0) || (max_rate_idx >= IWL_RATE_COUNT))
		max_rate_idx = -1;

	/* Start from the rate we last transmitted at, clamped to table */
	index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT_3945 - 1);

	/* 5 GHz rate indices begin at IWL_FIRST_OFDM_RATE in our table */
	if (sband->band == IEEE80211_BAND_5GHZ)
		rate_mask = rate_mask << IWL_FIRST_OFDM_RATE;

	spin_lock_irqsave(&rs_sta->lock, flags);

	/* for recent assoc, choose best rate regarding
	 * to rssi value
	 */
	if (rs_sta->start_rate != IWL_RATE_INVALID) {
		if (rs_sta->start_rate < index &&
		   (rate_mask & (1 << rs_sta->start_rate)))
			index = rs_sta->start_rate;
		/* one-shot: consume the RSSI-derived start rate */
		rs_sta->start_rate = IWL_RATE_INVALID;
	}

	/* force user max rate if set by user */
	if ((max_rate_idx != -1) && (max_rate_idx < index)) {
		if (rate_mask & (1 << max_rate_idx))
			index = max_rate_idx;
	}

	window = &(rs_sta->win[index]);

	fail_count = window->counter - window->success_counter;

	/* Not enough history on this rate yet to make a scaling decision */
	if (((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
	     (window->success_counter < IWL_RATE_MIN_SUCCESS_TH))) {
		spin_unlock_irqrestore(&rs_sta->lock, flags);

		IWL_DEBUG_RATE(priv, "Invalid average_tpt on rate %d: "
			       "counter: %d, success_counter: %d, "
			       "expected_tpt is %sNULL\n",
			       index,
			       window->counter,
			       window->success_counter,
			       rs_sta->expected_tpt ? "not " : "");

	   /* Can't calculate this yet; not enough history */
		window->average_tpt = IWL_INVALID_VALUE;
		goto out;

	}

	current_tpt = window->average_tpt;

	high_low = iwl3945_get_adjacent_rate(rs_sta, index, rate_mask,
					     sband->band);
	low = high_low & 0xff;
	high = (high_low >> 8) & 0xff;

	/* If user set max rate, dont allow higher than user constrain */
	if ((max_rate_idx != -1) && (max_rate_idx < high))
		high = IWL_RATE_INVALID;

	/* Collect Measured throughputs of adjacent rates */
	if (low != IWL_RATE_INVALID)
		low_tpt = rs_sta->win[low].average_tpt;

	if (high != IWL_RATE_INVALID)
		high_tpt = rs_sta->win[high].average_tpt;

	spin_unlock_irqrestore(&rs_sta->lock, flags);

	/* scale_action: -1 = step down a rate, 0 = stay, 1 = step up */
	scale_action = 0;

	/* Low success ratio , need to drop the rate */
	if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) {
		IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n");
		scale_action = -1;
	/* No throughput measured yet for adjacent rates,
	 * try increase */
	} else if ((low_tpt == IWL_INVALID_VALUE) &&
		   (high_tpt == IWL_INVALID_VALUE)) {

		if (high != IWL_RATE_INVALID && window->success_ratio >= IWL_RATE_INCREASE_TH)
			scale_action = 1;
		else if (low != IWL_RATE_INVALID)
			scale_action = 0;

	/* Both adjacent throughputs are measured, but neither one has
	 * better throughput; we're using the best rate, don't change
	 * it! */
	} else if ((low_tpt != IWL_INVALID_VALUE) &&
		 (high_tpt != IWL_INVALID_VALUE) &&
		 (low_tpt < current_tpt) && (high_tpt < current_tpt)) {

		IWL_DEBUG_RATE(priv, "No action -- low [%d] & high [%d] < "
			       "current_tpt [%d]\n",
			       low_tpt, high_tpt, current_tpt);
		scale_action = 0;

	/* At least one of the rates has better throughput */
	} else {
		if (high_tpt != IWL_INVALID_VALUE) {

			/* High rate has better throughput, Increase
			 * rate */
			if (high_tpt > current_tpt &&
				window->success_ratio >= IWL_RATE_INCREASE_TH)
				scale_action = 1;
			else {
				IWL_DEBUG_RATE(priv,
				    "decrease rate because of high tpt\n");
				scale_action = 0;
			}
		} else if (low_tpt != IWL_INVALID_VALUE) {
			if (low_tpt > current_tpt) {
				IWL_DEBUG_RATE(priv,
				    "decrease rate because of low tpt\n");
				scale_action = -1;
			} else if (window->success_ratio >= IWL_RATE_INCREASE_TH) {
				/* Lower rate has better
				 * throughput,decrease rate */
				scale_action = 1;
			}
		}
	}

	/* Sanity check; asked for decrease, but success rate or throughput
	 * has been good at old rate. Don't change it. */
	if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
		    ((window->success_ratio > IWL_RATE_HIGH_TH) ||
		     (current_tpt > (100 * rs_sta->expected_tpt[low]))))
		scale_action = 0;

	switch (scale_action) {
	case -1:

		/* Decrese rate */
		if (low != IWL_RATE_INVALID)
			index = low;
		break;

	case 1:
		/* Increase rate */
		if (high != IWL_RATE_INVALID)
			index = high;

		break;

	case 0:
	default:
		/* No change */
		break;
	}

	IWL_DEBUG_RATE(priv, "Selected %d (action %d) - low %d high %d\n",
		       index, scale_action, low, high);

 out:

	/* Record the choice and translate it to mac80211's per-band index
	 * (5 GHz indices are offset by IWL_FIRST_OFDM_RATE in our table) */
	if (sband->band == IEEE80211_BAND_5GHZ) {
		if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE))
			index = IWL_FIRST_OFDM_RATE;
		rs_sta->last_txrate_idx = index;
		info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE;
	} else {
		rs_sta->last_txrate_idx = index;
		info->control.rates[0].idx = rs_sta->last_txrate_idx;
	}

	IWL_DEBUG_RATE(priv, "leave: %d\n", index);
}
836
837#ifdef CONFIG_MAC80211_DEBUGFS
/* Generic debugfs open: stash the inode's private pointer (the rs_sta)
 * on the file so read() can find it. */
static int iwl3945_open_file_generic(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
843
844static ssize_t iwl3945_sta_dbgfs_stats_table_read(struct file *file,
845 char __user *user_buf,
846 size_t count, loff_t *ppos)
847{
848 char *buff;
849 int desc = 0;
850 int j;
851 ssize_t ret;
852 struct iwl3945_rs_sta *lq_sta = file->private_data;
853
854 buff = kmalloc(1024, GFP_KERNEL);
855 if (!buff)
856 return -ENOMEM;
857
858 desc += sprintf(buff + desc, "tx packets=%d last rate index=%d\n"
859 "rate=0x%X flush time %d\n",
860 lq_sta->tx_packets,
861 lq_sta->last_txrate_idx,
862 lq_sta->start_rate, jiffies_to_msecs(lq_sta->flush_time));
863 for (j = 0; j < IWL_RATE_COUNT_3945; j++) {
864 desc += sprintf(buff+desc,
865 "counter=%d success=%d %%=%d\n",
866 lq_sta->win[j].counter,
867 lq_sta->win[j].success_counter,
868 lq_sta->win[j].success_ratio);
869 }
870 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
871 kfree(buff);
872 return ret;
873}
874
/* file_operations for the per-station "rate_stats_table" debugfs entry */
static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
	.read = iwl3945_sta_dbgfs_stats_table_read,
	.open = iwl3945_open_file_generic,
	.llseek = default_llseek,
};
880
/* rate_control_ops hook: create the per-station debugfs stats file under
 * the directory mac80211 provides. */
static void iwl3945_add_debugfs(void *priv, void *priv_sta,
				struct dentry *dir)
{
	struct iwl3945_rs_sta *lq_sta = priv_sta;

	lq_sta->rs_sta_dbgfs_stats_table_file =
		debugfs_create_file("rate_stats_table", 0600, dir,
				    lq_sta, &rs_sta_dbgfs_stats_table_ops);

}
891
/* rate_control_ops hook: remove the file created in iwl3945_add_debugfs() */
static void iwl3945_remove_debugfs(void *priv, void *priv_sta)
{
	struct iwl3945_rs_sta *lq_sta = priv_sta;
	debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
}
897#endif
898
899/*
900 * Initialization of rate scaling information is done by driver after
901 * the station is added. Since mac80211 calls this function before a
902 * station is added we ignore it.
903 */
static void iwl3945_rs_rate_init_stub(void *priv_r,
				struct ieee80211_supported_band *sband,
				struct ieee80211_sta *sta, void *priv_sta)
{
	/* Intentionally empty -- real init happens in iwl3945_rs_rate_init()
	 * once the driver has finished adding the station. */
}
909
/* Callback table registered with mac80211's rate-control framework */
static struct rate_control_ops rs_ops = {
	.module = NULL,
	.name = RS_NAME,
	.tx_status = iwl3945_rs_tx_status,
	.get_rate = iwl3945_rs_get_rate,
	.rate_init = iwl3945_rs_rate_init_stub,
	.alloc = iwl3945_rs_alloc,
	.free = iwl3945_rs_free,
	.alloc_sta = iwl3945_rs_alloc_sta,
	.free_sta = iwl3945_rs_free_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
	.add_sta_debugfs = iwl3945_add_debugfs,
	.remove_sta_debugfs = iwl3945_remove_debugfs,
#endif

};
/* Finish rate-scale setup for a newly added station: pick the expected-
 * throughput table for the current band and derive a starting rate from
 * the last observed RSSI. */
void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
{
	struct iwl_priv *priv = hw->priv;
	s32 rssi = 0;
	unsigned long flags;
	struct iwl3945_rs_sta *rs_sta;
	struct ieee80211_sta *sta;
	struct iwl3945_sta_priv *psta;

	IWL_DEBUG_RATE(priv, "enter\n");

	/* RCU protects the mac80211 station lookup below */
	rcu_read_lock();

	sta = ieee80211_find_sta(priv->contexts[IWL_RXON_CTX_BSS].vif,
				 priv->stations[sta_id].sta.sta.addr);
	if (!sta) {
		IWL_DEBUG_RATE(priv, "Unable to find station to initialize rate scaling.\n");
		rcu_read_unlock();
		return;
	}

	psta = (void *) sta->drv_priv;
	rs_sta = &psta->rs_sta;

	spin_lock_irqsave(&rs_sta->lock, flags);

	/* tgg = 802.11g protection mode; selects the protected tpt table */
	rs_sta->tgg = 0;
	switch (priv->band) {
	case IEEE80211_BAND_2GHZ:
		/* TODO: this always does G, not a regression */
		if (priv->contexts[IWL_RXON_CTX_BSS].active.flags &
						RXON_FLG_TGG_PROTECT_MSK) {
			rs_sta->tgg = 1;
			rs_sta->expected_tpt = iwl3945_expected_tpt_g_prot;
		} else
			rs_sta->expected_tpt = iwl3945_expected_tpt_g;
		break;

	case IEEE80211_BAND_5GHZ:
		rs_sta->expected_tpt = iwl3945_expected_tpt_a;
		break;
	case IEEE80211_NUM_BANDS:
		BUG();
		break;
	}

	spin_unlock_irqrestore(&rs_sta->lock, flags);

	/* Fall back to a conservative floor if no RSSI has been seen yet */
	rssi = priv->_3945.last_rx_rssi;
	if (rssi == 0)
		rssi = IWL_MIN_RSSI_VAL;

	IWL_DEBUG_RATE(priv, "Network RSSI: %d\n", rssi);

	rs_sta->start_rate = iwl3945_get_rate_index_by_rssi(rssi, priv->band);

	IWL_DEBUG_RATE(priv, "leave: rssi %d assign rate index: "
		       "%d (plcp 0x%x)\n", rssi, rs_sta->start_rate,
		       iwl3945_rates[rs_sta->start_rate].plcp);
	rcu_read_unlock();
}
987
/* Register this algorithm with mac80211; called at module init. */
int iwl3945_rate_control_register(void)
{
	return ieee80211_rate_control_register(&rs_ops);
}
992
/* Unregister the algorithm; called at module exit. */
void iwl3945_rate_control_unregister(void)
{
	ieee80211_rate_control_unregister(&rs_ops);
}
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c
deleted file mode 100644
index f7c0a7438476..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945.c
+++ /dev/null
@@ -1,2741 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/slab.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33#include <linux/delay.h>
34#include <linux/sched.h>
35#include <linux/skbuff.h>
36#include <linux/netdevice.h>
37#include <linux/firmware.h>
38#include <linux/etherdevice.h>
39#include <asm/unaligned.h>
40#include <net/mac80211.h>
41
42#include "iwl-fh.h"
43#include "iwl-3945-fh.h"
44#include "iwl-commands.h"
45#include "iwl-sta.h"
46#include "iwl-3945.h"
47#include "iwl-eeprom.h"
48#include "iwl-core.h"
49#include "iwl-helpers.h"
50#include "iwl-led.h"
51#include "iwl-3945-led.h"
52#include "iwl-3945-debugfs.h"
53
#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
	[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
				    IWL_RATE_##r##M_IEEE, \
				    IWL_RATE_##ip##M_INDEX, \
				    IWL_RATE_##in##M_INDEX, \
				    IWL_RATE_##rp##M_INDEX, \
				    IWL_RATE_##rn##M_INDEX, \
				    IWL_RATE_##pp##M_INDEX, \
				    IWL_RATE_##np##M_INDEX, \
				    IWL_RATE_##r##M_INDEX_TABLE, \
				    IWL_RATE_##ip##M_INDEX_TABLE }

/*
 * Parameter order (matching IWL_DECLARE_RATE_INFO above):
 * rate, prev/next ieee rate, prev/next rate-scale rate,
 * prev/next tgg (11g-protection) rate
 *
 * If there isn't a valid next or previous rate then INV is used which
 * maps to IWL_RATE_INVALID
 *
 */
const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945] = {
	IWL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2),    /*  1mbps */
	IWL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5),          /*  2mbps */
	IWL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11),        /*5.5mbps */
	IWL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18),      /* 11mbps */
	IWL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11),        /*  6mbps */
	IWL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11),       /*  9mbps */
	IWL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18),   /* 12mbps */
	IWL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24),   /* 18mbps */
	IWL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36),   /* 24mbps */
	IWL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48),   /* 36mbps */
	IWL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54),   /* 48mbps */
	IWL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),/* 54mbps */
};
88
89static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index)
90{
91 u8 rate = iwl3945_rates[rate_index].prev_ieee;
92
93 if (rate == IWL_RATE_INVALID)
94 rate = rate_index;
95 return rate;
96}
97
98/* 1 = enable the iwl3945_disable_events() function */
99#define IWL_EVT_DISABLE (0)
100#define IWL_EVT_DISABLE_SIZE (1532/32)
101
102/**
103 * iwl3945_disable_events - Disable selected events in uCode event log
104 *
105 * Disable an event by writing "1"s into "disable"
106 * bitmap in SRAM. Bit position corresponds to Event # (id/type).
107 * Default values of 0 enable uCode events to be logged.
108 * Use for only special debugging. This function is just a placeholder as-is,
109 * you'll need to provide the special bits! ...
110 * ... and set IWL_EVT_DISABLE to 1. */
111void iwl3945_disable_events(struct iwl_priv *priv)
112{
113 int i;
114 u32 base; /* SRAM address of event log header */
115 u32 disable_ptr; /* SRAM address of event-disable bitmap array */
116 u32 array_size; /* # of u32 entries in array */
117 static const u32 evt_disable[IWL_EVT_DISABLE_SIZE] = {
118 0x00000000, /* 31 - 0 Event id numbers */
119 0x00000000, /* 63 - 32 */
120 0x00000000, /* 95 - 64 */
121 0x00000000, /* 127 - 96 */
122 0x00000000, /* 159 - 128 */
123 0x00000000, /* 191 - 160 */
124 0x00000000, /* 223 - 192 */
125 0x00000000, /* 255 - 224 */
126 0x00000000, /* 287 - 256 */
127 0x00000000, /* 319 - 288 */
128 0x00000000, /* 351 - 320 */
129 0x00000000, /* 383 - 352 */
130 0x00000000, /* 415 - 384 */
131 0x00000000, /* 447 - 416 */
132 0x00000000, /* 479 - 448 */
133 0x00000000, /* 511 - 480 */
134 0x00000000, /* 543 - 512 */
135 0x00000000, /* 575 - 544 */
136 0x00000000, /* 607 - 576 */
137 0x00000000, /* 639 - 608 */
138 0x00000000, /* 671 - 640 */
139 0x00000000, /* 703 - 672 */
140 0x00000000, /* 735 - 704 */
141 0x00000000, /* 767 - 736 */
142 0x00000000, /* 799 - 768 */
143 0x00000000, /* 831 - 800 */
144 0x00000000, /* 863 - 832 */
145 0x00000000, /* 895 - 864 */
146 0x00000000, /* 927 - 896 */
147 0x00000000, /* 959 - 928 */
148 0x00000000, /* 991 - 960 */
149 0x00000000, /* 1023 - 992 */
150 0x00000000, /* 1055 - 1024 */
151 0x00000000, /* 1087 - 1056 */
152 0x00000000, /* 1119 - 1088 */
153 0x00000000, /* 1151 - 1120 */
154 0x00000000, /* 1183 - 1152 */
155 0x00000000, /* 1215 - 1184 */
156 0x00000000, /* 1247 - 1216 */
157 0x00000000, /* 1279 - 1248 */
158 0x00000000, /* 1311 - 1280 */
159 0x00000000, /* 1343 - 1312 */
160 0x00000000, /* 1375 - 1344 */
161 0x00000000, /* 1407 - 1376 */
162 0x00000000, /* 1439 - 1408 */
163 0x00000000, /* 1471 - 1440 */
164 0x00000000, /* 1503 - 1472 */
165 };
166
167 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
168 if (!iwl3945_hw_valid_rtc_data_addr(base)) {
169 IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
170 return;
171 }
172
173 disable_ptr = iwl_legacy_read_targ_mem(priv, base + (4 * sizeof(u32)));
174 array_size = iwl_legacy_read_targ_mem(priv, base + (5 * sizeof(u32)));
175
176 if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) {
177 IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n",
178 disable_ptr);
179 for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++)
180 iwl_legacy_write_targ_mem(priv,
181 disable_ptr + (i * sizeof(u32)),
182 evt_disable[i]);
183
184 } else {
185 IWL_DEBUG_INFO(priv, "Selected uCode log events may be disabled\n");
186 IWL_DEBUG_INFO(priv, " by writing \"1\"s into disable bitmap\n");
187 IWL_DEBUG_INFO(priv, " in SRAM at 0x%x, size %d u32s\n",
188 disable_ptr, array_size);
189 }
190
191}
192
193static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
194{
195 int idx;
196
197 for (idx = 0; idx < IWL_RATE_COUNT_3945; idx++)
198 if (iwl3945_rates[idx].plcp == plcp)
199 return idx;
200 return -1;
201}
202
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/* Expands to a switch case returning the failure name as a string */
#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x

/* Human-readable name for a Tx status code, for debug logging only. */
static const char *iwl3945_get_tx_fail_reason(u32 status)
{
	switch (status & TX_STATUS_MSK) {
	case TX_3945_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_ENTRY(SHORT_LIMIT);
		TX_STATUS_ENTRY(LONG_LIMIT);
		TX_STATUS_ENTRY(FIFO_UNDERRUN);
		TX_STATUS_ENTRY(MGMNT_ABORT);
		TX_STATUS_ENTRY(NEXT_FRAG);
		TX_STATUS_ENTRY(LIFE_EXPIRE);
		TX_STATUS_ENTRY(DEST_PS);
		TX_STATUS_ENTRY(ABORTED);
		TX_STATUS_ENTRY(BT_RETRY);
		TX_STATUS_ENTRY(STA_INVALID);
		TX_STATUS_ENTRY(FRAG_DROPPED);
		TX_STATUS_ENTRY(TID_DISABLE);
		TX_STATUS_ENTRY(FRAME_FLUSHED);
		TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
		TX_STATUS_ENTRY(TX_LOCKED);
		TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";
}
#else
/* Non-debug build: compile away to an empty string */
static inline const char *iwl3945_get_tx_fail_reason(u32 status)
{
	return "";
}
#endif
237
238/*
239 * get ieee prev rate from rate scale table.
240 * for A and B mode we need to overright prev
241 * value
242 */
243int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
244{
245 int next_rate = iwl3945_get_prev_ieee_rate(rate);
246
247 switch (priv->band) {
248 case IEEE80211_BAND_5GHZ:
249 if (rate == IWL_RATE_12M_INDEX)
250 next_rate = IWL_RATE_9M_INDEX;
251 else if (rate == IWL_RATE_6M_INDEX)
252 next_rate = IWL_RATE_6M_INDEX;
253 break;
254 case IEEE80211_BAND_2GHZ:
255 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
256 iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
257 if (rate == IWL_RATE_11M_INDEX)
258 next_rate = IWL_RATE_5M_INDEX;
259 }
260 break;
261
262 default:
263 break;
264 }
265
266 return next_rate;
267}
268
269
270/**
271 * iwl3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
272 *
273 * When FW advances 'R' index, all entries between old and new 'R' index
274 * need to be reclaimed. As result, some free space forms. If there is
275 * enough free space (> low mark), wake the stack that feeds us.
276 */
static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
				     int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;

	/* The command queue is reclaimed elsewhere; must never come here */
	BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM);

	/* Walk read_ptr forward (with wrap) up to, and including, 'index',
	 * handing each completed skb's status back to mac80211 and freeing
	 * its TFD. */
	for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
		q->read_ptr != index;
		q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];
		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
		tx_info->skb = NULL;
		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
	}

	/* Wake the stack if the reclaim opened up enough space */
	if (iwl_legacy_queue_space(q) > q->low_mark && (txq_id >= 0) &&
			(txq_id != IWL39_CMD_QUEUE_NUM) &&
			priv->mac80211_registered)
		iwl_legacy_wake_queue(priv, txq);
}
301
302/**
303 * iwl3945_rx_reply_tx - Handle Tx response
304 */
static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	/* Queue and index of the frame this response refers to are packed
	 * into the command sequence number */
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct ieee80211_tx_info *info;
	struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	u32 status = le32_to_cpu(tx_resp->status);
	int rate_idx;
	int fail;

	if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
			"is out of range [0-%d] %d %d\n", txq_id,
			index, txq->q.n_bd, txq->q.write_ptr,
			txq->q.read_ptr);
		return;
	}

	txq->time_stamp = jiffies;
	info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
	ieee80211_tx_info_clear_status(info);

	/* Fill the MRR chain with some info about on-chip retransmissions */
	/* NOTE(review): iwl3945_hwrate_to_plcp_idx() returns -1 for an
	 * unknown PLCP, which would be stored as idx below unchecked --
	 * presumably the uCode only reports known PLCP values; verify. */
	rate_idx = iwl3945_hwrate_to_plcp_idx(tx_resp->rate);
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx -= IWL_FIRST_OFDM_RATE;

	fail = tx_resp->failure_frame;

	info->status.rates[0].idx = rate_idx;
	info->status.rates[0].count = fail + 1; /* add final attempt */

	/* tx_status->rts_retry_count = tx_resp->failure_rts; */
	info->flags |= ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ?
				IEEE80211_TX_STAT_ACK : 0;

	IWL_DEBUG_TX(priv, "Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n",
			txq_id, iwl3945_get_tx_fail_reason(status), status,
			tx_resp->rate, tx_resp->failure_frame);

	IWL_DEBUG_TX_REPLY(priv, "Tx queue reclaim %d\n", index);
	iwl3945_tx_queue_reclaim(priv, txq_id, index);

	if (status & TX_ABORT_REQUIRED_MSK)
		IWL_ERR(priv, "TODO:  Implement Tx ABORT REQUIRED!!!\n");
}
355
356
357
358/*****************************************************************************
359 *
360 * Intel PRO/Wireless 3945ABG/BG Network Connection
361 *
362 * RX handler implementations
363 *
364 *****************************************************************************/
365#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
/*
 * iwl3945_accumulative_statistics - fold a statistics notification into the
 * debugfs mirrors (accum_statistics / delta_statistics / max_delta).
 *
 * @stats points at the raw notification payload; it and the stored structs
 * are walked as flat arrays of 32-bit words. Only counters that increased
 * since the last notification contribute, so uCode counter resets do not
 * produce huge bogus deltas.
 */
static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
					    __le32 *stats)
{
	int i;
	__le32 *prev_stats;
	u32 *accum_stats;
	u32 *delta, *max_delta;

	/* Overlay the stored copy and the three accumulators word by word */
	prev_stats = (__le32 *)&priv->_3945.statistics;
	accum_stats = (u32 *)&priv->_3945.accum_statistics;
	delta = (u32 *)&priv->_3945.delta_statistics;
	max_delta = (u32 *)&priv->_3945.max_delta;

	for (i = sizeof(__le32); i < sizeof(struct iwl3945_notif_statistics);
	     i += sizeof(__le32), stats++, prev_stats++, delta++,
	     max_delta++, accum_stats++) {
		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
			*delta = (le32_to_cpu(*stats) -
				le32_to_cpu(*prev_stats));
			*accum_stats += *delta;
			if (*delta > *max_delta)
				*max_delta = *delta;
		}
	}

	/* reset accumulative statistics for "no-counter" type statistics */
	priv->_3945.accum_statistics.general.temperature =
		priv->_3945.statistics.general.temperature;
	priv->_3945.accum_statistics.general.ttl_timestamp =
		priv->_3945.statistics.general.ttl_timestamp;
}
397#endif
398
399void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
400 struct iwl_rx_mem_buffer *rxb)
401{
402 struct iwl_rx_packet *pkt = rxb_addr(rxb);
403
404 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
405 (int)sizeof(struct iwl3945_notif_statistics),
406 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
407#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
408 iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw);
409#endif
410
411 memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics));
412}
413
414void iwl3945_reply_statistics(struct iwl_priv *priv,
415 struct iwl_rx_mem_buffer *rxb)
416{
417 struct iwl_rx_packet *pkt = rxb_addr(rxb);
418 __le32 *flag = (__le32 *)&pkt->u.raw;
419
420 if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) {
421#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
422 memset(&priv->_3945.accum_statistics, 0,
423 sizeof(struct iwl3945_notif_statistics));
424 memset(&priv->_3945.delta_statistics, 0,
425 sizeof(struct iwl3945_notif_statistics));
426 memset(&priv->_3945.max_delta, 0,
427 sizeof(struct iwl3945_notif_statistics));
428#endif
429 IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
430 }
431 iwl3945_hw_rx_statistics(priv, rxb);
432}
433
434
435/******************************************************************************
436 *
437 * Misc. internal state and helper functions
438 *
439 ******************************************************************************/
440
441/* This is necessary only for a number of statistics, see the caller. */
static int iwl3945_is_network_packet(struct iwl_priv *priv,
				struct ieee80211_hdr *header)
{
	/* Filter incoming packets to determine if they are targeted toward
	 * this network, discarding packets coming from ourselves */
	switch (priv->iw_mode) {
	case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */
		/* packets to our IBSS update information */
		return !compare_ether_addr(header->addr3, priv->bssid);
	case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
		/* packets from our AP (BSSID is in addr2) update information */
		return !compare_ether_addr(header->addr2, priv->bssid);
	default:
		/* other interface types: treat everything as on-network */
		return 1;
	}
}
458
/*
 * iwl3945_pass_packet_to_mac80211 - hand a received frame to the stack
 *
 * Validates the length, optionally sets decryption status flags, attaches
 * the rx page as an skb fragment (zero-copy) and passes it to mac80211,
 * which then owns the page.
 */
static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
				   struct iwl_rx_mem_buffer *rxb,
				   struct ieee80211_rx_status *stats)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
	struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
	struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
	u16 len = le16_to_cpu(rx_hdr->len);
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;

	/* Sanity check: the reported frame must fit within the rx page */
	if (unlikely(len + IWL39_RX_FRAME_SIZE >
		     PAGE_SIZE << priv->hw_params.rx_page_order)) {
		IWL_DEBUG_DROP(priv, "Corruption detected!\n");
		return;
	}

	/* We only process data packets if the interface is open */
	if (unlikely(!priv->is_open)) {
		IWL_DEBUG_DROP_LIMIT(priv,
			"Dropping packet while interface is not open.\n");
		return;
	}

	/* Small skb; the payload is attached below as a page fragment */
	skb = dev_alloc_skb(128);
	if (!skb) {
		IWL_ERR(priv, "dev_alloc_skb failed\n");
		return;
	}

	if (!iwl3945_mod_params.sw_crypto)
		iwl_legacy_set_decrypted_flag(priv,
				(struct ieee80211_hdr *)rxb_addr(rxb),
				le32_to_cpu(rx_end->status), stats);

	skb_add_rx_frag(skb, 0, rxb->page,
			(void *)rx_hdr->payload - (void *)pkt, len);

	iwl_legacy_update_stats(priv, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(priv->hw, skb);
	/* Page ownership was transferred to the skb; drop our reference */
	priv->alloc_rxb_page--;
	rxb->page = NULL;
}
506
507#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
508
509static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
510 struct iwl_rx_mem_buffer *rxb)
511{
512 struct ieee80211_hdr *header;
513 struct ieee80211_rx_status rx_status;
514 struct iwl_rx_packet *pkt = rxb_addr(rxb);
515 struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
516 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
517 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
518 u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
519 u16 rx_stats_noise_diff __maybe_unused = le16_to_cpu(rx_stats->noise_diff);
520 u8 network_packet;
521
522 rx_status.flag = 0;
523 rx_status.mactime = le64_to_cpu(rx_end->timestamp);
524 rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
525 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
526 rx_status.freq =
527 ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
528 rx_status.band);
529
530 rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate);
531 if (rx_status.band == IEEE80211_BAND_5GHZ)
532 rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
533
534 rx_status.antenna = (le16_to_cpu(rx_hdr->phy_flags) &
535 RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
536
537 /* set the preamble flag if appropriate */
538 if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
539 rx_status.flag |= RX_FLAG_SHORTPRE;
540
541 if ((unlikely(rx_stats->phy_count > 20))) {
542 IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
543 rx_stats->phy_count);
544 return;
545 }
546
547 if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR)
548 || !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
549 IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
550 return;
551 }
552
553
554
555 /* Convert 3945's rssi indicator to dBm */
556 rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET;
557
558 IWL_DEBUG_STATS(priv, "Rssi %d sig_avg %d noise_diff %d\n",
559 rx_status.signal, rx_stats_sig_avg,
560 rx_stats_noise_diff);
561
562 header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
563
564 network_packet = iwl3945_is_network_packet(priv, header);
565
566 IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
567 network_packet ? '*' : ' ',
568 le16_to_cpu(rx_hdr->channel),
569 rx_status.signal, rx_status.signal,
570 rx_status.rate_idx);
571
572 iwl_legacy_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len),
573 header);
574
575 if (network_packet) {
576 priv->_3945.last_beacon_time =
577 le32_to_cpu(rx_end->beacon_timestamp);
578 priv->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
579 priv->_3945.last_rx_rssi = rx_status.signal;
580 }
581
582 iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
583}
584
585int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
586 struct iwl_tx_queue *txq,
587 dma_addr_t addr, u16 len, u8 reset, u8 pad)
588{
589 int count;
590 struct iwl_queue *q;
591 struct iwl3945_tfd *tfd, *tfd_tmp;
592
593 q = &txq->q;
594 tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
595 tfd = &tfd_tmp[q->write_ptr];
596
597 if (reset)
598 memset(tfd, 0, sizeof(*tfd));
599
600 count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
601
602 if ((count >= NUM_TFD_CHUNKS) || (count < 0)) {
603 IWL_ERR(priv, "Error can not send more than %d chunks\n",
604 NUM_TFD_CHUNKS);
605 return -EINVAL;
606 }
607
608 tfd->tbs[count].addr = cpu_to_le32(addr);
609 tfd->tbs[count].len = cpu_to_le32(len);
610
611 count++;
612
613 tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(count) |
614 TFD_CTL_PAD_SET(pad));
615
616 return 0;
617}
618
619/**
620 * iwl3945_hw_txq_free_tfd - Free one TFD, those at index [txq->q.read_ptr]
621 *
622 * Does NOT advance any indexes
623 */
void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	struct iwl3945_tfd *tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
	int index = txq->q.read_ptr;
	struct iwl3945_tfd *tfd = &tfd_tmp[index];
	struct pci_dev *dev = priv->pci_dev;
	int i;
	int counter;

	/* sanity check */
	counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
	if (counter > NUM_TFD_CHUNKS) {
		IWL_ERR(priv, "Too many chunks: %i\n", counter);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd: chunk 0 is the command buffer, whose mapping is
	 * recorded in the queue's meta array rather than in the TFD */
	if (counter)
		pci_unmap_single(dev,
				dma_unmap_addr(&txq->meta[index], mapping),
				dma_unmap_len(&txq->meta[index], len),
				PCI_DMA_TODEVICE);

	/* unmap chunks if any: chunks 1..counter-1 carry their DMA
	 * address/length directly in the TFD entries */

	for (i = 1; i < counter; i++)
		pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
			 le32_to_cpu(tfd->tbs[i].len), PCI_DMA_TODEVICE);

	/* free SKB */
	if (txq->txb) {
		struct sk_buff *skb;

		skb = txq->txb[txq->q.read_ptr].skb;

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->txb[txq->q.read_ptr].skb = NULL;
		}
	}
}
667
668/**
669 * iwl3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD:
670 *
671*/
672void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
673 struct iwl_device_cmd *cmd,
674 struct ieee80211_tx_info *info,
675 struct ieee80211_hdr *hdr,
676 int sta_id, int tx_id)
677{
678 u16 hw_value = ieee80211_get_tx_rate(priv->hw, info)->hw_value;
679 u16 rate_index = min(hw_value & 0xffff, IWL_RATE_COUNT_3945);
680 u16 rate_mask;
681 int rate;
682 u8 rts_retry_limit;
683 u8 data_retry_limit;
684 __le32 tx_flags;
685 __le16 fc = hdr->frame_control;
686 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
687
688 rate = iwl3945_rates[rate_index].plcp;
689 tx_flags = tx_cmd->tx_flags;
690
691 /* We need to figure out how to get the sta->supp_rates while
692 * in this running context */
693 rate_mask = IWL_RATES_MASK_3945;
694
695 /* Set retry limit on DATA packets and Probe Responses*/
696 if (ieee80211_is_probe_resp(fc))
697 data_retry_limit = 3;
698 else
699 data_retry_limit = IWL_DEFAULT_TX_RETRY;
700 tx_cmd->data_retry_limit = data_retry_limit;
701
702 if (tx_id >= IWL39_CMD_QUEUE_NUM)
703 rts_retry_limit = 3;
704 else
705 rts_retry_limit = 7;
706
707 if (data_retry_limit < rts_retry_limit)
708 rts_retry_limit = data_retry_limit;
709 tx_cmd->rts_retry_limit = rts_retry_limit;
710
711 tx_cmd->rate = rate;
712 tx_cmd->tx_flags = tx_flags;
713
714 /* OFDM */
715 tx_cmd->supp_rates[0] =
716 ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF;
717
718 /* CCK */
719 tx_cmd->supp_rates[1] = (rate_mask & 0xF);
720
721 IWL_DEBUG_RATE(priv, "Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
722 "cck/ofdm mask: 0x%x/0x%x\n", sta_id,
723 tx_cmd->rate, le32_to_cpu(tx_cmd->tx_flags),
724 tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]);
725}
726
727static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate)
728{
729 unsigned long flags_spin;
730 struct iwl_station_entry *station;
731
732 if (sta_id == IWL_INVALID_STATION)
733 return IWL_INVALID_STATION;
734
735 spin_lock_irqsave(&priv->sta_lock, flags_spin);
736 station = &priv->stations[sta_id];
737
738 station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
739 station->sta.rate_n_flags = cpu_to_le16(tx_rate);
740 station->sta.mode = STA_CONTROL_MODIFY_MSK;
741 iwl_legacy_send_add_sta(priv, &station->sta, CMD_ASYNC);
742 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
743
744 IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n",
745 sta_id, tx_rate);
746 return sta_id;
747}
748
/*
 * Select V_MAIN as the NIC power source and poll (up to 5000 uS) for the
 * GPIO input to confirm the switch took effect.
 */
static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do

	if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) {
		iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
					APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					~APMG_PS_CTRL_MSK_PWR_SRC);

		iwl_poll_bit(priv, CSR_GPIO_IN,
			     CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
			     CSR_GPIO_IN_BIT_AUX_POWER, 5000);
	}
 */

	iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
			APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			~APMG_PS_CTRL_MSK_PWR_SRC);

	iwl_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
		     CSR_GPIO_IN_BIT_AUX_POWER, 5000);  /* uS */
}
773
/*
 * iwl3945_rx_init - program flow-handler Rx DMA channel 0
 *
 * Points the FH at the Rx buffer-descriptor ring and the status
 * write-back area, zeroes the write pointer, then enables the channel.
 * Always returns 0.
 */
static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	iwl_legacy_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
	iwl_legacy_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0),
					rxq->rb_stts_dma);
	iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
	/* Enable DMA, status write-back, 128-byte max fragment, ring size,
	 * interrupt-to-host destination with an RB threshold of 1 */
	iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0),
		FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
		FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
		FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
		FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 |
		(RX_QUEUE_SIZE_LOG << FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) |
		FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST |
		(1 << FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) |
		FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);

	/* fake read to flush all prev I/O */
	iwl_legacy_read_direct32(priv, FH39_RSSR_CTRL);

	return 0;
}
795
/*
 * iwl3945_tx_reset - put the Tx scheduler in bypass mode and set up the
 * flow-handler Tx side (circular-buffer base and message config).
 * Always returns 0.
 */
static int iwl3945_tx_reset(struct iwl_priv *priv)
{

	/* bypass mode */
	iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0x2);

	/* RA 0 is active */
	iwl_legacy_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01);

	/* all 6 fifo are active */
	iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f);

	iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
	iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
	iwl_legacy_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004);
	iwl_legacy_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005);

	/* Tell the FH where the shared Tx circular buffer lives */
	iwl_legacy_write_direct32(priv, FH39_TSSR_CBB_BASE,
			     priv->_3945.shared_phys);

	iwl_legacy_write_direct32(priv, FH39_TSSR_MSG_CONFIG,
		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON |
		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON |
		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);


	return 0;
}
828
829/**
830 * iwl3945_txq_ctx_reset - Reset TX queue context
831 *
832 * Destroys all DMA structures and initialize them again
833 */
834static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
835{
836 int rc;
837 int txq_id, slots_num;
838
839 iwl3945_hw_txq_ctx_free(priv);
840
841 /* allocate tx queue structure */
842 rc = iwl_legacy_alloc_txq_mem(priv);
843 if (rc)
844 return rc;
845
846 /* Tx CMD queue */
847 rc = iwl3945_tx_reset(priv);
848 if (rc)
849 goto error;
850
851 /* Tx queue(s) */
852 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
853 slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ?
854 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
855 rc = iwl_legacy_tx_queue_init(priv, &priv->txq[txq_id],
856 slots_num, txq_id);
857 if (rc) {
858 IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
859 goto error;
860 }
861 }
862
863 return rc;
864
865 error:
866 iwl3945_hw_txq_ctx_free(priv);
867 return rc;
868}
869
870
871/*
872 * Start up 3945's basic functionality after it has been reset
873 * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
874 * NOTE: This does not load uCode nor start the embedded processor
875 */
static int iwl3945_apm_init(struct iwl_priv *priv)
{
	int ret = iwl_legacy_apm_init(priv);

	/* The APMG cleanup and radio reset below are performed regardless
	 * of the core init result; the original status is returned. */

	/* Clear APMG (NIC's internal power management) interrupts */
	iwl_legacy_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
	iwl_legacy_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);

	/* Reset radio chip */
	iwl_legacy_set_bits_prph(priv, APMG_PS_CTRL_REG,
				APMG_PS_CTRL_VAL_RESET_REQ);
	udelay(5);
	iwl_legacy_clear_bits_prph(priv, APMG_PS_CTRL_REG,
				APMG_PS_CTRL_VAL_RESET_REQ);

	return ret;
}
893
/*
 * iwl3945_nic_config - program CSR_HW_IF_CONFIG_REG from EEPROM/PCI data
 *
 * Uses the PCI revision ID and EEPROM fields (sku_cap, board_revision,
 * almgor_m_version) to set the radio type, SKU MRC mode, board type and
 * silicon type bits, then logs EEPROM RF-kill support.
 */
static void iwl3945_nic_config(struct iwl_priv *priv)
{
	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
	unsigned long flags;
	u8 rev_id = priv->pci_dev->revision;

	spin_lock_irqsave(&priv->lock, flags);

	/* Determine HW type */
	IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);

	if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
		IWL_DEBUG_INFO(priv, "RTP type\n");
	else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
		IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n");
		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
	} else {
		IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n");
		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
	}

	if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
		IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n");
		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
	} else
		IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n");

	/* Board revision 0xD0 marks the "B" board type */
	if ((eeprom->board_revision & 0xF0) == 0xD0) {
		IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
			       eeprom->board_revision);
		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
	} else {
		IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
			       eeprom->board_revision);
		iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
			      CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
	}

	if (eeprom->almgor_m_version <= 1) {
		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
		IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n",
			       eeprom->almgor_m_version);
	} else {
		IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n",
			       eeprom->almgor_m_version);
		iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
		IWL_DEBUG_RF_KILL(priv, "SW RF KILL supported in EEPROM.\n");

	if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
		IWL_DEBUG_RF_KILL(priv, "HW RF KILL supported in EEPROM.\n");
}
955
/*
 * iwl3945_hw_nic_init - bring the NIC to an initialized state
 *
 * Runs APM init, selects VMAIN power, applies the device config,
 * allocates (or resets) and replenishes the Rx queue, programs Rx DMA,
 * resets the Tx queue context, and finally sets STATUS_INIT.
 * Returns 0 on success or a negative errno.
 */
int iwl3945_hw_nic_init(struct iwl_priv *priv)
{
	int rc;
	unsigned long flags;
	struct iwl_rx_queue *rxq = &priv->rxq;

	spin_lock_irqsave(&priv->lock, flags);
	priv->cfg->ops->lib->apm_ops.init(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	iwl3945_set_pwr_vmain(priv);

	priv->cfg->ops->lib->apm_ops.config(priv);

	/* Allocate the RX queue, or reset if it is already allocated */
	if (!rxq->bd) {
		rc = iwl_legacy_rx_queue_alloc(priv);
		if (rc) {
			IWL_ERR(priv, "Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		iwl3945_rx_queue_reset(priv, rxq);

	iwl3945_rx_replenish(priv);

	iwl3945_rx_init(priv, rxq);


	/* Look at using this instead:
	rxq->need_update = 1;
	iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
	*/

	/* Hand the hardware the current Rx write pointer (8-aligned) */
	iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7);

	rc = iwl3945_txq_ctx_reset(priv);
	if (rc)
		return rc;

	set_bit(STATUS_INIT, &priv->status);

	return 0;
}
1000
1001/**
1002 * iwl3945_hw_txq_ctx_free - Free TXQ Context
1003 *
1004 * Destroy all TX DMA queues and structures
1005 */
1006void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
1007{
1008 int txq_id;
1009
1010 /* Tx queues */
1011 if (priv->txq)
1012 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
1013 txq_id++)
1014 if (txq_id == IWL39_CMD_QUEUE_NUM)
1015 iwl_legacy_cmd_queue_free(priv);
1016 else
1017 iwl_legacy_tx_queue_free(priv, txq_id);
1018
1019 /* free tx queue structure */
1020 iwl_legacy_txq_mem(priv);
1021}
1022
/*
 * iwl3945_hw_txq_ctx_stop - halt Tx DMA and free the Tx queue context
 *
 * Stops the scheduler, disables every FH Tx channel (waiting up to 1 ms
 * for each to go idle), then frees all Tx queue structures.
 */
void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
{
	int txq_id;

	/* stop SCD */
	iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0);
	iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0);

	/* reset TFD queues */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0);
		iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS,
				FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
				1000);
	}

	iwl3945_hw_txq_ctx_free(priv);
}
1041
/**
 * iwl3945_hw_reg_adjust_power_by_temp
 * return index delta into power gain settings table,
 * scaled at -11 steps per 100 degrees of temperature increase
 */
static int iwl3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
{
	int diff = new_reading - old_reading;

	return (diff * -11) / 100;
}
1050
/**
 * iwl3945_hw_reg_temp_out_of_range - Keep temperature in sane range
 *
 * Returns 0 when @temperature is inside the driver's sane raw range
 * [-260, 25], and 1 otherwise.
 */
static inline int iwl3945_hw_reg_temp_out_of_range(int temperature)
{
	return (temperature >= -260 && temperature <= 25) ? 0 : 1;
}
1058
/* Read the raw temperature measurement from CSR_UCODE_DRV_GP2 */
int iwl3945_hw_get_temperature(struct iwl_priv *priv)
{
	return iwl_read32(priv, CSR_UCODE_DRV_GP2);
}
1063
/**
 * iwl3945_hw_reg_txpower_get_temperature
 * get the current temperature by reading from NIC
 *
 * Returns a raw (driver-range) reading; if the reading is insane it is
 * replaced by EEPROM factory data or by the last sane value recorded.
*/
static int iwl3945_hw_reg_txpower_get_temperature(struct iwl_priv *priv)
{
	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
	int temperature;

	temperature = iwl3945_hw_get_temperature(priv);

	/* driver's okay range is -260 to +25.
	 *   human readable okay range is 0 to +285 */
	IWL_DEBUG_INFO(priv, "Temperature: %d\n", temperature + IWL_TEMP_CONVERT);

	/* handle insane temp reading */
	if (iwl3945_hw_reg_temp_out_of_range(temperature)) {
		IWL_ERR(priv, "Error bad temperature value  %d\n", temperature);

		/* if really really hot(?),
		 *   substitute the 3rd band/group's temp measured at factory */
		if (priv->last_temperature > 100)
			temperature = eeprom->groups[2].temperature;
		else /* else use most recent "sane" value from driver */
			temperature = priv->last_temperature;
	}

	return temperature;	/* raw, not "human readable" */
}
1093
1094/* Adjust Txpower only if temperature variance is greater than threshold.
1095 *
1096 * Both are lower than older versions' 9 degrees */
1097#define IWL_TEMPERATURE_LIMIT_TIMER 6
1098
1099/**
1100 * iwl3945_is_temp_calib_needed - determines if new calibration is needed
1101 *
1102 * records new temperature in tx_mgr->temperature.
1103 * replaces tx_mgr->last_temperature *only* if calib needed
1104 * (assumes caller will actually do the calibration!). */
1105static int iwl3945_is_temp_calib_needed(struct iwl_priv *priv)
1106{
1107 int temp_diff;
1108
1109 priv->temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
1110 temp_diff = priv->temperature - priv->last_temperature;
1111
1112 /* get absolute value */
1113 if (temp_diff < 0) {
1114 IWL_DEBUG_POWER(priv, "Getting cooler, delta %d,\n", temp_diff);
1115 temp_diff = -temp_diff;
1116 } else if (temp_diff == 0)
1117 IWL_DEBUG_POWER(priv, "Same temp,\n");
1118 else
1119 IWL_DEBUG_POWER(priv, "Getting warmer, delta %d,\n", temp_diff);
1120
1121 /* if we don't need calibration, *don't* update last_temperature */
1122 if (temp_diff < IWL_TEMPERATURE_LIMIT_TIMER) {
1123 IWL_DEBUG_POWER(priv, "Timed thermal calib not needed\n");
1124 return 0;
1125 }
1126
1127 IWL_DEBUG_POWER(priv, "Timed thermal calib needed\n");
1128
1129 /* assume that caller will actually do calib ...
1130 * update the "last temperature" value */
1131 priv->last_temperature = priv->temperature;
1132 return 1;
1133}
1134
1135#define IWL_MAX_GAIN_ENTRIES 78
1136#define IWL_CCK_FROM_OFDM_POWER_DIFF -5
1137#define IWL_CCK_FROM_OFDM_INDEX_DIFF (10)
1138
/* radio and DSP power table, each step is 1/2 dB.
 * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain.
 * Indexed as power_gain_table[band][power_index]:
 *   band 0 = 2.4 GHz, band 1 = 5.x GHz;
 *   index 0 is highest power, IWL_MAX_GAIN_ENTRIES - 1 is lowest. */
static struct iwl3945_tx_power power_gain_table[2][IWL_MAX_GAIN_ENTRIES] = {
	{
	 {251, 127},		/* 2.4 GHz, highest power */
	 {251, 127},
	 {251, 127},
	 {251, 127},
	 {251, 125},
	 {251, 110},
	 {251, 105},
	 {251, 98},
	 {187, 125},
	 {187, 115},
	 {187, 108},
	 {187, 99},
	 {243, 119},
	 {243, 111},
	 {243, 105},
	 {243, 97},
	 {243, 92},
	 {211, 106},
	 {211, 100},
	 {179, 120},
	 {179, 113},
	 {179, 107},
	 {147, 125},
	 {147, 119},
	 {147, 112},
	 {147, 106},
	 {147, 101},
	 {147, 97},
	 {147, 91},
	 {115, 107},
	 {235, 121},
	 {235, 115},
	 {235, 109},
	 {203, 127},
	 {203, 121},
	 {203, 115},
	 {203, 108},
	 {203, 102},
	 {203, 96},
	 {203, 92},
	 {171, 110},
	 {171, 104},
	 {171, 98},
	 {139, 116},
	 {227, 125},
	 {227, 119},
	 {227, 113},
	 {227, 107},
	 {227, 101},
	 {227, 96},
	 {195, 113},
	 {195, 106},
	 {195, 102},
	 {195, 95},
	 {163, 113},
	 {163, 106},
	 {163, 102},
	 {163, 95},
	 {131, 113},
	 {131, 106},
	 {131, 102},
	 {131, 95},
	 {99, 113},
	 {99, 106},
	 {99, 102},
	 {99, 95},
	 {67, 113},
	 {67, 106},
	 {67, 102},
	 {67, 95},
	 {35, 113},
	 {35, 106},
	 {35, 102},
	 {35, 95},
	 {3, 113},
	 {3, 106},
	 {3, 102},
	 {3, 95} },		/* 2.4 GHz, lowest power */
	{
	 {251, 127},		/* 5.x GHz, highest power */
	 {251, 120},
	 {251, 114},
	 {219, 119},
	 {219, 101},
	 {187, 113},
	 {187, 102},
	 {155, 114},
	 {155, 103},
	 {123, 117},
	 {123, 107},
	 {123, 99},
	 {123, 92},
	 {91, 108},
	 {59, 125},
	 {59, 118},
	 {59, 109},
	 {59, 102},
	 {59, 96},
	 {59, 90},
	 {27, 104},
	 {27, 98},
	 {27, 92},
	 {115, 118},
	 {115, 111},
	 {115, 104},
	 {83, 126},
	 {83, 121},
	 {83, 113},
	 {83, 105},
	 {83, 99},
	 {51, 118},
	 {51, 111},
	 {51, 104},
	 {51, 98},
	 {19, 116},
	 {19, 109},
	 {19, 102},
	 {19, 98},
	 {19, 93},
	 {171, 113},
	 {171, 107},
	 {171, 99},
	 {139, 120},
	 {139, 113},
	 {139, 107},
	 {139, 99},
	 {107, 120},
	 {107, 113},
	 {107, 107},
	 {107, 99},
	 {75, 120},
	 {75, 113},
	 {75, 107},
	 {75, 99},
	 {43, 120},
	 {43, 113},
	 {43, 107},
	 {43, 99},
	 {11, 120},
	 {11, 113},
	 {11, 107},
	 {11, 99},
	 {131, 107},
	 {131, 99},
	 {99, 120},
	 {99, 113},
	 {99, 107},
	 {99, 99},
	 {67, 120},
	 {67, 113},
	 {67, 107},
	 {67, 99},
	 {35, 120},
	 {35, 113},
	 {35, 107},
	 {35, 99},
	 {3, 120} }		/* 5.x GHz, lowest power */
};
1301
1302static inline u8 iwl3945_hw_reg_fix_power_index(int index)
1303{
1304 if (index < 0)
1305 return 0;
1306 if (index >= IWL_MAX_GAIN_ENTRIES)
1307 return IWL_MAX_GAIN_ENTRIES - 1;
1308 return (u8) index;
1309}
1310
1311/* Kick off thermal recalibration check every 60 seconds */
1312#define REG_RECALIB_PERIOD (60)
1313
/**
 * iwl3945_hw_reg_set_scan_power - Set Tx power for scan probe requests
 *
 * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
 * or 6 Mbit (OFDM) rates. @rate_index selects which power_info entry is the
 * reference; @scan_tbl_index selects which scan_pwr_info slot to fill.
 */
static void iwl3945_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_index,
			       s32 rate_index, const s8 *clip_pwrs,
			       struct iwl_channel_info *ch_info,
			       int band_index)
{
	struct iwl3945_scan_power_info *scan_power_info;
	s8 power;
	u8 power_index;

	scan_power_info = &ch_info->scan_pwr_info[scan_tbl_index];

	/* use this channel group's 6Mbit clipping/saturation pwr,
	 *   but cap at regulatory scan power restriction (set during init
	 *   based on eeprom channel data) for this channel.  */
	power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]);

	/* further cap at the user's requested power limit */
	power = min(power, priv->tx_power_user_lmt);
	scan_power_info->requested_power = power;

	/* find difference between new scan *power* and current "normal"
	 *   Tx *power* for 6Mb.  Use this difference (x2) to adjust the
	 *   current "normal" temperature-compensated Tx power *index* for
	 *   this rate (1Mb or 6Mb) to yield new temp-compensated scan power
	 *   *index*. */
	power_index = ch_info->power_info[rate_index].power_table_index
	    - (power - ch_info->power_info
	       [IWL_RATE_6M_INDEX_TABLE].requested_power) * 2;

	/* store reference index that we use when adjusting *all* scan
	 *   powers.  So we can accommodate user (all channel) or spectrum
	 *   management (single channel) power changes "between" temperature
	 *   feedback compensation procedures.
	 * don't force fit this reference index into gain table; it may be a
	 *   negative number.  This will help avoid errors when we're at
	 *   the lower bounds (highest gains, for warmest temperatures)
	 *   of the table. */

	/* don't exceed table bounds for "real" setting */
	power_index = iwl3945_hw_reg_fix_power_index(power_index);

	scan_power_info->power_table_index = power_index;
	scan_power_info->tpc.tx_gain =
	    power_gain_table[band_index][power_index].tx_gain;
	scan_power_info->tpc.dsp_atten =
	    power_gain_table[band_index][power_index].dsp_atten;
}
1366
/**
 * iwl3945_send_tx_power - fill in Tx Power command with gain settings
 *
 * Configures power settings for all rates for the current channel,
 * using values from channel info struct, and send to NIC.
 * Returns 0 on success, -EAGAIN during scan, -EINVAL for a bad channel.
 */
static int iwl3945_send_tx_power(struct iwl_priv *priv)
{
	int rate_idx, i;
	const struct iwl_channel_info *ch_info = NULL;
	struct iwl3945_txpowertable_cmd txpower = {
		.channel = priv->contexts[IWL_RXON_CTX_BSS].active.channel,
	};
	u16 chan;

	if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
		      "TX Power requested while scanning!\n"))
		return -EAGAIN;

	chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel);

	txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
	ch_info = iwl_legacy_get_channel_info(priv, priv->band, chan);
	if (!ch_info) {
		IWL_ERR(priv,
			"Failed to get channel info for channel %d [%d]\n",
			chan, priv->band);
		return -EINVAL;
	}

	if (!iwl_legacy_is_channel_valid(ch_info)) {
		IWL_DEBUG_POWER(priv, "Not calling TX_PWR_TABLE_CMD on "
				"non-Tx channel.\n");
		return 0;
	}

	/* fill cmd with power settings for all rates for current channel */
	/* Fill OFDM rate */
	for (rate_idx = IWL_FIRST_OFDM_RATE, i = 0;
	     rate_idx <= IWL39_LAST_OFDM_RATE; rate_idx++, i++) {

		txpower.power[i].tpc = ch_info->power_info[i].tpc;
		txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;

		IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
				le16_to_cpu(txpower.channel),
				txpower.band,
				txpower.power[i].tpc.tx_gain,
				txpower.power[i].tpc.dsp_atten,
				txpower.power[i].rate);
	}
	/* Fill CCK rates: note that 'i' continues from the OFDM loop, so
	 * CCK entries occupy the table slots right after the OFDM ones and
	 * power_info[] is indexed by the same running table index */
	for (rate_idx = IWL_FIRST_CCK_RATE;
	     rate_idx <= IWL_LAST_CCK_RATE; rate_idx++, i++) {
		txpower.power[i].tpc = ch_info->power_info[i].tpc;
		txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;

		IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
				le16_to_cpu(txpower.channel),
				txpower.band,
				txpower.power[i].tpc.tx_gain,
				txpower.power[i].tpc.dsp_atten,
				txpower.power[i].rate);
	}

	return iwl_legacy_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD,
				sizeof(struct iwl3945_txpowertable_cmd),
				&txpower);

}
1437
1438/**
1439 * iwl3945_hw_reg_set_new_power - Configures power tables at new levels
1440 * @ch_info: Channel to update. Uses power_info.requested_power.
1441 *
1442 * Replace requested_power and base_power_index ch_info fields for
1443 * one channel.
1444 *
1445 * Called if user or spectrum management changes power preferences.
1446 * Takes into account h/w and modulation limitations (clip power).
1447 *
1448 * This does *not* send anything to NIC, just sets up ch_info for one channel.
1449 *
1450 * NOTE: reg_compensate_for_temperature_dif() *must* be run after this to
1451 * properly fill out the scan powers, and actual h/w gain settings,
1452 * and send changes to NIC
1453 */
1454static int iwl3945_hw_reg_set_new_power(struct iwl_priv *priv,
1455 struct iwl_channel_info *ch_info)
1456{
1457 struct iwl3945_channel_power_info *power_info;
1458 int power_changed = 0;
1459 int i;
1460 const s8 *clip_pwrs;
1461 int power;
1462
1463 /* Get this chnlgrp's rate-to-max/clip-powers table */
1464 clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
1465
1466 /* Get this channel's rate-to-current-power settings table */
1467 power_info = ch_info->power_info;
1468
1469 /* update OFDM Txpower settings */
1470 for (i = IWL_RATE_6M_INDEX_TABLE; i <= IWL_RATE_54M_INDEX_TABLE;
1471 i++, ++power_info) {
1472 int delta_idx;
1473
1474 /* limit new power to be no more than h/w capability */
1475 power = min(ch_info->curr_txpow, clip_pwrs[i]);
1476 if (power == power_info->requested_power)
1477 continue;
1478
1479 /* find difference between old and new requested powers,
1480 * update base (non-temp-compensated) power index */
1481 delta_idx = (power - power_info->requested_power) * 2;
1482 power_info->base_power_index -= delta_idx;
1483
1484 /* save new requested power value */
1485 power_info->requested_power = power;
1486
1487 power_changed = 1;
1488 }
1489
1490 /* update CCK Txpower settings, based on OFDM 12M setting ...
1491 * ... all CCK power settings for a given channel are the *same*. */
1492 if (power_changed) {
1493 power =
1494 ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
1495 requested_power + IWL_CCK_FROM_OFDM_POWER_DIFF;
1496
1497 /* do all CCK rates' iwl3945_channel_power_info structures */
1498 for (i = IWL_RATE_1M_INDEX_TABLE; i <= IWL_RATE_11M_INDEX_TABLE; i++) {
1499 power_info->requested_power = power;
1500 power_info->base_power_index =
1501 ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
1502 base_power_index + IWL_CCK_FROM_OFDM_INDEX_DIFF;
1503 ++power_info;
1504 }
1505 }
1506
1507 return 0;
1508}
1509
1510/**
1511 * iwl3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
1512 *
1513 * NOTE: Returned power limit may be less (but not more) than requested,
1514 * based strictly on regulatory (eeprom and spectrum mgt) limitations
1515 * (no consideration for h/w clipping limitations).
1516 */
1517static int iwl3945_hw_reg_get_ch_txpower_limit(struct iwl_channel_info *ch_info)
1518{
1519 s8 max_power;
1520
1521#if 0
1522 /* if we're using TGd limits, use lower of TGd or EEPROM */
1523 if (ch_info->tgd_data.max_power != 0)
1524 max_power = min(ch_info->tgd_data.max_power,
1525 ch_info->eeprom.max_power_avg);
1526
1527 /* else just use EEPROM limits */
1528 else
1529#endif
1530 max_power = ch_info->eeprom.max_power_avg;
1531
1532 return min(max_power, ch_info->max_power_avg);
1533}
1534
1535/**
1536 * iwl3945_hw_reg_comp_txpower_temp - Compensate for temperature
1537 *
1538 * Compensate txpower settings of *all* channels for temperature.
1539 * This only accounts for the difference between current temperature
1540 * and the factory calibration temperatures, and bases the new settings
1541 * on the channel's base_power_index.
1542 *
1543 * If RxOn is "associated", this sends the new Txpower to NIC!
1544 */
1545static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
1546{
1547 struct iwl_channel_info *ch_info = NULL;
1548 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
1549 int delta_index;
1550 const s8 *clip_pwrs; /* array of h/w max power levels for each rate */
1551 u8 a_band;
1552 u8 rate_index;
1553 u8 scan_tbl_index;
1554 u8 i;
1555 int ref_temp;
1556 int temperature = priv->temperature;
1557
1558 if (priv->disable_tx_power_cal ||
1559 test_bit(STATUS_SCANNING, &priv->status)) {
1560 /* do not perform tx power calibration */
1561 return 0;
1562 }
1563 /* set up new Tx power info for each and every channel, 2.4 and 5.x */
1564 for (i = 0; i < priv->channel_count; i++) {
1565 ch_info = &priv->channel_info[i];
1566 a_band = iwl_legacy_is_channel_a_band(ch_info);
1567
1568 /* Get this chnlgrp's factory calibration temperature */
1569 ref_temp = (s16)eeprom->groups[ch_info->group_index].
1570 temperature;
1571
1572 /* get power index adjustment based on current and factory
1573 * temps */
1574 delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
1575 ref_temp);
1576
1577 /* set tx power value for all rates, OFDM and CCK */
1578 for (rate_index = 0; rate_index < IWL_RATE_COUNT_3945;
1579 rate_index++) {
1580 int power_idx =
1581 ch_info->power_info[rate_index].base_power_index;
1582
1583 /* temperature compensate */
1584 power_idx += delta_index;
1585
1586 /* stay within table range */
1587 power_idx = iwl3945_hw_reg_fix_power_index(power_idx);
1588 ch_info->power_info[rate_index].
1589 power_table_index = (u8) power_idx;
1590 ch_info->power_info[rate_index].tpc =
1591 power_gain_table[a_band][power_idx];
1592 }
1593
1594 /* Get this chnlgrp's rate-to-max/clip-powers table */
1595 clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
1596
1597 /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
1598 for (scan_tbl_index = 0;
1599 scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
1600 s32 actual_index = (scan_tbl_index == 0) ?
1601 IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
1602 iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
1603 actual_index, clip_pwrs,
1604 ch_info, a_band);
1605 }
1606 }
1607
1608 /* send Txpower command for current channel to ucode */
1609 return priv->cfg->ops->lib->send_tx_power(priv);
1610}
1611
1612int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
1613{
1614 struct iwl_channel_info *ch_info;
1615 s8 max_power;
1616 u8 a_band;
1617 u8 i;
1618
1619 if (priv->tx_power_user_lmt == power) {
1620 IWL_DEBUG_POWER(priv, "Requested Tx power same as current "
1621 "limit: %ddBm.\n", power);
1622 return 0;
1623 }
1624
1625 IWL_DEBUG_POWER(priv, "Setting upper limit clamp to %ddBm.\n", power);
1626 priv->tx_power_user_lmt = power;
1627
1628 /* set up new Tx powers for each and every channel, 2.4 and 5.x */
1629
1630 for (i = 0; i < priv->channel_count; i++) {
1631 ch_info = &priv->channel_info[i];
1632 a_band = iwl_legacy_is_channel_a_band(ch_info);
1633
1634 /* find minimum power of all user and regulatory constraints
1635 * (does not consider h/w clipping limitations) */
1636 max_power = iwl3945_hw_reg_get_ch_txpower_limit(ch_info);
1637 max_power = min(power, max_power);
1638 if (max_power != ch_info->curr_txpow) {
1639 ch_info->curr_txpow = max_power;
1640
1641 /* this considers the h/w clipping limitations */
1642 iwl3945_hw_reg_set_new_power(priv, ch_info);
1643 }
1644 }
1645
1646 /* update txpower settings for all channels,
1647 * send to NIC if associated. */
1648 iwl3945_is_temp_calib_needed(priv);
1649 iwl3945_hw_reg_comp_txpower_temp(priv);
1650
1651 return 0;
1652}
1653
1654static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
1655 struct iwl_rxon_context *ctx)
1656{
1657 int rc = 0;
1658 struct iwl_rx_packet *pkt;
1659 struct iwl3945_rxon_assoc_cmd rxon_assoc;
1660 struct iwl_host_cmd cmd = {
1661 .id = REPLY_RXON_ASSOC,
1662 .len = sizeof(rxon_assoc),
1663 .flags = CMD_WANT_SKB,
1664 .data = &rxon_assoc,
1665 };
1666 const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
1667 const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
1668
1669 if ((rxon1->flags == rxon2->flags) &&
1670 (rxon1->filter_flags == rxon2->filter_flags) &&
1671 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1672 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1673 IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
1674 return 0;
1675 }
1676
1677 rxon_assoc.flags = ctx->staging.flags;
1678 rxon_assoc.filter_flags = ctx->staging.filter_flags;
1679 rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
1680 rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
1681 rxon_assoc.reserved = 0;
1682
1683 rc = iwl_legacy_send_cmd_sync(priv, &cmd);
1684 if (rc)
1685 return rc;
1686
1687 pkt = (struct iwl_rx_packet *)cmd.reply_page;
1688 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
1689 IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n");
1690 rc = -EIO;
1691 }
1692
1693 iwl_legacy_free_pages(priv, cmd.reply_page);
1694
1695 return rc;
1696}
1697
1698/**
1699 * iwl3945_commit_rxon - commit staging_rxon to hardware
1700 *
1701 * The RXON command in staging_rxon is committed to the hardware and
1702 * the active_rxon structure is updated with the new data. This
1703 * function correctly transitions out of the RXON_ASSOC_MSK state if
1704 * a HW tune is required based on the RXON structure changes.
1705 */
1706int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1707{
1708 /* cast away the const for active_rxon in this function */
1709 struct iwl3945_rxon_cmd *active_rxon = (void *)&ctx->active;
1710 struct iwl3945_rxon_cmd *staging_rxon = (void *)&ctx->staging;
1711 int rc = 0;
1712 bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);
1713
1714 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1715 return -EINVAL;
1716
1717 if (!iwl_legacy_is_alive(priv))
1718 return -1;
1719
1720 /* always get timestamp with Rx frame */
1721 staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;
1722
1723 /* select antenna */
1724 staging_rxon->flags &=
1725 ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
1726 staging_rxon->flags |= iwl3945_get_antenna_flags(priv);
1727
1728 rc = iwl_legacy_check_rxon_cmd(priv, ctx);
1729 if (rc) {
1730 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
1731 return -EINVAL;
1732 }
1733
1734 /* If we don't need to send a full RXON, we can use
1735 * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
1736 * and other flags for the current radio configuration. */
1737 if (!iwl_legacy_full_rxon_required(priv,
1738 &priv->contexts[IWL_RXON_CTX_BSS])) {
1739 rc = iwl_legacy_send_rxon_assoc(priv,
1740 &priv->contexts[IWL_RXON_CTX_BSS]);
1741 if (rc) {
1742 IWL_ERR(priv, "Error setting RXON_ASSOC "
1743 "configuration (%d).\n", rc);
1744 return rc;
1745 }
1746
1747 memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
1748 /*
1749 * We do not commit tx power settings while channel changing,
1750 * do it now if tx power changed.
1751 */
1752 iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
1753 return 0;
1754 }
1755
1756 /* If we are currently associated and the new config requires
1757 * an RXON_ASSOC and the new config wants the associated mask enabled,
1758 * we must clear the associated from the active configuration
1759 * before we apply the new config */
1760 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) {
1761 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
1762 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1763
1764 /*
1765 * reserved4 and 5 could have been filled by the iwlcore code.
1766 * Let's clear them before pushing to the 3945.
1767 */
1768 active_rxon->reserved4 = 0;
1769 active_rxon->reserved5 = 0;
1770 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
1771 sizeof(struct iwl3945_rxon_cmd),
1772 &priv->contexts[IWL_RXON_CTX_BSS].active);
1773
1774 /* If the mask clearing failed then we set
1775 * active_rxon back to what it was previously */
1776 if (rc) {
1777 active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
1778 IWL_ERR(priv, "Error clearing ASSOC_MSK on current "
1779 "configuration (%d).\n", rc);
1780 return rc;
1781 }
1782 iwl_legacy_clear_ucode_stations(priv,
1783 &priv->contexts[IWL_RXON_CTX_BSS]);
1784 iwl_legacy_restore_stations(priv,
1785 &priv->contexts[IWL_RXON_CTX_BSS]);
1786 }
1787
1788 IWL_DEBUG_INFO(priv, "Sending RXON\n"
1789 "* with%s RXON_FILTER_ASSOC_MSK\n"
1790 "* channel = %d\n"
1791 "* bssid = %pM\n",
1792 (new_assoc ? "" : "out"),
1793 le16_to_cpu(staging_rxon->channel),
1794 staging_rxon->bssid_addr);
1795
1796 /*
1797 * reserved4 and 5 could have been filled by the iwlcore code.
1798 * Let's clear them before pushing to the 3945.
1799 */
1800 staging_rxon->reserved4 = 0;
1801 staging_rxon->reserved5 = 0;
1802
1803 iwl_legacy_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto);
1804
1805 /* Apply the new configuration */
1806 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
1807 sizeof(struct iwl3945_rxon_cmd),
1808 staging_rxon);
1809 if (rc) {
1810 IWL_ERR(priv, "Error setting new configuration (%d).\n", rc);
1811 return rc;
1812 }
1813
1814 memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
1815
1816 if (!new_assoc) {
1817 iwl_legacy_clear_ucode_stations(priv,
1818 &priv->contexts[IWL_RXON_CTX_BSS]);
1819 iwl_legacy_restore_stations(priv,
1820 &priv->contexts[IWL_RXON_CTX_BSS]);
1821 }
1822
1823 /* If we issue a new RXON command which required a tune then we must
1824 * send a new TXPOWER command or we won't be able to Tx any frames */
1825 rc = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
1826 if (rc) {
1827 IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
1828 return rc;
1829 }
1830
1831 /* Init the hardware's rate fallback order based on the band */
1832 rc = iwl3945_init_hw_rate_table(priv);
1833 if (rc) {
1834 IWL_ERR(priv, "Error setting HW rate table: %02X\n", rc);
1835 return -EIO;
1836 }
1837
1838 return 0;
1839}
1840
1841/**
1842 * iwl3945_reg_txpower_periodic - called when time to check our temperature.
1843 *
1844 * -- reset periodic timer
1845 * -- see if temp has changed enough to warrant re-calibration ... if so:
1846 * -- correct coeffs for temp (can reset temp timer)
1847 * -- save this temp as "last",
1848 * -- send new set of gain settings to NIC
1849 * NOTE: This should continue working, even when we're not associated,
1850 * so we can keep our internal table of scan powers current. */
1851void iwl3945_reg_txpower_periodic(struct iwl_priv *priv)
1852{
1853 /* This will kick in the "brute force"
1854 * iwl3945_hw_reg_comp_txpower_temp() below */
1855 if (!iwl3945_is_temp_calib_needed(priv))
1856 goto reschedule;
1857
1858 /* Set up a new set of temp-adjusted TxPowers, send to NIC.
1859 * This is based *only* on current temperature,
1860 * ignoring any previous power measurements */
1861 iwl3945_hw_reg_comp_txpower_temp(priv);
1862
1863 reschedule:
1864 queue_delayed_work(priv->workqueue,
1865 &priv->_3945.thermal_periodic, REG_RECALIB_PERIOD * HZ);
1866}
1867
1868static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work)
1869{
1870 struct iwl_priv *priv = container_of(work, struct iwl_priv,
1871 _3945.thermal_periodic.work);
1872
1873 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1874 return;
1875
1876 mutex_lock(&priv->mutex);
1877 iwl3945_reg_txpower_periodic(priv);
1878 mutex_unlock(&priv->mutex);
1879}
1880
1881/**
1882 * iwl3945_hw_reg_get_ch_grp_index - find the channel-group index (0-4)
1883 * for the channel.
1884 *
1885 * This function is used when initializing channel-info structs.
1886 *
1887 * NOTE: These channel groups do *NOT* match the bands above!
1888 * These channel groups are based on factory-tested channels;
1889 * on A-band, EEPROM's "group frequency" entries represent the top
1890 * channel in each group 1-4. Group 5 All B/G channels are in group 0.
1891 */
1892static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl_priv *priv,
1893 const struct iwl_channel_info *ch_info)
1894{
1895 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
1896 struct iwl3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0];
1897 u8 group;
1898 u16 group_index = 0; /* based on factory calib frequencies */
1899 u8 grp_channel;
1900
1901 /* Find the group index for the channel ... don't use index 1(?) */
1902 if (iwl_legacy_is_channel_a_band(ch_info)) {
1903 for (group = 1; group < 5; group++) {
1904 grp_channel = ch_grp[group].group_channel;
1905 if (ch_info->channel <= grp_channel) {
1906 group_index = group;
1907 break;
1908 }
1909 }
1910 /* group 4 has a few channels *above* its factory cal freq */
1911 if (group == 5)
1912 group_index = 4;
1913 } else
1914 group_index = 0; /* 2.4 GHz, group 0 */
1915
1916 IWL_DEBUG_POWER(priv, "Chnl %d mapped to grp %d\n", ch_info->channel,
1917 group_index);
1918 return group_index;
1919}
1920
1921/**
1922 * iwl3945_hw_reg_get_matched_power_index - Interpolate to get nominal index
1923 *
1924 * Interpolate to get nominal (i.e. at factory calibration temperature) index
1925 * into radio/DSP gain settings table for requested power.
1926 */
1927static int iwl3945_hw_reg_get_matched_power_index(struct iwl_priv *priv,
1928 s8 requested_power,
1929 s32 setting_index, s32 *new_index)
1930{
1931 const struct iwl3945_eeprom_txpower_group *chnl_grp = NULL;
1932 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
1933 s32 index0, index1;
1934 s32 power = 2 * requested_power;
1935 s32 i;
1936 const struct iwl3945_eeprom_txpower_sample *samples;
1937 s32 gains0, gains1;
1938 s32 res;
1939 s32 denominator;
1940
1941 chnl_grp = &eeprom->groups[setting_index];
1942 samples = chnl_grp->samples;
1943 for (i = 0; i < 5; i++) {
1944 if (power == samples[i].power) {
1945 *new_index = samples[i].gain_index;
1946 return 0;
1947 }
1948 }
1949
1950 if (power > samples[1].power) {
1951 index0 = 0;
1952 index1 = 1;
1953 } else if (power > samples[2].power) {
1954 index0 = 1;
1955 index1 = 2;
1956 } else if (power > samples[3].power) {
1957 index0 = 2;
1958 index1 = 3;
1959 } else {
1960 index0 = 3;
1961 index1 = 4;
1962 }
1963
1964 denominator = (s32) samples[index1].power - (s32) samples[index0].power;
1965 if (denominator == 0)
1966 return -EINVAL;
1967 gains0 = (s32) samples[index0].gain_index * (1 << 19);
1968 gains1 = (s32) samples[index1].gain_index * (1 << 19);
1969 res = gains0 + (gains1 - gains0) *
1970 ((s32) power - (s32) samples[index0].power) / denominator +
1971 (1 << 18);
1972 *new_index = res >> 19;
1973 return 0;
1974}
1975
1976static void iwl3945_hw_reg_init_channel_groups(struct iwl_priv *priv)
1977{
1978 u32 i;
1979 s32 rate_index;
1980 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
1981 const struct iwl3945_eeprom_txpower_group *group;
1982
1983 IWL_DEBUG_POWER(priv, "Initializing factory calib info from EEPROM\n");
1984
1985 for (i = 0; i < IWL_NUM_TX_CALIB_GROUPS; i++) {
1986 s8 *clip_pwrs; /* table of power levels for each rate */
1987 s8 satur_pwr; /* saturation power for each chnl group */
1988 group = &eeprom->groups[i];
1989
1990 /* sanity check on factory saturation power value */
1991 if (group->saturation_power < 40) {
1992 IWL_WARN(priv, "Error: saturation power is %d, "
1993 "less than minimum expected 40\n",
1994 group->saturation_power);
1995 return;
1996 }
1997
1998 /*
1999 * Derive requested power levels for each rate, based on
2000 * hardware capabilities (saturation power for band).
2001 * Basic value is 3dB down from saturation, with further
2002 * power reductions for highest 3 data rates. These
2003 * backoffs provide headroom for high rate modulation
2004 * power peaks, without too much distortion (clipping).
2005 */
2006 /* we'll fill in this array with h/w max power levels */
2007 clip_pwrs = (s8 *) priv->_3945.clip_groups[i].clip_powers;
2008
2009 /* divide factory saturation power by 2 to find -3dB level */
2010 satur_pwr = (s8) (group->saturation_power >> 1);
2011
2012 /* fill in channel group's nominal powers for each rate */
2013 for (rate_index = 0;
2014 rate_index < IWL_RATE_COUNT_3945; rate_index++, clip_pwrs++) {
2015 switch (rate_index) {
2016 case IWL_RATE_36M_INDEX_TABLE:
2017 if (i == 0) /* B/G */
2018 *clip_pwrs = satur_pwr;
2019 else /* A */
2020 *clip_pwrs = satur_pwr - 5;
2021 break;
2022 case IWL_RATE_48M_INDEX_TABLE:
2023 if (i == 0)
2024 *clip_pwrs = satur_pwr - 7;
2025 else
2026 *clip_pwrs = satur_pwr - 10;
2027 break;
2028 case IWL_RATE_54M_INDEX_TABLE:
2029 if (i == 0)
2030 *clip_pwrs = satur_pwr - 9;
2031 else
2032 *clip_pwrs = satur_pwr - 12;
2033 break;
2034 default:
2035 *clip_pwrs = satur_pwr;
2036 break;
2037 }
2038 }
2039 }
2040}
2041
2042/**
2043 * iwl3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
2044 *
2045 * Second pass (during init) to set up priv->channel_info
2046 *
2047 * Set up Tx-power settings in our channel info database for each VALID
2048 * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values
2049 * and current temperature.
2050 *
2051 * Since this is based on current temperature (at init time), these values may
2052 * not be valid for very long, but it gives us a starting/default point,
2053 * and allows us to active (i.e. using Tx) scan.
2054 *
2055 * This does *not* write values to NIC, just sets up our internal table.
2056 */
2057int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
2058{
2059 struct iwl_channel_info *ch_info = NULL;
2060 struct iwl3945_channel_power_info *pwr_info;
2061 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
2062 int delta_index;
2063 u8 rate_index;
2064 u8 scan_tbl_index;
2065 const s8 *clip_pwrs; /* array of power levels for each rate */
2066 u8 gain, dsp_atten;
2067 s8 power;
2068 u8 pwr_index, base_pwr_index, a_band;
2069 u8 i;
2070 int temperature;
2071
2072 /* save temperature reference,
2073 * so we can determine next time to calibrate */
2074 temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
2075 priv->last_temperature = temperature;
2076
2077 iwl3945_hw_reg_init_channel_groups(priv);
2078
2079 /* initialize Tx power info for each and every channel, 2.4 and 5.x */
2080 for (i = 0, ch_info = priv->channel_info; i < priv->channel_count;
2081 i++, ch_info++) {
2082 a_band = iwl_legacy_is_channel_a_band(ch_info);
2083 if (!iwl_legacy_is_channel_valid(ch_info))
2084 continue;
2085
2086 /* find this channel's channel group (*not* "band") index */
2087 ch_info->group_index =
2088 iwl3945_hw_reg_get_ch_grp_index(priv, ch_info);
2089
2090 /* Get this chnlgrp's rate->max/clip-powers table */
2091 clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
2092
2093 /* calculate power index *adjustment* value according to
2094 * diff between current temperature and factory temperature */
2095 delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
2096 eeprom->groups[ch_info->group_index].
2097 temperature);
2098
2099 IWL_DEBUG_POWER(priv, "Delta index for channel %d: %d [%d]\n",
2100 ch_info->channel, delta_index, temperature +
2101 IWL_TEMP_CONVERT);
2102
2103 /* set tx power value for all OFDM rates */
2104 for (rate_index = 0; rate_index < IWL_OFDM_RATES;
2105 rate_index++) {
2106 s32 uninitialized_var(power_idx);
2107 int rc;
2108
2109 /* use channel group's clip-power table,
2110 * but don't exceed channel's max power */
2111 s8 pwr = min(ch_info->max_power_avg,
2112 clip_pwrs[rate_index]);
2113
2114 pwr_info = &ch_info->power_info[rate_index];
2115
2116 /* get base (i.e. at factory-measured temperature)
2117 * power table index for this rate's power */
2118 rc = iwl3945_hw_reg_get_matched_power_index(priv, pwr,
2119 ch_info->group_index,
2120 &power_idx);
2121 if (rc) {
2122 IWL_ERR(priv, "Invalid power index\n");
2123 return rc;
2124 }
2125 pwr_info->base_power_index = (u8) power_idx;
2126
2127 /* temperature compensate */
2128 power_idx += delta_index;
2129
2130 /* stay within range of gain table */
2131 power_idx = iwl3945_hw_reg_fix_power_index(power_idx);
2132
2133 /* fill 1 OFDM rate's iwl3945_channel_power_info struct */
2134 pwr_info->requested_power = pwr;
2135 pwr_info->power_table_index = (u8) power_idx;
2136 pwr_info->tpc.tx_gain =
2137 power_gain_table[a_band][power_idx].tx_gain;
2138 pwr_info->tpc.dsp_atten =
2139 power_gain_table[a_band][power_idx].dsp_atten;
2140 }
2141
2142 /* set tx power for CCK rates, based on OFDM 12 Mbit settings*/
2143 pwr_info = &ch_info->power_info[IWL_RATE_12M_INDEX_TABLE];
2144 power = pwr_info->requested_power +
2145 IWL_CCK_FROM_OFDM_POWER_DIFF;
2146 pwr_index = pwr_info->power_table_index +
2147 IWL_CCK_FROM_OFDM_INDEX_DIFF;
2148 base_pwr_index = pwr_info->base_power_index +
2149 IWL_CCK_FROM_OFDM_INDEX_DIFF;
2150
2151 /* stay within table range */
2152 pwr_index = iwl3945_hw_reg_fix_power_index(pwr_index);
2153 gain = power_gain_table[a_band][pwr_index].tx_gain;
2154 dsp_atten = power_gain_table[a_band][pwr_index].dsp_atten;
2155
2156 /* fill each CCK rate's iwl3945_channel_power_info structure
2157 * NOTE: All CCK-rate Txpwrs are the same for a given chnl!
2158 * NOTE: CCK rates start at end of OFDM rates! */
2159 for (rate_index = 0;
2160 rate_index < IWL_CCK_RATES; rate_index++) {
2161 pwr_info = &ch_info->power_info[rate_index+IWL_OFDM_RATES];
2162 pwr_info->requested_power = power;
2163 pwr_info->power_table_index = pwr_index;
2164 pwr_info->base_power_index = base_pwr_index;
2165 pwr_info->tpc.tx_gain = gain;
2166 pwr_info->tpc.dsp_atten = dsp_atten;
2167 }
2168
2169 /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
2170 for (scan_tbl_index = 0;
2171 scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
2172 s32 actual_index = (scan_tbl_index == 0) ?
2173 IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
2174 iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
2175 actual_index, clip_pwrs, ch_info, a_band);
2176 }
2177 }
2178
2179 return 0;
2180}
2181
2182int iwl3945_hw_rxq_stop(struct iwl_priv *priv)
2183{
2184 int rc;
2185
2186 iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0), 0);
2187 rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS,
2188 FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
2189 if (rc < 0)
2190 IWL_ERR(priv, "Can't stop Rx DMA.\n");
2191
2192 return 0;
2193}
2194
/**
 * iwl3945_hw_tx_queue_init - Set up one Tx queue's DMA channel in hardware
 *
 * Publishes the queue's circular-buffer base address in the host-shared
 * area, clears the byte-count registers, and enables the Tx DMA channel.
 * Register write order matters; the final read flushes posted writes.
 *
 * Always returns 0.
 */
int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	int txq_id = txq->q.id;

	/* host-shared area read by the device for per-queue TFD base ptrs */
	struct iwl3945_shared *shared_data = priv->_3945.shared_virt;

	shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);

	/* reset byte-count circular buffer control/base for this queue */
	iwl_legacy_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0);
	iwl_legacy_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0);

	/* configure and enable the Tx DMA channel */
	iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id),
		FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
		FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
		FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
		FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
		FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);

	/* fake read to flush all prev. writes */
	iwl_read32(priv, FH39_TSSR_CBB_BASE);

	return 0;
}
2218
2219/*
2220 * HCMD utils
2221 */
2222static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len)
2223{
2224 switch (cmd_id) {
2225 case REPLY_RXON:
2226 return sizeof(struct iwl3945_rxon_cmd);
2227 case POWER_TABLE_CMD:
2228 return sizeof(struct iwl3945_powertable_cmd);
2229 default:
2230 return len;
2231 }
2232}
2233
2234
2235static u16 iwl3945_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
2236 u8 *data)
2237{
2238 struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data;
2239 addsta->mode = cmd->mode;
2240 memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
2241 memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
2242 addsta->station_flags = cmd->station_flags;
2243 addsta->station_flags_msk = cmd->station_flags_msk;
2244 addsta->tid_disable_tx = cpu_to_le16(0);
2245 addsta->rate_n_flags = cmd->rate_n_flags;
2246 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
2247 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
2248 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
2249
2250 return (u16)sizeof(struct iwl3945_addsta_cmd);
2251}
2252
2253static int iwl3945_add_bssid_station(struct iwl_priv *priv,
2254 const u8 *addr, u8 *sta_id_r)
2255{
2256 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2257 int ret;
2258 u8 sta_id;
2259 unsigned long flags;
2260
2261 if (sta_id_r)
2262 *sta_id_r = IWL_INVALID_STATION;
2263
2264 ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
2265 if (ret) {
2266 IWL_ERR(priv, "Unable to add station %pM\n", addr);
2267 return ret;
2268 }
2269
2270 if (sta_id_r)
2271 *sta_id_r = sta_id;
2272
2273 spin_lock_irqsave(&priv->sta_lock, flags);
2274 priv->stations[sta_id].used |= IWL_STA_LOCAL;
2275 spin_unlock_irqrestore(&priv->sta_lock, flags);
2276
2277 return 0;
2278}
2279static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
2280 struct ieee80211_vif *vif, bool add)
2281{
2282 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
2283 int ret;
2284
2285 if (add) {
2286 ret = iwl3945_add_bssid_station(priv, vif->bss_conf.bssid,
2287 &vif_priv->ibss_bssid_sta_id);
2288 if (ret)
2289 return ret;
2290
2291 iwl3945_sync_sta(priv, vif_priv->ibss_bssid_sta_id,
2292 (priv->band == IEEE80211_BAND_5GHZ) ?
2293 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP);
2294 iwl3945_rate_scale_init(priv->hw, vif_priv->ibss_bssid_sta_id);
2295
2296 return 0;
2297 }
2298
2299 return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
2300 vif->bss_conf.bssid);
2301}
2302
2303/**
2304 * iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table
2305 */
2306int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
2307{
2308 int rc, i, index, prev_index;
2309 struct iwl3945_rate_scaling_cmd rate_cmd = {
2310 .reserved = {0, 0, 0},
2311 };
2312 struct iwl3945_rate_scaling_info *table = rate_cmd.table;
2313
2314 for (i = 0; i < ARRAY_SIZE(iwl3945_rates); i++) {
2315 index = iwl3945_rates[i].table_rs_index;
2316
2317 table[index].rate_n_flags =
2318 iwl3945_hw_set_rate_n_flags(iwl3945_rates[i].plcp, 0);
2319 table[index].try_cnt = priv->retry_rate;
2320 prev_index = iwl3945_get_prev_ieee_rate(i);
2321 table[index].next_rate_index =
2322 iwl3945_rates[prev_index].table_rs_index;
2323 }
2324
2325 switch (priv->band) {
2326 case IEEE80211_BAND_5GHZ:
2327 IWL_DEBUG_RATE(priv, "Select A mode rate scale\n");
2328 /* If one of the following CCK rates is used,
2329 * have it fall back to the 6M OFDM rate */
2330 for (i = IWL_RATE_1M_INDEX_TABLE;
2331 i <= IWL_RATE_11M_INDEX_TABLE; i++)
2332 table[i].next_rate_index =
2333 iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
2334
2335 /* Don't fall back to CCK rates */
2336 table[IWL_RATE_12M_INDEX_TABLE].next_rate_index =
2337 IWL_RATE_9M_INDEX_TABLE;
2338
2339 /* Don't drop out of OFDM rates */
2340 table[IWL_RATE_6M_INDEX_TABLE].next_rate_index =
2341 iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
2342 break;
2343
2344 case IEEE80211_BAND_2GHZ:
2345 IWL_DEBUG_RATE(priv, "Select B/G mode rate scale\n");
2346 /* If an OFDM rate is used, have it fall back to the
2347 * 1M CCK rates */
2348
2349 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
2350 iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
2351
2352 index = IWL_FIRST_CCK_RATE;
2353 for (i = IWL_RATE_6M_INDEX_TABLE;
2354 i <= IWL_RATE_54M_INDEX_TABLE; i++)
2355 table[i].next_rate_index =
2356 iwl3945_rates[index].table_rs_index;
2357
2358 index = IWL_RATE_11M_INDEX_TABLE;
2359 /* CCK shouldn't fall back to OFDM... */
2360 table[index].next_rate_index = IWL_RATE_5M_INDEX_TABLE;
2361 }
2362 break;
2363
2364 default:
2365 WARN_ON(1);
2366 break;
2367 }
2368
2369 /* Update the rate scaling for control frame Tx */
2370 rate_cmd.table_id = 0;
2371 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
2372 &rate_cmd);
2373 if (rc)
2374 return rc;
2375
2376 /* Update the rate scaling for data frame Tx */
2377 rate_cmd.table_id = 1;
2378 return iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
2379 &rate_cmd);
2380}
2381
2382/* Called when initializing driver */
2383int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
2384{
2385 memset((void *)&priv->hw_params, 0,
2386 sizeof(struct iwl_hw_params));
2387
2388 priv->_3945.shared_virt =
2389 dma_alloc_coherent(&priv->pci_dev->dev,
2390 sizeof(struct iwl3945_shared),
2391 &priv->_3945.shared_phys, GFP_KERNEL);
2392 if (!priv->_3945.shared_virt) {
2393 IWL_ERR(priv, "failed to allocate pci memory\n");
2394 return -ENOMEM;
2395 }
2396
2397 /* Assign number of Usable TX queues */
2398 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
2399
2400 priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd);
2401 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_3K);
2402 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
2403 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
2404 priv->hw_params.max_stations = IWL3945_STATION_COUNT;
2405 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL3945_BROADCAST_ID;
2406
2407 priv->sta_key_max_num = STA_KEY_MAX_NUM;
2408
2409 priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
2410 priv->hw_params.max_beacon_itrvl = IWL39_MAX_UCODE_BEACON_INTERVAL;
2411 priv->hw_params.beacon_time_tsf_bits = IWL3945_EXT_BEACON_TIME_POS;
2412
2413 return 0;
2414}
2415
2416unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
2417 struct iwl3945_frame *frame, u8 rate)
2418{
2419 struct iwl3945_tx_beacon_cmd *tx_beacon_cmd;
2420 unsigned int frame_size;
2421
2422 tx_beacon_cmd = (struct iwl3945_tx_beacon_cmd *)&frame->u;
2423 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
2424
2425 tx_beacon_cmd->tx.sta_id =
2426 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
2427 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2428
2429 frame_size = iwl3945_fill_beacon_frame(priv,
2430 tx_beacon_cmd->frame,
2431 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
2432
2433 BUG_ON(frame_size > MAX_MPDU_SIZE);
2434 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
2435
2436 tx_beacon_cmd->tx.rate = rate;
2437 tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
2438 TX_CMD_FLG_TSF_MSK);
2439
2440 /* supp_rates[0] == OFDM start at IWL_FIRST_OFDM_RATE*/
2441 tx_beacon_cmd->tx.supp_rates[0] =
2442 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2443
2444 tx_beacon_cmd->tx.supp_rates[1] =
2445 (IWL_CCK_BASIC_RATES_MASK & 0xF);
2446
2447 return sizeof(struct iwl3945_tx_beacon_cmd) + frame_size;
2448}
2449
2450void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv)
2451{
2452 priv->rx_handlers[REPLY_TX] = iwl3945_rx_reply_tx;
2453 priv->rx_handlers[REPLY_3945_RX] = iwl3945_rx_reply_rx;
2454}
2455
/* Initialize the 3945's periodic thermal/txpower delayed work item;
 * the work itself is scheduled elsewhere. */
void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv)
{
	INIT_DELAYED_WORK(&priv->_3945.thermal_periodic,
			  iwl3945_bg_reg_txpower_periodic);
}
2461
/* Cancel the periodic thermal/txpower work set up in
 * iwl3945_hw_setup_deferred_work(). cancel_delayed_work() does not wait
 * for an already-running instance to finish. */
void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv)
{
	cancel_delayed_work(&priv->_3945.thermal_periodic);
}
2466
/* check contents of special bootstrap uCode SRAM */
static int iwl3945_verify_bsm(struct iwl_priv *priv)
{
	__le32 *image = priv->ucode_boot.v_addr;	/* host copy of the image */
	u32 len = priv->ucode_boot.len;			/* image length in bytes */
	u32 reg;
	u32 val;

	IWL_DEBUG_INFO(priv, "Begin verify bsm\n");

	/* verify BSM SRAM contents */
	/* NOTE(review): this first read's result is overwritten on the
	 * first loop iteration below and otherwise unused — confirm whether
	 * the read itself has a required hardware side effect before
	 * considering removal. */
	val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
	/* Compare each SRAM word against the host copy; fail fast on the
	 * first mismatch. */
	for (reg = BSM_SRAM_LOWER_BOUND;
	     reg < BSM_SRAM_LOWER_BOUND + len;
	     reg += sizeof(u32), image++) {
		val = iwl_legacy_read_prph(priv, reg);
		if (val != le32_to_cpu(*image)) {
			IWL_ERR(priv, "BSM uCode verification failed at "
				"addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
				BSM_SRAM_LOWER_BOUND,
				reg - BSM_SRAM_LOWER_BOUND, len,
				val, le32_to_cpu(*image));
			return -EIO;
		}
	}

	IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");

	return 0;
}
2497
2498
2499/******************************************************************************
2500 *
2501 * EEPROM related functions
2502 *
2503 ******************************************************************************/
2504
2505/*
2506 * Clear the OWNER_MSK, to establish driver (instead of uCode running on
2507 * embedded controller) as EEPROM reader; each read is a series of pulses
2508 * to/from the EEPROM chip, not a single event, so even reads could conflict
2509 * if they weren't arbitrated by some ownership mechanism. Here, the driver
2510 * simply claims ownership, which should be safe when this function is called
2511 * (i.e. before loading uCode!).
2512 */
/* Claim EEPROM ownership for the driver by clearing the uCode's owner
 * bit (see the block comment above); cannot fail, always returns 0. */
static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv)
{
	_iwl_legacy_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
	return 0;
}
2518
2519
/*
 * iwl3945_eeprom_release_semaphore - EEPROM ownership release hook
 *
 * On 3945, ownership is claimed by clearing CSR_EEPROM_GP_IF_OWNER_MSK in
 * the acquire hook and there appears to be nothing to undo afterwards, so
 * this is deliberately empty; it exists only to satisfy the
 * eeprom_ops.release_semaphore interface. (Removed a redundant bare
 * "return;" that was the function's only statement.)
 */
static void iwl3945_eeprom_release_semaphore(struct iwl_priv *priv)
{
}
2524
2525 /**
2526 * iwl3945_load_bsm - Load bootstrap instructions
2527 *
2528 * BSM operation:
2529 *
2530 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
2531 * in special SRAM that does not power down during RFKILL. When powering back
2532 * up after power-saving sleeps (or during initial uCode load), the BSM loads
2533 * the bootstrap program into the on-board processor, and starts it.
2534 *
2535 * The bootstrap program loads (via DMA) instructions and data for a new
2536 * program from host DRAM locations indicated by the host driver in the
2537 * BSM_DRAM_* registers. Once the new program is loaded, it starts
2538 * automatically.
2539 *
2540 * When initializing the NIC, the host driver points the BSM to the
2541 * "initialize" uCode image. This uCode sets up some internal data, then
2542 * notifies host via "initialize alive" that it is complete.
2543 *
2544 * The host then replaces the BSM_DRAM_* pointer values to point to the
2545 * normal runtime uCode instructions and a backup uCode data cache buffer
2546 * (filled initially with starting data values for the on-board processor),
2547 * then triggers the "initialize" uCode to load and launch the runtime uCode,
2548 * which begins normal operation.
2549 *
2550 * When doing a power-save shutdown, runtime uCode saves data SRAM into
2551 * the backup data cache in DRAM before SRAM is powered down.
2552 *
2553 * When powering back up, the BSM loads the bootstrap program. This reloads
2554 * the runtime uCode instructions and the backup data cache into SRAM,
2555 * and re-launches the runtime uCode from where it left off.
2556 */
static int iwl3945_load_bsm(struct iwl_priv *priv)
{
	__le32 *image = priv->ucode_boot.v_addr;	/* bootstrap image in host mem */
	u32 len = priv->ucode_boot.len;			/* bootstrap image length, bytes */
	dma_addr_t pinst;	/* DMA address of "initialize" instructions */
	dma_addr_t pdata;	/* DMA address of "initialize" data */
	u32 inst_len;
	u32 data_len;
	int rc;
	int i;
	u32 done;
	u32 reg_offset;

	IWL_DEBUG_INFO(priv, "Begin load bsm\n");

	/* make sure bootstrap program is no larger than BSM's SRAM size */
	if (len > IWL39_MAX_BSM_SIZE)
		return -EINVAL;

	/* Tell bootstrap uCode where to find the "Initialize" uCode
	 * in host DRAM ... host DRAM physical address bits 31:0 for 3945.
	 * NOTE: iwl3945_initialize_alive_start() will replace these values,
	 * after the "initialize" uCode has run, to point to
	 * runtime/protocol instructions and backup data cache. */
	pinst = priv->ucode_init.p_addr;
	pdata = priv->ucode_init_data.p_addr;
	inst_len = priv->ucode_init.len;
	data_len = priv->ucode_init_data.len;

	/* Program the BSM's DRAM-source pointer/length registers */
	iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
	iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
	iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
	iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);

	/* Fill BSM memory with bootstrap instructions */
	for (reg_offset = BSM_SRAM_LOWER_BOUND;
	     reg_offset < BSM_SRAM_LOWER_BOUND + len;
	     reg_offset += sizeof(u32), image++)
		_iwl_legacy_write_prph(priv, reg_offset,
				       le32_to_cpu(*image));

	/* Read the SRAM back and compare against the host copy */
	rc = iwl3945_verify_bsm(priv);
	if (rc)
		return rc;

	/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
	iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
	iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG,
			      IWL39_RTC_INST_LOWER_BOUND);
	iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));

	/* Load bootstrap code into instruction SRAM now,
	 * to prepare to load "initialize" uCode */
	iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
			      BSM_WR_CTRL_REG_BIT_START);

	/* Wait for load of bootstrap uCode to finish: hardware clears the
	 * START bit when done. Poll up to 100 times with 10us spacing. */
	for (i = 0; i < 100; i++) {
		done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
		if (!(done & BSM_WR_CTRL_REG_BIT_START))
			break;
		udelay(10);
	}
	if (i < 100)
		IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
	else {
		IWL_ERR(priv, "BSM write did not complete!\n");
		return -EIO;
	}

	/* Enable future boot loads whenever power management unit triggers it
	 * (e.g. when powering back up after power-save shutdown) */
	iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
			      BSM_WR_CTRL_REG_BIT_START_EN);

	return 0;
}
2634
/* Host-command hooks: 3945-specific RXON association and commit handlers */
static struct iwl_hcmd_ops iwl3945_hcmd = {
	.rxon_assoc = iwl3945_send_rxon_assoc,
	.commit_rxon = iwl3945_commit_rxon,
};
2639
/* Library hooks binding the 3945 hardware-specific implementations
 * (Tx queue handling, uCode load via the BSM, APM/EEPROM access,
 * tx-power and debugfs statistics) into the shared iwlegacy core. */
static struct iwl_lib_ops iwl3945_lib = {
	.txq_attach_buf_to_tfd = iwl3945_hw_txq_attach_buf_to_tfd,
	.txq_free_tfd = iwl3945_hw_txq_free_tfd,
	.txq_init = iwl3945_hw_tx_queue_init,
	.load_ucode = iwl3945_load_bsm,
	.dump_nic_error_log = iwl3945_dump_nic_error_log,
	.apm_ops = {
		.init = iwl3945_apm_init,
		.config = iwl3945_nic_config,
	},
	.eeprom_ops = {
		/* Per-band channel tables in the 3945 EEPROM; 3945 has no
		 * HT40 support, hence the NO_HT40 placeholders. */
		.regulatory_bands = {
			EEPROM_REGULATORY_BAND_1_CHANNELS,
			EEPROM_REGULATORY_BAND_2_CHANNELS,
			EEPROM_REGULATORY_BAND_3_CHANNELS,
			EEPROM_REGULATORY_BAND_4_CHANNELS,
			EEPROM_REGULATORY_BAND_5_CHANNELS,
			EEPROM_REGULATORY_BAND_NO_HT40,
			EEPROM_REGULATORY_BAND_NO_HT40,
		},
		.acquire_semaphore = iwl3945_eeprom_acquire_semaphore,
		.release_semaphore = iwl3945_eeprom_release_semaphore,
	},
	.send_tx_power = iwl3945_send_tx_power,
	.is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr,

	.debugfs_ops = {
		.rx_stats_read = iwl3945_ucode_rx_stats_read,
		.tx_stats_read = iwl3945_ucode_tx_stats_read,
		.general_stats_read = iwl3945_ucode_general_stats_read,
	},
};
2672
/* mac80211-facing legacy hooks: post-association setup, AP configuration,
 * and IBSS station management */
static const struct iwl_legacy_ops iwl3945_legacy_ops = {
	.post_associate = iwl3945_post_associate,
	.config_ap = iwl3945_config_ap,
	.manage_ibss_station = iwl3945_manage_ibss_station,
};
2678
/* Host-command utility hooks: command sizing, ADD_STA construction,
 * and scan request/completion handling */
static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
	.get_hcmd_size = iwl3945_get_hcmd_size,
	.build_addsta_hcmd = iwl3945_build_addsta_hcmd,
	.request_scan = iwl3945_request_scan,
	.post_scan = iwl3945_post_scan,
};
2685
/* Aggregate ops table referenced from the device configs below */
static const struct iwl_ops iwl3945_ops = {
	.lib = &iwl3945_lib,
	.hcmd = &iwl3945_hcmd,
	.utils = &iwl3945_hcmd_utils,
	.led = &iwl3945_led_ops,
	.legacy = &iwl3945_legacy_ops,
	.ieee80211_ops = &iwl3945_hw_ops,
};
2694
/* Hardware parameters common to all 3945 variants; use_bsm=true because
 * the 3945 loads uCode through the Bootstrap State Machine (see
 * iwl3945_load_bsm() above). */
static struct iwl_base_params iwl3945_base_params = {
	.eeprom_size = IWL3945_EEPROM_IMG_SIZE,
	.num_of_queues = IWL39_NUM_QUEUES,
	.pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
	.set_l0s = false,
	.use_bsm = true,
	.led_compensation = 64,
	.wd_timeout = IWL_DEF_WD_TIMEOUT,
};
2704
/* Config for the BG-only (2.4 GHz) 3945 SKU */
static struct iwl_cfg iwl3945_bg_cfg = {
	.name = "3945BG",
	.fw_name_pre = IWL3945_FW_PRE,
	.ucode_api_max = IWL3945_UCODE_API_MAX,
	.ucode_api_min = IWL3945_UCODE_API_MIN,
	.sku = IWL_SKU_G,
	.eeprom_ver = EEPROM_3945_EEPROM_VERSION,
	.ops = &iwl3945_ops,
	.mod_params = &iwl3945_mod_params,
	.base_params = &iwl3945_base_params,
	.led_mode = IWL_LED_BLINK,
};
2717
/* Config for the dual-band ABG (2.4 + 5 GHz) 3945 SKU */
static struct iwl_cfg iwl3945_abg_cfg = {
	.name = "3945ABG",
	.fw_name_pre = IWL3945_FW_PRE,
	.ucode_api_max = IWL3945_UCODE_API_MAX,
	.ucode_api_min = IWL3945_UCODE_API_MIN,
	.sku = IWL_SKU_A|IWL_SKU_G,
	.eeprom_ver = EEPROM_3945_EEPROM_VERSION,
	.ops = &iwl3945_ops,
	.mod_params = &iwl3945_mod_params,
	.base_params = &iwl3945_base_params,
	.led_mode = IWL_LED_BLINK,
};
2730
/* PCI IDs supported by this driver. Order matters: specific 0x4222/0x4227
 * subdevice IDs bind the BG-only config; the PCI_ANY_ID catch-all entries
 * for the same device IDs must come last so they only match what the
 * earlier rows did not. */
DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
	{IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)},
	{IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)},
	{IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)},
	{IWL_PCI_DEVICE(0x4227, 0x1014, iwl3945_bg_cfg)},
	{IWL_PCI_DEVICE(0x4222, PCI_ANY_ID, iwl3945_abg_cfg)},
	{IWL_PCI_DEVICE(0x4227, PCI_ANY_ID, iwl3945_abg_cfg)},
	{0}	/* sentinel: terminates the table */
};

MODULE_DEVICE_TABLE(pci, iwl3945_hw_card_ids);
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.h b/drivers/net/wireless/iwlegacy/iwl-3945.h
deleted file mode 100644
index b118b59b71de..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945.h
+++ /dev/null
@@ -1,308 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26/*
27 * Please use this file (iwl-3945.h) for driver implementation definitions.
28 * Please use iwl-3945-commands.h for uCode API definitions.
29 * Please use iwl-3945-hw.h for hardware-related definitions.
30 */
31
32#ifndef __iwl_3945_h__
33#define __iwl_3945_h__
34
35#include <linux/pci.h> /* for struct pci_device_id */
36#include <linux/kernel.h>
37#include <net/ieee80211_radiotap.h>
38
39/* Hardware specific file defines the PCI IDs table for that hardware module */
40extern const struct pci_device_id iwl3945_hw_card_ids[];
41
42#include "iwl-csr.h"
43#include "iwl-prph.h"
44#include "iwl-fh.h"
45#include "iwl-3945-hw.h"
46#include "iwl-debug.h"
47#include "iwl-power.h"
48#include "iwl-dev.h"
49#include "iwl-led.h"
50
51/* Highest firmware API version supported */
52#define IWL3945_UCODE_API_MAX 2
53
54/* Lowest firmware API version supported */
55#define IWL3945_UCODE_API_MIN 1
56
57#define IWL3945_FW_PRE "iwlwifi-3945-"
58#define _IWL3945_MODULE_FIRMWARE(api) IWL3945_FW_PRE #api ".ucode"
59#define IWL3945_MODULE_FIRMWARE(api) _IWL3945_MODULE_FIRMWARE(api)
60
61/* Default noise level to report when noise measurement is not available.
62 * This may be because we're:
63 * 1) Not associated (4965, no beacon statistics being sent to driver)
64 * 2) Scanning (noise measurement does not apply to associated channel)
65 * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
66 * Use default noise value of -127 ... this is below the range of measurable
67 * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
68 * Also, -127 works better than 0 when averaging frames with/without
69 * noise info (e.g. averaging might be done in app); measured dBm values are
70 * always negative ... using a negative value as the default keeps all
71 * averages within an s8's (used in some apps) range of negative values. */
72#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
73
74/* Module parameters accessible from iwl-*.c */
75extern struct iwl_mod_params iwl3945_mod_params;
76
77struct iwl3945_rate_scale_data {
78 u64 data;
79 s32 success_counter;
80 s32 success_ratio;
81 s32 counter;
82 s32 average_tpt;
83 unsigned long stamp;
84};
85
86struct iwl3945_rs_sta {
87 spinlock_t lock;
88 struct iwl_priv *priv;
89 s32 *expected_tpt;
90 unsigned long last_partial_flush;
91 unsigned long last_flush;
92 u32 flush_time;
93 u32 last_tx_packets;
94 u32 tx_packets;
95 u8 tgg;
96 u8 flush_pending;
97 u8 start_rate;
98 struct timer_list rate_scale_flush;
99 struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945];
100#ifdef CONFIG_MAC80211_DEBUGFS
101 struct dentry *rs_sta_dbgfs_stats_table_file;
102#endif
103
104 /* used to be in sta_info */
105 int last_txrate_idx;
106};
107
108
109/*
110 * The common struct MUST be first because it is shared between
111 * 3945 and 4965!
112 */
113struct iwl3945_sta_priv {
114 struct iwl_station_priv_common common;
115 struct iwl3945_rs_sta rs_sta;
116};
117
118enum iwl3945_antenna {
119 IWL_ANTENNA_DIVERSITY,
120 IWL_ANTENNA_MAIN,
121 IWL_ANTENNA_AUX
122};
123
124/*
125 * RTS threshold here is total size [2347] minus 4 FCS bytes
126 * Per spec:
127 * a value of 0 means RTS on all data/management packets
128 * a value > max MSDU size means no RTS
129 * else RTS for data/management frames where MPDU is larger
130 * than RTS value.
131 */
132#define DEFAULT_RTS_THRESHOLD 2347U
133#define MIN_RTS_THRESHOLD 0U
134#define MAX_RTS_THRESHOLD 2347U
135#define MAX_MSDU_SIZE 2304U
136#define MAX_MPDU_SIZE 2346U
137#define DEFAULT_BEACON_INTERVAL 100U
138#define DEFAULT_SHORT_RETRY_LIMIT 7U
139#define DEFAULT_LONG_RETRY_LIMIT 4U
140
141#define IWL_TX_FIFO_AC0 0
142#define IWL_TX_FIFO_AC1 1
143#define IWL_TX_FIFO_AC2 2
144#define IWL_TX_FIFO_AC3 3
145#define IWL_TX_FIFO_HCCA_1 5
146#define IWL_TX_FIFO_HCCA_2 6
147#define IWL_TX_FIFO_NONE 7
148
149#define IEEE80211_DATA_LEN 2304
150#define IEEE80211_4ADDR_LEN 30
151#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
152#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
153
154struct iwl3945_frame {
155 union {
156 struct ieee80211_hdr frame;
157 struct iwl3945_tx_beacon_cmd beacon;
158 u8 raw[IEEE80211_FRAME_LEN];
159 u8 cmd[360];
160 } u;
161 struct list_head list;
162};
163
164#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
165#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
166#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
167
168#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
169#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
170#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
171
172#define IWL_SUPPORTED_RATES_IE_LEN 8
173
174#define SCAN_INTERVAL 100
175
176#define MAX_TID_COUNT 9
177
178#define IWL_INVALID_RATE 0xFF
179#define IWL_INVALID_VALUE -1
180
181#define STA_PS_STATUS_WAKE 0
182#define STA_PS_STATUS_SLEEP 1
183
184struct iwl3945_ibss_seq {
185 u8 mac[ETH_ALEN];
186 u16 seq_num;
187 u16 frag_num;
188 unsigned long packet_time;
189 struct list_head list;
190};
191
192#define IWL_RX_HDR(x) ((struct iwl3945_rx_frame_hdr *)(\
193 x->u.rx_frame.stats.payload + \
194 x->u.rx_frame.stats.phy_count))
195#define IWL_RX_END(x) ((struct iwl3945_rx_frame_end *)(\
196 IWL_RX_HDR(x)->payload + \
197 le16_to_cpu(IWL_RX_HDR(x)->len)))
198#define IWL_RX_STATS(x) (&x->u.rx_frame.stats)
199#define IWL_RX_DATA(x) (IWL_RX_HDR(x)->payload)
200
201
202/******************************************************************************
203 *
204 * Functions implemented in iwl3945-base.c which are forward declared here
205 * for use by iwl-*.c
206 *
207 *****************************************************************************/
208extern int iwl3945_calc_db_from_ratio(int sig_ratio);
209extern void iwl3945_rx_replenish(void *data);
210extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
211extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
212 struct ieee80211_hdr *hdr, int left);
213extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
214 char **buf, bool display);
215extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
216
217/******************************************************************************
218 *
219 * Functions implemented in iwl-[34]*.c which are forward declared here
220 * for use by iwl3945-base.c
221 *
222 * NOTE: The implementation of these functions are hardware specific
223 * which is why they are in the hardware specific files (vs. iwl-base.c)
224 *
225 * Naming convention --
226 * iwl3945_ <-- Its part of iwlwifi (should be changed to iwl3945_)
227 * iwl3945_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
228 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
229 * iwl3945_bg_ <-- Called from work queue context
230 * iwl3945_mac_ <-- mac80211 callback
231 *
232 ****************************************************************************/
233extern void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv);
234extern void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv);
235extern void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv);
236extern int iwl3945_hw_rxq_stop(struct iwl_priv *priv);
237extern int iwl3945_hw_set_hw_params(struct iwl_priv *priv);
238extern int iwl3945_hw_nic_init(struct iwl_priv *priv);
239extern int iwl3945_hw_nic_stop_master(struct iwl_priv *priv);
240extern void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv);
241extern void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv);
242extern int iwl3945_hw_nic_reset(struct iwl_priv *priv);
243extern int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
244 struct iwl_tx_queue *txq,
245 dma_addr_t addr, u16 len,
246 u8 reset, u8 pad);
247extern void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv,
248 struct iwl_tx_queue *txq);
249extern int iwl3945_hw_get_temperature(struct iwl_priv *priv);
250extern int iwl3945_hw_tx_queue_init(struct iwl_priv *priv,
251 struct iwl_tx_queue *txq);
252extern unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
253 struct iwl3945_frame *frame, u8 rate);
254void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
255 struct iwl_device_cmd *cmd,
256 struct ieee80211_tx_info *info,
257 struct ieee80211_hdr *hdr,
258 int sta_id, int tx_id);
259extern int iwl3945_hw_reg_send_txpower(struct iwl_priv *priv);
260extern int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power);
261extern void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
262 struct iwl_rx_mem_buffer *rxb);
263void iwl3945_reply_statistics(struct iwl_priv *priv,
264 struct iwl_rx_mem_buffer *rxb);
265extern void iwl3945_disable_events(struct iwl_priv *priv);
266extern int iwl4965_get_temperature(const struct iwl_priv *priv);
267extern void iwl3945_post_associate(struct iwl_priv *priv);
268extern void iwl3945_config_ap(struct iwl_priv *priv);
269
270extern int iwl3945_commit_rxon(struct iwl_priv *priv,
271 struct iwl_rxon_context *ctx);
272
273/**
274 * iwl3945_hw_find_station - Find station id for a given BSSID
275 * @bssid: MAC address of station ID to find
276 *
277 * NOTE: This should not be hardware specific but the code has
278 * not yet been merged into a single common layer for managing the
279 * station tables.
280 */
281extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
282
283extern struct ieee80211_ops iwl3945_hw_ops;
284
285/*
286 * Forward declare iwl-3945.c functions for iwl3945-base.c
287 */
288extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv);
289extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv);
290extern void iwl3945_reg_txpower_periodic(struct iwl_priv *priv);
291extern int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv);
292
293extern const struct iwl_channel_info *iwl3945_get_channel_info(
294 const struct iwl_priv *priv, enum ieee80211_band band, u16 channel);
295
296extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate);
297
298/* scanning */
299int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
300void iwl3945_post_scan(struct iwl_priv *priv);
301
302/* rates */
303extern const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945];
304
305/* Requires full declaration of iwl_priv before including */
306#include "iwl-io.h"
307
308#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.h b/drivers/net/wireless/iwlegacy/iwl-4965-calib.h
deleted file mode 100644
index f46c80e6e005..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-calib.h
+++ /dev/null
@@ -1,75 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62#ifndef __iwl_4965_calib_h__
63#define __iwl_4965_calib_h__
64
65#include "iwl-dev.h"
66#include "iwl-core.h"
67#include "iwl-commands.h"
68
69void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp);
70void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp);
71void iwl4965_init_sensitivity(struct iwl_priv *priv);
72void iwl4965_reset_run_time_calib(struct iwl_priv *priv);
73void iwl4965_calib_free_results(struct iwl_priv *priv);
74
75#endif /* __iwl_4965_calib_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
deleted file mode 100644
index 1c93665766e4..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
+++ /dev/null
@@ -1,774 +0,0 @@
1/******************************************************************************
2*
3* GPL LICENSE SUMMARY
4*
5* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6*
7* This program is free software; you can redistribute it and/or modify
8* it under the terms of version 2 of the GNU General Public License as
9* published by the Free Software Foundation.
10*
11* This program is distributed in the hope that it will be useful, but
12* WITHOUT ANY WARRANTY; without even the implied warranty of
13* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14* General Public License for more details.
15*
16* You should have received a copy of the GNU General Public License
17* along with this program; if not, write to the Free Software
18* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19* USA
20*
21* The full GNU General Public License is included in this distribution
22* in the file called LICENSE.GPL.
23*
24* Contact Information:
25* Intel Linux Wireless <ilw@linux.intel.com>
26* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27*****************************************************************************/
28#include "iwl-4965.h"
29#include "iwl-4965-debugfs.h"
30
31static const char *fmt_value = " %-30s %10u\n";
32static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
33static const char *fmt_header =
34 "%-32s current cumulative delta max\n";
35
36static int iwl4965_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
37{
38 int p = 0;
39 u32 flag;
40
41 flag = le32_to_cpu(priv->_4965.statistics.flag);
42
43 p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
44 if (flag & UCODE_STATISTICS_CLEAR_MSK)
45 p += scnprintf(buf + p, bufsz - p,
46 "\tStatistics have been cleared\n");
47 p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
48 (flag & UCODE_STATISTICS_FREQUENCY_MSK)
49 ? "2.4 GHz" : "5.2 GHz");
50 p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
51 (flag & UCODE_STATISTICS_NARROW_BAND_MSK)
52 ? "enabled" : "disabled");
53
54 return p;
55}
56
57ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
58 size_t count, loff_t *ppos)
59{
60 struct iwl_priv *priv = file->private_data;
61 int pos = 0;
62 char *buf;
63 int bufsz = sizeof(struct statistics_rx_phy) * 40 +
64 sizeof(struct statistics_rx_non_phy) * 40 +
65 sizeof(struct statistics_rx_ht_phy) * 40 + 400;
66 ssize_t ret;
67 struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
68 struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
69 struct statistics_rx_non_phy *general, *accum_general;
70 struct statistics_rx_non_phy *delta_general, *max_general;
71 struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
72
73 if (!iwl_legacy_is_alive(priv))
74 return -EAGAIN;
75
76 buf = kzalloc(bufsz, GFP_KERNEL);
77 if (!buf) {
78 IWL_ERR(priv, "Can not allocate Buffer\n");
79 return -ENOMEM;
80 }
81
82 /*
83 * the statistic information display here is based on
84 * the last statistics notification from uCode
85 * might not reflect the current uCode activity
86 */
87 ofdm = &priv->_4965.statistics.rx.ofdm;
88 cck = &priv->_4965.statistics.rx.cck;
89 general = &priv->_4965.statistics.rx.general;
90 ht = &priv->_4965.statistics.rx.ofdm_ht;
91 accum_ofdm = &priv->_4965.accum_statistics.rx.ofdm;
92 accum_cck = &priv->_4965.accum_statistics.rx.cck;
93 accum_general = &priv->_4965.accum_statistics.rx.general;
94 accum_ht = &priv->_4965.accum_statistics.rx.ofdm_ht;
95 delta_ofdm = &priv->_4965.delta_statistics.rx.ofdm;
96 delta_cck = &priv->_4965.delta_statistics.rx.cck;
97 delta_general = &priv->_4965.delta_statistics.rx.general;
98 delta_ht = &priv->_4965.delta_statistics.rx.ofdm_ht;
99 max_ofdm = &priv->_4965.max_delta.rx.ofdm;
100 max_cck = &priv->_4965.max_delta.rx.cck;
101 max_general = &priv->_4965.max_delta.rx.general;
102 max_ht = &priv->_4965.max_delta.rx.ofdm_ht;
103
104 pos += iwl4965_statistics_flag(priv, buf, bufsz);
105 pos += scnprintf(buf + pos, bufsz - pos,
106 fmt_header, "Statistics_Rx - OFDM:");
107 pos += scnprintf(buf + pos, bufsz - pos,
108 fmt_table, "ina_cnt:",
109 le32_to_cpu(ofdm->ina_cnt),
110 accum_ofdm->ina_cnt,
111 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
112 pos += scnprintf(buf + pos, bufsz - pos,
113 fmt_table, "fina_cnt:",
114 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
115 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
116 pos += scnprintf(buf + pos, bufsz - pos,
117 fmt_table, "plcp_err:",
118 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
119 delta_ofdm->plcp_err, max_ofdm->plcp_err);
120 pos += scnprintf(buf + pos, bufsz - pos,
121 fmt_table, "crc32_err:",
122 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
123 delta_ofdm->crc32_err, max_ofdm->crc32_err);
124 pos += scnprintf(buf + pos, bufsz - pos,
125 fmt_table, "overrun_err:",
126 le32_to_cpu(ofdm->overrun_err),
127 accum_ofdm->overrun_err, delta_ofdm->overrun_err,
128 max_ofdm->overrun_err);
129 pos += scnprintf(buf + pos, bufsz - pos,
130 fmt_table, "early_overrun_err:",
131 le32_to_cpu(ofdm->early_overrun_err),
132 accum_ofdm->early_overrun_err,
133 delta_ofdm->early_overrun_err,
134 max_ofdm->early_overrun_err);
135 pos += scnprintf(buf + pos, bufsz - pos,
136 fmt_table, "crc32_good:",
137 le32_to_cpu(ofdm->crc32_good),
138 accum_ofdm->crc32_good, delta_ofdm->crc32_good,
139 max_ofdm->crc32_good);
140 pos += scnprintf(buf + pos, bufsz - pos,
141 fmt_table, "false_alarm_cnt:",
142 le32_to_cpu(ofdm->false_alarm_cnt),
143 accum_ofdm->false_alarm_cnt,
144 delta_ofdm->false_alarm_cnt,
145 max_ofdm->false_alarm_cnt);
146 pos += scnprintf(buf + pos, bufsz - pos,
147 fmt_table, "fina_sync_err_cnt:",
148 le32_to_cpu(ofdm->fina_sync_err_cnt),
149 accum_ofdm->fina_sync_err_cnt,
150 delta_ofdm->fina_sync_err_cnt,
151 max_ofdm->fina_sync_err_cnt);
152 pos += scnprintf(buf + pos, bufsz - pos,
153 fmt_table, "sfd_timeout:",
154 le32_to_cpu(ofdm->sfd_timeout),
155 accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
156 max_ofdm->sfd_timeout);
157 pos += scnprintf(buf + pos, bufsz - pos,
158 fmt_table, "fina_timeout:",
159 le32_to_cpu(ofdm->fina_timeout),
160 accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
161 max_ofdm->fina_timeout);
162 pos += scnprintf(buf + pos, bufsz - pos,
163 fmt_table, "unresponded_rts:",
164 le32_to_cpu(ofdm->unresponded_rts),
165 accum_ofdm->unresponded_rts,
166 delta_ofdm->unresponded_rts,
167 max_ofdm->unresponded_rts);
168 pos += scnprintf(buf + pos, bufsz - pos,
169 fmt_table, "rxe_frame_lmt_ovrun:",
170 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
171 accum_ofdm->rxe_frame_limit_overrun,
172 delta_ofdm->rxe_frame_limit_overrun,
173 max_ofdm->rxe_frame_limit_overrun);
174 pos += scnprintf(buf + pos, bufsz - pos,
175 fmt_table, "sent_ack_cnt:",
176 le32_to_cpu(ofdm->sent_ack_cnt),
177 accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
178 max_ofdm->sent_ack_cnt);
179 pos += scnprintf(buf + pos, bufsz - pos,
180 fmt_table, "sent_cts_cnt:",
181 le32_to_cpu(ofdm->sent_cts_cnt),
182 accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
183 max_ofdm->sent_cts_cnt);
184 pos += scnprintf(buf + pos, bufsz - pos,
185 fmt_table, "sent_ba_rsp_cnt:",
186 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
187 accum_ofdm->sent_ba_rsp_cnt,
188 delta_ofdm->sent_ba_rsp_cnt,
189 max_ofdm->sent_ba_rsp_cnt);
190 pos += scnprintf(buf + pos, bufsz - pos,
191 fmt_table, "dsp_self_kill:",
192 le32_to_cpu(ofdm->dsp_self_kill),
193 accum_ofdm->dsp_self_kill,
194 delta_ofdm->dsp_self_kill,
195 max_ofdm->dsp_self_kill);
196 pos += scnprintf(buf + pos, bufsz - pos,
197 fmt_table, "mh_format_err:",
198 le32_to_cpu(ofdm->mh_format_err),
199 accum_ofdm->mh_format_err,
200 delta_ofdm->mh_format_err,
201 max_ofdm->mh_format_err);
202 pos += scnprintf(buf + pos, bufsz - pos,
203 fmt_table, "re_acq_main_rssi_sum:",
204 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
205 accum_ofdm->re_acq_main_rssi_sum,
206 delta_ofdm->re_acq_main_rssi_sum,
207 max_ofdm->re_acq_main_rssi_sum);
208
209 pos += scnprintf(buf + pos, bufsz - pos,
210 fmt_header, "Statistics_Rx - CCK:");
211 pos += scnprintf(buf + pos, bufsz - pos,
212 fmt_table, "ina_cnt:",
213 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
214 delta_cck->ina_cnt, max_cck->ina_cnt);
215 pos += scnprintf(buf + pos, bufsz - pos,
216 fmt_table, "fina_cnt:",
217 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
218 delta_cck->fina_cnt, max_cck->fina_cnt);
219 pos += scnprintf(buf + pos, bufsz - pos,
220 fmt_table, "plcp_err:",
221 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
222 delta_cck->plcp_err, max_cck->plcp_err);
223 pos += scnprintf(buf + pos, bufsz - pos,
224 fmt_table, "crc32_err:",
225 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
226 delta_cck->crc32_err, max_cck->crc32_err);
227 pos += scnprintf(buf + pos, bufsz - pos,
228 fmt_table, "overrun_err:",
229 le32_to_cpu(cck->overrun_err),
230 accum_cck->overrun_err, delta_cck->overrun_err,
231 max_cck->overrun_err);
232 pos += scnprintf(buf + pos, bufsz - pos,
233 fmt_table, "early_overrun_err:",
234 le32_to_cpu(cck->early_overrun_err),
235 accum_cck->early_overrun_err,
236 delta_cck->early_overrun_err,
237 max_cck->early_overrun_err);
238 pos += scnprintf(buf + pos, bufsz - pos,
239 fmt_table, "crc32_good:",
240 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
241 delta_cck->crc32_good, max_cck->crc32_good);
242 pos += scnprintf(buf + pos, bufsz - pos,
243 fmt_table, "false_alarm_cnt:",
244 le32_to_cpu(cck->false_alarm_cnt),
245 accum_cck->false_alarm_cnt,
246 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
247 pos += scnprintf(buf + pos, bufsz - pos,
248 fmt_table, "fina_sync_err_cnt:",
249 le32_to_cpu(cck->fina_sync_err_cnt),
250 accum_cck->fina_sync_err_cnt,
251 delta_cck->fina_sync_err_cnt,
252 max_cck->fina_sync_err_cnt);
253 pos += scnprintf(buf + pos, bufsz - pos,
254 fmt_table, "sfd_timeout:",
255 le32_to_cpu(cck->sfd_timeout),
256 accum_cck->sfd_timeout, delta_cck->sfd_timeout,
257 max_cck->sfd_timeout);
258 pos += scnprintf(buf + pos, bufsz - pos,
259 fmt_table, "fina_timeout:",
260 le32_to_cpu(cck->fina_timeout),
261 accum_cck->fina_timeout, delta_cck->fina_timeout,
262 max_cck->fina_timeout);
263 pos += scnprintf(buf + pos, bufsz - pos,
264 fmt_table, "unresponded_rts:",
265 le32_to_cpu(cck->unresponded_rts),
266 accum_cck->unresponded_rts, delta_cck->unresponded_rts,
267 max_cck->unresponded_rts);
268 pos += scnprintf(buf + pos, bufsz - pos,
269 fmt_table, "rxe_frame_lmt_ovrun:",
270 le32_to_cpu(cck->rxe_frame_limit_overrun),
271 accum_cck->rxe_frame_limit_overrun,
272 delta_cck->rxe_frame_limit_overrun,
273 max_cck->rxe_frame_limit_overrun);
274 pos += scnprintf(buf + pos, bufsz - pos,
275 fmt_table, "sent_ack_cnt:",
276 le32_to_cpu(cck->sent_ack_cnt),
277 accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
278 max_cck->sent_ack_cnt);
279 pos += scnprintf(buf + pos, bufsz - pos,
280 fmt_table, "sent_cts_cnt:",
281 le32_to_cpu(cck->sent_cts_cnt),
282 accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
283 max_cck->sent_cts_cnt);
284 pos += scnprintf(buf + pos, bufsz - pos,
285 fmt_table, "sent_ba_rsp_cnt:",
286 le32_to_cpu(cck->sent_ba_rsp_cnt),
287 accum_cck->sent_ba_rsp_cnt,
288 delta_cck->sent_ba_rsp_cnt,
289 max_cck->sent_ba_rsp_cnt);
290 pos += scnprintf(buf + pos, bufsz - pos,
291 fmt_table, "dsp_self_kill:",
292 le32_to_cpu(cck->dsp_self_kill),
293 accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
294 max_cck->dsp_self_kill);
295 pos += scnprintf(buf + pos, bufsz - pos,
296 fmt_table, "mh_format_err:",
297 le32_to_cpu(cck->mh_format_err),
298 accum_cck->mh_format_err, delta_cck->mh_format_err,
299 max_cck->mh_format_err);
300 pos += scnprintf(buf + pos, bufsz - pos,
301 fmt_table, "re_acq_main_rssi_sum:",
302 le32_to_cpu(cck->re_acq_main_rssi_sum),
303 accum_cck->re_acq_main_rssi_sum,
304 delta_cck->re_acq_main_rssi_sum,
305 max_cck->re_acq_main_rssi_sum);
306
307 pos += scnprintf(buf + pos, bufsz - pos,
308 fmt_header, "Statistics_Rx - GENERAL:");
309 pos += scnprintf(buf + pos, bufsz - pos,
310 fmt_table, "bogus_cts:",
311 le32_to_cpu(general->bogus_cts),
312 accum_general->bogus_cts, delta_general->bogus_cts,
313 max_general->bogus_cts);
314 pos += scnprintf(buf + pos, bufsz - pos,
315 fmt_table, "bogus_ack:",
316 le32_to_cpu(general->bogus_ack),
317 accum_general->bogus_ack, delta_general->bogus_ack,
318 max_general->bogus_ack);
319 pos += scnprintf(buf + pos, bufsz - pos,
320 fmt_table, "non_bssid_frames:",
321 le32_to_cpu(general->non_bssid_frames),
322 accum_general->non_bssid_frames,
323 delta_general->non_bssid_frames,
324 max_general->non_bssid_frames);
325 pos += scnprintf(buf + pos, bufsz - pos,
326 fmt_table, "filtered_frames:",
327 le32_to_cpu(general->filtered_frames),
328 accum_general->filtered_frames,
329 delta_general->filtered_frames,
330 max_general->filtered_frames);
331 pos += scnprintf(buf + pos, bufsz - pos,
332 fmt_table, "non_channel_beacons:",
333 le32_to_cpu(general->non_channel_beacons),
334 accum_general->non_channel_beacons,
335 delta_general->non_channel_beacons,
336 max_general->non_channel_beacons);
337 pos += scnprintf(buf + pos, bufsz - pos,
338 fmt_table, "channel_beacons:",
339 le32_to_cpu(general->channel_beacons),
340 accum_general->channel_beacons,
341 delta_general->channel_beacons,
342 max_general->channel_beacons);
343 pos += scnprintf(buf + pos, bufsz - pos,
344 fmt_table, "num_missed_bcon:",
345 le32_to_cpu(general->num_missed_bcon),
346 accum_general->num_missed_bcon,
347 delta_general->num_missed_bcon,
348 max_general->num_missed_bcon);
349 pos += scnprintf(buf + pos, bufsz - pos,
350 fmt_table, "adc_rx_saturation_time:",
351 le32_to_cpu(general->adc_rx_saturation_time),
352 accum_general->adc_rx_saturation_time,
353 delta_general->adc_rx_saturation_time,
354 max_general->adc_rx_saturation_time);
355 pos += scnprintf(buf + pos, bufsz - pos,
356 fmt_table, "ina_detect_search_tm:",
357 le32_to_cpu(general->ina_detection_search_time),
358 accum_general->ina_detection_search_time,
359 delta_general->ina_detection_search_time,
360 max_general->ina_detection_search_time);
361 pos += scnprintf(buf + pos, bufsz - pos,
362 fmt_table, "beacon_silence_rssi_a:",
363 le32_to_cpu(general->beacon_silence_rssi_a),
364 accum_general->beacon_silence_rssi_a,
365 delta_general->beacon_silence_rssi_a,
366 max_general->beacon_silence_rssi_a);
367 pos += scnprintf(buf + pos, bufsz - pos,
368 fmt_table, "beacon_silence_rssi_b:",
369 le32_to_cpu(general->beacon_silence_rssi_b),
370 accum_general->beacon_silence_rssi_b,
371 delta_general->beacon_silence_rssi_b,
372 max_general->beacon_silence_rssi_b);
373 pos += scnprintf(buf + pos, bufsz - pos,
374 fmt_table, "beacon_silence_rssi_c:",
375 le32_to_cpu(general->beacon_silence_rssi_c),
376 accum_general->beacon_silence_rssi_c,
377 delta_general->beacon_silence_rssi_c,
378 max_general->beacon_silence_rssi_c);
379 pos += scnprintf(buf + pos, bufsz - pos,
380 fmt_table, "interference_data_flag:",
381 le32_to_cpu(general->interference_data_flag),
382 accum_general->interference_data_flag,
383 delta_general->interference_data_flag,
384 max_general->interference_data_flag);
385 pos += scnprintf(buf + pos, bufsz - pos,
386 fmt_table, "channel_load:",
387 le32_to_cpu(general->channel_load),
388 accum_general->channel_load,
389 delta_general->channel_load,
390 max_general->channel_load);
391 pos += scnprintf(buf + pos, bufsz - pos,
392 fmt_table, "dsp_false_alarms:",
393 le32_to_cpu(general->dsp_false_alarms),
394 accum_general->dsp_false_alarms,
395 delta_general->dsp_false_alarms,
396 max_general->dsp_false_alarms);
397 pos += scnprintf(buf + pos, bufsz - pos,
398 fmt_table, "beacon_rssi_a:",
399 le32_to_cpu(general->beacon_rssi_a),
400 accum_general->beacon_rssi_a,
401 delta_general->beacon_rssi_a,
402 max_general->beacon_rssi_a);
403 pos += scnprintf(buf + pos, bufsz - pos,
404 fmt_table, "beacon_rssi_b:",
405 le32_to_cpu(general->beacon_rssi_b),
406 accum_general->beacon_rssi_b,
407 delta_general->beacon_rssi_b,
408 max_general->beacon_rssi_b);
409 pos += scnprintf(buf + pos, bufsz - pos,
410 fmt_table, "beacon_rssi_c:",
411 le32_to_cpu(general->beacon_rssi_c),
412 accum_general->beacon_rssi_c,
413 delta_general->beacon_rssi_c,
414 max_general->beacon_rssi_c);
415 pos += scnprintf(buf + pos, bufsz - pos,
416 fmt_table, "beacon_energy_a:",
417 le32_to_cpu(general->beacon_energy_a),
418 accum_general->beacon_energy_a,
419 delta_general->beacon_energy_a,
420 max_general->beacon_energy_a);
421 pos += scnprintf(buf + pos, bufsz - pos,
422 fmt_table, "beacon_energy_b:",
423 le32_to_cpu(general->beacon_energy_b),
424 accum_general->beacon_energy_b,
425 delta_general->beacon_energy_b,
426 max_general->beacon_energy_b);
427 pos += scnprintf(buf + pos, bufsz - pos,
428 fmt_table, "beacon_energy_c:",
429 le32_to_cpu(general->beacon_energy_c),
430 accum_general->beacon_energy_c,
431 delta_general->beacon_energy_c,
432 max_general->beacon_energy_c);
433
434 pos += scnprintf(buf + pos, bufsz - pos,
435 fmt_header, "Statistics_Rx - OFDM_HT:");
436 pos += scnprintf(buf + pos, bufsz - pos,
437 fmt_table, "plcp_err:",
438 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
439 delta_ht->plcp_err, max_ht->plcp_err);
440 pos += scnprintf(buf + pos, bufsz - pos,
441 fmt_table, "overrun_err:",
442 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
443 delta_ht->overrun_err, max_ht->overrun_err);
444 pos += scnprintf(buf + pos, bufsz - pos,
445 fmt_table, "early_overrun_err:",
446 le32_to_cpu(ht->early_overrun_err),
447 accum_ht->early_overrun_err,
448 delta_ht->early_overrun_err,
449 max_ht->early_overrun_err);
450 pos += scnprintf(buf + pos, bufsz - pos,
451 fmt_table, "crc32_good:",
452 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
453 delta_ht->crc32_good, max_ht->crc32_good);
454 pos += scnprintf(buf + pos, bufsz - pos,
455 fmt_table, "crc32_err:",
456 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
457 delta_ht->crc32_err, max_ht->crc32_err);
458 pos += scnprintf(buf + pos, bufsz - pos,
459 fmt_table, "mh_format_err:",
460 le32_to_cpu(ht->mh_format_err),
461 accum_ht->mh_format_err,
462 delta_ht->mh_format_err, max_ht->mh_format_err);
463 pos += scnprintf(buf + pos, bufsz - pos,
464 fmt_table, "agg_crc32_good:",
465 le32_to_cpu(ht->agg_crc32_good),
466 accum_ht->agg_crc32_good,
467 delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
468 pos += scnprintf(buf + pos, bufsz - pos,
469 fmt_table, "agg_mpdu_cnt:",
470 le32_to_cpu(ht->agg_mpdu_cnt),
471 accum_ht->agg_mpdu_cnt,
472 delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
473 pos += scnprintf(buf + pos, bufsz - pos,
474 fmt_table, "agg_cnt:",
475 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
476 delta_ht->agg_cnt, max_ht->agg_cnt);
477 pos += scnprintf(buf + pos, bufsz - pos,
478 fmt_table, "unsupport_mcs:",
479 le32_to_cpu(ht->unsupport_mcs),
480 accum_ht->unsupport_mcs,
481 delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
482
483 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
484 kfree(buf);
485 return ret;
486}
487
488ssize_t iwl4965_ucode_tx_stats_read(struct file *file,
489 char __user *user_buf,
490 size_t count, loff_t *ppos)
491{
492 struct iwl_priv *priv = file->private_data;
493 int pos = 0;
494 char *buf;
495 int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
496 ssize_t ret;
497 struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
498
499 if (!iwl_legacy_is_alive(priv))
500 return -EAGAIN;
501
502 buf = kzalloc(bufsz, GFP_KERNEL);
503 if (!buf) {
504 IWL_ERR(priv, "Can not allocate Buffer\n");
505 return -ENOMEM;
506 }
507
508 /* the statistic information display here is based on
509 * the last statistics notification from uCode
510 * might not reflect the current uCode activity
511 */
512 tx = &priv->_4965.statistics.tx;
513 accum_tx = &priv->_4965.accum_statistics.tx;
514 delta_tx = &priv->_4965.delta_statistics.tx;
515 max_tx = &priv->_4965.max_delta.tx;
516
517 pos += iwl4965_statistics_flag(priv, buf, bufsz);
518 pos += scnprintf(buf + pos, bufsz - pos,
519 fmt_header, "Statistics_Tx:");
520 pos += scnprintf(buf + pos, bufsz - pos,
521 fmt_table, "preamble:",
522 le32_to_cpu(tx->preamble_cnt),
523 accum_tx->preamble_cnt,
524 delta_tx->preamble_cnt, max_tx->preamble_cnt);
525 pos += scnprintf(buf + pos, bufsz - pos,
526 fmt_table, "rx_detected_cnt:",
527 le32_to_cpu(tx->rx_detected_cnt),
528 accum_tx->rx_detected_cnt,
529 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
530 pos += scnprintf(buf + pos, bufsz - pos,
531 fmt_table, "bt_prio_defer_cnt:",
532 le32_to_cpu(tx->bt_prio_defer_cnt),
533 accum_tx->bt_prio_defer_cnt,
534 delta_tx->bt_prio_defer_cnt,
535 max_tx->bt_prio_defer_cnt);
536 pos += scnprintf(buf + pos, bufsz - pos,
537 fmt_table, "bt_prio_kill_cnt:",
538 le32_to_cpu(tx->bt_prio_kill_cnt),
539 accum_tx->bt_prio_kill_cnt,
540 delta_tx->bt_prio_kill_cnt,
541 max_tx->bt_prio_kill_cnt);
542 pos += scnprintf(buf + pos, bufsz - pos,
543 fmt_table, "few_bytes_cnt:",
544 le32_to_cpu(tx->few_bytes_cnt),
545 accum_tx->few_bytes_cnt,
546 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
547 pos += scnprintf(buf + pos, bufsz - pos,
548 fmt_table, "cts_timeout:",
549 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
550 delta_tx->cts_timeout, max_tx->cts_timeout);
551 pos += scnprintf(buf + pos, bufsz - pos,
552 fmt_table, "ack_timeout:",
553 le32_to_cpu(tx->ack_timeout),
554 accum_tx->ack_timeout,
555 delta_tx->ack_timeout, max_tx->ack_timeout);
556 pos += scnprintf(buf + pos, bufsz - pos,
557 fmt_table, "expected_ack_cnt:",
558 le32_to_cpu(tx->expected_ack_cnt),
559 accum_tx->expected_ack_cnt,
560 delta_tx->expected_ack_cnt,
561 max_tx->expected_ack_cnt);
562 pos += scnprintf(buf + pos, bufsz - pos,
563 fmt_table, "actual_ack_cnt:",
564 le32_to_cpu(tx->actual_ack_cnt),
565 accum_tx->actual_ack_cnt,
566 delta_tx->actual_ack_cnt,
567 max_tx->actual_ack_cnt);
568 pos += scnprintf(buf + pos, bufsz - pos,
569 fmt_table, "dump_msdu_cnt:",
570 le32_to_cpu(tx->dump_msdu_cnt),
571 accum_tx->dump_msdu_cnt,
572 delta_tx->dump_msdu_cnt,
573 max_tx->dump_msdu_cnt);
574 pos += scnprintf(buf + pos, bufsz - pos,
575 fmt_table, "abort_nxt_frame_mismatch:",
576 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
577 accum_tx->burst_abort_next_frame_mismatch_cnt,
578 delta_tx->burst_abort_next_frame_mismatch_cnt,
579 max_tx->burst_abort_next_frame_mismatch_cnt);
580 pos += scnprintf(buf + pos, bufsz - pos,
581 fmt_table, "abort_missing_nxt_frame:",
582 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
583 accum_tx->burst_abort_missing_next_frame_cnt,
584 delta_tx->burst_abort_missing_next_frame_cnt,
585 max_tx->burst_abort_missing_next_frame_cnt);
586 pos += scnprintf(buf + pos, bufsz - pos,
587 fmt_table, "cts_timeout_collision:",
588 le32_to_cpu(tx->cts_timeout_collision),
589 accum_tx->cts_timeout_collision,
590 delta_tx->cts_timeout_collision,
591 max_tx->cts_timeout_collision);
592 pos += scnprintf(buf + pos, bufsz - pos,
593 fmt_table, "ack_ba_timeout_collision:",
594 le32_to_cpu(tx->ack_or_ba_timeout_collision),
595 accum_tx->ack_or_ba_timeout_collision,
596 delta_tx->ack_or_ba_timeout_collision,
597 max_tx->ack_or_ba_timeout_collision);
598 pos += scnprintf(buf + pos, bufsz - pos,
599 fmt_table, "agg ba_timeout:",
600 le32_to_cpu(tx->agg.ba_timeout),
601 accum_tx->agg.ba_timeout,
602 delta_tx->agg.ba_timeout,
603 max_tx->agg.ba_timeout);
604 pos += scnprintf(buf + pos, bufsz - pos,
605 fmt_table, "agg ba_resched_frames:",
606 le32_to_cpu(tx->agg.ba_reschedule_frames),
607 accum_tx->agg.ba_reschedule_frames,
608 delta_tx->agg.ba_reschedule_frames,
609 max_tx->agg.ba_reschedule_frames);
610 pos += scnprintf(buf + pos, bufsz - pos,
611 fmt_table, "agg scd_query_agg_frame:",
612 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
613 accum_tx->agg.scd_query_agg_frame_cnt,
614 delta_tx->agg.scd_query_agg_frame_cnt,
615 max_tx->agg.scd_query_agg_frame_cnt);
616 pos += scnprintf(buf + pos, bufsz - pos,
617 fmt_table, "agg scd_query_no_agg:",
618 le32_to_cpu(tx->agg.scd_query_no_agg),
619 accum_tx->agg.scd_query_no_agg,
620 delta_tx->agg.scd_query_no_agg,
621 max_tx->agg.scd_query_no_agg);
622 pos += scnprintf(buf + pos, bufsz - pos,
623 fmt_table, "agg scd_query_agg:",
624 le32_to_cpu(tx->agg.scd_query_agg),
625 accum_tx->agg.scd_query_agg,
626 delta_tx->agg.scd_query_agg,
627 max_tx->agg.scd_query_agg);
628 pos += scnprintf(buf + pos, bufsz - pos,
629 fmt_table, "agg scd_query_mismatch:",
630 le32_to_cpu(tx->agg.scd_query_mismatch),
631 accum_tx->agg.scd_query_mismatch,
632 delta_tx->agg.scd_query_mismatch,
633 max_tx->agg.scd_query_mismatch);
634 pos += scnprintf(buf + pos, bufsz - pos,
635 fmt_table, "agg frame_not_ready:",
636 le32_to_cpu(tx->agg.frame_not_ready),
637 accum_tx->agg.frame_not_ready,
638 delta_tx->agg.frame_not_ready,
639 max_tx->agg.frame_not_ready);
640 pos += scnprintf(buf + pos, bufsz - pos,
641 fmt_table, "agg underrun:",
642 le32_to_cpu(tx->agg.underrun),
643 accum_tx->agg.underrun,
644 delta_tx->agg.underrun, max_tx->agg.underrun);
645 pos += scnprintf(buf + pos, bufsz - pos,
646 fmt_table, "agg bt_prio_kill:",
647 le32_to_cpu(tx->agg.bt_prio_kill),
648 accum_tx->agg.bt_prio_kill,
649 delta_tx->agg.bt_prio_kill,
650 max_tx->agg.bt_prio_kill);
651 pos += scnprintf(buf + pos, bufsz - pos,
652 fmt_table, "agg rx_ba_rsp_cnt:",
653 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
654 accum_tx->agg.rx_ba_rsp_cnt,
655 delta_tx->agg.rx_ba_rsp_cnt,
656 max_tx->agg.rx_ba_rsp_cnt);
657
658 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
659 kfree(buf);
660 return ret;
661}
662
663ssize_t
664iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
665 size_t count, loff_t *ppos)
666{
667 struct iwl_priv *priv = file->private_data;
668 int pos = 0;
669 char *buf;
670 int bufsz = sizeof(struct statistics_general) * 10 + 300;
671 ssize_t ret;
672 struct statistics_general_common *general, *accum_general;
673 struct statistics_general_common *delta_general, *max_general;
674 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
675 struct statistics_div *div, *accum_div, *delta_div, *max_div;
676
677 if (!iwl_legacy_is_alive(priv))
678 return -EAGAIN;
679
680 buf = kzalloc(bufsz, GFP_KERNEL);
681 if (!buf) {
682 IWL_ERR(priv, "Can not allocate Buffer\n");
683 return -ENOMEM;
684 }
685
686 /* the statistic information display here is based on
687 * the last statistics notification from uCode
688 * might not reflect the current uCode activity
689 */
690 general = &priv->_4965.statistics.general.common;
691 dbg = &priv->_4965.statistics.general.common.dbg;
692 div = &priv->_4965.statistics.general.common.div;
693 accum_general = &priv->_4965.accum_statistics.general.common;
694 accum_dbg = &priv->_4965.accum_statistics.general.common.dbg;
695 accum_div = &priv->_4965.accum_statistics.general.common.div;
696 delta_general = &priv->_4965.delta_statistics.general.common;
697 max_general = &priv->_4965.max_delta.general.common;
698 delta_dbg = &priv->_4965.delta_statistics.general.common.dbg;
699 max_dbg = &priv->_4965.max_delta.general.common.dbg;
700 delta_div = &priv->_4965.delta_statistics.general.common.div;
701 max_div = &priv->_4965.max_delta.general.common.div;
702
703 pos += iwl4965_statistics_flag(priv, buf, bufsz);
704 pos += scnprintf(buf + pos, bufsz - pos,
705 fmt_header, "Statistics_General:");
706 pos += scnprintf(buf + pos, bufsz - pos,
707 fmt_value, "temperature:",
708 le32_to_cpu(general->temperature));
709 pos += scnprintf(buf + pos, bufsz - pos,
710 fmt_value, "ttl_timestamp:",
711 le32_to_cpu(general->ttl_timestamp));
712 pos += scnprintf(buf + pos, bufsz - pos,
713 fmt_table, "burst_check:",
714 le32_to_cpu(dbg->burst_check),
715 accum_dbg->burst_check,
716 delta_dbg->burst_check, max_dbg->burst_check);
717 pos += scnprintf(buf + pos, bufsz - pos,
718 fmt_table, "burst_count:",
719 le32_to_cpu(dbg->burst_count),
720 accum_dbg->burst_count,
721 delta_dbg->burst_count, max_dbg->burst_count);
722 pos += scnprintf(buf + pos, bufsz - pos,
723 fmt_table, "wait_for_silence_timeout_count:",
724 le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
725 accum_dbg->wait_for_silence_timeout_cnt,
726 delta_dbg->wait_for_silence_timeout_cnt,
727 max_dbg->wait_for_silence_timeout_cnt);
728 pos += scnprintf(buf + pos, bufsz - pos,
729 fmt_table, "sleep_time:",
730 le32_to_cpu(general->sleep_time),
731 accum_general->sleep_time,
732 delta_general->sleep_time, max_general->sleep_time);
733 pos += scnprintf(buf + pos, bufsz - pos,
734 fmt_table, "slots_out:",
735 le32_to_cpu(general->slots_out),
736 accum_general->slots_out,
737 delta_general->slots_out, max_general->slots_out);
738 pos += scnprintf(buf + pos, bufsz - pos,
739 fmt_table, "slots_idle:",
740 le32_to_cpu(general->slots_idle),
741 accum_general->slots_idle,
742 delta_general->slots_idle, max_general->slots_idle);
743 pos += scnprintf(buf + pos, bufsz - pos,
744 fmt_table, "tx_on_a:",
745 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
746 delta_div->tx_on_a, max_div->tx_on_a);
747 pos += scnprintf(buf + pos, bufsz - pos,
748 fmt_table, "tx_on_b:",
749 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
750 delta_div->tx_on_b, max_div->tx_on_b);
751 pos += scnprintf(buf + pos, bufsz - pos,
752 fmt_table, "exec_time:",
753 le32_to_cpu(div->exec_time), accum_div->exec_time,
754 delta_div->exec_time, max_div->exec_time);
755 pos += scnprintf(buf + pos, bufsz - pos,
756 fmt_table, "probe_time:",
757 le32_to_cpu(div->probe_time), accum_div->probe_time,
758 delta_div->probe_time, max_div->probe_time);
759 pos += scnprintf(buf + pos, bufsz - pos,
760 fmt_table, "rx_enable_counter:",
761 le32_to_cpu(general->rx_enable_counter),
762 accum_general->rx_enable_counter,
763 delta_general->rx_enable_counter,
764 max_general->rx_enable_counter);
765 pos += scnprintf(buf + pos, bufsz - pos,
766 fmt_table, "num_of_sos_states:",
767 le32_to_cpu(general->num_of_sos_states),
768 accum_general->num_of_sos_states,
769 delta_general->num_of_sos_states,
770 max_general->num_of_sos_states);
771 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
772 kfree(buf);
773 return ret;
774}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
deleted file mode 100644
index 6c8e35361a9e..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
+++ /dev/null
@@ -1,59 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include "iwl-dev.h"
30#include "iwl-core.h"
31#include "iwl-debug.h"
32
33#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
34ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
35 size_t count, loff_t *ppos);
36ssize_t iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
37 size_t count, loff_t *ppos);
38ssize_t iwl4965_ucode_general_stats_read(struct file *file,
39 char __user *user_buf, size_t count, loff_t *ppos);
40#else
41static ssize_t
42iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
43 size_t count, loff_t *ppos)
44{
45 return 0;
46}
47static ssize_t
48iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
49 size_t count, loff_t *ppos)
50{
51 return 0;
52}
53static ssize_t
54iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
55 size_t count, loff_t *ppos)
56{
57 return 0;
58}
59#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
deleted file mode 100644
index cb9baab1ff7d..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
+++ /dev/null
@@ -1,154 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63
64#include <linux/kernel.h>
65#include <linux/module.h>
66#include <linux/slab.h>
67#include <linux/init.h>
68
69#include <net/mac80211.h>
70
71#include "iwl-commands.h"
72#include "iwl-dev.h"
73#include "iwl-core.h"
74#include "iwl-debug.h"
75#include "iwl-4965.h"
76#include "iwl-io.h"
77
78/******************************************************************************
79 *
80 * EEPROM related functions
81 *
82******************************************************************************/
83
84/*
85 * The device's EEPROM semaphore prevents conflicts between driver and uCode
86 * when accessing the EEPROM; each access is a series of pulses to/from the
87 * EEPROM chip, not a single event, so even reads could conflict if they
88 * weren't arbitrated by the semaphore.
89 */
90int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv)
91{
92 u16 count;
93 int ret;
94
95 for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
96 /* Request semaphore */
97 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
98 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
99
100 /* See if we got it */
101 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
102 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
103 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
104 EEPROM_SEM_TIMEOUT);
105 if (ret >= 0) {
106 IWL_DEBUG_IO(priv,
107 "Acquired semaphore after %d tries.\n",
108 count+1);
109 return ret;
110 }
111 }
112
113 return ret;
114}
115
116void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv)
117{
118 iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
119 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
120
121}
122
123int iwl4965_eeprom_check_version(struct iwl_priv *priv)
124{
125 u16 eeprom_ver;
126 u16 calib_ver;
127
128 eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
129 calib_ver = iwl_legacy_eeprom_query16(priv,
130 EEPROM_4965_CALIB_VERSION_OFFSET);
131
132 if (eeprom_ver < priv->cfg->eeprom_ver ||
133 calib_ver < priv->cfg->eeprom_calib_ver)
134 goto err;
135
136 IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
137 eeprom_ver, calib_ver);
138
139 return 0;
140err:
141 IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
142 "CALIB=0x%x < 0x%x\n",
143 eeprom_ver, priv->cfg->eeprom_ver,
144 calib_ver, priv->cfg->eeprom_calib_ver);
145 return -EINVAL;
146
147}
148
149void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
150{
151 const u8 *addr = iwl_legacy_eeprom_query_addr(priv,
152 EEPROM_MAC_ADDRESS);
153 memcpy(mac, addr, ETH_ALEN);
154}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
deleted file mode 100644
index fc6fa2886d9c..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
+++ /dev/null
@@ -1,811 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63/*
64 * Please use this file (iwl-4965-hw.h) only for hardware-related definitions.
65 * Use iwl-commands.h for uCode API definitions.
66 * Use iwl-dev.h for driver implementation definitions.
67 */
68
69#ifndef __iwl_4965_hw_h__
70#define __iwl_4965_hw_h__
71
72#include "iwl-fh.h"
73
74/* EEPROM */
75#define IWL4965_EEPROM_IMG_SIZE 1024
76
77/*
78 * uCode queue management definitions ...
79 * The first queue used for block-ack aggregation is #7 (4965 only).
80 * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
81 */
82#define IWL49_FIRST_AMPDU_QUEUE 7
83
84/* Sizes and addresses for instruction and data memory (SRAM) in
85 * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
86#define IWL49_RTC_INST_LOWER_BOUND (0x000000)
87#define IWL49_RTC_INST_UPPER_BOUND (0x018000)
88
89#define IWL49_RTC_DATA_LOWER_BOUND (0x800000)
90#define IWL49_RTC_DATA_UPPER_BOUND (0x80A000)
91
92#define IWL49_RTC_INST_SIZE (IWL49_RTC_INST_UPPER_BOUND - \
93 IWL49_RTC_INST_LOWER_BOUND)
94#define IWL49_RTC_DATA_SIZE (IWL49_RTC_DATA_UPPER_BOUND - \
95 IWL49_RTC_DATA_LOWER_BOUND)
96
97#define IWL49_MAX_INST_SIZE IWL49_RTC_INST_SIZE
98#define IWL49_MAX_DATA_SIZE IWL49_RTC_DATA_SIZE
99
100/* Size of uCode instruction memory in bootstrap state machine */
101#define IWL49_MAX_BSM_SIZE BSM_SRAM_SIZE
102
103static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr)
104{
105 return (addr >= IWL49_RTC_DATA_LOWER_BOUND) &&
106 (addr < IWL49_RTC_DATA_UPPER_BOUND);
107}
108
109/********************* START TEMPERATURE *************************************/
110
111/**
112 * 4965 temperature calculation.
113 *
114 * The driver must calculate the device temperature before calculating
115 * a txpower setting (amplifier gain is temperature dependent). The
116 * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration
117 * values used for the life of the driver, and one of which (R4) is the
118 * real-time temperature indicator.
119 *
120 * uCode provides all 4 values to the driver via the "initialize alive"
121 * notification (see struct iwl4965_init_alive_resp). After the runtime uCode
122 * image loads, uCode updates the R4 value via statistics notifications
123 * (see STATISTICS_NOTIFICATION), which occur after each received beacon
124 * when associated, or can be requested via REPLY_STATISTICS_CMD.
125 *
126 * NOTE: uCode provides the R4 value as a 23-bit signed value. Driver
127 * must sign-extend to 32 bits before applying formula below.
128 *
129 * Formula:
130 *
131 * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8
132 *
133 * NOTE: The basic formula is 259 * (R4-R2) / (R3-R1). The 97/100 is
134 * an additional correction, which should be centered around 0 degrees
135 * Celsius (273 degrees Kelvin). The 8 (3 percent of 273) compensates for
136 * centering the 97/100 correction around 0 degrees K.
137 *
138 * Add 273 to Kelvin value to find degrees Celsius, for comparing current
139 * temperature with factory-measured temperatures when calculating txpower
140 * settings.
141 */
142#define TEMPERATURE_CALIB_KELVIN_OFFSET 8
143#define TEMPERATURE_CALIB_A_VAL 259
144
145/* Limit range of calculated temperature to be between these Kelvin values */
146#define IWL_TX_POWER_TEMPERATURE_MIN (263)
147#define IWL_TX_POWER_TEMPERATURE_MAX (410)
148
149#define IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \
150 (((t) < IWL_TX_POWER_TEMPERATURE_MIN) || \
151 ((t) > IWL_TX_POWER_TEMPERATURE_MAX))
152
153/********************* END TEMPERATURE ***************************************/
154
155/********************* START TXPOWER *****************************************/
156
157/**
158 * 4965 txpower calculations rely on information from three sources:
159 *
160 * 1) EEPROM
161 * 2) "initialize" alive notification
162 * 3) statistics notifications
163 *
164 * EEPROM data consists of:
165 *
166 * 1) Regulatory information (max txpower and channel usage flags) is provided
167 * separately for each channel that can possibly supported by 4965.
168 * 40 MHz wide (.11n HT40) channels are listed separately from 20 MHz
169 * (legacy) channels.
170 *
171 * See struct iwl4965_eeprom_channel for format, and struct iwl4965_eeprom
172 * for locations in EEPROM.
173 *
174 * 2) Factory txpower calibration information is provided separately for
175 * sub-bands of contiguous channels. 2.4GHz has just one sub-band,
176 * but 5 GHz has several sub-bands.
177 *
178 * In addition, per-band (2.4 and 5 Ghz) saturation txpowers are provided.
179 *
180 * See struct iwl4965_eeprom_calib_info (and the tree of structures
181 * contained within it) for format, and struct iwl4965_eeprom for
182 * locations in EEPROM.
183 *
184 * "Initialization alive" notification (see struct iwl4965_init_alive_resp)
185 * consists of:
186 *
187 * 1) Temperature calculation parameters.
188 *
189 * 2) Power supply voltage measurement.
190 *
191 * 3) Tx gain compensation to balance 2 transmitters for MIMO use.
192 *
193 * Statistics notifications deliver:
194 *
195 * 1) Current values for temperature param R4.
196 */
197
198/**
199 * To calculate a txpower setting for a given desired target txpower, channel,
200 * modulation bit rate, and transmitter chain (4965 has 2 transmitters to
201 * support MIMO and transmit diversity), driver must do the following:
202 *
203 * 1) Compare desired txpower vs. (EEPROM) regulatory limit for this channel.
204 * Do not exceed regulatory limit; reduce target txpower if necessary.
205 *
206 * If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31),
207 * 2 transmitters will be used simultaneously; driver must reduce the
208 * regulatory limit by 3 dB (half-power) for each transmitter, so the
209 * combined total output of the 2 transmitters is within regulatory limits.
210 *
211 *
212 * 2) Compare target txpower vs. (EEPROM) saturation txpower *reduced by
213 * backoff for this bit rate*. Do not exceed (saturation - backoff[rate]);
214 * reduce target txpower if necessary.
215 *
216 * Backoff values below are in 1/2 dB units (equivalent to steps in
217 * txpower gain tables):
218 *
219 * OFDM 6 - 36 MBit: 10 steps (5 dB)
220 * OFDM 48 MBit: 15 steps (7.5 dB)
221 * OFDM 54 MBit: 17 steps (8.5 dB)
222 * OFDM 60 MBit: 20 steps (10 dB)
223 * CCK all rates: 10 steps (5 dB)
224 *
225 * Backoff values apply to saturation txpower on a per-transmitter basis;
226 * when using MIMO (2 transmitters), each transmitter uses the same
227 * saturation level provided in EEPROM, and the same backoff values;
228 * no reduction (such as with regulatory txpower limits) is required.
229 *
230 * Saturation and Backoff values apply equally to 20 Mhz (legacy) channel
231 * widths and 40 Mhz (.11n HT40) channel widths; there is no separate
232 * factory measurement for ht40 channels.
233 *
234 * The result of this step is the final target txpower. The rest of
235 * the steps figure out the proper settings for the device to achieve
236 * that target txpower.
237 *
238 *
239 * 3) Determine (EEPROM) calibration sub band for the target channel, by
240 * comparing against first and last channels in each sub band
241 * (see struct iwl4965_eeprom_calib_subband_info).
242 *
243 *
244 * 4) Linearly interpolate (EEPROM) factory calibration measurement sets,
245 * referencing the 2 factory-measured (sample) channels within the sub band.
246 *
247 * Interpolation is based on difference between target channel's frequency
248 * and the sample channels' frequencies. Since channel numbers are based
249 * on frequency (5 MHz between each channel number), this is equivalent
250 * to interpolating based on channel number differences.
251 *
252 * Note that the sample channels may or may not be the channels at the
253 * edges of the sub band. The target channel may be "outside" of the
254 * span of the sampled channels.
255 *
256 * Driver may choose the pair (for 2 Tx chains) of measurements (see
257 * struct iwl4965_eeprom_calib_ch_info) for which the actual measured
258 * txpower comes closest to the desired txpower. Usually, though,
259 * the middle set of measurements is closest to the regulatory limits,
260 * and is therefore a good choice for all txpower calculations (this
261 * assumes that high accuracy is needed for maximizing legal txpower,
262 * while lower txpower configurations do not need as much accuracy).
263 *
264 * Driver should interpolate both members of the chosen measurement pair,
265 * i.e. for both Tx chains (radio transmitters), unless the driver knows
266 * that only one of the chains will be used (e.g. only one tx antenna
267 * connected, but this should be unusual). The rate scaling algorithm
268 * switches antennas to find best performance, so both Tx chains will
269 * be used (although only one at a time) even for non-MIMO transmissions.
270 *
271 * Driver should interpolate factory values for temperature, gain table
272 * index, and actual power. The power amplifier detector values are
273 * not used by the driver.
274 *
275 * Sanity check: If the target channel happens to be one of the sample
276 * channels, the results should agree with the sample channel's
277 * measurements!
278 *
279 *
280 * 5) Find difference between desired txpower and (interpolated)
281 * factory-measured txpower. Using (interpolated) factory gain table index
282 * (shown elsewhere) as a starting point, adjust this index lower to
283 * increase txpower, or higher to decrease txpower, until the target
284 * txpower is reached. Each step in the gain table is 1/2 dB.
285 *
286 * For example, if factory measured txpower is 16 dBm, and target txpower
287 * is 13 dBm, add 6 steps to the factory gain index to reduce txpower
288 * by 3 dB.
289 *
290 *
291 * 6) Find difference between current device temperature and (interpolated)
292 * factory-measured temperature for sub-band. Factory values are in
293 * degrees Celsius. To calculate current temperature, see comments for
294 * "4965 temperature calculation".
295 *
296 * If current temperature is higher than factory temperature, driver must
297 * increase gain (lower gain table index), and vice verse.
298 *
299 * Temperature affects gain differently for different channels:
300 *
301 * 2.4 GHz all channels: 3.5 degrees per half-dB step
302 * 5 GHz channels 34-43: 4.5 degrees per half-dB step
303 * 5 GHz channels >= 44: 4.0 degrees per half-dB step
304 *
305 * NOTE: Temperature can increase rapidly when transmitting, especially
306 * with heavy traffic at high txpowers. Driver should update
307 * temperature calculations often under these conditions to
308 * maintain strong txpower in the face of rising temperature.
309 *
310 *
311 * 7) Find difference between current power supply voltage indicator
312 * (from "initialize alive") and factory-measured power supply voltage
313 * indicator (EEPROM).
314 *
315 * If the current voltage is higher (indicator is lower) than factory
316 * voltage, gain should be reduced (gain table index increased) by:
317 *
318 * (eeprom - current) / 7
319 *
320 * If the current voltage is lower (indicator is higher) than factory
321 * voltage, gain should be increased (gain table index decreased) by:
322 *
323 * 2 * (current - eeprom) / 7
324 *
325 * If number of index steps in either direction turns out to be > 2,
326 * something is wrong ... just use 0.
327 *
328 * NOTE: Voltage compensation is independent of band/channel.
329 *
330 * NOTE: "Initialize" uCode measures current voltage, which is assumed
331 * to be constant after this initial measurement. Voltage
332 * compensation for txpower (number of steps in gain table)
333 * may be calculated once and used until the next uCode bootload.
334 *
335 *
336 * 8) If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31),
337 * adjust txpower for each transmitter chain, so txpower is balanced
338 * between the two chains. There are 5 pairs of tx_atten[group][chain]
339 * values in "initialize alive", one pair for each of 5 channel ranges:
340 *
341 * Group 0: 5 GHz channel 34-43
342 * Group 1: 5 GHz channel 44-70
343 * Group 2: 5 GHz channel 71-124
344 * Group 3: 5 GHz channel 125-200
345 * Group 4: 2.4 GHz all channels
346 *
347 * Add the tx_atten[group][chain] value to the index for the target chain.
348 * The values are signed, but are in pairs of 0 and a non-negative number,
349 * so as to reduce gain (if necessary) of the "hotter" channel. This
350 * avoids any need to double-check for regulatory compliance after
351 * this step.
352 *
353 *
354 * 9) If setting up for a CCK rate, lower the gain by adding a CCK compensation
355 * value to the index:
356 *
357 * Hardware rev B: 9 steps (4.5 dB)
358 * Hardware rev C: 5 steps (2.5 dB)
359 *
360 * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
361 * bits [3:2], 1 = B, 2 = C.
362 *
363 * NOTE: This compensation is in addition to any saturation backoff that
364 * might have been applied in an earlier step.
365 *
366 *
367 * 10) Select the gain table, based on band (2.4 vs 5 GHz).
368 *
369 * Limit the adjusted index to stay within the table!
370 *
371 *
372 * 11) Read gain table entries for DSP and radio gain, place into appropriate
373 * location(s) in command (struct iwl4965_txpowertable_cmd).
374 */
375
376/**
377 * When MIMO is used (2 transmitters operating simultaneously), driver should
378 * limit each transmitter to deliver a max of 3 dB below the regulatory limit
379 * for the device. That is, use half power for each transmitter, so total
380 * txpower is within regulatory limits.
381 *
382 * The value "6" represents number of steps in gain table to reduce power 3 dB.
383 * Each step is 1/2 dB.
384 */
385#define IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6)
386
387/**
388 * CCK gain compensation.
389 *
390 * When calculating txpowers for CCK, after making sure that the target power
391 * is within regulatory and saturation limits, driver must additionally
392 * back off gain by adding these values to the gain table index.
393 *
394 * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
395 * bits [3:2], 1 = B, 2 = C.
396 */
397#define IWL_TX_POWER_CCK_COMPENSATION_B_STEP (9)
398#define IWL_TX_POWER_CCK_COMPENSATION_C_STEP (5)
399
400/*
401 * 4965 power supply voltage compensation for txpower
402 */
403#define TX_POWER_IWL_VOLTAGE_CODES_PER_03V (7)
404
405/**
406 * Gain tables.
407 *
408 * The following tables contain pair of values for setting txpower, i.e.
409 * gain settings for the output of the device's digital signal processor (DSP),
410 * and for the analog gain structure of the transmitter.
411 *
412 * Each entry in the gain tables represents a step of 1/2 dB. Note that these
413 * are *relative* steps, not indications of absolute output power. Output
414 * power varies with temperature, voltage, and channel frequency, and also
415 * requires consideration of average power (to satisfy regulatory constraints),
416 * and peak power (to avoid distortion of the output signal).
417 *
418 * Each entry contains two values:
419 * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
420 * linear value that multiplies the output of the digital signal processor,
421 * before being sent to the analog radio.
422 * 2) Radio gain. This sets the analog gain of the radio Tx path.
423 * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
424 *
425 * EEPROM contains factory calibration data for txpower. This maps actual
426 * measured txpower levels to gain settings in the "well known" tables
427 * below ("well-known" means here that both factory calibration *and* the
428 * driver work with the same table).
429 *
430 * There are separate tables for 2.4 GHz and 5 GHz bands. The 5 GHz table
431 * has an extension (into negative indexes), in case the driver needs to
432 * boost power setting for high device temperatures (higher than would be
433 * present during factory calibration). A 5 Ghz EEPROM index of "40"
434 * corresponds to the 49th entry in the table used by the driver.
435 */
436#define MIN_TX_GAIN_INDEX (0) /* highest gain, lowest idx, 2.4 */
437#define MIN_TX_GAIN_INDEX_52GHZ_EXT (-9) /* highest gain, lowest idx, 5 */
438
439/**
440 * 2.4 GHz gain table
441 *
442 * Index Dsp gain Radio gain
443 * 0 110 0x3f (highest gain)
444 * 1 104 0x3f
445 * 2 98 0x3f
446 * 3 110 0x3e
447 * 4 104 0x3e
448 * 5 98 0x3e
449 * 6 110 0x3d
450 * 7 104 0x3d
451 * 8 98 0x3d
452 * 9 110 0x3c
453 * 10 104 0x3c
454 * 11 98 0x3c
455 * 12 110 0x3b
456 * 13 104 0x3b
457 * 14 98 0x3b
458 * 15 110 0x3a
459 * 16 104 0x3a
460 * 17 98 0x3a
461 * 18 110 0x39
462 * 19 104 0x39
463 * 20 98 0x39
464 * 21 110 0x38
465 * 22 104 0x38
466 * 23 98 0x38
467 * 24 110 0x37
468 * 25 104 0x37
469 * 26 98 0x37
470 * 27 110 0x36
471 * 28 104 0x36
472 * 29 98 0x36
473 * 30 110 0x35
474 * 31 104 0x35
475 * 32 98 0x35
476 * 33 110 0x34
477 * 34 104 0x34
478 * 35 98 0x34
479 * 36 110 0x33
480 * 37 104 0x33
481 * 38 98 0x33
482 * 39 110 0x32
483 * 40 104 0x32
484 * 41 98 0x32
485 * 42 110 0x31
486 * 43 104 0x31
487 * 44 98 0x31
488 * 45 110 0x30
489 * 46 104 0x30
490 * 47 98 0x30
491 * 48 110 0x6
492 * 49 104 0x6
493 * 50 98 0x6
494 * 51 110 0x5
495 * 52 104 0x5
496 * 53 98 0x5
497 * 54 110 0x4
498 * 55 104 0x4
499 * 56 98 0x4
500 * 57 110 0x3
501 * 58 104 0x3
502 * 59 98 0x3
503 * 60 110 0x2
504 * 61 104 0x2
505 * 62 98 0x2
506 * 63 110 0x1
507 * 64 104 0x1
508 * 65 98 0x1
509 * 66 110 0x0
510 * 67 104 0x0
511 * 68 98 0x0
512 * 69 97 0
513 * 70 96 0
514 * 71 95 0
515 * 72 94 0
516 * 73 93 0
517 * 74 92 0
518 * 75 91 0
519 * 76 90 0
520 * 77 89 0
521 * 78 88 0
522 * 79 87 0
523 * 80 86 0
524 * 81 85 0
525 * 82 84 0
526 * 83 83 0
527 * 84 82 0
528 * 85 81 0
529 * 86 80 0
530 * 87 79 0
531 * 88 78 0
532 * 89 77 0
533 * 90 76 0
534 * 91 75 0
535 * 92 74 0
536 * 93 73 0
537 * 94 72 0
538 * 95 71 0
539 * 96 70 0
540 * 97 69 0
541 * 98 68 0
542 */
543
544/**
545 * 5 GHz gain table
546 *
547 * Index Dsp gain Radio gain
548 * -9 123 0x3F (highest gain)
549 * -8 117 0x3F
550 * -7 110 0x3F
551 * -6 104 0x3F
552 * -5 98 0x3F
553 * -4 110 0x3E
554 * -3 104 0x3E
555 * -2 98 0x3E
556 * -1 110 0x3D
557 * 0 104 0x3D
558 * 1 98 0x3D
559 * 2 110 0x3C
560 * 3 104 0x3C
561 * 4 98 0x3C
562 * 5 110 0x3B
563 * 6 104 0x3B
564 * 7 98 0x3B
565 * 8 110 0x3A
566 * 9 104 0x3A
567 * 10 98 0x3A
568 * 11 110 0x39
569 * 12 104 0x39
570 * 13 98 0x39
571 * 14 110 0x38
572 * 15 104 0x38
573 * 16 98 0x38
574 * 17 110 0x37
575 * 18 104 0x37
576 * 19 98 0x37
577 * 20 110 0x36
578 * 21 104 0x36
579 * 22 98 0x36
580 * 23 110 0x35
581 * 24 104 0x35
582 * 25 98 0x35
583 * 26 110 0x34
584 * 27 104 0x34
585 * 28 98 0x34
586 * 29 110 0x33
587 * 30 104 0x33
588 * 31 98 0x33
589 * 32 110 0x32
590 * 33 104 0x32
591 * 34 98 0x32
592 * 35 110 0x31
593 * 36 104 0x31
594 * 37 98 0x31
595 * 38 110 0x30
596 * 39 104 0x30
597 * 40 98 0x30
598 * 41 110 0x25
599 * 42 104 0x25
600 * 43 98 0x25
601 * 44 110 0x24
602 * 45 104 0x24
603 * 46 98 0x24
604 * 47 110 0x23
605 * 48 104 0x23
606 * 49 98 0x23
607 * 50 110 0x22
608 * 51 104 0x18
609 * 52 98 0x18
610 * 53 110 0x17
611 * 54 104 0x17
612 * 55 98 0x17
613 * 56 110 0x16
614 * 57 104 0x16
615 * 58 98 0x16
616 * 59 110 0x15
617 * 60 104 0x15
618 * 61 98 0x15
619 * 62 110 0x14
620 * 63 104 0x14
621 * 64 98 0x14
622 * 65 110 0x13
623 * 66 104 0x13
624 * 67 98 0x13
625 * 68 110 0x12
626 * 69 104 0x08
627 * 70 98 0x08
628 * 71 110 0x07
629 * 72 104 0x07
630 * 73 98 0x07
631 * 74 110 0x06
632 * 75 104 0x06
633 * 76 98 0x06
634 * 77 110 0x05
635 * 78 104 0x05
636 * 79 98 0x05
637 * 80 110 0x04
638 * 81 104 0x04
639 * 82 98 0x04
640 * 83 110 0x03
641 * 84 104 0x03
642 * 85 98 0x03
643 * 86 110 0x02
644 * 87 104 0x02
645 * 88 98 0x02
646 * 89 110 0x01
647 * 90 104 0x01
648 * 91 98 0x01
649 * 92 110 0x00
650 * 93 104 0x00
651 * 94 98 0x00
652 * 95 93 0x00
653 * 96 88 0x00
654 * 97 83 0x00
655 * 98 78 0x00
656 */
657
658
659/**
660 * Sanity checks and default values for EEPROM regulatory levels.
661 * If EEPROM values fall outside MIN/MAX range, use default values.
662 *
663 * Regulatory limits refer to the maximum average txpower allowed by
664 * regulatory agencies in the geographies in which the device is meant
665 * to be operated. These limits are SKU-specific (i.e. geography-specific),
666 * and channel-specific; each channel has an individual regulatory limit
667 * listed in the EEPROM.
668 *
669 * Units are in half-dBm (i.e. "34" means 17 dBm).
670 */
671#define IWL_TX_POWER_DEFAULT_REGULATORY_24 (34)
672#define IWL_TX_POWER_DEFAULT_REGULATORY_52 (34)
673#define IWL_TX_POWER_REGULATORY_MIN (0)
674#define IWL_TX_POWER_REGULATORY_MAX (34)
675
676/**
677 * Sanity checks and default values for EEPROM saturation levels.
678 * If EEPROM values fall outside MIN/MAX range, use default values.
679 *
680 * Saturation is the highest level that the output power amplifier can produce
681 * without significant clipping distortion. This is a "peak" power level.
682 * Different types of modulation (i.e. various "rates", and OFDM vs. CCK)
683 * require differing amounts of backoff, relative to their average power output,
684 * in order to avoid clipping distortion.
685 *
686 * Driver must make sure that it is violating neither the saturation limit,
687 * nor the regulatory limit, when calculating Tx power settings for various
688 * rates.
689 *
690 * Units are in half-dBm (i.e. "38" means 19 dBm).
691 */
692#define IWL_TX_POWER_DEFAULT_SATURATION_24 (38)
693#define IWL_TX_POWER_DEFAULT_SATURATION_52 (38)
694#define IWL_TX_POWER_SATURATION_MIN (20)
695#define IWL_TX_POWER_SATURATION_MAX (50)
696
697/**
698 * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance)
699 * and thermal Txpower calibration.
700 *
701 * When calculating txpower, driver must compensate for current device
702 * temperature; higher temperature requires higher gain. Driver must calculate
703 * current temperature (see "4965 temperature calculation"), then compare vs.
704 * factory calibration temperature in EEPROM; if current temperature is higher
705 * than factory temperature, driver must *increase* gain by proportions shown
706 * in table below. If current temperature is lower than factory, driver must
707 * *decrease* gain.
708 *
709 * Different frequency ranges require different compensation, as shown below.
710 */
711/* Group 0, 5.2 GHz ch 34-43: 4.5 degrees per 1/2 dB. */
712#define CALIB_IWL_TX_ATTEN_GR1_FCH 34
713#define CALIB_IWL_TX_ATTEN_GR1_LCH 43
714
715/* Group 1, 5.3 GHz ch 44-70: 4.0 degrees per 1/2 dB. */
716#define CALIB_IWL_TX_ATTEN_GR2_FCH 44
717#define CALIB_IWL_TX_ATTEN_GR2_LCH 70
718
719/* Group 2, 5.5 GHz ch 71-124: 4.0 degrees per 1/2 dB. */
720#define CALIB_IWL_TX_ATTEN_GR3_FCH 71
721#define CALIB_IWL_TX_ATTEN_GR3_LCH 124
722
723/* Group 3, 5.7 GHz ch 125-200: 4.0 degrees per 1/2 dB. */
724#define CALIB_IWL_TX_ATTEN_GR4_FCH 125
725#define CALIB_IWL_TX_ATTEN_GR4_LCH 200
726
727/* Group 4, 2.4 GHz all channels: 3.5 degrees per 1/2 dB. */
728#define CALIB_IWL_TX_ATTEN_GR5_FCH 1
729#define CALIB_IWL_TX_ATTEN_GR5_LCH 20
730
/*
 * Indexes of the five tx-attenuation/thermal calibration channel groups;
 * order matches the CALIB_IWL_TX_ATTEN_GR*_FCH/LCH ranges defined above
 * (GROUP_1 = 5.2 GHz ch 34-43 ... GROUP_5 = 2.4 GHz all channels).
 */
enum {
	CALIB_CH_GROUP_1 = 0,
	CALIB_CH_GROUP_2 = 1,
	CALIB_CH_GROUP_3 = 2,
	CALIB_CH_GROUP_4 = 3,
	CALIB_CH_GROUP_5 = 4,
	CALIB_CH_GROUP_MAX	/* number of calibration channel groups */
};
739
740/********************* END TXPOWER *****************************************/
741
742
743/**
744 * Tx/Rx Queues
745 *
746 * Most communication between driver and 4965 is via queues of data buffers.
747 * For example, all commands that the driver issues to device's embedded
748 * controller (uCode) are via the command queue (one of the Tx queues). All
749 * uCode command responses/replies/notifications, including Rx frames, are
750 * conveyed from uCode to driver via the Rx queue.
751 *
752 * Most support for these queues, including handshake support, resides in
753 * structures in host DRAM, shared between the driver and the device. When
754 * allocating this memory, the driver must make sure that data written by
755 * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's
756 * cache memory), so DRAM and cache are consistent, and the device can
757 * immediately see changes made by the driver.
758 *
759 * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via
760 * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array
761 * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
762 */
763#define IWL49_NUM_FIFOS 7
764#define IWL49_CMD_FIFO_NUM 4
765#define IWL49_NUM_QUEUES 16
766#define IWL49_NUM_AMPDU_QUEUES 8
767
768
769/**
770 * struct iwl4965_schedq_bc_tbl
771 *
772 * Byte Count table
773 *
774 * Each Tx queue uses a byte-count table containing 320 entries:
775 * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that
776 * duplicate the first 64 entries (to avoid wrap-around within a Tx window;
777 * max Tx window is 64 TFDs).
778 *
779 * When driver sets up a new TFD, it must also enter the total byte count
780 * of the frame to be transmitted into the corresponding entry in the byte
781 * count table for the chosen Tx queue. If the TFD index is 0-63, the driver
782 * must duplicate the byte count entry in corresponding index 256-319.
783 *
784 * padding puts each byte count table on a 1024-byte boundary;
785 * 4965 assumes tables are separated by 1024 bytes.
786 */
/* Byte-count table shared with the device over DMA; layout is fixed by
 * hardware, hence __packed. See the block comment above for semantics. */
struct iwl4965_scd_bc_tbl {
	__le16 tfd_offset[TFD_QUEUE_BC_SIZE];	/* one byte-count entry per TFD */
	u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];	/* 1024-byte alignment required by 4965 */
} __packed;
791
792
793#define IWL4965_RTC_INST_LOWER_BOUND (0x000000)
794
795/* RSSI to dBm */
796#define IWL4965_RSSI_OFFSET 44
797
798/* PCI registers */
799#define PCI_CFG_RETRY_TIMEOUT 0x041
800
801/* PCI register values */
802#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
803#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
804
805#define IWL4965_DEFAULT_TX_RETRY 15
806
807/* EEPROM */
808#define IWL4965_FIRST_AMPDU_QUEUE 10
809
810
811#endif /* !__iwl_4965_hw_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.c b/drivers/net/wireless/iwlegacy/iwl-4965-led.c
deleted file mode 100644
index 6862fdcaee62..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-led.c
+++ /dev/null
@@ -1,73 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/skbuff.h>
34#include <linux/netdevice.h>
35#include <net/mac80211.h>
36#include <linux/etherdevice.h>
37#include <asm/unaligned.h>
38
39#include "iwl-commands.h"
40#include "iwl-dev.h"
41#include "iwl-core.h"
42#include "iwl-io.h"
43#include "iwl-4965-led.h"
44
45/* Send led command */
46static int
47iwl4965_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
48{
49 struct iwl_host_cmd cmd = {
50 .id = REPLY_LEDS_CMD,
51 .len = sizeof(struct iwl_led_cmd),
52 .data = led_cmd,
53 .flags = CMD_ASYNC,
54 .callback = NULL,
55 };
56 u32 reg;
57
58 reg = iwl_read32(priv, CSR_LED_REG);
59 if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
60 iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
61
62 return iwl_legacy_send_cmd(priv, &cmd);
63}
64
65/* Set led register off */
66void iwl4965_led_enable(struct iwl_priv *priv)
67{
68 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
69}
70
71const struct iwl_led_ops iwl4965_led_ops = {
72 .cmd = iwl4965_send_led_cmd,
73};
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.h b/drivers/net/wireless/iwlegacy/iwl-4965-led.h
deleted file mode 100644
index 5ed3615fc338..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-led.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_4965_led_h__
28#define __iwl_4965_led_h__
29
30extern const struct iwl_led_ops iwl4965_led_ops;
31void iwl4965_led_enable(struct iwl_priv *priv);
32
33#endif /* __iwl_4965_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
deleted file mode 100644
index 2be6d9e3b019..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
+++ /dev/null
@@ -1,1194 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#include <linux/etherdevice.h>
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39#include "iwl-4965-hw.h"
40#include "iwl-4965.h"
41#include "iwl-sta.h"
42
43void iwl4965_check_abort_status(struct iwl_priv *priv,
44 u8 frame_count, u32 status)
45{
46 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
47 IWL_ERR(priv, "Tx flush command to flush out all frames\n");
48 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
49 queue_work(priv->workqueue, &priv->tx_flush);
50 }
51}
52
53/*
54 * EEPROM
55 */
56struct iwl_mod_params iwl4965_mod_params = {
57 .amsdu_size_8K = 1,
58 .restart_fw = 1,
59 /* the rest are 0 by default */
60};
61
62void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
63{
64 unsigned long flags;
65 int i;
66 spin_lock_irqsave(&rxq->lock, flags);
67 INIT_LIST_HEAD(&rxq->rx_free);
68 INIT_LIST_HEAD(&rxq->rx_used);
69 /* Fill the rx_used queue with _all_ of the Rx buffers */
70 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
71 /* In the reset function, these buffers may have been allocated
72 * to an SKB, so we need to unmap and free potential storage */
73 if (rxq->pool[i].page != NULL) {
74 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
75 PAGE_SIZE << priv->hw_params.rx_page_order,
76 PCI_DMA_FROMDEVICE);
77 __iwl_legacy_free_pages(priv, rxq->pool[i].page);
78 rxq->pool[i].page = NULL;
79 }
80 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
81 }
82
83 for (i = 0; i < RX_QUEUE_SIZE; i++)
84 rxq->queue[i] = NULL;
85
86 /* Set us so that we have processed and used all buffers, but have
87 * not restocked the Rx queue with fresh buffers */
88 rxq->read = rxq->write = 0;
89 rxq->write_actual = 0;
90 rxq->free_count = 0;
91 spin_unlock_irqrestore(&rxq->lock, flags);
92}
93
94int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
95{
96 u32 rb_size;
97 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
98 u32 rb_timeout = 0;
99
100 if (priv->cfg->mod_params->amsdu_size_8K)
101 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
102 else
103 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
104
105 /* Stop Rx DMA */
106 iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
107
108 /* Reset driver's Rx queue write index */
109 iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
110
111 /* Tell device where to find RBD circular buffer in DRAM */
112 iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
113 (u32)(rxq->bd_dma >> 8));
114
115 /* Tell device where in DRAM to update its Rx status */
116 iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
117 rxq->rb_stts_dma >> 4);
118
119 /* Enable Rx DMA
120 * Direct rx interrupts to hosts
121 * Rx buffer size 4 or 8k
122 * RB timeout 0x10
123 * 256 RBDs
124 */
125 iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
126 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
127 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
128 FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
129 rb_size|
130 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
131 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
132
133 /* Set interrupt coalescing timer to default (2048 usecs) */
134 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
135
136 return 0;
137}
138
139static void iwl4965_set_pwr_vmain(struct iwl_priv *priv)
140{
141/*
142 * (for documentation purposes)
143 * to set power to V_AUX, do:
144
145 if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
146 iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
147 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
148 ~APMG_PS_CTRL_MSK_PWR_SRC);
149 */
150
151 iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
152 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
153 ~APMG_PS_CTRL_MSK_PWR_SRC);
154}
155
156int iwl4965_hw_nic_init(struct iwl_priv *priv)
157{
158 unsigned long flags;
159 struct iwl_rx_queue *rxq = &priv->rxq;
160 int ret;
161
162 /* nic_init */
163 spin_lock_irqsave(&priv->lock, flags);
164 priv->cfg->ops->lib->apm_ops.init(priv);
165
166 /* Set interrupt coalescing calibration timer to default (512 usecs) */
167 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
168
169 spin_unlock_irqrestore(&priv->lock, flags);
170
171 iwl4965_set_pwr_vmain(priv);
172
173 priv->cfg->ops->lib->apm_ops.config(priv);
174
175 /* Allocate the RX queue, or reset if it is already allocated */
176 if (!rxq->bd) {
177 ret = iwl_legacy_rx_queue_alloc(priv);
178 if (ret) {
179 IWL_ERR(priv, "Unable to initialize Rx queue\n");
180 return -ENOMEM;
181 }
182 } else
183 iwl4965_rx_queue_reset(priv, rxq);
184
185 iwl4965_rx_replenish(priv);
186
187 iwl4965_rx_init(priv, rxq);
188
189 spin_lock_irqsave(&priv->lock, flags);
190
191 rxq->need_update = 1;
192 iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
193
194 spin_unlock_irqrestore(&priv->lock, flags);
195
196 /* Allocate or reset and init all Tx and Command queues */
197 if (!priv->txq) {
198 ret = iwl4965_txq_ctx_alloc(priv);
199 if (ret)
200 return ret;
201 } else
202 iwl4965_txq_ctx_reset(priv);
203
204 set_bit(STATUS_INIT, &priv->status);
205
206 return 0;
207}
208
209/**
210 * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
211 */
212static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv,
213 dma_addr_t dma_addr)
214{
215 return cpu_to_le32((u32)(dma_addr >> 8));
216}
217
218/**
219 * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
220 *
221 * If there are slots in the RX queue that need to be restocked,
222 * and we have free pre-allocated buffers, fill the ranks as much
223 * as we can, pulling from rx_free.
224 *
225 * This moves the 'write' index forward to catch up with 'processed', and
226 * also updates the memory address in the firmware to reference the new
227 * target buffer.
228 */
229void iwl4965_rx_queue_restock(struct iwl_priv *priv)
230{
231 struct iwl_rx_queue *rxq = &priv->rxq;
232 struct list_head *element;
233 struct iwl_rx_mem_buffer *rxb;
234 unsigned long flags;
235
236 spin_lock_irqsave(&rxq->lock, flags);
237 while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
238 /* The overwritten rxb must be a used one */
239 rxb = rxq->queue[rxq->write];
240 BUG_ON(rxb && rxb->page);
241
242 /* Get next free Rx buffer, remove from free list */
243 element = rxq->rx_free.next;
244 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
245 list_del(element);
246
247 /* Point to Rx buffer via next RBD in circular buffer */
248 rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv,
249 rxb->page_dma);
250 rxq->queue[rxq->write] = rxb;
251 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
252 rxq->free_count--;
253 }
254 spin_unlock_irqrestore(&rxq->lock, flags);
255 /* If the pre-allocated buffer pool is dropping low, schedule to
256 * refill it */
257 if (rxq->free_count <= RX_LOW_WATERMARK)
258 queue_work(priv->workqueue, &priv->rx_replenish);
259
260
261 /* If we've added more space for the firmware to place data, tell it.
262 * Increment device's write pointer in multiples of 8. */
263 if (rxq->write_actual != (rxq->write & ~0x7)) {
264 spin_lock_irqsave(&rxq->lock, flags);
265 rxq->need_update = 1;
266 spin_unlock_irqrestore(&rxq->lock, flags);
267 iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
268 }
269}
270
271/**
272 * iwl4965_rx_replenish - Move all used packet from rx_used to rx_free
273 *
274 * When moving to rx_free an SKB is allocated for the slot.
275 *
276 * Also restock the Rx queue via iwl_rx_queue_restock.
277 * This is called as a scheduled work item (except for during initialization)
278 */
279static void iwl4965_rx_allocate(struct iwl_priv *priv, gfp_t priority)
280{
281 struct iwl_rx_queue *rxq = &priv->rxq;
282 struct list_head *element;
283 struct iwl_rx_mem_buffer *rxb;
284 struct page *page;
285 unsigned long flags;
286 gfp_t gfp_mask = priority;
287
288 while (1) {
289 spin_lock_irqsave(&rxq->lock, flags);
290 if (list_empty(&rxq->rx_used)) {
291 spin_unlock_irqrestore(&rxq->lock, flags);
292 return;
293 }
294 spin_unlock_irqrestore(&rxq->lock, flags);
295
296 if (rxq->free_count > RX_LOW_WATERMARK)
297 gfp_mask |= __GFP_NOWARN;
298
299 if (priv->hw_params.rx_page_order > 0)
300 gfp_mask |= __GFP_COMP;
301
302 /* Alloc a new receive buffer */
303 page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
304 if (!page) {
305 if (net_ratelimit())
306 IWL_DEBUG_INFO(priv, "alloc_pages failed, "
307 "order: %d\n",
308 priv->hw_params.rx_page_order);
309
310 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
311 net_ratelimit())
312 IWL_CRIT(priv,
313 "Failed to alloc_pages with %s. "
314 "Only %u free buffers remaining.\n",
315 priority == GFP_ATOMIC ?
316 "GFP_ATOMIC" : "GFP_KERNEL",
317 rxq->free_count);
318 /* We don't reschedule replenish work here -- we will
319 * call the restock method and if it still needs
320 * more buffers it will schedule replenish */
321 return;
322 }
323
324 spin_lock_irqsave(&rxq->lock, flags);
325
326 if (list_empty(&rxq->rx_used)) {
327 spin_unlock_irqrestore(&rxq->lock, flags);
328 __free_pages(page, priv->hw_params.rx_page_order);
329 return;
330 }
331 element = rxq->rx_used.next;
332 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
333 list_del(element);
334
335 spin_unlock_irqrestore(&rxq->lock, flags);
336
337 BUG_ON(rxb->page);
338 rxb->page = page;
339 /* Get physical address of the RB */
340 rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
341 PAGE_SIZE << priv->hw_params.rx_page_order,
342 PCI_DMA_FROMDEVICE);
343 /* dma address must be no more than 36 bits */
344 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
345 /* and also 256 byte aligned! */
346 BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
347
348 spin_lock_irqsave(&rxq->lock, flags);
349
350 list_add_tail(&rxb->list, &rxq->rx_free);
351 rxq->free_count++;
352 priv->alloc_rxb_page++;
353
354 spin_unlock_irqrestore(&rxq->lock, flags);
355 }
356}
357
358void iwl4965_rx_replenish(struct iwl_priv *priv)
359{
360 unsigned long flags;
361
362 iwl4965_rx_allocate(priv, GFP_KERNEL);
363
364 spin_lock_irqsave(&priv->lock, flags);
365 iwl4965_rx_queue_restock(priv);
366 spin_unlock_irqrestore(&priv->lock, flags);
367}
368
369void iwl4965_rx_replenish_now(struct iwl_priv *priv)
370{
371 iwl4965_rx_allocate(priv, GFP_ATOMIC);
372
373 iwl4965_rx_queue_restock(priv);
374}
375
376/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
377 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
378 * This free routine walks the list of POOL entries and if SKB is set to
379 * non NULL it is unmapped and freed
380 */
381void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
382{
383 int i;
384 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
385 if (rxq->pool[i].page != NULL) {
386 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
387 PAGE_SIZE << priv->hw_params.rx_page_order,
388 PCI_DMA_FROMDEVICE);
389 __iwl_legacy_free_pages(priv, rxq->pool[i].page);
390 rxq->pool[i].page = NULL;
391 }
392 }
393
394 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
395 rxq->bd_dma);
396 dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
397 rxq->rb_stts, rxq->rb_stts_dma);
398 rxq->bd = NULL;
399 rxq->rb_stts = NULL;
400}
401
402int iwl4965_rxq_stop(struct iwl_priv *priv)
403{
404
405 /* stop Rx DMA */
406 iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
407 iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
408 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
409
410 return 0;
411}
412
413int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
414{
415 int idx = 0;
416 int band_offset = 0;
417
418 /* HT rate format: mac80211 wants an MCS number, which is just LSB */
419 if (rate_n_flags & RATE_MCS_HT_MSK) {
420 idx = (rate_n_flags & 0xff);
421 return idx;
422 /* Legacy rate format, search for match in table */
423 } else {
424 if (band == IEEE80211_BAND_5GHZ)
425 band_offset = IWL_FIRST_OFDM_RATE;
426 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
427 if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
428 return idx - band_offset;
429 }
430
431 return -1;
432}
433
434static int iwl4965_calc_rssi(struct iwl_priv *priv,
435 struct iwl_rx_phy_res *rx_resp)
436{
437 /* data from PHY/DSP regarding signal strength, etc.,
438 * contents are always there, not configurable by host. */
439 struct iwl4965_rx_non_cfg_phy *ncphy =
440 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
441 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
442 >> IWL49_AGC_DB_POS;
443
444 u32 valid_antennae =
445 (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
446 >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
447 u8 max_rssi = 0;
448 u32 i;
449
450 /* Find max rssi among 3 possible receivers.
451 * These values are measured by the digital signal processor (DSP).
452 * They should stay fairly constant even as the signal strength varies,
453 * if the radio's automatic gain control (AGC) is working right.
454 * AGC value (see below) will provide the "interesting" info. */
455 for (i = 0; i < 3; i++)
456 if (valid_antennae & (1 << i))
457 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
458
459 IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
460 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
461 max_rssi, agc);
462
463 /* dBm = max_rssi dB - agc dB - constant.
464 * Higher AGC (higher radio gain) means lower signal. */
465 return max_rssi - agc - IWL4965_RSSI_OFFSET;
466}
467
468
469static u32 iwl4965_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
470{
471 u32 decrypt_out = 0;
472
473 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
474 RX_RES_STATUS_STATION_FOUND)
475 decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
476 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
477
478 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
479
480 /* packet was not encrypted */
481 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
482 RX_RES_STATUS_SEC_TYPE_NONE)
483 return decrypt_out;
484
485 /* packet was encrypted with unknown alg */
486 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
487 RX_RES_STATUS_SEC_TYPE_ERR)
488 return decrypt_out;
489
490 /* decryption was not done in HW */
491 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
492 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
493 return decrypt_out;
494
495 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
496
497 case RX_RES_STATUS_SEC_TYPE_CCMP:
498 /* alg is CCM: check MIC only */
499 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
500 /* Bad MIC */
501 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
502 else
503 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
504
505 break;
506
507 case RX_RES_STATUS_SEC_TYPE_TKIP:
508 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
509 /* Bad TTAK */
510 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
511 break;
512 }
513 /* fall through if TTAK OK */
514 default:
515 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
516 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
517 else
518 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
519 break;
520 }
521
522 IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
523 decrypt_in, decrypt_out);
524
525 return decrypt_out;
526}
527
528static void iwl4965_pass_packet_to_mac80211(struct iwl_priv *priv,
529 struct ieee80211_hdr *hdr,
530 u16 len,
531 u32 ampdu_status,
532 struct iwl_rx_mem_buffer *rxb,
533 struct ieee80211_rx_status *stats)
534{
535 struct sk_buff *skb;
536 __le16 fc = hdr->frame_control;
537
538 /* We only process data packets if the interface is open */
539 if (unlikely(!priv->is_open)) {
540 IWL_DEBUG_DROP_LIMIT(priv,
541 "Dropping packet while interface is not open.\n");
542 return;
543 }
544
545 /* In case of HW accelerated crypto and bad decryption, drop */
546 if (!priv->cfg->mod_params->sw_crypto &&
547 iwl_legacy_set_decrypted_flag(priv, hdr, ampdu_status, stats))
548 return;
549
550 skb = dev_alloc_skb(128);
551 if (!skb) {
552 IWL_ERR(priv, "dev_alloc_skb failed\n");
553 return;
554 }
555
556 skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
557
558 iwl_legacy_update_stats(priv, false, fc, len);
559 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
560
561 ieee80211_rx(priv->hw, skb);
562 priv->alloc_rxb_page--;
563 rxb->page = NULL;
564}
565
566/* Called for REPLY_RX (legacy ABG frames), or
567 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
568void iwl4965_rx_reply_rx(struct iwl_priv *priv,
569 struct iwl_rx_mem_buffer *rxb)
570{
571 struct ieee80211_hdr *header;
572 struct ieee80211_rx_status rx_status;
573 struct iwl_rx_packet *pkt = rxb_addr(rxb);
574 struct iwl_rx_phy_res *phy_res;
575 __le32 rx_pkt_status;
576 struct iwl_rx_mpdu_res_start *amsdu;
577 u32 len;
578 u32 ampdu_status;
579 u32 rate_n_flags;
580
581 /**
582 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
583 * REPLY_RX: physical layer info is in this buffer
584 * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
585 * command and cached in priv->last_phy_res
586 *
587 * Here we set up local variables depending on which command is
588 * received.
589 */
590 if (pkt->hdr.cmd == REPLY_RX) {
591 phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
592 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
593 + phy_res->cfg_phy_cnt);
594
595 len = le16_to_cpu(phy_res->byte_count);
596 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
597 phy_res->cfg_phy_cnt + len);
598 ampdu_status = le32_to_cpu(rx_pkt_status);
599 } else {
600 if (!priv->_4965.last_phy_res_valid) {
601 IWL_ERR(priv, "MPDU frame without cached PHY data\n");
602 return;
603 }
604 phy_res = &priv->_4965.last_phy_res;
605 amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
606 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
607 len = le16_to_cpu(amsdu->byte_count);
608 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
609 ampdu_status = iwl4965_translate_rx_status(priv,
610 le32_to_cpu(rx_pkt_status));
611 }
612
613 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
614 IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
615 phy_res->cfg_phy_cnt);
616 return;
617 }
618
619 if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
620 !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
621 IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
622 le32_to_cpu(rx_pkt_status));
623 return;
624 }
625
626 /* This will be used in several places later */
627 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
628
629 /* rx_status carries information about the packet to mac80211 */
630 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
631 rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
632 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
633 rx_status.freq =
634 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
635 rx_status.band);
636 rx_status.rate_idx =
637 iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
638 rx_status.flag = 0;
639
640 /* TSF isn't reliable. In order to allow smooth user experience,
641 * this W/A doesn't propagate it to the mac80211 */
642 /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
643
644 priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
645
646 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
647 rx_status.signal = iwl4965_calc_rssi(priv, phy_res);
648
649 iwl_legacy_dbg_log_rx_data_frame(priv, len, header);
650 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
651 rx_status.signal, (unsigned long long)rx_status.mactime);
652
653 /*
654 * "antenna number"
655 *
656 * It seems that the antenna field in the phy flags value
657 * is actually a bit field. This is undefined by radiotap,
658 * it wants an actual antenna number but I always get "7"
659 * for most legacy frames I receive indicating that the
660 * same frame was received on all three RX chains.
661 *
662 * I think this field should be removed in favor of a
663 * new 802.11n radiotap field "RX chains" that is defined
664 * as a bitmask.
665 */
666 rx_status.antenna =
667 (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
668 >> RX_RES_PHY_FLAGS_ANTENNA_POS;
669
670 /* set the preamble flag if appropriate */
671 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
672 rx_status.flag |= RX_FLAG_SHORTPRE;
673
674 /* Set up the HT phy flags */
675 if (rate_n_flags & RATE_MCS_HT_MSK)
676 rx_status.flag |= RX_FLAG_HT;
677 if (rate_n_flags & RATE_MCS_HT40_MSK)
678 rx_status.flag |= RX_FLAG_40MHZ;
679 if (rate_n_flags & RATE_MCS_SGI_MSK)
680 rx_status.flag |= RX_FLAG_SHORT_GI;
681
682 iwl4965_pass_packet_to_mac80211(priv, header, len, ampdu_status,
683 rxb, &rx_status);
684}
685
686/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
687 * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
688void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
689 struct iwl_rx_mem_buffer *rxb)
690{
691 struct iwl_rx_packet *pkt = rxb_addr(rxb);
692 priv->_4965.last_phy_res_valid = true;
693 memcpy(&priv->_4965.last_phy_res, pkt->u.raw,
694 sizeof(struct iwl_rx_phy_res));
695}
696
697static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
698 struct ieee80211_vif *vif,
699 enum ieee80211_band band,
700 u8 is_active, u8 n_probes,
701 struct iwl_scan_channel *scan_ch)
702{
703 struct ieee80211_channel *chan;
704 const struct ieee80211_supported_band *sband;
705 const struct iwl_channel_info *ch_info;
706 u16 passive_dwell = 0;
707 u16 active_dwell = 0;
708 int added, i;
709 u16 channel;
710
711 sband = iwl_get_hw_mode(priv, band);
712 if (!sband)
713 return 0;
714
715 active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
716 passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
717
718 if (passive_dwell <= active_dwell)
719 passive_dwell = active_dwell + 1;
720
721 for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
722 chan = priv->scan_request->channels[i];
723
724 if (chan->band != band)
725 continue;
726
727 channel = chan->hw_value;
728 scan_ch->channel = cpu_to_le16(channel);
729
730 ch_info = iwl_legacy_get_channel_info(priv, band, channel);
731 if (!iwl_legacy_is_channel_valid(ch_info)) {
732 IWL_DEBUG_SCAN(priv,
733 "Channel %d is INVALID for this band.\n",
734 channel);
735 continue;
736 }
737
738 if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
739 (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
740 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
741 else
742 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
743
744 if (n_probes)
745 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
746
747 scan_ch->active_dwell = cpu_to_le16(active_dwell);
748 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
749
750 /* Set txpower levels to defaults */
751 scan_ch->dsp_atten = 110;
752
753 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
754 * power level:
755 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
756 */
757 if (band == IEEE80211_BAND_5GHZ)
758 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
759 else
760 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
761
762 IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
763 channel, le32_to_cpu(scan_ch->type),
764 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
765 "ACTIVE" : "PASSIVE",
766 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
767 active_dwell : passive_dwell);
768
769 scan_ch++;
770 added++;
771 }
772
773 IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
774 return added;
775}
776
777int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
778{
779 struct iwl_host_cmd cmd = {
780 .id = REPLY_SCAN_CMD,
781 .len = sizeof(struct iwl_scan_cmd),
782 .flags = CMD_SIZE_HUGE,
783 };
784 struct iwl_scan_cmd *scan;
785 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
786 u32 rate_flags = 0;
787 u16 cmd_len;
788 u16 rx_chain = 0;
789 enum ieee80211_band band;
790 u8 n_probes = 0;
791 u8 rx_ant = priv->hw_params.valid_rx_ant;
792 u8 rate;
793 bool is_active = false;
794 int chan_mod;
795 u8 active_chains;
796 u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
797 int ret;
798
799 lockdep_assert_held(&priv->mutex);
800
801 if (vif)
802 ctx = iwl_legacy_rxon_ctx_from_vif(vif);
803
804 if (!priv->scan_cmd) {
805 priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
806 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
807 if (!priv->scan_cmd) {
808 IWL_DEBUG_SCAN(priv,
809 "fail to allocate memory for scan\n");
810 return -ENOMEM;
811 }
812 }
813 scan = priv->scan_cmd;
814 memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
815
816 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
817 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
818
819 if (iwl_legacy_is_any_associated(priv)) {
820 u16 interval;
821 u32 extra;
822 u32 suspend_time = 100;
823 u32 scan_suspend_time = 100;
824
825 IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
826 interval = vif->bss_conf.beacon_int;
827
828 scan->suspend_time = 0;
829 scan->max_out_time = cpu_to_le32(200 * 1024);
830 if (!interval)
831 interval = suspend_time;
832
833 extra = (suspend_time / interval) << 22;
834 scan_suspend_time = (extra |
835 ((suspend_time % interval) * 1024));
836 scan->suspend_time = cpu_to_le32(scan_suspend_time);
837 IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
838 scan_suspend_time, interval);
839 }
840
841 if (priv->scan_request->n_ssids) {
842 int i, p = 0;
843 IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
844 for (i = 0; i < priv->scan_request->n_ssids; i++) {
845 /* always does wildcard anyway */
846 if (!priv->scan_request->ssids[i].ssid_len)
847 continue;
848 scan->direct_scan[p].id = WLAN_EID_SSID;
849 scan->direct_scan[p].len =
850 priv->scan_request->ssids[i].ssid_len;
851 memcpy(scan->direct_scan[p].ssid,
852 priv->scan_request->ssids[i].ssid,
853 priv->scan_request->ssids[i].ssid_len);
854 n_probes++;
855 p++;
856 }
857 is_active = true;
858 } else
859 IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
860
861 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
862 scan->tx_cmd.sta_id = ctx->bcast_sta_id;
863 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
864
865 switch (priv->scan_band) {
866 case IEEE80211_BAND_2GHZ:
867 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
868 chan_mod = le32_to_cpu(
869 priv->contexts[IWL_RXON_CTX_BSS].active.flags &
870 RXON_FLG_CHANNEL_MODE_MSK)
871 >> RXON_FLG_CHANNEL_MODE_POS;
872 if (chan_mod == CHANNEL_MODE_PURE_40) {
873 rate = IWL_RATE_6M_PLCP;
874 } else {
875 rate = IWL_RATE_1M_PLCP;
876 rate_flags = RATE_MCS_CCK_MSK;
877 }
878 break;
879 case IEEE80211_BAND_5GHZ:
880 rate = IWL_RATE_6M_PLCP;
881 break;
882 default:
883 IWL_WARN(priv, "Invalid scan band\n");
884 return -EIO;
885 }
886
887 /*
888 * If active scanning is requested but a certain channel is
889 * marked passive, we can do active scanning if we detect
890 * transmissions.
891 *
892 * There is an issue with some firmware versions that triggers
893 * a sysassert on a "good CRC threshold" of zero (== disabled),
894 * on a radar channel even though this means that we should NOT
895 * send probes.
896 *
897 * The "good CRC threshold" is the number of frames that we
898 * need to receive during our dwell time on a channel before
899 * sending out probes -- setting this to a huge value will
900 * mean we never reach it, but at the same time work around
901 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
902 * here instead of IWL_GOOD_CRC_TH_DISABLED.
903 */
904 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
905 IWL_GOOD_CRC_TH_NEVER;
906
907 band = priv->scan_band;
908
909 if (priv->cfg->scan_rx_antennas[band])
910 rx_ant = priv->cfg->scan_rx_antennas[band];
911
912 priv->scan_tx_ant[band] = iwl4965_toggle_tx_ant(priv,
913 priv->scan_tx_ant[band],
914 scan_tx_antennas);
915 rate_flags |= iwl4965_ant_idx_to_flags(priv->scan_tx_ant[band]);
916 scan->tx_cmd.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate, rate_flags);
917
918 /* In power save mode use one chain, otherwise use all chains */
919 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
920 /* rx_ant has been set to all valid chains previously */
921 active_chains = rx_ant &
922 ((u8)(priv->chain_noise_data.active_chains));
923 if (!active_chains)
924 active_chains = rx_ant;
925
926 IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
927 priv->chain_noise_data.active_chains);
928
929 rx_ant = iwl4965_first_antenna(active_chains);
930 }
931
932 /* MIMO is not used here, but value is required */
933 rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
934 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
935 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
936 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
937 scan->rx_chain = cpu_to_le16(rx_chain);
938
939 cmd_len = iwl_legacy_fill_probe_req(priv,
940 (struct ieee80211_mgmt *)scan->data,
941 vif->addr,
942 priv->scan_request->ie,
943 priv->scan_request->ie_len,
944 IWL_MAX_SCAN_SIZE - sizeof(*scan));
945 scan->tx_cmd.len = cpu_to_le16(cmd_len);
946
947 scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
948 RXON_FILTER_BCON_AWARE_MSK);
949
950 scan->channel_count = iwl4965_get_channels_for_scan(priv, vif, band,
951 is_active, n_probes,
952 (void *)&scan->data[cmd_len]);
953 if (scan->channel_count == 0) {
954 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
955 return -EIO;
956 }
957
958 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
959 scan->channel_count * sizeof(struct iwl_scan_channel);
960 cmd.data = scan;
961 scan->len = cpu_to_le16(cmd.len);
962
963 set_bit(STATUS_SCAN_HW, &priv->status);
964
965 ret = iwl_legacy_send_cmd_sync(priv, &cmd);
966 if (ret)
967 clear_bit(STATUS_SCAN_HW, &priv->status);
968
969 return ret;
970}
971
972int iwl4965_manage_ibss_station(struct iwl_priv *priv,
973 struct ieee80211_vif *vif, bool add)
974{
975 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
976
977 if (add)
978 return iwl4965_add_bssid_station(priv, vif_priv->ctx,
979 vif->bss_conf.bssid,
980 &vif_priv->ibss_bssid_sta_id);
981 return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
982 vif->bss_conf.bssid);
983}
984
985void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
986 int sta_id, int tid, int freed)
987{
988 lockdep_assert_held(&priv->sta_lock);
989
990 if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
991 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
992 else {
993 IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
994 priv->stations[sta_id].tid[tid].tfds_in_queue,
995 freed);
996 priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
997 }
998}
999
/* Bitmask with one bit per Tx queue (20 bits) -- presumably covers all
 * hardware Tx queues on 4965; TODO confirm against hw_params queue count. */
#define IWL_TX_QUEUE_MSK	0xfffff
1001
1002static bool iwl4965_is_single_rx_stream(struct iwl_priv *priv)
1003{
1004 return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
1005 priv->current_ht_config.single_chain_sufficient;
1006}
1007
/* Rx chain counts used by the chain-selection logic below: number of
 * chains to run while receiving (MIMO vs. single-stream) and while idle. */
#define IWL_NUM_RX_CHAINS_MULTIPLE	3
#define IWL_NUM_RX_CHAINS_SINGLE	2
#define IWL_NUM_IDLE_CHAINS_DUAL	2
#define IWL_NUM_IDLE_CHAINS_SINGLE	1
1012
1013/*
1014 * Determine how many receiver/antenna chains to use.
1015 *
1016 * More provides better reception via diversity. Fewer saves power
1017 * at the expense of throughput, but only when not in powersave to
1018 * start with.
1019 *
1020 * MIMO (dual stream) requires at least 2, but works better with 3.
1021 * This does not determine *which* chains to use, just how many.
1022 */
1023static int iwl4965_get_active_rx_chain_count(struct iwl_priv *priv)
1024{
1025 /* # of Rx chains to use when expecting MIMO. */
1026 if (iwl4965_is_single_rx_stream(priv))
1027 return IWL_NUM_RX_CHAINS_SINGLE;
1028 else
1029 return IWL_NUM_RX_CHAINS_MULTIPLE;
1030}
1031
1032/*
1033 * When we are in power saving mode, unless device support spatial
1034 * multiplexing power save, use the active count for rx chain count.
1035 */
1036static int
1037iwl4965_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
1038{
1039 /* # Rx chains when idling, depending on SMPS mode */
1040 switch (priv->current_ht_config.smps) {
1041 case IEEE80211_SMPS_STATIC:
1042 case IEEE80211_SMPS_DYNAMIC:
1043 return IWL_NUM_IDLE_CHAINS_SINGLE;
1044 case IEEE80211_SMPS_OFF:
1045 return active_cnt;
1046 default:
1047 WARN(1, "invalid SMPS mode %d",
1048 priv->current_ht_config.smps);
1049 return active_cnt;
1050 }
1051}
1052
1053/* up to 4 chains */
1054static u8 iwl4965_count_chain_bitmap(u32 chain_bitmap)
1055{
1056 u8 res;
1057 res = (chain_bitmap & BIT(0)) >> 0;
1058 res += (chain_bitmap & BIT(1)) >> 1;
1059 res += (chain_bitmap & BIT(2)) >> 2;
1060 res += (chain_bitmap & BIT(3)) >> 3;
1061 return res;
1062}
1063
1064/**
1065 * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
1066 *
1067 * Selects how many and which Rx receivers/antennas/chains to use.
1068 * This should not be used for scan command ... it puts data in wrong place.
1069 */
1070void iwl4965_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1071{
1072 bool is_single = iwl4965_is_single_rx_stream(priv);
1073 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
1074 u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
1075 u32 active_chains;
1076 u16 rx_chain;
1077
1078 /* Tell uCode which antennas are actually connected.
1079 * Before first association, we assume all antennas are connected.
1080 * Just after first association, iwl4965_chain_noise_calibration()
1081 * checks which antennas actually *are* connected. */
1082 if (priv->chain_noise_data.active_chains)
1083 active_chains = priv->chain_noise_data.active_chains;
1084 else
1085 active_chains = priv->hw_params.valid_rx_ant;
1086
1087 rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
1088
1089 /* How many receivers should we use? */
1090 active_rx_cnt = iwl4965_get_active_rx_chain_count(priv);
1091 idle_rx_cnt = iwl4965_get_idle_rx_chain_count(priv, active_rx_cnt);
1092
1093
1094 /* correct rx chain count according hw settings
1095 * and chain noise calibration
1096 */
1097 valid_rx_cnt = iwl4965_count_chain_bitmap(active_chains);
1098 if (valid_rx_cnt < active_rx_cnt)
1099 active_rx_cnt = valid_rx_cnt;
1100
1101 if (valid_rx_cnt < idle_rx_cnt)
1102 idle_rx_cnt = valid_rx_cnt;
1103
1104 rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
1105 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
1106
1107 ctx->staging.rx_chain = cpu_to_le16(rx_chain);
1108
1109 if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
1110 ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
1111 else
1112 ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
1113
1114 IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
1115 ctx->staging.rx_chain,
1116 active_rx_cnt, idle_rx_cnt);
1117
1118 WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
1119 active_rx_cnt < idle_rx_cnt);
1120}
1121
1122u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
1123{
1124 int i;
1125 u8 ind = ant;
1126
1127 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
1128 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
1129 if (valid & BIT(ind))
1130 return ind;
1131 }
1132 return ant;
1133}
1134
/*
 * Map an FH (flow handler) register address to its symbolic name for
 * debug output.  Each IWL_CMD() case expands to "case REG: return
 * the stringified register name"; unknown addresses yield "UNKNOWN".
 */
static const char *iwl4965_get_fh_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
}
1151
/*
 * iwl4965_dump_fh - dump FH (flow handler) register values for debugging
 *
 * With CONFIG_IWLWIFI_LEGACY_DEBUG and @display set: formats the values
 * into a kmalloc'ed buffer returned through *@buf (ownership passes to
 * the caller, who must kfree it) and returns the number of bytes
 * written, or -ENOMEM on allocation failure.  Otherwise logs each
 * register via IWL_ERR and returns 0.
 */
int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	/* registers worth inspecting when chasing Rx/Tx DMA problems */
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (display) {
		/* one formatted line per register plus a header line */
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos += scnprintf(*buf + pos, bufsz - pos,
				"FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos += scnprintf(*buf + pos, bufsz - pos,
				" %34s: 0X%08x\n",
				iwl4965_get_fh_string(fh_tbl[i]),
				iwl_legacy_read_direct32(priv, fh_tbl[i]));
		}
		return pos;
	}
#endif
	IWL_ERR(priv, "FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
		IWL_ERR(priv, " %34s: 0X%08x\n",
			iwl4965_get_fh_string(fh_tbl[i]),
			iwl_legacy_read_direct32(priv, fh_tbl[i]));
	}
	return 0;
}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
deleted file mode 100644
index 57ebe214e68c..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
+++ /dev/null
@@ -1,2871 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26#include <linux/kernel.h>
27#include <linux/init.h>
28#include <linux/skbuff.h>
29#include <linux/slab.h>
30#include <net/mac80211.h>
31
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/delay.h>
35
36#include <linux/workqueue.h>
37
38#include "iwl-dev.h"
39#include "iwl-sta.h"
40#include "iwl-core.h"
41#include "iwl-4965.h"
42
#define IWL4965_RS_NAME "iwl-4965-rs"

/* Tx retry budgets per rate before trying the next antenna / rate */
#define NUM_TRY_BEFORE_ANT_TOGGLE 1
#define IWL_NUMBER_TRY      1
#define IWL_HT_NUMBER_TRY   3

#define IWL_RATE_MAX_WINDOW		62	/* # tx in history window */
#define IWL_RATE_MIN_FAILURE_TH		6	/* min failures to calc tpt */
#define IWL_RATE_MIN_SUCCESS_TH		8	/* min successes to calc tpt */

/* max allowed rate miss before sync LQ cmd */
#define IWL_MISSED_RATE_MAX		15
/* max time to accumulate history: 3 seconds */
#define IWL_RATE_SCALE_FLUSH_INTVL   (3*HZ)
57
/* Map a rate-table index to the legacy rate index to fall back to when
 * leaving HT; the first several entries all collapse to 6 Mbps. */
static u8 rs_ht_to_legacy[] = {
	IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
	IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
	IWL_RATE_6M_INDEX,
	IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
	IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
	IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
	IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
};
67
/* Next antenna configuration to try when toggling, indexed by the
 * current configuration; ANT_ABC (all antennas) maps to itself. */
static const u8 ant_toggle_lookup[] = {
	/*ANT_NONE -> */ ANT_NONE,
	/*ANT_A    -> */ ANT_B,
	/*ANT_B    -> */ ANT_C,
	/*ANT_AB   -> */ ANT_BC,
	/*ANT_C    -> */ ANT_A,
	/*ANT_AC   -> */ ANT_AB,
	/*ANT_BC   -> */ ANT_AC,
	/*ANT_ABC  -> */ ANT_ABC,
};
78
/*
 * Build one iwl_rate_info initializer: legacy PLCP code, SISO and MIMO2
 * HT PLCP codes, the IEEE rate value, and the previous/next rate
 * indices used when stepping the rate down or up.
 */
#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np)    \
	[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,      \
				    IWL_RATE_SISO_##s##M_PLCP, \
				    IWL_RATE_MIMO2_##s##M_PLCP,\
				    IWL_RATE_##r##M_IEEE,      \
				    IWL_RATE_##ip##M_INDEX,    \
				    IWL_RATE_##in##M_INDEX,    \
				    IWL_RATE_##rp##M_INDEX,    \
				    IWL_RATE_##rn##M_INDEX,    \
				    IWL_RATE_##pp##M_INDEX,    \
				    IWL_RATE_##np##M_INDEX }
90
91/*
92 * Parameter order:
93 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
94 *
95 * If there isn't a valid next or previous rate then INV is used which
96 * maps to IWL_RATE_INVALID
97 *
98 */
99const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT] = {
100 IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
101 IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
102 IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
103 IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
104 IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
105 IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
106 IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
107 IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
108 IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
109 IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
110 IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
111 IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
112 IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
113};
114
/*
 * Convert a uCode rate_n_flags value into an index into the driver's
 * iwlegacy_rates[] table; returns -1 when no matching entry exists.
 */
static int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
{
	int idx = 0;

	/* HT rate format */
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		idx = (rate_n_flags & 0xff);

		/* fold MIMO2 MCS codes onto the SISO code range */
		if (idx >= IWL_RATE_MIMO2_6M_PLCP)
			idx = idx - IWL_RATE_MIMO2_6M_PLCP;

		idx += IWL_FIRST_OFDM_RATE;
		/* skip 9M not supported in ht*/
		if (idx >= IWL_RATE_9M_INDEX)
			idx += 1;
		if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
			return idx;

	/* legacy rate format, search for match in table */
	} else {
		for (idx = 0; idx < ARRAY_SIZE(iwlegacy_rates); idx++)
			if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
				return idx;
	}

	return -1;
}
142
143static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
144 struct sk_buff *skb,
145 struct ieee80211_sta *sta,
146 struct iwl_lq_sta *lq_sta);
147static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
148 struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
149static void iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta,
150 bool force_search);
151
152#ifdef CONFIG_MAC80211_DEBUGFS
153static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
154 u32 *rate_n_flags, int index);
155#else
156static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
157 u32 *rate_n_flags, int index)
158{}
159#endif
160
161/**
162 * The following tables contain the expected throughput metrics for all rates
163 *
164 * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
165 *
166 * where invalid entries are zeros.
167 *
168 * CCK rates are only valid in legacy table and will only be used in G
169 * (2.4 GHz) band.
170 */
171
172static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
173 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
174};
175
176static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
177 {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */
178 {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */
179 {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */
180 {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
181};
182
183static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
184 {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
185 {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
186 {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
187 {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
188};
189
190static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
191 {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
192 {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
193 {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
194 {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/
195};
196
197static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
198 {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
199 {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
200 {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
201 {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
202};
203
/* mbps, mcs -- human-readable rate / modulation strings for debugfs */
static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
	{  "1", "BPSK DSSS"},
	{  "2", "QPSK DSSS"},
	{"5.5", "BPSK CCK"},
	{ "11", "QPSK CCK"},
	{  "6", "BPSK 1/2"},
	{  "9", "BPSK 1/2"},
	{ "12", "QPSK 1/2"},
	{ "18", "QPSK 3/4"},
	{ "24", "16QAM 1/2"},
	{ "36", "16QAM 3/4"},
	{ "48", "64QAM 2/3"},
	{ "54", "64QAM 3/4"},
	{ "60", "64QAM 5/6"},
};

/* MCS indices per spatial stream (HT defines 8 MCS per stream) */
#define MCS_INDEX_PER_STREAM	(8)
222
223static inline u8 iwl4965_rs_extract_rate(u32 rate_n_flags)
224{
225 return (u8)(rate_n_flags & 0xFF);
226}
227
228static void
229iwl4965_rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
230{
231 window->data = 0;
232 window->success_counter = 0;
233 window->success_ratio = IWL_INVALID_VALUE;
234 window->counter = 0;
235 window->average_tpt = IWL_INVALID_VALUE;
236 window->stamp = 0;
237}
238
239static inline u8 iwl4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
240{
241 return (ant_type & valid_antenna) == ant_type;
242}
243
244/*
245 * removes the old data from the statistics. All data that is older than
246 * TID_MAX_TIME_DIFF, will be deleted.
247 */
248static void
249iwl4965_rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
250{
251 /* The oldest age we want to keep */
252 u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
253
254 while (tl->queue_count &&
255 (tl->time_stamp < oldest_time)) {
256 tl->total -= tl->packet_count[tl->head];
257 tl->packet_count[tl->head] = 0;
258 tl->time_stamp += TID_QUEUE_CELL_SPACING;
259 tl->queue_count--;
260 tl->head++;
261 if (tl->head >= TID_QUEUE_MAX_SIZE)
262 tl->head = 0;
263 }
264}
265
266/*
267 * increment traffic load value for tid and also remove
268 * any old values if passed the certain time period
269 */
270static u8 iwl4965_rs_tl_add_packet(struct iwl_lq_sta *lq_data,
271 struct ieee80211_hdr *hdr)
272{
273 u32 curr_time = jiffies_to_msecs(jiffies);
274 u32 time_diff;
275 s32 index;
276 struct iwl_traffic_load *tl = NULL;
277 u8 tid;
278
279 if (ieee80211_is_data_qos(hdr->frame_control)) {
280 u8 *qc = ieee80211_get_qos_ctl(hdr);
281 tid = qc[0] & 0xf;
282 } else
283 return MAX_TID_COUNT;
284
285 if (unlikely(tid >= TID_MAX_LOAD_COUNT))
286 return MAX_TID_COUNT;
287
288 tl = &lq_data->load[tid];
289
290 curr_time -= curr_time % TID_ROUND_VALUE;
291
292 /* Happens only for the first packet. Initialize the data */
293 if (!(tl->queue_count)) {
294 tl->total = 1;
295 tl->time_stamp = curr_time;
296 tl->queue_count = 1;
297 tl->head = 0;
298 tl->packet_count[0] = 1;
299 return MAX_TID_COUNT;
300 }
301
302 time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
303 index = time_diff / TID_QUEUE_CELL_SPACING;
304
305 /* The history is too long: remove data that is older than */
306 /* TID_MAX_TIME_DIFF */
307 if (index >= TID_QUEUE_MAX_SIZE)
308 iwl4965_rs_tl_rm_old_stats(tl, curr_time);
309
310 index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
311 tl->packet_count[index] = tl->packet_count[index] + 1;
312 tl->total = tl->total + 1;
313
314 if ((index + 1) > tl->queue_count)
315 tl->queue_count = index + 1;
316
317 return tid;
318}
319
320/*
321 get the traffic load value for tid
322*/
323static u32 iwl4965_rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
324{
325 u32 curr_time = jiffies_to_msecs(jiffies);
326 u32 time_diff;
327 s32 index;
328 struct iwl_traffic_load *tl = NULL;
329
330 if (tid >= TID_MAX_LOAD_COUNT)
331 return 0;
332
333 tl = &(lq_data->load[tid]);
334
335 curr_time -= curr_time % TID_ROUND_VALUE;
336
337 if (!(tl->queue_count))
338 return 0;
339
340 time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
341 index = time_diff / TID_QUEUE_CELL_SPACING;
342
343 /* The history is too long: remove data that is older than */
344 /* TID_MAX_TIME_DIFF */
345 if (index >= TID_QUEUE_MAX_SIZE)
346 iwl4965_rs_tl_rm_old_stats(tl, curr_time);
347
348 return tl->total;
349}
350
351static int iwl4965_rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
352 struct iwl_lq_sta *lq_data, u8 tid,
353 struct ieee80211_sta *sta)
354{
355 int ret = -EAGAIN;
356 u32 load;
357
358 load = iwl4965_rs_tl_get_load(lq_data, tid);
359
360 if (load > IWL_AGG_LOAD_THRESHOLD) {
361 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
362 sta->addr, tid);
363 ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
364 if (ret == -EAGAIN) {
365 /*
366 * driver and mac80211 is out of sync
367 * this might be cause by reloading firmware
368 * stop the tx ba session here
369 */
370 IWL_ERR(priv, "Fail start Tx agg on tid: %d\n",
371 tid);
372 ieee80211_stop_tx_ba_session(sta, tid);
373 }
374 } else {
375 IWL_ERR(priv, "Aggregation not enabled for tid %d "
376 "because load = %u\n", tid, load);
377 }
378 return ret;
379}
380
381static void iwl4965_rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
382 struct iwl_lq_sta *lq_data,
383 struct ieee80211_sta *sta)
384{
385 if (tid < TID_MAX_LOAD_COUNT)
386 iwl4965_rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
387 else
388 IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
389 tid, TID_MAX_LOAD_COUNT);
390}
391
392static inline int iwl4965_get_iwl4965_num_of_ant_from_rate(u32 rate_n_flags)
393{
394 return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
395 !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
396 !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
397}
398
399/*
400 * Static function to get the expected throughput from an iwl_scale_tbl_info
401 * that wraps a NULL pointer check
402 */
403static s32
404iwl4965_get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
405{
406 if (tbl->expected_tpt)
407 return tbl->expected_tpt[rs_index];
408 return 0;
409}
410
411/**
412 * iwl4965_rs_collect_tx_data - Update the success/failure sliding window
413 *
414 * We keep a sliding window of the last 62 packets transmitted
415 * at this rate. window->data contains the bitmask of successful
416 * packets.
417 */
418static int iwl4965_rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
419 int scale_index, int attempts, int successes)
420{
421 struct iwl_rate_scale_data *window = NULL;
422 static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
423 s32 fail_count, tpt;
424
425 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
426 return -EINVAL;
427
428 /* Select window for current tx bit rate */
429 window = &(tbl->win[scale_index]);
430
431 /* Get expected throughput */
432 tpt = iwl4965_get_expected_tpt(tbl, scale_index);
433
434 /*
435 * Keep track of only the latest 62 tx frame attempts in this rate's
436 * history window; anything older isn't really relevant any more.
437 * If we have filled up the sliding window, drop the oldest attempt;
438 * if the oldest attempt (highest bit in bitmap) shows "success",
439 * subtract "1" from the success counter (this is the main reason
440 * we keep these bitmaps!).
441 */
442 while (attempts > 0) {
443 if (window->counter >= IWL_RATE_MAX_WINDOW) {
444
445 /* remove earliest */
446 window->counter = IWL_RATE_MAX_WINDOW - 1;
447
448 if (window->data & mask) {
449 window->data &= ~mask;
450 window->success_counter--;
451 }
452 }
453
454 /* Increment frames-attempted counter */
455 window->counter++;
456
457 /* Shift bitmap by one frame to throw away oldest history */
458 window->data <<= 1;
459
460 /* Mark the most recent #successes attempts as successful */
461 if (successes > 0) {
462 window->success_counter++;
463 window->data |= 0x1;
464 successes--;
465 }
466
467 attempts--;
468 }
469
470 /* Calculate current success ratio, avoid divide-by-0! */
471 if (window->counter > 0)
472 window->success_ratio = 128 * (100 * window->success_counter)
473 / window->counter;
474 else
475 window->success_ratio = IWL_INVALID_VALUE;
476
477 fail_count = window->counter - window->success_counter;
478
479 /* Calculate average throughput, if we have enough history. */
480 if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
481 (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
482 window->average_tpt = (window->success_ratio * tpt + 64) / 128;
483 else
484 window->average_tpt = IWL_INVALID_VALUE;
485
486 /* Tag this window as having been updated */
487 window->stamp = jiffies;
488
489 return 0;
490}
491
492/*
493 * Fill uCode API rate_n_flags field, based on "search" or "active" table.
494 */
495static u32 iwl4965_rate_n_flags_from_tbl(struct iwl_priv *priv,
496 struct iwl_scale_tbl_info *tbl,
497 int index, u8 use_green)
498{
499 u32 rate_n_flags = 0;
500
501 if (is_legacy(tbl->lq_type)) {
502 rate_n_flags = iwlegacy_rates[index].plcp;
503 if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
504 rate_n_flags |= RATE_MCS_CCK_MSK;
505
506 } else if (is_Ht(tbl->lq_type)) {
507 if (index > IWL_LAST_OFDM_RATE) {
508 IWL_ERR(priv, "Invalid HT rate index %d\n", index);
509 index = IWL_LAST_OFDM_RATE;
510 }
511 rate_n_flags = RATE_MCS_HT_MSK;
512
513 if (is_siso(tbl->lq_type))
514 rate_n_flags |= iwlegacy_rates[index].plcp_siso;
515 else
516 rate_n_flags |= iwlegacy_rates[index].plcp_mimo2;
517 } else {
518 IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type);
519 }
520
521 rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
522 RATE_MCS_ANT_ABC_MSK);
523
524 if (is_Ht(tbl->lq_type)) {
525 if (tbl->is_ht40) {
526 if (tbl->is_dup)
527 rate_n_flags |= RATE_MCS_DUP_MSK;
528 else
529 rate_n_flags |= RATE_MCS_HT40_MSK;
530 }
531 if (tbl->is_SGI)
532 rate_n_flags |= RATE_MCS_SGI_MSK;
533
534 if (use_green) {
535 rate_n_flags |= RATE_MCS_GF_MSK;
536 if (is_siso(tbl->lq_type) && tbl->is_SGI) {
537 rate_n_flags &= ~RATE_MCS_SGI_MSK;
538 IWL_ERR(priv, "GF was set with SGI:SISO\n");
539 }
540 }
541 }
542 return rate_n_flags;
543}
544
545/*
546 * Interpret uCode API's rate_n_flags format,
547 * fill "search" or "active" tx mode table.
548 */
549static int iwl4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
550 enum ieee80211_band band,
551 struct iwl_scale_tbl_info *tbl,
552 int *rate_idx)
553{
554 u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
555 u8 iwl4965_num_of_ant = iwl4965_get_iwl4965_num_of_ant_from_rate(rate_n_flags);
556 u8 mcs;
557
558 memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
559 *rate_idx = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
560
561 if (*rate_idx == IWL_RATE_INVALID) {
562 *rate_idx = -1;
563 return -EINVAL;
564 }
565 tbl->is_SGI = 0; /* default legacy setup */
566 tbl->is_ht40 = 0;
567 tbl->is_dup = 0;
568 tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
569 tbl->lq_type = LQ_NONE;
570 tbl->max_search = IWL_MAX_SEARCH;
571
572 /* legacy rate format */
573 if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
574 if (iwl4965_num_of_ant == 1) {
575 if (band == IEEE80211_BAND_5GHZ)
576 tbl->lq_type = LQ_A;
577 else
578 tbl->lq_type = LQ_G;
579 }
580 /* HT rate format */
581 } else {
582 if (rate_n_flags & RATE_MCS_SGI_MSK)
583 tbl->is_SGI = 1;
584
585 if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
586 (rate_n_flags & RATE_MCS_DUP_MSK))
587 tbl->is_ht40 = 1;
588
589 if (rate_n_flags & RATE_MCS_DUP_MSK)
590 tbl->is_dup = 1;
591
592 mcs = iwl4965_rs_extract_rate(rate_n_flags);
593
594 /* SISO */
595 if (mcs <= IWL_RATE_SISO_60M_PLCP) {
596 if (iwl4965_num_of_ant == 1)
597 tbl->lq_type = LQ_SISO; /*else NONE*/
598 /* MIMO2 */
599 } else {
600 if (iwl4965_num_of_ant == 2)
601 tbl->lq_type = LQ_MIMO2;
602 }
603 }
604 return 0;
605}
606
/* switch to another antenna/antennas and return 1 */
/* if no other valid antenna found, return 0 */
static int iwl4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
			     struct iwl_scale_tbl_info *tbl)
{
	u8 new_ant_type;

	/* nothing to toggle when current setting is absent or bogus */
	if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
		return 0;

	if (!iwl4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
		return 0;

	/* walk the toggle cycle until a supported configuration is hit;
	 * the lookup table cycles, so we terminate either on a valid
	 * entry or by arriving back at the starting configuration */
	new_ant_type = ant_toggle_lookup[tbl->ant_type];

	while ((new_ant_type != tbl->ant_type) &&
	       !iwl4965_rs_is_valid_ant(valid_ant, new_ant_type))
		new_ant_type = ant_toggle_lookup[new_ant_type];

	/* came full circle: no alternative exists */
	if (new_ant_type == tbl->ant_type)
		return 0;

	/* commit: update table and patch antenna bits in rate_n_flags */
	tbl->ant_type = new_ant_type;
	*rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
	*rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
	return 1;
}
634
635/**
636 * Green-field mode is valid if the station supports it and
637 * there are no non-GF stations present in the BSS.
638 */
639static bool iwl4965_rs_use_green(struct ieee80211_sta *sta)
640{
641 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
642 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
643
644 return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
645 !(ctx->ht.non_gf_sta_present);
646}
647
648/**
649 * iwl4965_rs_get_supported_rates - get the available rates
650 *
651 * if management frame or broadcast frame only return
652 * basic available rates.
653 *
654 */
655static u16 iwl4965_rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
656 struct ieee80211_hdr *hdr,
657 enum iwl_table_type rate_type)
658{
659 if (is_legacy(rate_type)) {
660 return lq_sta->active_legacy_rate;
661 } else {
662 if (is_siso(rate_type))
663 return lq_sta->active_siso_rate;
664 else
665 return lq_sta->active_mimo2_rate;
666 }
667}
668
/*
 * Find the nearest usable lower and higher rates around @index.
 *
 * Returns both packed into one u16: high index in the upper byte,
 * low index in the lower byte; IWL_RATE_INVALID marks a missing
 * neighbour.  For A-band/HT the rate table is scanned literally;
 * for 2.4 GHz legacy the prev_rs/next_rs links in iwlegacy_rates
 * are followed (they skip CCK/OFDM boundaries).
 */
static u16
iwl4965_rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
				int rate_type)
{
	u8 high = IWL_RATE_INVALID;
	u8 low = IWL_RATE_INVALID;

	/* 802.11A or ht walks to the next literal adjacent rate in
	 * the rate table */
	if (is_a_band(rate_type) || !is_legacy(rate_type)) {
		int i;
		u32 mask;

		/* Find the previous rate that is in the rate mask */
		i = index - 1;
		for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
			if (rate_mask & mask) {
				low = i;
				break;
			}
		}

		/* Find the next rate that is in the rate mask */
		i = index + 1;
		for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
			if (rate_mask & mask) {
				high = i;
				break;
			}
		}

		return (high << 8) | low;
	}

	/* legacy 2.4 GHz: follow the prev/next links, skipping rates
	 * masked out by @rate_mask */
	low = index;
	while (low != IWL_RATE_INVALID) {
		low = iwlegacy_rates[low].prev_rs;
		if (low == IWL_RATE_INVALID)
			break;
		if (rate_mask & (1 << low))
			break;
		IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
	}

	high = index;
	while (high != IWL_RATE_INVALID) {
		high = iwlegacy_rates[high].next_rs;
		if (high == IWL_RATE_INVALID)
			break;
		if (rate_mask & (1 << high))
			break;
		IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
	}

	return (high << 8) | low;
}
725
/*
 * iwl4965_rs_get_lower_rate - select the next rate below @scale_index
 *
 * If HT is no longer possible (or we are already at the lowest HT rate),
 * first falls the table back to legacy A/G mode, single antenna, 20 MHz,
 * no SGI.  Returns the rate_n_flags value for the chosen lower rate.
 */
static u32 iwl4965_rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
			struct iwl_scale_tbl_info *tbl,
			u8 scale_index, u8 ht_possible)
{
	s32 low;
	u16 rate_mask;
	u16 high_low;
	u8 switch_to_legacy = 0;
	u8 is_green = lq_sta->is_green;
	struct iwl_priv *priv = lq_sta->drv;

	/* check if we need to switch from HT to legacy rates.
	 * assumption is that mandatory rates (1Mbps or 6Mbps)
	 * are always supported (spec demand) */
	if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
		switch_to_legacy = 1;
		scale_index = rs_ht_to_legacy[scale_index];
		if (lq_sta->band == IEEE80211_BAND_5GHZ)
			tbl->lq_type = LQ_A;
		else
			tbl->lq_type = LQ_G;

		/* Legacy frames use a single antenna */
		if (iwl4965_num_of_ant(tbl->ant_type) > 1)
			tbl->ant_type =
			    iwl4965_first_antenna(priv->hw_params.valid_tx_ant);

		tbl->is_ht40 = 0;
		tbl->is_SGI = 0;
		tbl->max_search = IWL_MAX_SEARCH;
	}

	rate_mask = iwl4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);

	/* Mask with station rate restriction */
	if (is_legacy(tbl->lq_type)) {
		/* supp_rates has no CCK bits in A mode */
		if (lq_sta->band == IEEE80211_BAND_5GHZ)
			rate_mask = (u16)(rate_mask &
			   (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
		else
			rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
	}

	/* If we switched from HT to legacy, check current rate */
	if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
		low = scale_index;
		goto out;
	}

	/* lower byte of the packed result carries the "low" neighbor */
	high_low = iwl4965_rs_get_adjacent_rate(lq_sta->drv,
					scale_index, rate_mask,
					tbl->lq_type);
	low = high_low & 0xff;

	/* No enabled rate below the current one: keep the current rate */
	if (low == IWL_RATE_INVALID)
		low = scale_index;

out:
	return iwl4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
}
786
787/*
788 * Simple function to compare two rate scale table types
789 */
790static bool iwl4965_table_type_matches(struct iwl_scale_tbl_info *a,
791 struct iwl_scale_tbl_info *b)
792{
793 return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
794 (a->is_SGI == b->is_SGI);
795}
796
797/*
798 * mac80211 sends us Tx status
799 */
800static void
801iwl4965_rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
802 struct ieee80211_sta *sta, void *priv_sta,
803 struct sk_buff *skb)
804{
805 int legacy_success;
806 int retries;
807 int rs_index, mac_index, i;
808 struct iwl_lq_sta *lq_sta = priv_sta;
809 struct iwl_link_quality_cmd *table;
810 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
811 struct iwl_priv *priv = (struct iwl_priv *)priv_r;
812 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
813 enum mac80211_rate_control_flags mac_flags;
814 u32 tx_rate;
815 struct iwl_scale_tbl_info tbl_type;
816 struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
817 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
818 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
819
820 IWL_DEBUG_RATE_LIMIT(priv,
821 "get frame ack response, update rate scale window\n");
822
823 /* Treat uninitialized rate scaling data same as non-existing. */
824 if (!lq_sta) {
825 IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n");
826 return;
827 } else if (!lq_sta->drv) {
828 IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
829 return;
830 }
831
832 if (!ieee80211_is_data(hdr->frame_control) ||
833 info->flags & IEEE80211_TX_CTL_NO_ACK)
834 return;
835
836 /* This packet was aggregated but doesn't carry status info */
837 if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
838 !(info->flags & IEEE80211_TX_STAT_AMPDU))
839 return;
840
841 /*
842 * Ignore this Tx frame response if its initial rate doesn't match
843 * that of latest Link Quality command. There may be stragglers
844 * from a previous Link Quality command, but we're no longer interested
845 * in those; they're either from the "active" mode while we're trying
846 * to check "search" mode, or a prior "search" mode after we've moved
847 * to a new "search" mode (which might become the new "active" mode).
848 */
849 table = &lq_sta->lq;
850 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
851 iwl4965_rs_get_tbl_info_from_mcs(tx_rate,
852 priv->band, &tbl_type, &rs_index);
853 if (priv->band == IEEE80211_BAND_5GHZ)
854 rs_index -= IWL_FIRST_OFDM_RATE;
855 mac_flags = info->status.rates[0].flags;
856 mac_index = info->status.rates[0].idx;
857 /* For HT packets, map MCS to PLCP */
858 if (mac_flags & IEEE80211_TX_RC_MCS) {
859 mac_index &= RATE_MCS_CODE_MSK; /* Remove # of streams */
860 if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
861 mac_index++;
862 /*
863 * mac80211 HT index is always zero-indexed; we need to move
864 * HT OFDM rates after CCK rates in 2.4 GHz band
865 */
866 if (priv->band == IEEE80211_BAND_2GHZ)
867 mac_index += IWL_FIRST_OFDM_RATE;
868 }
869 /* Here we actually compare this rate to the latest LQ command */
870 if ((mac_index < 0) ||
871 (tbl_type.is_SGI !=
872 !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
873 (tbl_type.is_ht40 !=
874 !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
875 (tbl_type.is_dup !=
876 !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) ||
877 (tbl_type.ant_type != info->antenna_sel_tx) ||
878 (!!(tx_rate & RATE_MCS_HT_MSK) !=
879 !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
880 (!!(tx_rate & RATE_MCS_GF_MSK) !=
881 !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
882 (rs_index != mac_index)) {
883 IWL_DEBUG_RATE(priv,
884 "initial rate %d does not match %d (0x%x)\n",
885 mac_index, rs_index, tx_rate);
886 /*
887 * Since rates mis-match, the last LQ command may have failed.
888 * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
889 * ... driver.
890 */
891 lq_sta->missed_rate_counter++;
892 if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
893 lq_sta->missed_rate_counter = 0;
894 iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq,
895 CMD_ASYNC, false);
896 }
897 /* Regardless, ignore this status info for outdated rate */
898 return;
899 } else
900 /* Rate did match, so reset the missed_rate_counter */
901 lq_sta->missed_rate_counter = 0;
902
903 /* Figure out if rate scale algorithm is in active or search table */
904 if (iwl4965_table_type_matches(&tbl_type,
905 &(lq_sta->lq_info[lq_sta->active_tbl]))) {
906 curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
907 other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
908 } else if (iwl4965_table_type_matches(&tbl_type,
909 &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
910 curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
911 other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
912 } else {
913 IWL_DEBUG_RATE(priv,
914 "Neither active nor search matches tx rate\n");
915 tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
916 IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n",
917 tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
918 tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
919 IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n",
920 tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
921 IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n",
922 tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI);
923 /*
924 * no matching table found, let's by-pass the data collection
925 * and continue to perform rate scale to find the rate table
926 */
927 iwl4965_rs_stay_in_table(lq_sta, true);
928 goto done;
929 }
930
931 /*
932 * Updating the frame history depends on whether packets were
933 * aggregated.
934 *
935 * For aggregation, all packets were transmitted at the same rate, the
936 * first index into rate scale table.
937 */
938 if (info->flags & IEEE80211_TX_STAT_AMPDU) {
939 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
940 iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
941 &rs_index);
942 iwl4965_rs_collect_tx_data(curr_tbl, rs_index,
943 info->status.ampdu_len,
944 info->status.ampdu_ack_len);
945
946 /* Update success/fail counts if not searching for new mode */
947 if (lq_sta->stay_in_tbl) {
948 lq_sta->total_success += info->status.ampdu_ack_len;
949 lq_sta->total_failed += (info->status.ampdu_len -
950 info->status.ampdu_ack_len);
951 }
952 } else {
953 /*
954 * For legacy, update frame history with for each Tx retry.
955 */
956 retries = info->status.rates[0].count - 1;
957 /* HW doesn't send more than 15 retries */
958 retries = min(retries, 15);
959
960 /* The last transmission may have been successful */
961 legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
962 /* Collect data for each rate used during failed TX attempts */
963 for (i = 0; i <= retries; ++i) {
964 tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
965 iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band,
966 &tbl_type, &rs_index);
967 /*
968 * Only collect stats if retried rate is in the same RS
969 * table as active/search.
970 */
971 if (iwl4965_table_type_matches(&tbl_type, curr_tbl))
972 tmp_tbl = curr_tbl;
973 else if (iwl4965_table_type_matches(&tbl_type,
974 other_tbl))
975 tmp_tbl = other_tbl;
976 else
977 continue;
978 iwl4965_rs_collect_tx_data(tmp_tbl, rs_index, 1,
979 i < retries ? 0 : legacy_success);
980 }
981
982 /* Update success/fail counts if not searching for new mode */
983 if (lq_sta->stay_in_tbl) {
984 lq_sta->total_success += legacy_success;
985 lq_sta->total_failed += retries + (1 - legacy_success);
986 }
987 }
988 /* The last TX rate is cached in lq_sta; it's set in if/else above */
989 lq_sta->last_rate_n_flags = tx_rate;
990done:
991 /* See if there's a better rate or modulation mode to try. */
992 if (sta && sta->supp_rates[sband->band])
993 iwl4965_rs_rate_scale_perform(priv, skb, sta, lq_sta);
994}
995
996/*
997 * Begin a period of staying with a selected modulation mode.
998 * Set "stay_in_tbl" flag to prevent any mode switches.
999 * Set frame tx success limits according to legacy vs. high-throughput,
1000 * and reset overall (spanning all rates) tx success history statistics.
1001 * These control how long we stay using same modulation mode before
1002 * searching for a new mode.
1003 */
1004static void iwl4965_rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
1005 struct iwl_lq_sta *lq_sta)
1006{
1007 IWL_DEBUG_RATE(priv, "we are staying in the same table\n");
1008 lq_sta->stay_in_tbl = 1; /* only place this gets set */
1009 if (is_legacy) {
1010 lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
1011 lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
1012 lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT;
1013 } else {
1014 lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT;
1015 lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT;
1016 lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT;
1017 }
1018 lq_sta->table_count = 0;
1019 lq_sta->total_failed = 0;
1020 lq_sta->total_success = 0;
1021 lq_sta->flush_timer = jiffies;
1022 lq_sta->action_counter = 0;
1023}
1024
1025/*
1026 * Find correct throughput table for given mode of modulation
1027 */
1028static void iwl4965_rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
1029 struct iwl_scale_tbl_info *tbl)
1030{
1031 /* Used to choose among HT tables */
1032 s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
1033
1034 /* Check for invalid LQ type */
1035 if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
1036 tbl->expected_tpt = expected_tpt_legacy;
1037 return;
1038 }
1039
1040 /* Legacy rates have only one table */
1041 if (is_legacy(tbl->lq_type)) {
1042 tbl->expected_tpt = expected_tpt_legacy;
1043 return;
1044 }
1045
1046 /* Choose among many HT tables depending on number of streams
1047 * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
1048 * status */
1049 if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1050 ht_tbl_pointer = expected_tpt_siso20MHz;
1051 else if (is_siso(tbl->lq_type))
1052 ht_tbl_pointer = expected_tpt_siso40MHz;
1053 else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1054 ht_tbl_pointer = expected_tpt_mimo2_20MHz;
1055 else /* if (is_mimo2(tbl->lq_type)) <-- must be true */
1056 ht_tbl_pointer = expected_tpt_mimo2_40MHz;
1057
1058 if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
1059 tbl->expected_tpt = ht_tbl_pointer[0];
1060 else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
1061 tbl->expected_tpt = ht_tbl_pointer[1];
1062 else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
1063 tbl->expected_tpt = ht_tbl_pointer[2];
1064 else /* AGG+SGI */
1065 tbl->expected_tpt = ht_tbl_pointer[3];
1066}
1067
1068/*
1069 * Find starting rate for new "search" high-throughput mode of modulation.
1070 * Goal is to find lowest expected rate (under perfect conditions) that is
1071 * above the current measured throughput of "active" mode, to give new mode
1072 * a fair chance to prove itself without too many challenges.
1073 *
1074 * This gets called when transitioning to more aggressive modulation
1075 * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
1076 * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
1077 * to decrease to match "active" throughput. When moving from MIMO to SISO,
1078 * bit rate will typically need to increase, but not if performance was bad.
1079 */
1080static s32 iwl4965_rs_get_best_rate(struct iwl_priv *priv,
1081 struct iwl_lq_sta *lq_sta,
1082 struct iwl_scale_tbl_info *tbl, /* "search" */
1083 u16 rate_mask, s8 index)
1084{
1085 /* "active" values */
1086 struct iwl_scale_tbl_info *active_tbl =
1087 &(lq_sta->lq_info[lq_sta->active_tbl]);
1088 s32 active_sr = active_tbl->win[index].success_ratio;
1089 s32 active_tpt = active_tbl->expected_tpt[index];
1090
1091 /* expected "search" throughput */
1092 s32 *tpt_tbl = tbl->expected_tpt;
1093
1094 s32 new_rate, high, low, start_hi;
1095 u16 high_low;
1096 s8 rate = index;
1097
1098 new_rate = high = low = start_hi = IWL_RATE_INVALID;
1099
1100 for (; ;) {
1101 high_low = iwl4965_rs_get_adjacent_rate(priv, rate, rate_mask,
1102 tbl->lq_type);
1103
1104 low = high_low & 0xff;
1105 high = (high_low >> 8) & 0xff;
1106
1107 /*
1108 * Lower the "search" bit rate, to give new "search" mode
1109 * approximately the same throughput as "active" if:
1110 *
1111 * 1) "Active" mode has been working modestly well (but not
1112 * great), and expected "search" throughput (under perfect
1113 * conditions) at candidate rate is above the actual
1114 * measured "active" throughput (but less than expected
1115 * "active" throughput under perfect conditions).
1116 * OR
1117 * 2) "Active" mode has been working perfectly or very well
1118 * and expected "search" throughput (under perfect
1119 * conditions) at candidate rate is above expected
1120 * "active" throughput (under perfect conditions).
1121 */
1122 if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
1123 ((active_sr > IWL_RATE_DECREASE_TH) &&
1124 (active_sr <= IWL_RATE_HIGH_TH) &&
1125 (tpt_tbl[rate] <= active_tpt))) ||
1126 ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
1127 (tpt_tbl[rate] > active_tpt))) {
1128
1129 /* (2nd or later pass)
1130 * If we've already tried to raise the rate, and are
1131 * now trying to lower it, use the higher rate. */
1132 if (start_hi != IWL_RATE_INVALID) {
1133 new_rate = start_hi;
1134 break;
1135 }
1136
1137 new_rate = rate;
1138
1139 /* Loop again with lower rate */
1140 if (low != IWL_RATE_INVALID)
1141 rate = low;
1142
1143 /* Lower rate not available, use the original */
1144 else
1145 break;
1146
1147 /* Else try to raise the "search" rate to match "active" */
1148 } else {
1149 /* (2nd or later pass)
1150 * If we've already tried to lower the rate, and are
1151 * now trying to raise it, use the lower rate. */
1152 if (new_rate != IWL_RATE_INVALID)
1153 break;
1154
1155 /* Loop again with higher rate */
1156 else if (high != IWL_RATE_INVALID) {
1157 start_hi = high;
1158 rate = high;
1159
1160 /* Higher rate not available, use the original */
1161 } else {
1162 new_rate = rate;
1163 break;
1164 }
1165 }
1166 }
1167
1168 return new_rate;
1169}
1170
1171/*
1172 * Set up search table for MIMO2
1173 */
1174static int iwl4965_rs_switch_to_mimo2(struct iwl_priv *priv,
1175 struct iwl_lq_sta *lq_sta,
1176 struct ieee80211_conf *conf,
1177 struct ieee80211_sta *sta,
1178 struct iwl_scale_tbl_info *tbl, int index)
1179{
1180 u16 rate_mask;
1181 s32 rate;
1182 s8 is_green = lq_sta->is_green;
1183 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1184 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1185
1186 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1187 return -1;
1188
1189 if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
1190 == WLAN_HT_CAP_SM_PS_STATIC)
1191 return -1;
1192
1193 /* Need both Tx chains/antennas to support MIMO */
1194 if (priv->hw_params.tx_chains_num < 2)
1195 return -1;
1196
1197 IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
1198
1199 tbl->lq_type = LQ_MIMO2;
1200 tbl->is_dup = lq_sta->is_dup;
1201 tbl->action = 0;
1202 tbl->max_search = IWL_MAX_SEARCH;
1203 rate_mask = lq_sta->active_mimo2_rate;
1204
1205 if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
1206 tbl->is_ht40 = 1;
1207 else
1208 tbl->is_ht40 = 0;
1209
1210 iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
1211
1212 rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1213
1214 IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n",
1215 rate, rate_mask);
1216 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1217 IWL_DEBUG_RATE(priv,
1218 "Can't switch with index %d rate mask %x\n",
1219 rate, rate_mask);
1220 return -1;
1221 }
1222 tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
1223 tbl, rate, is_green);
1224
1225 IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
1226 tbl->current_rate, is_green);
1227 return 0;
1228}
1229
1230/*
1231 * Set up search table for SISO
1232 */
1233static int iwl4965_rs_switch_to_siso(struct iwl_priv *priv,
1234 struct iwl_lq_sta *lq_sta,
1235 struct ieee80211_conf *conf,
1236 struct ieee80211_sta *sta,
1237 struct iwl_scale_tbl_info *tbl, int index)
1238{
1239 u16 rate_mask;
1240 u8 is_green = lq_sta->is_green;
1241 s32 rate;
1242 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1243 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1244
1245 if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
1246 return -1;
1247
1248 IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n");
1249
1250 tbl->is_dup = lq_sta->is_dup;
1251 tbl->lq_type = LQ_SISO;
1252 tbl->action = 0;
1253 tbl->max_search = IWL_MAX_SEARCH;
1254 rate_mask = lq_sta->active_siso_rate;
1255
1256 if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
1257 tbl->is_ht40 = 1;
1258 else
1259 tbl->is_ht40 = 0;
1260
1261 if (is_green)
1262 tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
1263
1264 iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
1265 rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
1266
1267 IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask);
1268 if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
1269 IWL_DEBUG_RATE(priv,
1270 "can not switch with index %d rate mask %x\n",
1271 rate, rate_mask);
1272 return -1;
1273 }
1274 tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
1275 tbl, rate, is_green);
1276 IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
1277 tbl->current_rate, is_green);
1278 return 0;
1279}
1280
1281/*
1282 * Try to switch to new modulation mode from legacy
1283 */
1284static int iwl4965_rs_move_legacy_other(struct iwl_priv *priv,
1285 struct iwl_lq_sta *lq_sta,
1286 struct ieee80211_conf *conf,
1287 struct ieee80211_sta *sta,
1288 int index)
1289{
1290 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1291 struct iwl_scale_tbl_info *search_tbl =
1292 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1293 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1294 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1295 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1296 u8 start_action;
1297 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1298 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1299 int ret = 0;
1300 u8 update_search_tbl_counter = 0;
1301
1302 tbl->action = IWL_LEGACY_SWITCH_SISO;
1303
1304 start_action = tbl->action;
1305 for (; ;) {
1306 lq_sta->action_counter++;
1307 switch (tbl->action) {
1308 case IWL_LEGACY_SWITCH_ANTENNA1:
1309 case IWL_LEGACY_SWITCH_ANTENNA2:
1310 IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n");
1311
1312 if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
1313 tx_chains_num <= 1) ||
1314 (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
1315 tx_chains_num <= 2))
1316 break;
1317
1318 /* Don't change antenna if success has been great */
1319 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1320 break;
1321
1322 /* Set up search table to try other antenna */
1323 memcpy(search_tbl, tbl, sz);
1324
1325 if (iwl4965_rs_toggle_antenna(valid_tx_ant,
1326 &search_tbl->current_rate, search_tbl)) {
1327 update_search_tbl_counter = 1;
1328 iwl4965_rs_set_expected_tpt_table(lq_sta,
1329 search_tbl);
1330 goto out;
1331 }
1332 break;
1333 case IWL_LEGACY_SWITCH_SISO:
1334 IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n");
1335
1336 /* Set up search table to try SISO */
1337 memcpy(search_tbl, tbl, sz);
1338 search_tbl->is_SGI = 0;
1339 ret = iwl4965_rs_switch_to_siso(priv, lq_sta, conf, sta,
1340 search_tbl, index);
1341 if (!ret) {
1342 lq_sta->action_counter = 0;
1343 goto out;
1344 }
1345
1346 break;
1347 case IWL_LEGACY_SWITCH_MIMO2_AB:
1348 case IWL_LEGACY_SWITCH_MIMO2_AC:
1349 case IWL_LEGACY_SWITCH_MIMO2_BC:
1350 IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n");
1351
1352 /* Set up search table to try MIMO */
1353 memcpy(search_tbl, tbl, sz);
1354 search_tbl->is_SGI = 0;
1355
1356 if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
1357 search_tbl->ant_type = ANT_AB;
1358 else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
1359 search_tbl->ant_type = ANT_AC;
1360 else
1361 search_tbl->ant_type = ANT_BC;
1362
1363 if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
1364 search_tbl->ant_type))
1365 break;
1366
1367 ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
1368 conf, sta,
1369 search_tbl, index);
1370 if (!ret) {
1371 lq_sta->action_counter = 0;
1372 goto out;
1373 }
1374 break;
1375 }
1376 tbl->action++;
1377 if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
1378 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1379
1380 if (tbl->action == start_action)
1381 break;
1382
1383 }
1384 search_tbl->lq_type = LQ_NONE;
1385 return 0;
1386
1387out:
1388 lq_sta->search_better_tbl = 1;
1389 tbl->action++;
1390 if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
1391 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1392 if (update_search_tbl_counter)
1393 search_tbl->action = tbl->action;
1394 return 0;
1395
1396}
1397
1398/*
1399 * Try to switch to new modulation mode from SISO
1400 */
1401static int iwl4965_rs_move_siso_to_other(struct iwl_priv *priv,
1402 struct iwl_lq_sta *lq_sta,
1403 struct ieee80211_conf *conf,
1404 struct ieee80211_sta *sta, int index)
1405{
1406 u8 is_green = lq_sta->is_green;
1407 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1408 struct iwl_scale_tbl_info *search_tbl =
1409 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1410 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1411 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1412 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1413 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1414 u8 start_action;
1415 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1416 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1417 u8 update_search_tbl_counter = 0;
1418 int ret;
1419
1420 start_action = tbl->action;
1421
1422 for (;;) {
1423 lq_sta->action_counter++;
1424 switch (tbl->action) {
1425 case IWL_SISO_SWITCH_ANTENNA1:
1426 case IWL_SISO_SWITCH_ANTENNA2:
1427 IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n");
1428 if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
1429 tx_chains_num <= 1) ||
1430 (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
1431 tx_chains_num <= 2))
1432 break;
1433
1434 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1435 break;
1436
1437 memcpy(search_tbl, tbl, sz);
1438 if (iwl4965_rs_toggle_antenna(valid_tx_ant,
1439 &search_tbl->current_rate, search_tbl)) {
1440 update_search_tbl_counter = 1;
1441 goto out;
1442 }
1443 break;
1444 case IWL_SISO_SWITCH_MIMO2_AB:
1445 case IWL_SISO_SWITCH_MIMO2_AC:
1446 case IWL_SISO_SWITCH_MIMO2_BC:
1447 IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n");
1448 memcpy(search_tbl, tbl, sz);
1449 search_tbl->is_SGI = 0;
1450
1451 if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
1452 search_tbl->ant_type = ANT_AB;
1453 else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
1454 search_tbl->ant_type = ANT_AC;
1455 else
1456 search_tbl->ant_type = ANT_BC;
1457
1458 if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
1459 search_tbl->ant_type))
1460 break;
1461
1462 ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
1463 conf, sta,
1464 search_tbl, index);
1465 if (!ret)
1466 goto out;
1467 break;
1468 case IWL_SISO_SWITCH_GI:
1469 if (!tbl->is_ht40 && !(ht_cap->cap &
1470 IEEE80211_HT_CAP_SGI_20))
1471 break;
1472 if (tbl->is_ht40 && !(ht_cap->cap &
1473 IEEE80211_HT_CAP_SGI_40))
1474 break;
1475
1476 IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n");
1477
1478 memcpy(search_tbl, tbl, sz);
1479 if (is_green) {
1480 if (!tbl->is_SGI)
1481 break;
1482 else
1483 IWL_ERR(priv,
1484 "SGI was set in GF+SISO\n");
1485 }
1486 search_tbl->is_SGI = !tbl->is_SGI;
1487 iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
1488 if (tbl->is_SGI) {
1489 s32 tpt = lq_sta->last_tpt / 100;
1490 if (tpt >= search_tbl->expected_tpt[index])
1491 break;
1492 }
1493 search_tbl->current_rate =
1494 iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
1495 index, is_green);
1496 update_search_tbl_counter = 1;
1497 goto out;
1498 }
1499 tbl->action++;
1500 if (tbl->action > IWL_SISO_SWITCH_GI)
1501 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1502
1503 if (tbl->action == start_action)
1504 break;
1505 }
1506 search_tbl->lq_type = LQ_NONE;
1507 return 0;
1508
1509 out:
1510 lq_sta->search_better_tbl = 1;
1511 tbl->action++;
1512 if (tbl->action > IWL_SISO_SWITCH_GI)
1513 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1514 if (update_search_tbl_counter)
1515 search_tbl->action = tbl->action;
1516
1517 return 0;
1518}
1519
1520/*
1521 * Try to switch to new modulation mode from MIMO2
1522 */
1523static int iwl4965_rs_move_mimo2_to_other(struct iwl_priv *priv,
1524 struct iwl_lq_sta *lq_sta,
1525 struct ieee80211_conf *conf,
1526 struct ieee80211_sta *sta, int index)
1527{
1528 s8 is_green = lq_sta->is_green;
1529 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1530 struct iwl_scale_tbl_info *search_tbl =
1531 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1532 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1533 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1534 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1535 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1536 u8 start_action;
1537 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1538 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1539 u8 update_search_tbl_counter = 0;
1540 int ret;
1541
1542 start_action = tbl->action;
1543 for (;;) {
1544 lq_sta->action_counter++;
1545 switch (tbl->action) {
1546 case IWL_MIMO2_SWITCH_ANTENNA1:
1547 case IWL_MIMO2_SWITCH_ANTENNA2:
1548 IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n");
1549
1550 if (tx_chains_num <= 2)
1551 break;
1552
1553 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1554 break;
1555
1556 memcpy(search_tbl, tbl, sz);
1557 if (iwl4965_rs_toggle_antenna(valid_tx_ant,
1558 &search_tbl->current_rate, search_tbl)) {
1559 update_search_tbl_counter = 1;
1560 goto out;
1561 }
1562 break;
1563 case IWL_MIMO2_SWITCH_SISO_A:
1564 case IWL_MIMO2_SWITCH_SISO_B:
1565 case IWL_MIMO2_SWITCH_SISO_C:
1566 IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n");
1567
1568 /* Set up new search table for SISO */
1569 memcpy(search_tbl, tbl, sz);
1570
1571 if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
1572 search_tbl->ant_type = ANT_A;
1573 else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
1574 search_tbl->ant_type = ANT_B;
1575 else
1576 search_tbl->ant_type = ANT_C;
1577
1578 if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
1579 search_tbl->ant_type))
1580 break;
1581
1582 ret = iwl4965_rs_switch_to_siso(priv, lq_sta,
1583 conf, sta,
1584 search_tbl, index);
1585 if (!ret)
1586 goto out;
1587
1588 break;
1589
1590 case IWL_MIMO2_SWITCH_GI:
1591 if (!tbl->is_ht40 && !(ht_cap->cap &
1592 IEEE80211_HT_CAP_SGI_20))
1593 break;
1594 if (tbl->is_ht40 && !(ht_cap->cap &
1595 IEEE80211_HT_CAP_SGI_40))
1596 break;
1597
1598 IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n");
1599
1600 /* Set up new search table for MIMO2 */
1601 memcpy(search_tbl, tbl, sz);
1602 search_tbl->is_SGI = !tbl->is_SGI;
1603 iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
1604 /*
1605 * If active table already uses the fastest possible
1606 * modulation (dual stream with short guard interval),
1607 * and it's working well, there's no need to look
1608 * for a better type of modulation!
1609 */
1610 if (tbl->is_SGI) {
1611 s32 tpt = lq_sta->last_tpt / 100;
1612 if (tpt >= search_tbl->expected_tpt[index])
1613 break;
1614 }
1615 search_tbl->current_rate =
1616 iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
1617 index, is_green);
1618 update_search_tbl_counter = 1;
1619 goto out;
1620
1621 }
1622 tbl->action++;
1623 if (tbl->action > IWL_MIMO2_SWITCH_GI)
1624 tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
1625
1626 if (tbl->action == start_action)
1627 break;
1628 }
1629 search_tbl->lq_type = LQ_NONE;
1630 return 0;
1631 out:
1632 lq_sta->search_better_tbl = 1;
1633 tbl->action++;
1634 if (tbl->action > IWL_MIMO2_SWITCH_GI)
1635 tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
1636 if (update_search_tbl_counter)
1637 search_tbl->action = tbl->action;
1638
1639 return 0;
1640
1641}
1642
1643/*
1644 * Check whether we should continue using same modulation mode, or
1645 * begin search for a new mode, based on:
1646 * 1) # tx successes or failures while using this mode
1647 * 2) # times calling this function
1648 * 3) elapsed time in this mode (not used, for now)
1649 */
1650static void
1651iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
1652{
1653 struct iwl_scale_tbl_info *tbl;
1654 int i;
1655 int active_tbl;
1656 int flush_interval_passed = 0;
1657 struct iwl_priv *priv;
1658
1659 priv = lq_sta->drv;
1660 active_tbl = lq_sta->active_tbl;
1661
1662 tbl = &(lq_sta->lq_info[active_tbl]);
1663
1664 /* If we've been disallowing search, see if we should now allow it */
1665 if (lq_sta->stay_in_tbl) {
1666
1667 /* Elapsed time using current modulation mode */
1668 if (lq_sta->flush_timer)
1669 flush_interval_passed =
1670 time_after(jiffies,
1671 (unsigned long)(lq_sta->flush_timer +
1672 IWL_RATE_SCALE_FLUSH_INTVL));
1673
1674 /*
1675 * Check if we should allow search for new modulation mode.
1676 * If many frames have failed or succeeded, or we've used
1677 * this same modulation for a long time, allow search, and
1678 * reset history stats that keep track of whether we should
1679 * allow a new search. Also (below) reset all bitmaps and
1680 * stats in active history.
1681 */
1682 if (force_search ||
1683 (lq_sta->total_failed > lq_sta->max_failure_limit) ||
1684 (lq_sta->total_success > lq_sta->max_success_limit) ||
1685 ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
1686 && (flush_interval_passed))) {
1687 IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n:",
1688 lq_sta->total_failed,
1689 lq_sta->total_success,
1690 flush_interval_passed);
1691
1692 /* Allow search for new mode */
1693 lq_sta->stay_in_tbl = 0; /* only place reset */
1694 lq_sta->total_failed = 0;
1695 lq_sta->total_success = 0;
1696 lq_sta->flush_timer = 0;
1697
1698 /*
1699 * Else if we've used this modulation mode enough repetitions
1700 * (regardless of elapsed time or success/failure), reset
1701 * history bitmaps and rate-specific stats for all rates in
1702 * active table.
1703 */
1704 } else {
1705 lq_sta->table_count++;
1706 if (lq_sta->table_count >=
1707 lq_sta->table_count_limit) {
1708 lq_sta->table_count = 0;
1709
1710 IWL_DEBUG_RATE(priv,
1711 "LQ: stay in table clear win\n");
1712 for (i = 0; i < IWL_RATE_COUNT; i++)
1713 iwl4965_rs_rate_scale_clear_window(
1714 &(tbl->win[i]));
1715 }
1716 }
1717
1718 /* If transitioning to allow "search", reset all history
1719 * bitmaps and stats in active table (this will become the new
1720 * "search" table). */
1721 if (!lq_sta->stay_in_tbl) {
1722 for (i = 0; i < IWL_RATE_COUNT; i++)
1723 iwl4965_rs_rate_scale_clear_window(
1724 &(tbl->win[i]));
1725 }
1726 }
1727}
1728
1729/*
1730 * setup rate table in uCode
1731 * return rate_n_flags as used in the table
1732 */
1733static u32 iwl4965_rs_update_rate_tbl(struct iwl_priv *priv,
1734 struct iwl_rxon_context *ctx,
1735 struct iwl_lq_sta *lq_sta,
1736 struct iwl_scale_tbl_info *tbl,
1737 int index, u8 is_green)
1738{
1739 u32 rate;
1740
1741 /* Update uCode's rate table. */
1742 rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, index, is_green);
1743 iwl4965_rs_fill_link_cmd(priv, lq_sta, rate);
1744 iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
1745
1746 return rate;
1747}
1748
1749/*
1750 * Do rate scaling and search for new modulation mode.
1751 */
1752static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
1753 struct sk_buff *skb,
1754 struct ieee80211_sta *sta,
1755 struct iwl_lq_sta *lq_sta)
1756{
1757 struct ieee80211_hw *hw = priv->hw;
1758 struct ieee80211_conf *conf = &hw->conf;
1759 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1760 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1761 int low = IWL_RATE_INVALID;
1762 int high = IWL_RATE_INVALID;
1763 int index;
1764 int i;
1765 struct iwl_rate_scale_data *window = NULL;
1766 int current_tpt = IWL_INVALID_VALUE;
1767 int low_tpt = IWL_INVALID_VALUE;
1768 int high_tpt = IWL_INVALID_VALUE;
1769 u32 fail_count;
1770 s8 scale_action = 0;
1771 u16 rate_mask;
1772 u8 update_lq = 0;
1773 struct iwl_scale_tbl_info *tbl, *tbl1;
1774 u16 rate_scale_index_msk = 0;
1775 u32 rate;
1776 u8 is_green = 0;
1777 u8 active_tbl = 0;
1778 u8 done_search = 0;
1779 u16 high_low;
1780 s32 sr;
1781 u8 tid = MAX_TID_COUNT;
1782 struct iwl_tid_data *tid_data;
1783 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
1784 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
1785
1786 IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");
1787
1788 /* Send management frames and NO_ACK data using lowest rate. */
1789 /* TODO: this could probably be improved.. */
1790 if (!ieee80211_is_data(hdr->frame_control) ||
1791 info->flags & IEEE80211_TX_CTL_NO_ACK)
1792 return;
1793
1794 if (!sta || !lq_sta)
1795 return;
1796
1797 lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
1798
1799 tid = iwl4965_rs_tl_add_packet(lq_sta, hdr);
1800 if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) {
1801 tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid];
1802 if (tid_data->agg.state == IWL_AGG_OFF)
1803 lq_sta->is_agg = 0;
1804 else
1805 lq_sta->is_agg = 1;
1806 } else
1807 lq_sta->is_agg = 0;
1808
1809 /*
1810 * Select rate-scale / modulation-mode table to work with in
1811 * the rest of this function: "search" if searching for better
1812 * modulation mode, or "active" if doing rate scaling within a mode.
1813 */
1814 if (!lq_sta->search_better_tbl)
1815 active_tbl = lq_sta->active_tbl;
1816 else
1817 active_tbl = 1 - lq_sta->active_tbl;
1818
1819 tbl = &(lq_sta->lq_info[active_tbl]);
1820 if (is_legacy(tbl->lq_type))
1821 lq_sta->is_green = 0;
1822 else
1823 lq_sta->is_green = iwl4965_rs_use_green(sta);
1824 is_green = lq_sta->is_green;
1825
1826 /* current tx rate */
1827 index = lq_sta->last_txrate_idx;
1828
1829 IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index,
1830 tbl->lq_type);
1831
1832 /* rates available for this association, and for modulation mode */
1833 rate_mask = iwl4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
1834
1835 IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask);
1836
1837 /* mask with station rate restriction */
1838 if (is_legacy(tbl->lq_type)) {
1839 if (lq_sta->band == IEEE80211_BAND_5GHZ)
1840 /* supp_rates has no CCK bits in A mode */
1841 rate_scale_index_msk = (u16) (rate_mask &
1842 (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
1843 else
1844 rate_scale_index_msk = (u16) (rate_mask &
1845 lq_sta->supp_rates);
1846
1847 } else
1848 rate_scale_index_msk = rate_mask;
1849
1850 if (!rate_scale_index_msk)
1851 rate_scale_index_msk = rate_mask;
1852
1853 if (!((1 << index) & rate_scale_index_msk)) {
1854 IWL_ERR(priv, "Current Rate is not valid\n");
1855 if (lq_sta->search_better_tbl) {
1856 /* revert to active table if search table is not valid*/
1857 tbl->lq_type = LQ_NONE;
1858 lq_sta->search_better_tbl = 0;
1859 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1860 /* get "active" rate info */
1861 index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
1862 rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
1863 tbl, index, is_green);
1864 }
1865 return;
1866 }
1867
1868 /* Get expected throughput table and history window for current rate */
1869 if (!tbl->expected_tpt) {
1870 IWL_ERR(priv, "tbl->expected_tpt is NULL\n");
1871 return;
1872 }
1873
1874 /* force user max rate if set by user */
1875 if ((lq_sta->max_rate_idx != -1) &&
1876 (lq_sta->max_rate_idx < index)) {
1877 index = lq_sta->max_rate_idx;
1878 update_lq = 1;
1879 window = &(tbl->win[index]);
1880 goto lq_update;
1881 }
1882
1883 window = &(tbl->win[index]);
1884
1885 /*
1886 * If there is not enough history to calculate actual average
1887 * throughput, keep analyzing results of more tx frames, without
1888 * changing rate or mode (bypass most of the rest of this function).
1889 * Set up new rate table in uCode only if old rate is not supported
1890 * in current association (use new rate found above).
1891 */
1892 fail_count = window->counter - window->success_counter;
1893 if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
1894 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
1895 IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d "
1896 "for index %d\n",
1897 window->success_counter, window->counter, index);
1898
1899 /* Can't calculate this yet; not enough history */
1900 window->average_tpt = IWL_INVALID_VALUE;
1901
1902 /* Should we stay with this modulation mode,
1903 * or search for a new one? */
1904 iwl4965_rs_stay_in_table(lq_sta, false);
1905
1906 goto out;
1907 }
1908 /* Else we have enough samples; calculate estimate of
1909 * actual average throughput */
1910 if (window->average_tpt != ((window->success_ratio *
1911 tbl->expected_tpt[index] + 64) / 128)) {
1912 IWL_ERR(priv,
1913 "expected_tpt should have been calculated by now\n");
1914 window->average_tpt = ((window->success_ratio *
1915 tbl->expected_tpt[index] + 64) / 128);
1916 }
1917
1918 /* If we are searching for better modulation mode, check success. */
1919 if (lq_sta->search_better_tbl) {
1920 /* If good success, continue using the "search" mode;
1921 * no need to send new link quality command, since we're
1922 * continuing to use the setup that we've been trying. */
1923 if (window->average_tpt > lq_sta->last_tpt) {
1924
1925 IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE "
1926 "suc=%d cur-tpt=%d old-tpt=%d\n",
1927 window->success_ratio,
1928 window->average_tpt,
1929 lq_sta->last_tpt);
1930
1931 if (!is_legacy(tbl->lq_type))
1932 lq_sta->enable_counter = 1;
1933
1934 /* Swap tables; "search" becomes "active" */
1935 lq_sta->active_tbl = active_tbl;
1936 current_tpt = window->average_tpt;
1937
1938 /* Else poor success; go back to mode in "active" table */
1939 } else {
1940
1941 IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE "
1942 "suc=%d cur-tpt=%d old-tpt=%d\n",
1943 window->success_ratio,
1944 window->average_tpt,
1945 lq_sta->last_tpt);
1946
1947 /* Nullify "search" table */
1948 tbl->lq_type = LQ_NONE;
1949
1950 /* Revert to "active" table */
1951 active_tbl = lq_sta->active_tbl;
1952 tbl = &(lq_sta->lq_info[active_tbl]);
1953
1954 /* Revert to "active" rate and throughput info */
1955 index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
1956 current_tpt = lq_sta->last_tpt;
1957
1958 /* Need to set up a new rate table in uCode */
1959 update_lq = 1;
1960 }
1961
1962 /* Either way, we've made a decision; modulation mode
1963 * search is done, allow rate adjustment next time. */
1964 lq_sta->search_better_tbl = 0;
1965 done_search = 1; /* Don't switch modes below! */
1966 goto lq_update;
1967 }
1968
1969 /* (Else) not in search of better modulation mode, try for better
1970 * starting rate, while staying in this mode. */
1971 high_low = iwl4965_rs_get_adjacent_rate(priv, index,
1972 rate_scale_index_msk,
1973 tbl->lq_type);
1974 low = high_low & 0xff;
1975 high = (high_low >> 8) & 0xff;
1976
1977 /* If user set max rate, dont allow higher than user constrain */
1978 if ((lq_sta->max_rate_idx != -1) &&
1979 (lq_sta->max_rate_idx < high))
1980 high = IWL_RATE_INVALID;
1981
1982 sr = window->success_ratio;
1983
1984 /* Collect measured throughputs for current and adjacent rates */
1985 current_tpt = window->average_tpt;
1986 if (low != IWL_RATE_INVALID)
1987 low_tpt = tbl->win[low].average_tpt;
1988 if (high != IWL_RATE_INVALID)
1989 high_tpt = tbl->win[high].average_tpt;
1990
1991 scale_action = 0;
1992
1993 /* Too many failures, decrease rate */
1994 if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
1995 IWL_DEBUG_RATE(priv,
1996 "decrease rate because of low success_ratio\n");
1997 scale_action = -1;
1998
1999 /* No throughput measured yet for adjacent rates; try increase. */
2000 } else if ((low_tpt == IWL_INVALID_VALUE) &&
2001 (high_tpt == IWL_INVALID_VALUE)) {
2002
2003 if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
2004 scale_action = 1;
2005 else if (low != IWL_RATE_INVALID)
2006 scale_action = 0;
2007 }
2008
2009 /* Both adjacent throughputs are measured, but neither one has better
2010 * throughput; we're using the best rate, don't change it! */
2011 else if ((low_tpt != IWL_INVALID_VALUE) &&
2012 (high_tpt != IWL_INVALID_VALUE) &&
2013 (low_tpt < current_tpt) &&
2014 (high_tpt < current_tpt))
2015 scale_action = 0;
2016
2017 /* At least one adjacent rate's throughput is measured,
2018 * and may have better performance. */
2019 else {
2020 /* Higher adjacent rate's throughput is measured */
2021 if (high_tpt != IWL_INVALID_VALUE) {
2022 /* Higher rate has better throughput */
2023 if (high_tpt > current_tpt &&
2024 sr >= IWL_RATE_INCREASE_TH) {
2025 scale_action = 1;
2026 } else {
2027 scale_action = 0;
2028 }
2029
2030 /* Lower adjacent rate's throughput is measured */
2031 } else if (low_tpt != IWL_INVALID_VALUE) {
2032 /* Lower rate has better throughput */
2033 if (low_tpt > current_tpt) {
2034 IWL_DEBUG_RATE(priv,
2035 "decrease rate because of low tpt\n");
2036 scale_action = -1;
2037 } else if (sr >= IWL_RATE_INCREASE_TH) {
2038 scale_action = 1;
2039 }
2040 }
2041 }
2042
2043 /* Sanity check; asked for decrease, but success rate or throughput
2044 * has been good at old rate. Don't change it. */
2045 if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
2046 ((sr > IWL_RATE_HIGH_TH) ||
2047 (current_tpt > (100 * tbl->expected_tpt[low]))))
2048 scale_action = 0;
2049
2050 switch (scale_action) {
2051 case -1:
2052 /* Decrease starting rate, update uCode's rate table */
2053 if (low != IWL_RATE_INVALID) {
2054 update_lq = 1;
2055 index = low;
2056 }
2057
2058 break;
2059 case 1:
2060 /* Increase starting rate, update uCode's rate table */
2061 if (high != IWL_RATE_INVALID) {
2062 update_lq = 1;
2063 index = high;
2064 }
2065
2066 break;
2067 case 0:
2068 /* No change */
2069 default:
2070 break;
2071 }
2072
2073 IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d "
2074 "high %d type %d\n",
2075 index, scale_action, low, high, tbl->lq_type);
2076
2077lq_update:
2078 /* Replace uCode's rate table for the destination station. */
2079 if (update_lq)
2080 rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
2081 tbl, index, is_green);
2082
2083 /* Should we stay with this modulation mode,
2084 * or search for a new one? */
2085 iwl4965_rs_stay_in_table(lq_sta, false);
2086
2087 /*
2088 * Search for new modulation mode if we're:
2089 * 1) Not changing rates right now
2090 * 2) Not just finishing up a search
2091 * 3) Allowing a new search
2092 */
2093 if (!update_lq && !done_search &&
2094 !lq_sta->stay_in_tbl && window->counter) {
2095 /* Save current throughput to compare with "search" throughput*/
2096 lq_sta->last_tpt = current_tpt;
2097
2098 /* Select a new "search" modulation mode to try.
2099 * If one is found, set up the new "search" table. */
2100 if (is_legacy(tbl->lq_type))
2101 iwl4965_rs_move_legacy_other(priv, lq_sta,
2102 conf, sta, index);
2103 else if (is_siso(tbl->lq_type))
2104 iwl4965_rs_move_siso_to_other(priv, lq_sta,
2105 conf, sta, index);
2106 else /* (is_mimo2(tbl->lq_type)) */
2107 iwl4965_rs_move_mimo2_to_other(priv, lq_sta,
2108 conf, sta, index);
2109
2110 /* If new "search" mode was selected, set up in uCode table */
2111 if (lq_sta->search_better_tbl) {
2112 /* Access the "search" table, clear its history. */
2113 tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
2114 for (i = 0; i < IWL_RATE_COUNT; i++)
2115 iwl4965_rs_rate_scale_clear_window(
2116 &(tbl->win[i]));
2117
2118 /* Use new "search" start rate */
2119 index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
2120
2121 IWL_DEBUG_RATE(priv,
2122 "Switch current mcs: %X index: %d\n",
2123 tbl->current_rate, index);
2124 iwl4965_rs_fill_link_cmd(priv, lq_sta,
2125 tbl->current_rate);
2126 iwl_legacy_send_lq_cmd(priv, ctx,
2127 &lq_sta->lq, CMD_ASYNC, false);
2128 } else
2129 done_search = 1;
2130 }
2131
2132 if (done_search && !lq_sta->stay_in_tbl) {
2133 /* If the "active" (non-search) mode was legacy,
2134 * and we've tried switching antennas,
2135 * but we haven't been able to try HT modes (not available),
2136 * stay with best antenna legacy modulation for a while
2137 * before next round of mode comparisons. */
2138 tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
2139 if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
2140 lq_sta->action_counter > tbl1->max_search) {
2141 IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n");
2142 iwl4965_rs_set_stay_in_table(priv, 1, lq_sta);
2143 }
2144
2145 /* If we're in an HT mode, and all 3 mode switch actions
2146 * have been tried and compared, stay in this best modulation
2147 * mode for a while before next round of mode comparisons. */
2148 if (lq_sta->enable_counter &&
2149 (lq_sta->action_counter >= tbl1->max_search)) {
2150 if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
2151 (lq_sta->tx_agg_tid_en & (1 << tid)) &&
2152 (tid != MAX_TID_COUNT)) {
2153 tid_data =
2154 &priv->stations[lq_sta->lq.sta_id].tid[tid];
2155 if (tid_data->agg.state == IWL_AGG_OFF) {
2156 IWL_DEBUG_RATE(priv,
2157 "try to aggregate tid %d\n",
2158 tid);
2159 iwl4965_rs_tl_turn_on_agg(priv, tid,
2160 lq_sta, sta);
2161 }
2162 }
2163 iwl4965_rs_set_stay_in_table(priv, 0, lq_sta);
2164 }
2165 }
2166
2167out:
2168 tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, tbl,
2169 index, is_green);
2170 i = index;
2171 lq_sta->last_txrate_idx = i;
2172}
2173
2174/**
2175 * iwl4965_rs_initialize_lq - Initialize a station's hardware rate table
2176 *
2177 * The uCode's station table contains a table of fallback rates
2178 * for automatic fallback during transmission.
2179 *
2180 * NOTE: This sets up a default set of values. These will be replaced later
2181 * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
2182 * rc80211_simple.
2183 *
2184 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
2185 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
2186 * which requires station table entry to exist).
2187 */
2188static void iwl4965_rs_initialize_lq(struct iwl_priv *priv,
2189 struct ieee80211_conf *conf,
2190 struct ieee80211_sta *sta,
2191 struct iwl_lq_sta *lq_sta)
2192{
2193 struct iwl_scale_tbl_info *tbl;
2194 int rate_idx;
2195 int i;
2196 u32 rate;
2197 u8 use_green = iwl4965_rs_use_green(sta);
2198 u8 active_tbl = 0;
2199 u8 valid_tx_ant;
2200 struct iwl_station_priv *sta_priv;
2201 struct iwl_rxon_context *ctx;
2202
2203 if (!sta || !lq_sta)
2204 return;
2205
2206 sta_priv = (void *)sta->drv_priv;
2207 ctx = sta_priv->common.ctx;
2208
2209 i = lq_sta->last_txrate_idx;
2210
2211 valid_tx_ant = priv->hw_params.valid_tx_ant;
2212
2213 if (!lq_sta->search_better_tbl)
2214 active_tbl = lq_sta->active_tbl;
2215 else
2216 active_tbl = 1 - lq_sta->active_tbl;
2217
2218 tbl = &(lq_sta->lq_info[active_tbl]);
2219
2220 if ((i < 0) || (i >= IWL_RATE_COUNT))
2221 i = 0;
2222
2223 rate = iwlegacy_rates[i].plcp;
2224 tbl->ant_type = iwl4965_first_antenna(valid_tx_ant);
2225 rate |= tbl->ant_type << RATE_MCS_ANT_POS;
2226
2227 if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
2228 rate |= RATE_MCS_CCK_MSK;
2229
2230 iwl4965_rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx);
2231 if (!iwl4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
2232 iwl4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);
2233
2234 rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green);
2235 tbl->current_rate = rate;
2236 iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
2237 iwl4965_rs_fill_link_cmd(NULL, lq_sta, rate);
2238 priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
2239 iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true);
2240}
2241
/*
 * iwl4965_rs_get_rate - mac80211 rate-control get_rate hook.
 *
 * Translates the driver's last chosen rate (lq_sta->last_txrate_idx /
 * last_rate_n_flags) into the mac80211 rate index and flags reported in
 * info->control.rates[0].  Also latches any user-imposed maximum rate
 * from txrc.  Falls back to rate_control_send_low() for frames that
 * should use the lowest rate.
 */
static void
iwl4965_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
			struct ieee80211_tx_rate_control *txrc)
{

	struct sk_buff *skb = txrc->skb;
	struct ieee80211_supported_band *sband = txrc->sband;
	struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_lq_sta *lq_sta = priv_sta;
	int rate_idx;

	IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n");

	/* Get max rate if user set max rate */
	if (lq_sta) {
		lq_sta->max_rate_idx = txrc->max_rate_idx;
		/* On 5 GHz the mac80211 index space starts at the first
		 * OFDM rate; shift to the driver's global index space */
		if ((sband->band == IEEE80211_BAND_5GHZ) &&
		    (lq_sta->max_rate_idx != -1))
			lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
		if ((lq_sta->max_rate_idx < 0) ||
		    (lq_sta->max_rate_idx >= IWL_RATE_COUNT))
			lq_sta->max_rate_idx = -1;
	}

	/* Treat uninitialized rate scaling data same as non-existing. */
	if (lq_sta && !lq_sta->drv) {
		IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
		priv_sta = NULL;
	}

	/* Send management frames and NO_ACK data using lowest rate. */
	if (rate_control_send_low(sta, priv_sta, txrc))
		return;

	if (!lq_sta)
		return;

	rate_idx = lq_sta->last_txrate_idx;

	if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
		/* Convert driver rate index to the MCS index mac80211
		 * expects for HT rates */
		rate_idx -= IWL_FIRST_OFDM_RATE;
		/* 6M and 9M shared same MCS index */
		rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
		if (iwl4965_rs_extract_rate(lq_sta->last_rate_n_flags) >=
							IWL_RATE_MIMO2_6M_PLCP)
			rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
		info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
		if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
			info->control.rates[0].flags |=
					IEEE80211_TX_RC_SHORT_GI;
		if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
			info->control.rates[0].flags |=
					IEEE80211_TX_RC_DUP_DATA;
		if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
			info->control.rates[0].flags |=
					IEEE80211_TX_RC_40_MHZ_WIDTH;
		if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
			info->control.rates[0].flags |=
					IEEE80211_TX_RC_GREEN_FIELD;
	} else {
		/* Check for invalid rates */
		if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
				((sband->band == IEEE80211_BAND_5GHZ) &&
				 (rate_idx < IWL_FIRST_OFDM_RATE)))
			rate_idx = rate_lowest_index(sband, sta);
		/* On valid 5 GHz rate, adjust index */
		else if (sband->band == IEEE80211_BAND_5GHZ)
			rate_idx -= IWL_FIRST_OFDM_RATE;
		info->control.rates[0].flags = 0;
	}
	info->control.rates[0].idx = rate_idx;

}
2316
2317static void *iwl4965_rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
2318 gfp_t gfp)
2319{
2320 struct iwl_lq_sta *lq_sta;
2321 struct iwl_station_priv *sta_priv =
2322 (struct iwl_station_priv *) sta->drv_priv;
2323 struct iwl_priv *priv;
2324
2325 priv = (struct iwl_priv *)priv_rate;
2326 IWL_DEBUG_RATE(priv, "create station rate scale window\n");
2327
2328 lq_sta = &sta_priv->lq_sta;
2329
2330 return lq_sta;
2331}
2332
2333/*
2334 * Called after adding a new station to initialize rate scaling
2335 */
2336void
2337iwl4965_rs_rate_init(struct iwl_priv *priv,
2338 struct ieee80211_sta *sta,
2339 u8 sta_id)
2340{
2341 int i, j;
2342 struct ieee80211_hw *hw = priv->hw;
2343 struct ieee80211_conf *conf = &priv->hw->conf;
2344 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2345 struct iwl_station_priv *sta_priv;
2346 struct iwl_lq_sta *lq_sta;
2347 struct ieee80211_supported_band *sband;
2348
2349 sta_priv = (struct iwl_station_priv *) sta->drv_priv;
2350 lq_sta = &sta_priv->lq_sta;
2351 sband = hw->wiphy->bands[conf->channel->band];
2352
2353
2354 lq_sta->lq.sta_id = sta_id;
2355
2356 for (j = 0; j < LQ_SIZE; j++)
2357 for (i = 0; i < IWL_RATE_COUNT; i++)
2358 iwl4965_rs_rate_scale_clear_window(
2359 &lq_sta->lq_info[j].win[i]);
2360
2361 lq_sta->flush_timer = 0;
2362 lq_sta->supp_rates = sta->supp_rates[sband->band];
2363 for (j = 0; j < LQ_SIZE; j++)
2364 for (i = 0; i < IWL_RATE_COUNT; i++)
2365 iwl4965_rs_rate_scale_clear_window(
2366 &lq_sta->lq_info[j].win[i]);
2367
2368 IWL_DEBUG_RATE(priv, "LQ:"
2369 "*** rate scale station global init for station %d ***\n",
2370 sta_id);
2371 /* TODO: what is a good starting rate for STA? About middle? Maybe not
2372 * the lowest or the highest rate.. Could consider using RSSI from
2373 * previous packets? Need to have IEEE 802.1X auth succeed immediately
2374 * after assoc.. */
2375
2376 lq_sta->is_dup = 0;
2377 lq_sta->max_rate_idx = -1;
2378 lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
2379 lq_sta->is_green = iwl4965_rs_use_green(sta);
2380 lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
2381 lq_sta->band = priv->band;
2382 /*
2383 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
2384 * supp_rates[] does not; shift to convert format, force 9 MBits off.
2385 */
2386 lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
2387 lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
2388 lq_sta->active_siso_rate &= ~((u16)0x2);
2389 lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
2390
2391 /* Same here */
2392 lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
2393 lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
2394 lq_sta->active_mimo2_rate &= ~((u16)0x2);
2395 lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
2396
2397 /* These values will be overridden later */
2398 lq_sta->lq.general_params.single_stream_ant_msk =
2399 iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
2400 lq_sta->lq.general_params.dual_stream_ant_msk =
2401 priv->hw_params.valid_tx_ant &
2402 ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
2403 if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
2404 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
2405 } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
2406 lq_sta->lq.general_params.dual_stream_ant_msk =
2407 priv->hw_params.valid_tx_ant;
2408 }
2409
2410 /* as default allow aggregation for all tids */
2411 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
2412 lq_sta->drv = priv;
2413
2414 /* Set last_txrate_idx to lowest rate */
2415 lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
2416 if (sband->band == IEEE80211_BAND_5GHZ)
2417 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
2418 lq_sta->is_agg = 0;
2419
2420#ifdef CONFIG_MAC80211_DEBUGFS
2421 lq_sta->dbg_fixed_rate = 0;
2422#endif
2423
2424 iwl4965_rs_initialize_lq(priv, conf, sta, lq_sta);
2425}
2426
/*
 * iwl4965_rs_fill_link_cmd - build the uCode link-quality (fallback) table.
 *
 * Fills all LINK_QUAL_MAX_RETRY_NUM entries of lq_sta->lq.rs_table,
 * starting from @new_rate and stepping down through progressively lower
 * rates.  Each rate is repeated IWL_NUMBER_TRY (legacy) or
 * IWL_HT_NUMBER_TRY (HT) times, with antenna toggling for legacy rates.
 * Also fills the aggregation parameters at the end.
 *
 * @priv may be NULL (initial setup path); in that case valid_tx_ant stays
 * 0 and antenna toggling is skipped.
 */
static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
			     struct iwl_lq_sta *lq_sta, u32 new_rate)
{
	struct iwl_scale_tbl_info tbl_type;
	int index = 0;
	int rate_idx;
	int repeat_rate = 0;
	u8 ant_toggle_cnt = 0;
	u8 use_ht_possible = 1;
	u8 valid_tx_ant = 0;
	struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;

	/* Override starting rate (index 0) if needed for debug purposes */
	iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);

	/* Interpret new_rate (rate_n_flags) */
	iwl4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
					  &tbl_type, &rate_idx);

	/* How many times should we repeat the initial rate? */
	if (is_legacy(tbl_type.lq_type)) {
		ant_toggle_cnt = 1;
		repeat_rate = IWL_NUMBER_TRY;
	} else {
		repeat_rate = IWL_HT_NUMBER_TRY;
	}

	lq_cmd->general_params.mimo_delimiter =
			is_mimo(tbl_type.lq_type) ? 1 : 0;

	/* Fill 1st table entry (index 0) */
	lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);

	if (iwl4965_num_of_ant(tbl_type.ant_type) == 1) {
		lq_cmd->general_params.single_stream_ant_msk =
						tbl_type.ant_type;
	} else if (iwl4965_num_of_ant(tbl_type.ant_type) == 2) {
		lq_cmd->general_params.dual_stream_ant_msk =
						tbl_type.ant_type;
	} /* otherwise we don't modify the existing value */

	index++;
	repeat_rate--;
	if (priv)
		valid_tx_ant = priv->hw_params.valid_tx_ant;

	/* Fill rest of rate table */
	while (index < LINK_QUAL_MAX_RETRY_NUM) {
		/* Repeat initial/next rate.
		 * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
		 * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
		while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
			if (is_legacy(tbl_type.lq_type)) {
				if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
					ant_toggle_cnt++;
				else if (priv &&
					 iwl4965_rs_toggle_antenna(valid_tx_ant,
							&new_rate, &tbl_type))
					ant_toggle_cnt = 1;
			}

			/* Override next rate if needed for debug purposes */
			iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);

			/* Fill next table entry */
			lq_cmd->rs_table[index].rate_n_flags =
					cpu_to_le32(new_rate);
			repeat_rate--;
			index++;
		}

		iwl4965_rs_get_tbl_info_from_mcs(new_rate,
						lq_sta->band, &tbl_type,
						&rate_idx);

		/* Indicate to uCode which entries might be MIMO.
		 * If initial rate was MIMO, this will finally end up
		 * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
		if (is_mimo(tbl_type.lq_type))
			lq_cmd->general_params.mimo_delimiter = index;

		/* Get next rate */
		new_rate = iwl4965_rs_get_lower_rate(lq_sta,
					&tbl_type, rate_idx,
					use_ht_possible);

		/* How many times should we repeat the next rate? */
		if (is_legacy(tbl_type.lq_type)) {
			if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
				ant_toggle_cnt++;
			else if (priv &&
				 iwl4965_rs_toggle_antenna(valid_tx_ant,
						   &new_rate, &tbl_type))
				ant_toggle_cnt = 1;

			repeat_rate = IWL_NUMBER_TRY;
		} else {
			repeat_rate = IWL_HT_NUMBER_TRY;
		}

		/* Don't allow HT rates after next pass.
		 * iwl4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */
		use_ht_possible = 0;

		/* Override next rate if needed for debug purposes */
		iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);

		/* Fill next table entry */
		lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);

		index++;
		repeat_rate--;
	}

	lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;

	lq_cmd->agg_params.agg_time_limit =
		cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
}
2547
2548static void
2549*iwl4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
2550{
2551 return hw->priv;
2552}
/* rate scale requires free function to be implemented */
static void iwl4965_rs_free(void *priv_rate)
{
	/* Nothing to release: iwl4965_rs_alloc() returns borrowed storage. */
}
2558
2559static void iwl4965_rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
2560 void *priv_sta)
2561{
2562 struct iwl_priv *priv __maybe_unused = priv_r;
2563
2564 IWL_DEBUG_RATE(priv, "enter\n");
2565 IWL_DEBUG_RATE(priv, "leave\n");
2566}
2567
2568
#ifdef CONFIG_MAC80211_DEBUGFS
/* Generic debugfs open: expose the inode's private pointer (the lq_sta
 * attached at file creation) to the read/write handlers. */
static int iwl4965_open_file_generic(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
2575static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
2576 u32 *rate_n_flags, int index)
2577{
2578 struct iwl_priv *priv;
2579 u8 valid_tx_ant;
2580 u8 ant_sel_tx;
2581
2582 priv = lq_sta->drv;
2583 valid_tx_ant = priv->hw_params.valid_tx_ant;
2584 if (lq_sta->dbg_fixed_rate) {
2585 ant_sel_tx =
2586 ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
2587 >> RATE_MCS_ANT_POS);
2588 if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
2589 *rate_n_flags = lq_sta->dbg_fixed_rate;
2590 IWL_DEBUG_RATE(priv, "Fixed rate ON\n");
2591 } else {
2592 lq_sta->dbg_fixed_rate = 0;
2593 IWL_ERR(priv,
2594 "Invalid antenna selection 0x%X, Valid is 0x%X\n",
2595 ant_sel_tx, valid_tx_ant);
2596 IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
2597 }
2598 } else {
2599 IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
2600 }
2601}
2602
/*
 * debugfs write handler for the per-station rate_scale_table file.
 *
 * Parses a hex rate_n_flags value from userspace into dbg_fixed_rate
 * (0, or an unparsable value, disables the override), widens the active
 * rate masks so any fixed rate is reachable, and if a fixed rate was set,
 * rebuilds and sends the link-quality table immediately.
 */
static ssize_t iwl4965_rs_sta_dbgfs_scale_table_write(struct file *file,
			const char __user *user_buf, size_t count, loff_t *ppos)
{
	struct iwl_lq_sta *lq_sta = file->private_data;
	struct iwl_priv *priv;
	char buf[64];
	size_t buf_size;
	u32 parsed_rate;
	struct iwl_station_priv *sta_priv =
		container_of(lq_sta, struct iwl_station_priv, lq_sta);
	struct iwl_rxon_context *ctx = sta_priv->common.ctx;

	priv = lq_sta->drv;
	memset(buf, 0, sizeof(buf));
	/* Leave room for the NUL terminator added by the memset above */
	buf_size = min(count, sizeof(buf) -  1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	if (sscanf(buf, "%x", &parsed_rate) == 1)
		lq_sta->dbg_fixed_rate = parsed_rate;
	else
		lq_sta->dbg_fixed_rate = 0;

	lq_sta->active_legacy_rate = 0x0FFF;	/* 1 - 54 MBits, includes CCK */
	lq_sta->active_siso_rate   = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
	lq_sta->active_mimo2_rate  = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */

	IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
		lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);

	if (lq_sta->dbg_fixed_rate) {
		/* NULL priv: fill_link_cmd skips antenna toggling here */
		iwl4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
		iwl_legacy_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
				false);
	}

	return count;
}
2641
2642static ssize_t iwl4965_rs_sta_dbgfs_scale_table_read(struct file *file,
2643 char __user *user_buf, size_t count, loff_t *ppos)
2644{
2645 char *buff;
2646 int desc = 0;
2647 int i = 0;
2648 int index = 0;
2649 ssize_t ret;
2650
2651 struct iwl_lq_sta *lq_sta = file->private_data;
2652 struct iwl_priv *priv;
2653 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
2654
2655 priv = lq_sta->drv;
2656 buff = kmalloc(1024, GFP_KERNEL);
2657 if (!buff)
2658 return -ENOMEM;
2659
2660 desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
2661 desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n",
2662 lq_sta->total_failed, lq_sta->total_success,
2663 lq_sta->active_legacy_rate);
2664 desc += sprintf(buff+desc, "fixed rate 0x%X\n",
2665 lq_sta->dbg_fixed_rate);
2666 desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
2667 (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
2668 (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
2669 (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
2670 desc += sprintf(buff+desc, "lq type %s\n",
2671 (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
2672 if (is_Ht(tbl->lq_type)) {
2673 desc += sprintf(buff+desc, " %s",
2674 (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
2675 desc += sprintf(buff+desc, " %s",
2676 (tbl->is_ht40) ? "40MHz" : "20MHz");
2677 desc += sprintf(buff+desc, " %s %s %s\n",
2678 (tbl->is_SGI) ? "SGI" : "",
2679 (lq_sta->is_green) ? "GF enabled" : "",
2680 (lq_sta->is_agg) ? "AGG on" : "");
2681 }
2682 desc += sprintf(buff+desc, "last tx rate=0x%X\n",
2683 lq_sta->last_rate_n_flags);
2684 desc += sprintf(buff+desc, "general:"
2685 "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
2686 lq_sta->lq.general_params.flags,
2687 lq_sta->lq.general_params.mimo_delimiter,
2688 lq_sta->lq.general_params.single_stream_ant_msk,
2689 lq_sta->lq.general_params.dual_stream_ant_msk);
2690
2691 desc += sprintf(buff+desc, "agg:"
2692 "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
2693 le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
2694 lq_sta->lq.agg_params.agg_dis_start_th,
2695 lq_sta->lq.agg_params.agg_frame_cnt_limit);
2696
2697 desc += sprintf(buff+desc,
2698 "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
2699 lq_sta->lq.general_params.start_rate_index[0],
2700 lq_sta->lq.general_params.start_rate_index[1],
2701 lq_sta->lq.general_params.start_rate_index[2],
2702 lq_sta->lq.general_params.start_rate_index[3]);
2703
2704 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
2705 index = iwl4965_hwrate_to_plcp_idx(
2706 le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags));
2707 if (is_legacy(tbl->lq_type)) {
2708 desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
2709 i,
2710 le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
2711 iwl_rate_mcs[index].mbps);
2712 } else {
2713 desc += sprintf(buff+desc,
2714 " rate[%d] 0x%X %smbps (%s)\n",
2715 i,
2716 le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
2717 iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs);
2718 }
2719 }
2720
2721 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
2722 kfree(buff);
2723 return ret;
2724}
2725
/* debugfs "rate_scale_table": readable state dump, writable fixed-rate knob */
static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
	.write = iwl4965_rs_sta_dbgfs_scale_table_write,
	.read = iwl4965_rs_sta_dbgfs_scale_table_read,
	.open = iwl4965_open_file_generic,
	.llseek = default_llseek,
};
2732static ssize_t iwl4965_rs_sta_dbgfs_stats_table_read(struct file *file,
2733 char __user *user_buf, size_t count, loff_t *ppos)
2734{
2735 char *buff;
2736 int desc = 0;
2737 int i, j;
2738 ssize_t ret;
2739
2740 struct iwl_lq_sta *lq_sta = file->private_data;
2741
2742 buff = kmalloc(1024, GFP_KERNEL);
2743 if (!buff)
2744 return -ENOMEM;
2745
2746 for (i = 0; i < LQ_SIZE; i++) {
2747 desc += sprintf(buff+desc,
2748 "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
2749 "rate=0x%X\n",
2750 lq_sta->active_tbl == i ? "*" : "x",
2751 lq_sta->lq_info[i].lq_type,
2752 lq_sta->lq_info[i].is_SGI,
2753 lq_sta->lq_info[i].is_ht40,
2754 lq_sta->lq_info[i].is_dup,
2755 lq_sta->is_green,
2756 lq_sta->lq_info[i].current_rate);
2757 for (j = 0; j < IWL_RATE_COUNT; j++) {
2758 desc += sprintf(buff+desc,
2759 "counter=%d success=%d %%=%d\n",
2760 lq_sta->lq_info[i].win[j].counter,
2761 lq_sta->lq_info[i].win[j].success_counter,
2762 lq_sta->lq_info[i].win[j].success_ratio);
2763 }
2764 }
2765 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
2766 kfree(buff);
2767 return ret;
2768}
2769
/* debugfs "rate_stats_table": read-only per-rate statistics dump */
static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
	.read = iwl4965_rs_sta_dbgfs_stats_table_read,
	.open = iwl4965_open_file_generic,
	.llseek = default_llseek,
};
2775
2776static ssize_t iwl4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file,
2777 char __user *user_buf, size_t count, loff_t *ppos)
2778{
2779 char buff[120];
2780 int desc = 0;
2781 ssize_t ret;
2782
2783 struct iwl_lq_sta *lq_sta = file->private_data;
2784 struct iwl_priv *priv;
2785 struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
2786
2787 priv = lq_sta->drv;
2788
2789 if (is_Ht(tbl->lq_type))
2790 desc += sprintf(buff+desc,
2791 "Bit Rate= %d Mb/s\n",
2792 tbl->expected_tpt[lq_sta->last_txrate_idx]);
2793 else
2794 desc += sprintf(buff+desc,
2795 "Bit Rate= %d Mb/s\n",
2796 iwlegacy_rates[lq_sta->last_txrate_idx].ieee >> 1);
2797
2798 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
2799 return ret;
2800}
2801
/* debugfs "rate_scale_data": read-only current bit-rate report */
static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
	.read = iwl4965_rs_sta_dbgfs_rate_scale_data_read,
	.open = iwl4965_open_file_generic,
	.llseek = default_llseek,
};
2807
/*
 * Create the per-station rate-scaling debugfs entries under @dir.
 * Called by mac80211 through rate_control_ops.add_sta_debugfs;
 * @priv_sta is the station's struct iwl_lq_sta.
 */
static void iwl4965_rs_add_debugfs(void *priv, void *priv_sta,
					struct dentry *dir)
{
	struct iwl_lq_sta *lq_sta = priv_sta;
	/* read/write: exposes the fixed-rate override */
	lq_sta->rs_sta_dbgfs_scale_table_file =
		debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
				lq_sta, &rs_sta_dbgfs_scale_table_ops);
	lq_sta->rs_sta_dbgfs_stats_table_file =
		debugfs_create_file("rate_stats_table", S_IRUSR, dir,
				lq_sta, &rs_sta_dbgfs_stats_table_ops);
	lq_sta->rs_sta_dbgfs_rate_scale_data_file =
		debugfs_create_file("rate_scale_data", S_IRUSR, dir,
				lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
	/* plain u8 toggle: enables/disables TX aggregation per TID */
	lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
		debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
		&lq_sta->tx_agg_tid_en);

}
2826
/*
 * Remove the per-station debugfs entries created by
 * iwl4965_rs_add_debugfs().  Called via
 * rate_control_ops.remove_sta_debugfs.
 */
static void iwl4965_rs_remove_debugfs(void *priv, void *priv_sta)
{
	struct iwl_lq_sta *lq_sta = priv_sta;
	debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
	debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
	debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
	debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
}
2835#endif
2836
/*
 * Initialization of rate scaling information is done by driver after
 * the station is added. Since mac80211 calls this function before a
 * station is added we ignore it.
 */
static void
iwl4965_rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
			 struct ieee80211_sta *sta, void *priv_sta)
{
	/* intentionally empty - see comment above */
}
/* Rate-control callbacks registered with mac80211 for 4965 devices. */
static struct rate_control_ops rs_4965_ops = {
	.module = NULL,
	.name = IWL4965_RS_NAME,
	.tx_status = iwl4965_rs_tx_status,
	.get_rate = iwl4965_rs_get_rate,
	.rate_init = iwl4965_rs_rate_init_stub,	/* real init done on sta add */
	.alloc = iwl4965_rs_alloc,
	.free = iwl4965_rs_free,
	.alloc_sta = iwl4965_rs_alloc_sta,
	.free_sta = iwl4965_rs_free_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
	.add_sta_debugfs = iwl4965_rs_add_debugfs,
	.remove_sta_debugfs = iwl4965_rs_remove_debugfs,
#endif
};
2862
/* Register the 4965 rate-control algorithm with mac80211; 0 on success. */
int iwl4965_rate_control_register(void)
{
	return ieee80211_rate_control_register(&rs_4965_ops);
}
2867
/* Unregister the 4965 rate-control algorithm from mac80211. */
void iwl4965_rate_control_unregister(void)
{
	ieee80211_rate_control_unregister(&rs_4965_ops);
}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rx.c b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
deleted file mode 100644
index 2b144bbfc3c5..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
+++ /dev/null
@@ -1,215 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-4965-calib.h"
38#include "iwl-sta.h"
39#include "iwl-io.h"
40#include "iwl-helpers.h"
41#include "iwl-4965-hw.h"
42#include "iwl-4965.h"
43
44void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
45 struct iwl_rx_mem_buffer *rxb)
46
47{
48 struct iwl_rx_packet *pkt = rxb_addr(rxb);
49 struct iwl_missed_beacon_notif *missed_beacon;
50
51 missed_beacon = &pkt->u.missed_beacon;
52 if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
53 priv->missed_beacon_threshold) {
54 IWL_DEBUG_CALIB(priv,
55 "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
56 le32_to_cpu(missed_beacon->consecutive_missed_beacons),
57 le32_to_cpu(missed_beacon->total_missed_becons),
58 le32_to_cpu(missed_beacon->num_recvd_beacons),
59 le32_to_cpu(missed_beacon->num_expected_beacons));
60 if (!test_bit(STATUS_SCANNING, &priv->status))
61 iwl4965_init_sensitivity(priv);
62 }
63}
64
65/* Calculate noise level, based on measurements during network silence just
66 * before arriving beacon. This measurement can be done only if we know
67 * exactly when to expect beacons, therefore only when we're associated. */
68static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
69{
70 struct statistics_rx_non_phy *rx_info;
71 int num_active_rx = 0;
72 int total_silence = 0;
73 int bcn_silence_a, bcn_silence_b, bcn_silence_c;
74 int last_rx_noise;
75
76 rx_info = &(priv->_4965.statistics.rx.general);
77 bcn_silence_a =
78 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
79 bcn_silence_b =
80 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
81 bcn_silence_c =
82 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
83
84 if (bcn_silence_a) {
85 total_silence += bcn_silence_a;
86 num_active_rx++;
87 }
88 if (bcn_silence_b) {
89 total_silence += bcn_silence_b;
90 num_active_rx++;
91 }
92 if (bcn_silence_c) {
93 total_silence += bcn_silence_c;
94 num_active_rx++;
95 }
96
97 /* Average among active antennas */
98 if (num_active_rx)
99 last_rx_noise = (total_silence / num_active_rx) - 107;
100 else
101 last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
102
103 IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
104 bcn_silence_a, bcn_silence_b, bcn_silence_c,
105 last_rx_noise);
106}
107
108#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
/*
 * Accumulate uCode statistics across notifications for debugfs.
 *
 * Walks the statistics block as an array of __le32 counters and, for each
 * counter that increased since the previous notification, adds the delta
 * to the accumulated totals and tracks the per-counter maximum delta.
 * Based on the assumption that all statistics counters are DWORDs.
 * FIXME: This function is for debugging, it does not deal with
 * the case of counters rolling over.
 */
static void iwl4965_accumulative_statistics(struct iwl_priv *priv,
					__le32 *stats)
{
	int i, size;
	__le32 *prev_stats;
	u32 *accum_stats;
	u32 *delta, *max_delta;
	struct statistics_general_common *general, *accum_general;
	struct statistics_tx *tx, *accum_tx;

	/* treat the whole notification struct as a flat __le32 array */
	prev_stats = (__le32 *)&priv->_4965.statistics;
	accum_stats = (u32 *)&priv->_4965.accum_statistics;
	size = sizeof(struct iwl_notif_statistics);
	general = &priv->_4965.statistics.general.common;
	accum_general = &priv->_4965.accum_statistics.general.common;
	tx = &priv->_4965.statistics.tx;
	accum_tx = &priv->_4965.accum_statistics.tx;
	delta = (u32 *)&priv->_4965.delta_statistics;
	max_delta = (u32 *)&priv->_4965.max_delta;

	/* start at sizeof(__le32): skip the leading flag word */
	for (i = sizeof(__le32); i < size;
	     i += sizeof(__le32), stats++, prev_stats++, delta++,
	     max_delta++, accum_stats++) {
		/* only monotonic increases count; drops are ignored */
		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
			*delta = (le32_to_cpu(*stats) -
				le32_to_cpu(*prev_stats));
			*accum_stats += *delta;
			if (*delta > *max_delta)
				*max_delta = *delta;
		}
	}

	/* reset accumulative statistics for "no-counter" type statistics */
	accum_general->temperature = general->temperature;
	accum_general->ttl_timestamp = general->ttl_timestamp;
}
150#endif
151
/* seconds between forced statistics refreshes (thermal safety net) */
#define REG_RECALIB_PERIOD (60)

/*
 * Handle a statistics notification from the uCode.
 *
 * Caches the new statistics block, accumulates debugfs counters, re-arms
 * the periodic statistics timer and, when not scanning, triggers noise
 * calculation and run-time calibration.  A temperature or HT40-mode
 * change additionally triggers the temperature callback.
 */
void iwl4965_rx_statistics(struct iwl_priv *priv,
			  struct iwl_rx_mem_buffer *rxb)
{
	int change;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	IWL_DEBUG_RX(priv,
		"Statistics notification received (%d vs %d).\n",
		(int)sizeof(struct iwl_notif_statistics),
		le32_to_cpu(pkt->len_n_flags) &
		FH_RSCSR_FRAME_SIZE_MSK);

	/* detect temperature or HT40-mode change before overwriting cache */
	change = ((priv->_4965.statistics.general.common.temperature !=
		   pkt->u.stats.general.common.temperature) ||
		   ((priv->_4965.statistics.flag &
		   STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
		   (pkt->u.stats.flag &
		   STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
	iwl4965_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
#endif

	/* TODO: reading some of statistics is unneeded */
	memcpy(&priv->_4965.statistics, &pkt->u.stats,
		sizeof(priv->_4965.statistics));

	set_bit(STATUS_STATISTICS, &priv->status);

	/* Reschedule the statistics timer to occur in
	 * REG_RECALIB_PERIOD seconds to ensure we get a
	 * thermal update even if the uCode doesn't give
	 * us one */
	mod_timer(&priv->statistics_periodic, jiffies +
		  msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));

	if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
	    (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
		iwl4965_rx_calc_noise(priv);
		queue_work(priv->workqueue, &priv->run_time_calib_work);
	}
	if (priv->cfg->ops->lib->temp_ops.temperature && change)
		priv->cfg->ops->lib->temp_ops.temperature(priv);
}
197
/*
 * Handle the reply to a STATISTICS_CMD request.
 *
 * If the uCode indicates its counters were cleared, zero the debugfs
 * accumulators to match, then process the payload like a normal
 * statistics notification.
 */
void iwl4965_reply_statistics(struct iwl_priv *priv,
			      struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
		memset(&priv->_4965.accum_statistics, 0,
			sizeof(struct iwl_notif_statistics));
		memset(&priv->_4965.delta_statistics, 0,
			sizeof(struct iwl_notif_statistics));
		memset(&priv->_4965.max_delta, 0,
			sizeof(struct iwl_notif_statistics));
#endif
		IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
	}
	iwl4965_rx_statistics(priv, rxb);
}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
deleted file mode 100644
index a262c23553d2..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
+++ /dev/null
@@ -1,721 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <net/mac80211.h>
31
32#include "iwl-dev.h"
33#include "iwl-core.h"
34#include "iwl-sta.h"
35#include "iwl-4965.h"
36
/*
 * Allocate and fill a default link quality command for @sta_id.
 *
 * Returns a kzalloc'd command (caller owns it; it is typically stored in
 * priv->stations[].lq) or NULL on allocation failure.
 */
static struct iwl_link_quality_cmd *
iwl4965_sta_alloc_lq(struct iwl_priv *priv, u8 sta_id)
{
	int i, r;
	struct iwl_link_quality_cmd *link_cmd;
	u32 rate_flags = 0;
	__le32 rate_n_flags;

	link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
	if (!link_cmd) {
		IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
		return NULL;
	}
	/* Set up the rate scaling to start at selected rate, fall back
	 * all the way down to 1M in IEEE order, and then spin on 1M */
	if (priv->band == IEEE80211_BAND_5GHZ)
		r = IWL_RATE_6M_INDEX;	/* no CCK on 5 GHz */
	else
		r = IWL_RATE_1M_INDEX;

	if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
		rate_flags |= RATE_MCS_CCK_MSK;

	/* transmit on the first valid antenna */
	rate_flags |= iwl4965_first_antenna(priv->hw_params.valid_tx_ant) <<
				RATE_MCS_ANT_POS;
	rate_n_flags = iwl4965_hw_set_rate_n_flags(iwlegacy_rates[r].plcp,
						   rate_flags);
	/* every retry slot starts at the same initial rate */
	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
		link_cmd->rs_table[i].rate_n_flags = rate_n_flags;

	link_cmd->general_params.single_stream_ant_msk =
				iwl4965_first_antenna(priv->hw_params.valid_tx_ant);

	/* dual-stream mask = remaining antennas; fall back to A+B if none,
	 * or all valid antennas when exactly two exist */
	link_cmd->general_params.dual_stream_ant_msk =
		priv->hw_params.valid_tx_ant &
		~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
	if (!link_cmd->general_params.dual_stream_ant_msk) {
		link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
	} else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
		link_cmd->general_params.dual_stream_ant_msk =
			priv->hw_params.valid_tx_ant;
	}

	link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
	link_cmd->agg_params.agg_time_limit =
		cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);

	link_cmd->sta_id = sta_id;

	return link_cmd;
}
88
/*
 * iwl4965_add_bssid_station - Add the special IBSS BSSID station
 *
 * Adds the station, marks it IWL_STA_LOCAL and installs a default link
 * quality command for it.  On success *@sta_id_r (if non-NULL) receives
 * the station id; it is set to IWL_INVALID_STATION on entry.
 *
 * Function sleeps.
 */
int
iwl4965_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
			     const u8 *addr, u8 *sta_id_r)
{
	int ret;
	u8 sta_id;
	struct iwl_link_quality_cmd *link_cmd;
	unsigned long flags;

	if (sta_id_r)
		*sta_id_r = IWL_INVALID_STATION;

	ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
	if (ret) {
		IWL_ERR(priv, "Unable to add station %pM\n", addr);
		return ret;
	}

	if (sta_id_r)
		*sta_id_r = sta_id;

	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].used |= IWL_STA_LOCAL;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	/* Set up default rate scaling table in device's station table */
	link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
	if (!link_cmd) {
		IWL_ERR(priv,
			"Unable to initialize rate scaling for station %pM.\n",
			addr);
		/* NOTE(review): station stays added without an LQ table on
		 * this path - presumably cleaned up elsewhere; confirm */
		return -ENOMEM;
	}

	ret = iwl_legacy_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
	if (ret)
		IWL_ERR(priv, "Link quality command failed (%d)\n", ret);

	/* ownership of link_cmd transfers to the station table entry */
	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].lq = link_cmd;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return 0;
}
138
/*
 * Build and (synchronously) send the static WEP key table to the uCode.
 *
 * All WEP_KEYS_MAX slots are always described; empty slots get
 * WEP_INVALID_OFFSET.  The command is only sent when at least one key is
 * configured, unless @send_if_empty forces it.  Returns 0 or the command
 * error.  Sleeps.
 */
static int iwl4965_static_wepkey_cmd(struct iwl_priv *priv,
				      struct iwl_rxon_context *ctx,
				      bool send_if_empty)
{
	int i, not_empty = 0;
	u8 buff[sizeof(struct iwl_wep_cmd) +
		sizeof(struct iwl_wep_key) * WEP_KEYS_MAX];
	struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
	size_t cmd_size  = sizeof(struct iwl_wep_cmd);
	struct iwl_host_cmd cmd = {
		.id = ctx->wep_key_cmd,
		.data = wep_cmd,
		.flags = CMD_SYNC,
	};

	might_sleep();

	memset(wep_cmd, 0, cmd_size +
			(sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));

	for (i = 0; i < WEP_KEYS_MAX ; i++) {
		wep_cmd->key[i].key_index = i;
		if (ctx->wep_keys[i].key_size) {
			wep_cmd->key[i].key_offset = i;
			not_empty = 1;
		} else {
			wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
		}

		wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
		/* key material starts at offset 3 in the firmware key
		 * array - presumably a uCode layout requirement; confirm */
		memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
				ctx->wep_keys[i].key_size);
	}

	wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
	wep_cmd->num_keys = WEP_KEYS_MAX;

	cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX;

	cmd.len = cmd_size;

	if (not_empty || send_if_empty)
		return iwl_legacy_send_cmd(priv, &cmd);
	else
		return 0;
}
185
/*
 * Re-send the configured static WEP keys to the uCode (e.g. after a
 * firmware restart).  No-op command-wise if no keys are set.
 * Caller must hold priv->mutex.
 */
int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
				 struct iwl_rxon_context *ctx)
{
	lockdep_assert_held(&priv->mutex);

	return iwl4965_static_wepkey_cmd(priv, ctx, false);
}
193
194int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
195 struct iwl_rxon_context *ctx,
196 struct ieee80211_key_conf *keyconf)
197{
198 int ret;
199
200 lockdep_assert_held(&priv->mutex);
201
202 IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
203 keyconf->keyidx);
204
205 memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
206 if (iwl_legacy_is_rfkill(priv)) {
207 IWL_DEBUG_WEP(priv,
208 "Not sending REPLY_WEPKEY command due to RFKILL.\n");
209 /* but keys in device are clear anyway so return success */
210 return 0;
211 }
212 ret = iwl4965_static_wepkey_cmd(priv, ctx, 1);
213 IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
214 keyconf->keyidx, ret);
215
216 return ret;
217}
218
/*
 * iwl4965_set_default_wep_key - install a static (default) WEP key
 *
 * Validates the key length (40 or 104 bit), stores the key in the
 * context and pushes the full static key table to the uCode.
 * Returns 0, -EINVAL on bad length, or the command error.
 * Caller must hold priv->mutex.
 */
int iwl4965_set_default_wep_key(struct iwl_priv *priv,
			 struct iwl_rxon_context *ctx,
			 struct ieee80211_key_conf *keyconf)
{
	int ret;

	lockdep_assert_held(&priv->mutex);

	if (keyconf->keylen != WEP_KEY_LEN_128 &&
	    keyconf->keylen != WEP_KEY_LEN_64) {
		IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen);
		return -EINVAL;
	}

	/* hardware generates the IV for default WEP keys */
	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->hw_key_idx = HW_KEY_DEFAULT;
	priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;

	ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
	memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
							keyconf->keylen);

	ret = iwl4965_static_wepkey_cmd(priv, ctx, false);
	IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
		keyconf->keylen, keyconf->keyidx, ret);

	return ret;
}
247
/*
 * Install a pairwise/group WEP key via ADD_STA (dynamic key mapping).
 *
 * Fills the station table key entry under sta_lock, allocating a uCode
 * key offset only when the slot holds no encryption key yet, then sends
 * the ADD_STA command synchronously.  Caller must hold priv->mutex.
 */
static int iwl4965_set_wep_dynamic_key_info(struct iwl_priv *priv,
					struct iwl_rxon_context *ctx,
					struct ieee80211_key_conf *keyconf,
					u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	struct iwl_legacy_addsta_cmd sta_cmd;

	lockdep_assert_held(&priv->mutex);

	/* hardware generates the IV */
	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;

	key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (keyconf->keylen == WEP_KEY_LEN_128)
		key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;

	if (sta_id == ctx->bcast_sta_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	spin_lock_irqsave(&priv->sta_lock, flags);

	priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
	priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;

	memcpy(priv->stations[sta_id].keyinfo.key,
				keyconf->key, keyconf->keylen);

	/* key material at offset 3 - presumably uCode layout; confirm */
	memcpy(&priv->stations[sta_id].sta.key.key[3],
				keyconf->key, keyconf->keylen);

	if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
			== STA_KEY_FLG_NO_ENC)
		priv->stations[sta_id].sta.key.key_offset =
				 iwl_legacy_get_free_ucode_key_index(priv);
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
		"no space for a new key");

	priv->stations[sta_id].sta.key.key_flags = key_flags;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* copy the command out so it can be sent outside the spinlock */
	memcpy(&sta_cmd, &priv->stations[sta_id].sta,
			sizeof(struct iwl_legacy_addsta_cmd));
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
303
/*
 * Install a pairwise/group CCMP (AES) key via ADD_STA.
 *
 * Same flow as the WEP variant: fill the station key entry under
 * sta_lock, allocate a uCode key offset only for a fresh slot, then send
 * ADD_STA synchronously.  Caller must hold priv->mutex.
 */
static int iwl4965_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
					 struct iwl_rxon_context *ctx,
					 struct ieee80211_key_conf *keyconf,
					 u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	struct iwl_legacy_addsta_cmd sta_cmd;

	lockdep_assert_held(&priv->mutex);

	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (sta_id == ctx->bcast_sta_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	/* mac80211 generates the IV for CCMP */
	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;

	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;

	memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
	       keyconf->keylen);

	memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
	       keyconf->keylen);

	if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
			== STA_KEY_FLG_NO_ENC)
		priv->stations[sta_id].sta.key.key_offset =
				 iwl_legacy_get_free_ucode_key_index(priv);
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
		"no space for a new key");

	priv->stations[sta_id].sta.key.key_flags = key_flags;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* copy the command out so it can be sent outside the spinlock */
	memcpy(&sta_cmd, &priv->stations[sta_id].sta,
			sizeof(struct iwl_legacy_addsta_cmd));
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
354
355static int iwl4965_set_tkip_dynamic_key_info(struct iwl_priv *priv,
356 struct iwl_rxon_context *ctx,
357 struct ieee80211_key_conf *keyconf,
358 u8 sta_id)
359{
360 unsigned long flags;
361 int ret = 0;
362 __le16 key_flags = 0;
363
364 key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
365 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
366 key_flags &= ~STA_KEY_FLG_INVALID;
367
368 if (sta_id == ctx->bcast_sta_id)
369 key_flags |= STA_KEY_MULTICAST_MSK;
370
371 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
372 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
373
374 spin_lock_irqsave(&priv->sta_lock, flags);
375
376 priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
377 priv->stations[sta_id].keyinfo.keylen = 16;
378
379 if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
380 == STA_KEY_FLG_NO_ENC)
381 priv->stations[sta_id].sta.key.key_offset =
382 iwl_legacy_get_free_ucode_key_index(priv);
383 /* else, we are overriding an existing key => no need to allocated room
384 * in uCode. */
385
386 WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
387 "no space for a new key");
388
389 priv->stations[sta_id].sta.key.key_flags = key_flags;
390
391
392 /* This copy is acutally not needed: we get the key with each TX */
393 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
394
395 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16);
396
397 spin_unlock_irqrestore(&priv->sta_lock, flags);
398
399 return ret;
400}
401
/*
 * Push an updated TKIP phase-1 key (derived for the new IV32) to the
 * uCode via an asynchronous ADD_STA.  A scan is cancelled first; if the
 * cancel fails we temporarily rely on software decryption.
 */
void iwl4965_update_tkip_key(struct iwl_priv *priv,
			 struct iwl_rxon_context *ctx,
			 struct ieee80211_key_conf *keyconf,
			 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
{
	u8 sta_id;
	unsigned long flags;
	int i;

	if (iwl_legacy_scan_cancel(priv)) {
		/* cancel scan failed, just live w/ bad key and rely
		   briefly on SW decryption */
		return;
	}

	sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, sta);
	if (sta_id == IWL_INVALID_STATION)
		return;

	spin_lock_irqsave(&priv->sta_lock, flags);

	priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;

	/* phase-1 key is 5 x 16-bit words */
	for (i = 0; i < 5; i++)
		priv->stations[sta_id].sta.key.tkip_rx_ttak[i] =
			cpu_to_le16(phase1key[i]);

	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	/* async send: safe to issue while holding the spinlock */
	iwl_legacy_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);

	spin_unlock_irqrestore(&priv->sta_lock, flags);

}
437
/*
 * iwl4965_remove_dynamic_key - remove a mapped (pairwise/group) key
 *
 * Clears the station's key entry, releases its uCode key-table offset
 * and sends an ADD_STA invalidating the key.  Returns 0 also in the
 * benign cases where the key was already replaced or never installed.
 * Caller must hold priv->mutex.
 */
int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
			struct iwl_rxon_context *ctx,
			struct ieee80211_key_conf *keyconf,
			u8 sta_id)
{
	unsigned long flags;
	u16 key_flags;
	u8 keyidx;
	struct iwl_legacy_addsta_cmd sta_cmd;

	lockdep_assert_held(&priv->mutex);

	ctx->key_mapping_keys--;

	spin_lock_irqsave(&priv->sta_lock, flags);
	key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
	keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;

	IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (keyconf->keyidx != keyidx) {
		/* We need to remove a key with index different that the one
		 * in the uCode. This means that the key we need to remove has
		 * been replaced by another one with different index.
		 * Don't do anything and return ok
		 */
		spin_unlock_irqrestore(&priv->sta_lock, flags);
		return 0;
	}

	if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
		IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
			    keyconf->keyidx, key_flags);
		spin_unlock_irqrestore(&priv->sta_lock, flags);
		return 0;
	}

	/* release the slot in the uCode key-offset bitmap */
	if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
		&priv->ucode_key_table))
		IWL_ERR(priv, "index %d not used in uCode key table.\n",
			priv->stations[sta_id].sta.key.key_offset);
	memset(&priv->stations[sta_id].keyinfo, 0,
					sizeof(struct iwl_hw_key));
	memset(&priv->stations[sta_id].sta.key, 0,
					sizeof(struct iwl4965_keyinfo));
	priv->stations[sta_id].sta.key.key_flags =
			STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
	priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	if (iwl_legacy_is_rfkill(priv)) {
		IWL_DEBUG_WEP(priv,
		 "Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
		spin_unlock_irqrestore(&priv->sta_lock, flags);
		return 0;
	}
	/* copy the command out so it can be sent outside the spinlock */
	memcpy(&sta_cmd, &priv->stations[sta_id].sta,
			sizeof(struct iwl_legacy_addsta_cmd));
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
502
/*
 * iwl4965_set_dynamic_key - install a mapped key, dispatching on cipher
 *
 * Routes to the CCMP/TKIP/WEP helper matching @keyconf->cipher and
 * bumps the context's mapped-key count.  Returns the helper's result or
 * -EINVAL for an unsupported cipher.  Caller must hold priv->mutex.
 */
int iwl4965_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
		       struct ieee80211_key_conf *keyconf, u8 sta_id)
{
	int ret;

	lockdep_assert_held(&priv->mutex);

	ctx->key_mapping_keys++;
	keyconf->hw_key_idx = HW_KEY_DYNAMIC;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		ret = iwl4965_set_ccmp_dynamic_key_info(priv, ctx,
							keyconf, sta_id);
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		ret = iwl4965_set_tkip_dynamic_key_info(priv, ctx,
							keyconf, sta_id);
		break;
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		ret = iwl4965_set_wep_dynamic_key_info(priv, ctx,
							keyconf, sta_id);
		break;
	default:
		IWL_ERR(priv,
			"Unknown alg: %s cipher = %x\n", __func__,
			keyconf->cipher);
		ret = -EINVAL;
	}

	IWL_DEBUG_WEP(priv,
		"Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta_id, ret);

	return ret;
}
541
542/**
543 * iwl4965_alloc_bcast_station - add broadcast station into driver's station table.
544 *
545 * This adds the broadcast station into the driver's station table
546 * and marks it driver active, so that it will be restored to the
547 * device at the next best time.
548 */
549int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
550 struct iwl_rxon_context *ctx)
551{
552 struct iwl_link_quality_cmd *link_cmd;
553 unsigned long flags;
554 u8 sta_id;
555
556 spin_lock_irqsave(&priv->sta_lock, flags);
557 sta_id = iwl_legacy_prep_station(priv, ctx, iwlegacy_bcast_addr,
558 false, NULL);
559 if (sta_id == IWL_INVALID_STATION) {
560 IWL_ERR(priv, "Unable to prepare broadcast station\n");
561 spin_unlock_irqrestore(&priv->sta_lock, flags);
562
563 return -EINVAL;
564 }
565
566 priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
567 priv->stations[sta_id].used |= IWL_STA_BCAST;
568 spin_unlock_irqrestore(&priv->sta_lock, flags);
569
570 link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
571 if (!link_cmd) {
572 IWL_ERR(priv,
573 "Unable to initialize rate scaling for bcast station.\n");
574 return -ENOMEM;
575 }
576
577 spin_lock_irqsave(&priv->sta_lock, flags);
578 priv->stations[sta_id].lq = link_cmd;
579 spin_unlock_irqrestore(&priv->sta_lock, flags);
580
581 return 0;
582}
583
584/**
585 * iwl4965_update_bcast_station - update broadcast station's LQ command
586 *
587 * Only used by iwl4965. Placed here to have all bcast station management
588 * code together.
589 */
590static int iwl4965_update_bcast_station(struct iwl_priv *priv,
591 struct iwl_rxon_context *ctx)
592{
593 unsigned long flags;
594 struct iwl_link_quality_cmd *link_cmd;
595 u8 sta_id = ctx->bcast_sta_id;
596
597 link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
598 if (!link_cmd) {
599 IWL_ERR(priv,
600 "Unable to initialize rate scaling for bcast station.\n");
601 return -ENOMEM;
602 }
603
604 spin_lock_irqsave(&priv->sta_lock, flags);
605 if (priv->stations[sta_id].lq)
606 kfree(priv->stations[sta_id].lq);
607 else
608 IWL_DEBUG_INFO(priv,
609 "Bcast station rate scaling has not been initialized yet.\n");
610 priv->stations[sta_id].lq = link_cmd;
611 spin_unlock_irqrestore(&priv->sta_lock, flags);
612
613 return 0;
614}
615
616int iwl4965_update_bcast_stations(struct iwl_priv *priv)
617{
618 struct iwl_rxon_context *ctx;
619 int ret = 0;
620
621 for_each_context(priv, ctx) {
622 ret = iwl4965_update_bcast_station(priv, ctx);
623 if (ret)
624 break;
625 }
626
627 return ret;
628}
629
630/**
631 * iwl4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
632 */
633int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
634{
635 unsigned long flags;
636 struct iwl_legacy_addsta_cmd sta_cmd;
637
638 lockdep_assert_held(&priv->mutex);
639
640 /* Remove "disable" flag, to enable Tx for this TID */
641 spin_lock_irqsave(&priv->sta_lock, flags);
642 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
643 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
644 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
645 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
646 sizeof(struct iwl_legacy_addsta_cmd));
647 spin_unlock_irqrestore(&priv->sta_lock, flags);
648
649 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
650}
651
652int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
653 int tid, u16 ssn)
654{
655 unsigned long flags;
656 int sta_id;
657 struct iwl_legacy_addsta_cmd sta_cmd;
658
659 lockdep_assert_held(&priv->mutex);
660
661 sta_id = iwl_legacy_sta_id(sta);
662 if (sta_id == IWL_INVALID_STATION)
663 return -ENXIO;
664
665 spin_lock_irqsave(&priv->sta_lock, flags);
666 priv->stations[sta_id].sta.station_flags_msk = 0;
667 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
668 priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
669 priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
670 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
671 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
672 sizeof(struct iwl_legacy_addsta_cmd));
673 spin_unlock_irqrestore(&priv->sta_lock, flags);
674
675 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
676}
677
678int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
679 int tid)
680{
681 unsigned long flags;
682 int sta_id;
683 struct iwl_legacy_addsta_cmd sta_cmd;
684
685 lockdep_assert_held(&priv->mutex);
686
687 sta_id = iwl_legacy_sta_id(sta);
688 if (sta_id == IWL_INVALID_STATION) {
689 IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
690 return -ENXIO;
691 }
692
693 spin_lock_irqsave(&priv->sta_lock, flags);
694 priv->stations[sta_id].sta.station_flags_msk = 0;
695 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
696 priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
697 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
698 memcpy(&sta_cmd, &priv->stations[sta_id].sta,
699 sizeof(struct iwl_legacy_addsta_cmd));
700 spin_unlock_irqrestore(&priv->sta_lock, flags);
701
702 return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
703}
704
705void
706iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
707{
708 unsigned long flags;
709
710 spin_lock_irqsave(&priv->sta_lock, flags);
711 priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
712 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
713 priv->stations[sta_id].sta.sta.modify_mask =
714 STA_MODIFY_SLEEP_TX_COUNT_MSK;
715 priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
716 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
717 iwl_legacy_send_add_sta(priv,
718 &priv->stations[sta_id].sta, CMD_ASYNC);
719 spin_unlock_irqrestore(&priv->sta_lock, flags);
720
721}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
deleted file mode 100644
index 7f12e3638bae..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
+++ /dev/null
@@ -1,1378 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38#include "iwl-io.h"
39#include "iwl-helpers.h"
40#include "iwl-4965-hw.h"
41#include "iwl-4965.h"
42
43/*
44 * mac80211 queues, ACs, hardware queues, FIFOs.
45 *
46 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
47 *
48 * Mac80211 uses the following numbers, which we get as from it
49 * by way of skb_get_queue_mapping(skb):
50 *
51 * VO 0
52 * VI 1
53 * BE 2
54 * BK 3
55 *
56 *
57 * Regular (not A-MPDU) frames are put into hardware queues corresponding
58 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
59 * own queue per aggregation session (RA/TID combination), such queues are
60 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
61 * order to map frames to the right queue, we also need an AC->hw queue
62 * mapping. This is implemented here.
63 *
64 * Due to the way hw queues are set up (by the hw specific modules like
65 * iwl-4965.c), the AC->hw queue mapping is the identity
66 * mapping.
67 */
68
/* Map 802.11 TID (user priority 0-7) to mac80211 access category. */
static const u8 tid_to_ac[] = {
	IEEE80211_AC_BE,	/* TID 0 */
	IEEE80211_AC_BK,	/* TID 1 */
	IEEE80211_AC_BK,	/* TID 2 */
	IEEE80211_AC_BE,	/* TID 3 */
	IEEE80211_AC_VI,	/* TID 4 */
	IEEE80211_AC_VI,	/* TID 5 */
	IEEE80211_AC_VO,	/* TID 6 */
	IEEE80211_AC_VO		/* TID 7 */
};
79
80static inline int iwl4965_get_ac_from_tid(u16 tid)
81{
82 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
83 return tid_to_ac[tid];
84
85 /* no support for TIDs 8-15 yet */
86 return -EINVAL;
87}
88
89static inline int
90iwl4965_get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
91{
92 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
93 return ctx->ac_to_fifo[tid_to_ac[tid]];
94
95 /* no support for TIDs 8-15 yet */
96 return -EINVAL;
97}
98
99/*
100 * handle build REPLY_TX command notification.
101 */
102static void iwl4965_tx_cmd_build_basic(struct iwl_priv *priv,
103 struct sk_buff *skb,
104 struct iwl_tx_cmd *tx_cmd,
105 struct ieee80211_tx_info *info,
106 struct ieee80211_hdr *hdr,
107 u8 std_id)
108{
109 __le16 fc = hdr->frame_control;
110 __le32 tx_flags = tx_cmd->tx_flags;
111
112 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
113 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
114 tx_flags |= TX_CMD_FLG_ACK_MSK;
115 if (ieee80211_is_mgmt(fc))
116 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
117 if (ieee80211_is_probe_resp(fc) &&
118 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
119 tx_flags |= TX_CMD_FLG_TSF_MSK;
120 } else {
121 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
122 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
123 }
124
125 if (ieee80211_is_back_req(fc))
126 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
127
128 tx_cmd->sta_id = std_id;
129 if (ieee80211_has_morefrags(fc))
130 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
131
132 if (ieee80211_is_data_qos(fc)) {
133 u8 *qc = ieee80211_get_qos_ctl(hdr);
134 tx_cmd->tid_tspec = qc[0] & 0xf;
135 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
136 } else {
137 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
138 }
139
140 iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
141
142 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
143 if (ieee80211_is_mgmt(fc)) {
144 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
145 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
146 else
147 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
148 } else {
149 tx_cmd->timeout.pm_frame_timeout = 0;
150 }
151
152 tx_cmd->driver_txop = 0;
153 tx_cmd->tx_flags = tx_flags;
154 tx_cmd->next_frame_len = 0;
155}
156
157#define RTS_DFAULT_RETRY_LIMIT 60
158
/*
 * Fill the rate/retry part of the Tx command.
 *
 * Data frames delegate rate selection to the uCode station table
 * (TX_CMD_FLG_STA_RATE_MSK); everything else (mgmt/ctrl) is sent at a
 * legacy rate derived from mac80211's first control rate, with antenna
 * toggling for diversity.
 */
static void iwl4965_tx_cmd_build_rate(struct iwl_priv *priv,
			      struct iwl_tx_cmd *tx_cmd,
			      struct ieee80211_tx_info *info,
			      __le16 fc)
{
	u32 rate_flags;
	int rate_idx;
	u8 rts_retry_limit;
	u8 data_retry_limit;
	u8 rate_plcp;

	/* Set retry limit on DATA packets and Probe Responses*/
	if (ieee80211_is_probe_resp(fc))
		data_retry_limit = 3;
	else
		data_retry_limit = IWL4965_DEFAULT_TX_RETRY;
	tx_cmd->data_retry_limit = data_retry_limit;

	/* Set retry limit on RTS packets */
	rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
	/* RTS retries must not exceed the data retry limit */
	if (data_retry_limit < rts_retry_limit)
		rts_retry_limit = data_retry_limit;
	tx_cmd->rts_retry_limit = rts_retry_limit;

	/* DATA packets will use the uCode station table for rate/antenna
	 * selection */
	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
		return;
	}

	/**
	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
	 * not really a TX rate. Thus, we use the lowest supported rate for
	 * this band. Also use the lowest supported rate if the stored rate
	 * index is invalid.
	 */
	rate_idx = info->control.rates[0].idx;
	/* NOTE(review): '>' admits rate_idx == IWL_RATE_COUNT_LEGACY here;
	 * if valid legacy indices are 0..IWL_RATE_COUNT_LEGACY-1 this looks
	 * like an off-by-one and should be '>=' — confirm against the
	 * iwlegacy_rates table size before changing. */
	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
			(rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
		rate_idx = rate_lowest_index(&priv->bands[info->band],
				info->control.sta);
	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwlegacy_rates[rate_idx].plcp;
	/* Zero out flags for this packet */
	rate_flags = 0;

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set up antennas: rotate the mgmt Tx antenna among the valid ones */
	priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
				priv->hw_params.valid_tx_ant);

	rate_flags |= iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
}
223
/*
 * Fill the hardware-crypto part of the Tx command from the mac80211
 * key attached to this frame: security control byte plus key material
 * (for TKIP the per-packet phase-2 key is derived here).
 */
static void iwl4965_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
				      struct ieee80211_tx_info *info,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		/* derive the per-packet phase-2 TKIP key */
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		/* WEP-104 additionally sets the 128-bit-key flag, then
		 * shares the WEP-40 path below */
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		/* WEP key goes after a 3-byte offset in the key field */
		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
		break;
	}
}
265
266/*
267 * start REPLY_TX command process
268 */
269int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
270{
271 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
272 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
273 struct ieee80211_sta *sta = info->control.sta;
274 struct iwl_station_priv *sta_priv = NULL;
275 struct iwl_tx_queue *txq;
276 struct iwl_queue *q;
277 struct iwl_device_cmd *out_cmd;
278 struct iwl_cmd_meta *out_meta;
279 struct iwl_tx_cmd *tx_cmd;
280 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
281 int txq_id;
282 dma_addr_t phys_addr;
283 dma_addr_t txcmd_phys;
284 dma_addr_t scratch_phys;
285 u16 len, firstlen, secondlen;
286 u16 seq_number = 0;
287 __le16 fc;
288 u8 hdr_len;
289 u8 sta_id;
290 u8 wait_write_ptr = 0;
291 u8 tid = 0;
292 u8 *qc = NULL;
293 unsigned long flags;
294 bool is_agg = false;
295
296 if (info->control.vif)
297 ctx = iwl_legacy_rxon_ctx_from_vif(info->control.vif);
298
299 spin_lock_irqsave(&priv->lock, flags);
300 if (iwl_legacy_is_rfkill(priv)) {
301 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
302 goto drop_unlock;
303 }
304
305 fc = hdr->frame_control;
306
307#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
308 if (ieee80211_is_auth(fc))
309 IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
310 else if (ieee80211_is_assoc_req(fc))
311 IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
312 else if (ieee80211_is_reassoc_req(fc))
313 IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
314#endif
315
316 hdr_len = ieee80211_hdrlen(fc);
317
318 /* For management frames use broadcast id to do not break aggregation */
319 if (!ieee80211_is_data(fc))
320 sta_id = ctx->bcast_sta_id;
321 else {
322 /* Find index into station table for destination station */
323 sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta);
324
325 if (sta_id == IWL_INVALID_STATION) {
326 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
327 hdr->addr1);
328 goto drop_unlock;
329 }
330 }
331
332 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
333
334 if (sta)
335 sta_priv = (void *)sta->drv_priv;
336
337 if (sta_priv && sta_priv->asleep &&
338 (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
339 /*
340 * This sends an asynchronous command to the device,
341 * but we can rely on it being processed before the
342 * next frame is processed -- and the next frame to
343 * this station is the one that will consume this
344 * counter.
345 * For now set the counter to just 1 since we do not
346 * support uAPSD yet.
347 */
348 iwl4965_sta_modify_sleep_tx_count(priv, sta_id, 1);
349 }
350
351 /*
352 * Send this frame after DTIM -- there's a special queue
353 * reserved for this for contexts that support AP mode.
354 */
355 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
356 txq_id = ctx->mcast_queue;
357 /*
358 * The microcode will clear the more data
359 * bit in the last frame it transmits.
360 */
361 hdr->frame_control |=
362 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
363 } else
364 txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
365
366 /* irqs already disabled/saved above when locking priv->lock */
367 spin_lock(&priv->sta_lock);
368
369 if (ieee80211_is_data_qos(fc)) {
370 qc = ieee80211_get_qos_ctl(hdr);
371 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
372 if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
373 spin_unlock(&priv->sta_lock);
374 goto drop_unlock;
375 }
376 seq_number = priv->stations[sta_id].tid[tid].seq_number;
377 seq_number &= IEEE80211_SCTL_SEQ;
378 hdr->seq_ctrl = hdr->seq_ctrl &
379 cpu_to_le16(IEEE80211_SCTL_FRAG);
380 hdr->seq_ctrl |= cpu_to_le16(seq_number);
381 seq_number += 0x10;
382 /* aggregation is on for this <sta,tid> */
383 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
384 priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
385 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
386 is_agg = true;
387 }
388 }
389
390 txq = &priv->txq[txq_id];
391 q = &txq->q;
392
393 if (unlikely(iwl_legacy_queue_space(q) < q->high_mark)) {
394 spin_unlock(&priv->sta_lock);
395 goto drop_unlock;
396 }
397
398 if (ieee80211_is_data_qos(fc)) {
399 priv->stations[sta_id].tid[tid].tfds_in_queue++;
400 if (!ieee80211_has_morefrags(fc))
401 priv->stations[sta_id].tid[tid].seq_number = seq_number;
402 }
403
404 spin_unlock(&priv->sta_lock);
405
406 /* Set up driver data for this TFD */
407 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
408 txq->txb[q->write_ptr].skb = skb;
409 txq->txb[q->write_ptr].ctx = ctx;
410
411 /* Set up first empty entry in queue's array of Tx/cmd buffers */
412 out_cmd = txq->cmd[q->write_ptr];
413 out_meta = &txq->meta[q->write_ptr];
414 tx_cmd = &out_cmd->cmd.tx;
415 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
416 memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
417
418 /*
419 * Set up the Tx-command (not MAC!) header.
420 * Store the chosen Tx queue and TFD index within the sequence field;
421 * after Tx, uCode's Tx response will return this value so driver can
422 * locate the frame within the tx queue and do post-tx processing.
423 */
424 out_cmd->hdr.cmd = REPLY_TX;
425 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
426 INDEX_TO_SEQ(q->write_ptr)));
427
428 /* Copy MAC header from skb into command buffer */
429 memcpy(tx_cmd->hdr, hdr, hdr_len);
430
431
432 /* Total # bytes to be transmitted */
433 len = (u16)skb->len;
434 tx_cmd->len = cpu_to_le16(len);
435
436 if (info->control.hw_key)
437 iwl4965_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
438
439 /* TODO need this for burst mode later on */
440 iwl4965_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
441 iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
442
443 iwl4965_tx_cmd_build_rate(priv, tx_cmd, info, fc);
444
445 iwl_legacy_update_stats(priv, true, fc, len);
446 /*
447 * Use the first empty entry in this queue's command buffer array
448 * to contain the Tx command and MAC header concatenated together
449 * (payload data will be in another buffer).
450 * Size of this varies, due to varying MAC header length.
451 * If end is not dword aligned, we'll have 2 extra bytes at the end
452 * of the MAC header (device reads on dword boundaries).
453 * We'll tell device about this padding later.
454 */
455 len = sizeof(struct iwl_tx_cmd) +
456 sizeof(struct iwl_cmd_header) + hdr_len;
457 firstlen = (len + 3) & ~3;
458
459 /* Tell NIC about any 2-byte padding after MAC header */
460 if (firstlen != len)
461 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
462
463 /* Physical address of this Tx command's header (not MAC header!),
464 * within command buffer array. */
465 txcmd_phys = pci_map_single(priv->pci_dev,
466 &out_cmd->hdr, firstlen,
467 PCI_DMA_BIDIRECTIONAL);
468 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
469 dma_unmap_len_set(out_meta, len, firstlen);
470 /* Add buffer containing Tx command and MAC(!) header to TFD's
471 * first entry */
472 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
473 txcmd_phys, firstlen, 1, 0);
474
475 if (!ieee80211_has_morefrags(hdr->frame_control)) {
476 txq->need_update = 1;
477 } else {
478 wait_write_ptr = 1;
479 txq->need_update = 0;
480 }
481
482 /* Set up TFD's 2nd entry to point directly to remainder of skb,
483 * if any (802.11 null frames have no payload). */
484 secondlen = skb->len - hdr_len;
485 if (secondlen > 0) {
486 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
487 secondlen, PCI_DMA_TODEVICE);
488 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
489 phys_addr, secondlen,
490 0, 0);
491 }
492
493 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
494 offsetof(struct iwl_tx_cmd, scratch);
495
496 /* take back ownership of DMA buffer to enable update */
497 pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
498 firstlen, PCI_DMA_BIDIRECTIONAL);
499 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
500 tx_cmd->dram_msb_ptr = iwl_legacy_get_dma_hi_addr(scratch_phys);
501
502 IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
503 le16_to_cpu(out_cmd->hdr.sequence));
504 IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
505 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
506 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
507
508 /* Set up entry for this TFD in Tx byte-count array */
509 if (info->flags & IEEE80211_TX_CTL_AMPDU)
510 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
511 le16_to_cpu(tx_cmd->len));
512
513 pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
514 firstlen, PCI_DMA_BIDIRECTIONAL);
515
516 trace_iwlwifi_legacy_dev_tx(priv,
517 &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
518 sizeof(struct iwl_tfd),
519 &out_cmd->hdr, firstlen,
520 skb->data + hdr_len, secondlen);
521
522 /* Tell device the write index *just past* this latest filled TFD */
523 q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
524 iwl_legacy_txq_update_write_ptr(priv, txq);
525 spin_unlock_irqrestore(&priv->lock, flags);
526
527 /*
528 * At this point the frame is "transmitted" successfully
529 * and we will get a TX status notification eventually,
530 * regardless of the value of ret. "ret" only indicates
531 * whether or not we should update the write pointer.
532 */
533
534 /*
535 * Avoid atomic ops if it isn't an associated client.
536 * Also, if this is a packet for aggregation, don't
537 * increase the counter because the ucode will stop
538 * aggregation queues when their respective station
539 * goes to sleep.
540 */
541 if (sta_priv && sta_priv->client && !is_agg)
542 atomic_inc(&sta_priv->pending_frames);
543
544 if ((iwl_legacy_queue_space(q) < q->high_mark) &&
545 priv->mac80211_registered) {
546 if (wait_write_ptr) {
547 spin_lock_irqsave(&priv->lock, flags);
548 txq->need_update = 1;
549 iwl_legacy_txq_update_write_ptr(priv, txq);
550 spin_unlock_irqrestore(&priv->lock, flags);
551 } else {
552 iwl_legacy_stop_queue(priv, txq);
553 }
554 }
555
556 return 0;
557
558drop_unlock:
559 spin_unlock_irqrestore(&priv->lock, flags);
560 return -1;
561}
562
563static inline int iwl4965_alloc_dma_ptr(struct iwl_priv *priv,
564 struct iwl_dma_ptr *ptr, size_t size)
565{
566 ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
567 GFP_KERNEL);
568 if (!ptr->addr)
569 return -ENOMEM;
570 ptr->size = size;
571 return 0;
572}
573
574static inline void iwl4965_free_dma_ptr(struct iwl_priv *priv,
575 struct iwl_dma_ptr *ptr)
576{
577 if (unlikely(!ptr->addr))
578 return;
579
580 dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
581 memset(ptr, 0, sizeof(*ptr));
582}
583
584/**
585 * iwl4965_hw_txq_ctx_free - Free TXQ Context
586 *
587 * Destroy all TX DMA queues and structures
588 */
589void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
590{
591 int txq_id;
592
593 /* Tx queues */
594 if (priv->txq) {
595 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
596 if (txq_id == priv->cmd_queue)
597 iwl_legacy_cmd_queue_free(priv);
598 else
599 iwl_legacy_tx_queue_free(priv, txq_id);
600 }
601 iwl4965_free_dma_ptr(priv, &priv->kw);
602
603 iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
604
605 /* free tx queue structure */
606 iwl_legacy_txq_mem(priv);
607}
608
609/**
610 * iwl4965_txq_ctx_alloc - allocate TX queue context
611 * Allocate all Tx DMA structures and initialize them
612 *
613 * @param priv
614 * @return error code
615 */
616int iwl4965_txq_ctx_alloc(struct iwl_priv *priv)
617{
618 int ret;
619 int txq_id, slots_num;
620 unsigned long flags;
621
622 /* Free all tx/cmd queues and keep-warm buffer */
623 iwl4965_hw_txq_ctx_free(priv);
624
625 ret = iwl4965_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
626 priv->hw_params.scd_bc_tbls_size);
627 if (ret) {
628 IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
629 goto error_bc_tbls;
630 }
631 /* Alloc keep-warm buffer */
632 ret = iwl4965_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
633 if (ret) {
634 IWL_ERR(priv, "Keep Warm allocation failed\n");
635 goto error_kw;
636 }
637
638 /* allocate tx queue structure */
639 ret = iwl_legacy_alloc_txq_mem(priv);
640 if (ret)
641 goto error;
642
643 spin_lock_irqsave(&priv->lock, flags);
644
645 /* Turn off all Tx DMA fifos */
646 iwl4965_txq_set_sched(priv, 0);
647
648 /* Tell NIC where to find the "keep warm" buffer */
649 iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
650
651 spin_unlock_irqrestore(&priv->lock, flags);
652
653 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
654 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
655 slots_num = (txq_id == priv->cmd_queue) ?
656 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
657 ret = iwl_legacy_tx_queue_init(priv,
658 &priv->txq[txq_id], slots_num,
659 txq_id);
660 if (ret) {
661 IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
662 goto error;
663 }
664 }
665
666 return ret;
667
668 error:
669 iwl4965_hw_txq_ctx_free(priv);
670 iwl4965_free_dma_ptr(priv, &priv->kw);
671 error_kw:
672 iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
673 error_bc_tbls:
674 return ret;
675}
676
677void iwl4965_txq_ctx_reset(struct iwl_priv *priv)
678{
679 int txq_id, slots_num;
680 unsigned long flags;
681
682 spin_lock_irqsave(&priv->lock, flags);
683
684 /* Turn off all Tx DMA fifos */
685 iwl4965_txq_set_sched(priv, 0);
686
687 /* Tell NIC where to find the "keep warm" buffer */
688 iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
689
690 spin_unlock_irqrestore(&priv->lock, flags);
691
692 /* Alloc and init all Tx queues, including the command queue (#4) */
693 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
694 slots_num = txq_id == priv->cmd_queue ?
695 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
696 iwl_legacy_tx_queue_reset(priv, &priv->txq[txq_id],
697 slots_num, txq_id);
698 }
699}
700
701/**
702 * iwl4965_txq_ctx_stop - Stop all Tx DMA channels
703 */
704void iwl4965_txq_ctx_stop(struct iwl_priv *priv)
705{
706 int ch, txq_id;
707 unsigned long flags;
708
709 /* Turn off all Tx DMA fifos */
710 spin_lock_irqsave(&priv->lock, flags);
711
712 iwl4965_txq_set_sched(priv, 0);
713
714 /* Stop each Tx DMA channel, and wait for it to be idle */
715 for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
716 iwl_legacy_write_direct32(priv,
717 FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
718 if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
719 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
720 1000))
721 IWL_ERR(priv, "Failing on timeout while stopping"
722 " DMA channel %d [0x%08x]", ch,
723 iwl_legacy_read_direct32(priv,
724 FH_TSSR_TX_STATUS_REG));
725 }
726 spin_unlock_irqrestore(&priv->lock, flags);
727
728 if (!priv->txq)
729 return;
730
731 /* Unmap DMA from host system and free skb's */
732 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
733 if (txq_id == priv->cmd_queue)
734 iwl_legacy_cmd_queue_unmap(priv);
735 else
736 iwl_legacy_tx_queue_unmap(priv, txq_id);
737}
738
739/*
740 * Find first available (lowest unused) Tx Queue, mark it "active".
741 * Called only when finding queue for aggregation.
742 * Should never return anything < 7, because they should already
743 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
744 */
745static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv)
746{
747 int txq_id;
748
749 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
750 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
751 return txq_id;
752 return -1;
753}
754
755/**
756 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
757 */
758static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
759 u16 txq_id)
760{
761 /* Simply stop the queue, but don't change any configuration;
762 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
763 iwl_legacy_write_prph(priv,
764 IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
765 (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
766 (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
767}
768
769/**
770 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
771 */
772static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
773 u16 txq_id)
774{
775 u32 tbl_dw_addr;
776 u32 tbl_dw;
777 u16 scd_q2ratid;
778
779 scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
780
781 tbl_dw_addr = priv->scd_base_addr +
782 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
783
784 tbl_dw = iwl_legacy_read_targ_mem(priv, tbl_dw_addr);
785
786 if (txq_id & 0x1)
787 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
788 else
789 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
790
791 iwl_legacy_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
792
793 return 0;
794}
795
/**
 * iwl4965_txq_agg_enable - Set up & enable aggregation for selected queue
 * @priv: driver private data
 * @txq_id: HW queue to configure; must lie in the aggregation range
 *	[IWL49_FIRST_AMPDU_QUEUE, IWL49_FIRST_AMPDU_QUEUE + num_of_ampdu_queues)
 * @tx_fifo: Tx FIFO the queue is mapped to
 * @sta_id: station-table index of the receiver
 * @tid: traffic identifier being aggregated
 * @ssn_idx: starting sequence number; assumed valid (!= 0xFFF)
 *
 * NOTE: txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE,
 * i.e. it must be one of the higher queues used for aggregation
 *
 * Returns 0 on success, -EINVAL for an out-of-range queue, or the error
 * returned when enabling the TID in the device's station table.
 */
static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
				  int tx_fifo, int sta_id, int tid, u16 ssn_idx)
{
	unsigned long flags;
	u16 ra_tid;
	int ret;

	/* Reject queues outside the aggregation range */
	if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWL49_FIRST_AMPDU_QUEUE +
		priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
		IWL_WARN(priv,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWL49_FIRST_AMPDU_QUEUE,
			IWL49_FIRST_AMPDU_QUEUE +
			priv->cfg->base_params->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	ra_tid = BUILD_RAxTID(sta_id, tid);

	/* Modify device's station table to Tx this TID */
	ret = iwl4965_sta_tx_modify_enable_tid(priv, sta_id, tid);
	if (ret)
		return ret;

	/* The scheduler programming below must not race with other users
	 * of the scheduler registers */
	spin_lock_irqsave(&priv->lock, flags);

	/* Stop this Tx queue before configuring it */
	iwl4965_tx_queue_stop_scheduler(priv, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_legacy_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);

	/* Set up Tx window size and frame limit for this queue */
	iwl_legacy_write_targ_mem(priv,
		priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
		(SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
		IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

	iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
		IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
		(SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
		& IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	/* Enable scheduler interrupts for this queue */
	iwl_legacy_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
864
865
866int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
867 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
868{
869 int sta_id;
870 int tx_fifo;
871 int txq_id;
872 int ret;
873 unsigned long flags;
874 struct iwl_tid_data *tid_data;
875
876 tx_fifo = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
877 if (unlikely(tx_fifo < 0))
878 return tx_fifo;
879
880 IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
881 __func__, sta->addr, tid);
882
883 sta_id = iwl_legacy_sta_id(sta);
884 if (sta_id == IWL_INVALID_STATION) {
885 IWL_ERR(priv, "Start AGG on invalid station\n");
886 return -ENXIO;
887 }
888 if (unlikely(tid >= MAX_TID_COUNT))
889 return -EINVAL;
890
891 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
892 IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
893 return -ENXIO;
894 }
895
896 txq_id = iwl4965_txq_ctx_activate_free(priv);
897 if (txq_id == -1) {
898 IWL_ERR(priv, "No free aggregation queue available\n");
899 return -ENXIO;
900 }
901
902 spin_lock_irqsave(&priv->sta_lock, flags);
903 tid_data = &priv->stations[sta_id].tid[tid];
904 *ssn = SEQ_TO_SN(tid_data->seq_number);
905 tid_data->agg.txq_id = txq_id;
906 iwl_legacy_set_swq_id(&priv->txq[txq_id],
907 iwl4965_get_ac_from_tid(tid), txq_id);
908 spin_unlock_irqrestore(&priv->sta_lock, flags);
909
910 ret = iwl4965_txq_agg_enable(priv, txq_id, tx_fifo,
911 sta_id, tid, *ssn);
912 if (ret)
913 return ret;
914
915 spin_lock_irqsave(&priv->sta_lock, flags);
916 tid_data = &priv->stations[sta_id].tid[tid];
917 if (tid_data->tfds_in_queue == 0) {
918 IWL_DEBUG_HT(priv, "HW queue is empty\n");
919 tid_data->agg.state = IWL_AGG_ON;
920 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
921 } else {
922 IWL_DEBUG_HT(priv,
923 "HW queue is NOT empty: %d packets in HW queue\n",
924 tid_data->tfds_in_queue);
925 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
926 }
927 spin_unlock_irqrestore(&priv->sta_lock, flags);
928 return ret;
929}
930
/**
 * iwl4965_txq_agg_disable - deactivate an aggregation HW queue
 * @priv: driver private data
 * @txq_id: queue to deactivate; must be greater than IWL49_FIRST_AMPDU_QUEUE
 * @ssn_idx: sequence number to park the read/write pointers at;
 *	supposed to be valid (!= 0xFFF)
 * @tx_fifo: FIFO the queue was mapped to
 *
 * priv->lock must be held by the caller.
 * Returns 0 on success, -EINVAL for a queue outside the aggregation range.
 */
static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
				   u16 ssn_idx, u8 tx_fifo)
{
	if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWL49_FIRST_AMPDU_QUEUE +
		priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
		IWL_WARN(priv,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWL49_FIRST_AMPDU_QUEUE,
			IWL49_FIRST_AMPDU_QUEUE +
			priv->cfg->base_params->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	/* Stop the queue's scheduler before touching its configuration */
	iwl4965_tx_queue_stop_scheduler(priv, txq_id);

	/* Remove the queue from the chain-building set */
	iwl_legacy_clear_bits_prph(priv,
			IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);

	/* Mask scheduler interrupts, release the queue bit and mark the
	 * queue inactive in the scheduler status area */
	iwl_legacy_clear_bits_prph(priv,
			IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(priv, txq_id);
	iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);

	return 0;
}
966
/**
 * iwl4965_tx_agg_stop - tear down the aggregation session for a station/TID
 * @priv: driver private data
 * @vif: virtual interface the station belongs to
 * @sta: peer station
 * @tid: traffic identifier of the session
 *
 * Counterpart of iwl4965_tx_agg_start().  If the HW queue still holds
 * frames, the teardown is deferred (IWL_EMPTYING_HW_QUEUE_DELBA) and
 * completed later by iwl4965_txq_check_empty(); otherwise the queue is
 * disabled immediately and mac80211 is notified.
 *
 * Returns 0 on success or a negative errno.
 */
int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn;
	struct iwl_tid_data *tid_data;
	int write_ptr, read_ptr;
	unsigned long flags;

	tx_fifo_id = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
	if (unlikely(tx_fifo_id < 0))
		return tx_fifo_id;

	sta_id = iwl_legacy_sta_id(sta);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);

	tid_data = &priv->stations[sta_id].tid[tid];
	/* sequence number the queue pointers will be parked at */
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
		goto turn_off;
	case IWL_AGG_ON:
		break;
	default:
		IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
	}

	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		/* defer teardown until iwl4965_txq_check_empty() sees the
		 * queue drain */
		IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
				IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&priv->sta_lock, flags);
		return 0;
	}

	IWL_DEBUG_HT(priv, "HW queue is empty\n");
 turn_off:
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	/* do not restore/save irqs:
	 * hand off from sta_lock to priv->lock with interrupts kept
	 * disabled; `flags` saved above is restored when priv->lock drops */
	spin_unlock(&priv->sta_lock);
	spin_lock(&priv->lock);

	/*
	 * the only reason this call can fail is queue number out of range,
	 * which can happen if uCode is reloaded and all the station
	 * information are lost. if it is outside the range, there is no need
	 * to deactivate the uCode queue, just return "success" to allow
	 *  mac80211 to clean up it own data.
	 */
	iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}
1042
1043int iwl4965_txq_check_empty(struct iwl_priv *priv,
1044 int sta_id, u8 tid, int txq_id)
1045{
1046 struct iwl_queue *q = &priv->txq[txq_id].q;
1047 u8 *addr = priv->stations[sta_id].sta.sta.addr;
1048 struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
1049 struct iwl_rxon_context *ctx;
1050
1051 ctx = &priv->contexts[priv->stations[sta_id].ctxid];
1052
1053 lockdep_assert_held(&priv->sta_lock);
1054
1055 switch (priv->stations[sta_id].tid[tid].agg.state) {
1056 case IWL_EMPTYING_HW_QUEUE_DELBA:
1057 /* We are reclaiming the last packet of the */
1058 /* aggregated HW queue */
1059 if ((txq_id == tid_data->agg.txq_id) &&
1060 (q->read_ptr == q->write_ptr)) {
1061 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
1062 int tx_fifo = iwl4965_get_fifo_from_tid(ctx, tid);
1063 IWL_DEBUG_HT(priv,
1064 "HW queue empty: continue DELBA flow\n");
1065 iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo);
1066 tid_data->agg.state = IWL_AGG_OFF;
1067 ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
1068 }
1069 break;
1070 case IWL_EMPTYING_HW_QUEUE_ADDBA:
1071 /* We are reclaiming the last packet of the queue */
1072 if (tid_data->tfds_in_queue == 0) {
1073 IWL_DEBUG_HT(priv,
1074 "HW queue empty: continue ADDBA flow\n");
1075 tid_data->agg.state = IWL_AGG_ON;
1076 ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
1077 }
1078 break;
1079 }
1080
1081 return 0;
1082}
1083
1084static void iwl4965_non_agg_tx_status(struct iwl_priv *priv,
1085 struct iwl_rxon_context *ctx,
1086 const u8 *addr1)
1087{
1088 struct ieee80211_sta *sta;
1089 struct iwl_station_priv *sta_priv;
1090
1091 rcu_read_lock();
1092 sta = ieee80211_find_sta(ctx->vif, addr1);
1093 if (sta) {
1094 sta_priv = (void *)sta->drv_priv;
1095 /* avoid atomic ops if this isn't a client */
1096 if (sta_priv->client &&
1097 atomic_dec_return(&sta_priv->pending_frames) == 0)
1098 ieee80211_sta_block_awake(priv->hw, sta, false);
1099 }
1100 rcu_read_unlock();
1101}
1102
1103static void
1104iwl4965_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
1105 bool is_agg)
1106{
1107 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
1108
1109 if (!is_agg)
1110 iwl4965_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);
1111
1112 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
1113}
1114
/**
 * iwl4965_tx_queue_reclaim - reclaim completed TFDs from a Tx queue
 * @priv: driver private data
 * @txq_id: queue to reclaim from
 * @index: index of the last completed entry; everything from the current
 *	read pointer up to and including @index (with wrap) is freed
 *
 * Hands each completed frame's skb back to mac80211 and frees its TFD.
 * Returns the number of reclaimed frames that were QoS data frames
 * (callers use this to adjust per-TID tfds_in_queue accounting), or 0
 * if @index was out of range.
 */
int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;
	struct ieee80211_hdr *hdr;

	if ((index >= q->n_bd) || (iwl_legacy_queue_used(q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  index, q->n_bd, q->write_ptr, q->read_ptr);
		return 0;
	}

	/* advance read_ptr entry by entry until it passes @index,
	 * freeing each entry as we go (read_ptr is bumped in the
	 * for-update clause) */
	for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
	     q->read_ptr != index;
	     q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];

		if (WARN_ON_ONCE(tx_info->skb == NULL))
			continue;

		hdr = (struct ieee80211_hdr *)tx_info->skb->data;
		/* only QoS data frames are counted for the caller */
		if (ieee80211_is_data_qos(hdr->frame_control))
			nfreed++;

		/* queues at/above the first AMPDU queue carry aggregated
		 * traffic and skip non-agg status accounting */
		iwl4965_tx_status(priv, tx_info,
				 txq_id >= IWL4965_FIRST_AMPDU_QUEUE);
		tx_info->skb = NULL;

		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
	}
	return nfreed;
}
1151
/**
 * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
 * @priv: driver private data
 * @agg: aggregation state for the station/TID the BA belongs to
 * @ba_resp: the REPLY_COMPRESSED_BA notification from the device
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
 *
 * Returns 0 on success, -EINVAL if no BA was expected, -1 if the BA
 * window does not fit the 64-bit bitmap after alignment.
 */
static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
				 struct iwl_ht_agg *agg,
				 struct iwl_compressed_ba_resp *ba_resp)

{
	int i, sh, ack;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	int successes = 0;
	struct ieee80211_tx_info *info;
	u64 bitmap, sent_bitmap;

	if (unlikely(!agg->wait_for_ba)) {
		if (unlikely(ba_resp->bitmap))
			IWL_ERR(priv, "Received BA when not expected\n");
		return -EINVAL;
	}

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = 0;
	IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx,
			ba_resp->seq_ctl);

	/* Calculate shift to align block-ack bits with our Tx window bits */
	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
	if (sh < 0) /* tbw something is wrong with indices */
		sh += 0x100;

	if (agg->frame_count > (64 - sh)) {
		/* window cannot be represented in the shifted 64-bit bitmap */
		IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
		return -1;
	}

	/* align the BA bitmap with the start of our Tx window */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

	/* check for success or failure according to the
	 * transmitted bitmap and block-ack bitmap */
	sent_bitmap = bitmap & agg->bitmap;

	/* For each frame attempted in aggregation,
	 * update driver's record of tx frame's status. */
	i = 0;
	while (sent_bitmap) {
		ack = sent_bitmap & 1ULL;
		successes += ack;
		IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
			ack ? "ACK" : "NACK", i,
			(agg->start_idx + i) & 0xff,
			agg->start_idx + i);
		sent_bitmap >>= 1;
		++i;
	}

	IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n",
			   (unsigned long long)bitmap);

	/* report the whole aggregate through the first frame's tx_info */
	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
	memset(&info->status, 0, sizeof(info->status));
	info->flags |= IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_len = successes;
	info->status.ampdu_len = agg->frame_count;
	iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, info);

	return 0;
}
1225
1226/**
1227 * translate ucode response to mac80211 tx status control values
1228 */
1229void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
1230 struct ieee80211_tx_info *info)
1231{
1232 struct ieee80211_tx_rate *r = &info->control.rates[0];
1233
1234 info->antenna_sel_tx =
1235 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
1236 if (rate_n_flags & RATE_MCS_HT_MSK)
1237 r->flags |= IEEE80211_TX_RC_MCS;
1238 if (rate_n_flags & RATE_MCS_GF_MSK)
1239 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
1240 if (rate_n_flags & RATE_MCS_HT40_MSK)
1241 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
1242 if (rate_n_flags & RATE_MCS_DUP_MSK)
1243 r->flags |= IEEE80211_TX_RC_DUP_DATA;
1244 if (rate_n_flags & RATE_MCS_SGI_MSK)
1245 r->flags |= IEEE80211_TX_RC_SHORT_GI;
1246 r->idx = iwl4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
1247}
1248
/**
 * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 *
 * Updates the per-frame ACK record, reclaims all TFDs in front of the
 * BA window, wakes the mac80211 queue when space opens up and finishes
 * any deferred ADDBA/DELBA transition.
 */
void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
					struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_ht_agg *agg;
	int index;
	int sta_id;
	int tid;
	unsigned long flags;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->hw_params.max_txq_num) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &priv->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->stations[sta_id].tid[tid].agg;
	if (unlikely(agg->txq_id != scd_flow)) {
		/*
		 * FIXME: this is a uCode bug which need to be addressed,
		 * log the information and return for now!
		 * since it is possible happen very often and in order
		 * not to fill the syslog, don't enable the logging by default
		 */
		IWL_DEBUG_TX_REPLY(priv,
			"BA scd_flow %d does not match txq_id %d\n",
			scd_flow, agg->txq_id);
		return;
	}

	/* Find index just before block-ack window */
	index = iwl_legacy_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	/* agg state and queue accounting below are protected by sta_lock */
	spin_lock_irqsave(&priv->sta_lock, flags);

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx,"
			"scd_flow = "
			"%d, scd_ssn = %d\n",
			ba_resp->tid,
			ba_resp->seq_ctl,
			(unsigned long long)le64_to_cpu(ba_resp->bitmap),
			ba_resp->scd_flow,
			ba_resp->scd_ssn);
	IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
			agg->start_idx,
			(unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in window */
	iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
		iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);

		/* wake the sw queue once enough space opened up again */
		if ((iwl_legacy_queue_space(&txq->q) > txq->q.low_mark) &&
		    priv->mac80211_registered &&
		    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
			iwl_legacy_wake_queue(priv, txq);

		/* finish any deferred ADDBA/DELBA transition */
		iwl4965_txq_check_empty(priv, sta_id, tid, scd_flow);
	}

	spin_unlock_irqrestore(&priv->sta_lock, flags);
}
1340
1341#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/**
 * iwl4965_get_tx_fail_reason - name the failure code in a Tx status word
 * @status: Tx status word from the uCode Tx response
 *
 * Decodes only the TX_STATUS_MSK portion of @status; values not in the
 * table below yield "UNKNOWN".
 */
const char *iwl4965_get_tx_fail_reason(u32 status)
{
/* expand each status constant into a case label returning its name */
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
	TX_STATUS_POSTPONE(DELAY);
	TX_STATUS_POSTPONE(FEW_BYTES);
	TX_STATUS_POSTPONE(QUIET_PERIOD);
	TX_STATUS_POSTPONE(CALC_TTAK);
	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
	TX_STATUS_FAIL(SHORT_LIMIT);
	TX_STATUS_FAIL(LONG_LIMIT);
	TX_STATUS_FAIL(FIFO_UNDERRUN);
	TX_STATUS_FAIL(DRAIN_FLOW);
	TX_STATUS_FAIL(RFKILL_FLUSH);
	TX_STATUS_FAIL(LIFE_EXPIRE);
	TX_STATUS_FAIL(DEST_PS);
	TX_STATUS_FAIL(HOST_ABORTED);
	TX_STATUS_FAIL(BT_RETRY);
	TX_STATUS_FAIL(STA_INVALID);
	TX_STATUS_FAIL(FRAG_DROPPED);
	TX_STATUS_FAIL(TID_DISABLE);
	TX_STATUS_FAIL(FIFO_FLUSHED);
	TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
	TX_STATUS_FAIL(PASSIVE_NO_RX);
	TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
1378#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
deleted file mode 100644
index 001d148feb94..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
+++ /dev/null
@@ -1,166 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39#include "iwl-4965-hw.h"
40#include "iwl-4965.h"
41#include "iwl-4965-calib.h"
42
43#define IWL_AC_UNSET -1
44
/**
 * iwl4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
 * using sample data 100 bytes apart. If these sample points are good,
 * it's a pretty good bet that everything between them is good, too.
 *
 * Returns 0 if the samples match, -EIO otherwise.
 */
static int
iwl4965_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
{
	u32 val;
	int ret = 0;
	u32 errcnt = 0;
	u32 i;

	IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);

	for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
		/* read data comes through single port, auto-incr addr */
		/* NOTE: Use the debugless read so we don't flood kernel log
		 * if IWL_DL_IO is set */
		iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
			i + IWL4965_RTC_INST_LOWER_BOUND);
		val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image)) {
			/* tolerate up to 3 mismatched samples before
			 * giving up early */
			ret = -EIO;
			errcnt++;
			if (errcnt >= 3)
				break;
		}
	}

	return ret;
}
77
/**
 * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
 * looking at all data.
 *
 * Slower than iwl4965_verify_inst_sparse(); used for diagnostics after
 * the sparse check has failed for every image.  Returns 0 on a full
 * match, -EIO otherwise.
 */
static int iwl4965_verify_inst_full(struct iwl_priv *priv, __le32 *image,
				 u32 len)
{
	u32 val;
	u32 save_len = len;
	int ret = 0;
	u32 errcnt;

	IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);

	iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
			   IWL4965_RTC_INST_LOWER_BOUND);

	errcnt = 0;
	for (; len > 0; len -= sizeof(u32), image++) {
		/* read data comes through single port, auto-incr addr */
		/* NOTE: Use the debugless read so we don't flood kernel log
		 * if IWL_DL_IO is set */
		val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image)) {
			IWL_ERR(priv, "uCode INST section is invalid at "
				  "offset 0x%x, is 0x%x, s/b 0x%x\n",
				  save_len - len, val, le32_to_cpu(*image));
			ret = -EIO;
			/* report up to 20 mismatches, then stop */
			errcnt++;
			if (errcnt >= 20)
				break;
		}
	}

	if (!errcnt)
		IWL_DEBUG_INFO(priv,
			"ucode image in INSTRUCTION memory is good\n");

	return ret;
}
118
/**
 * iwl4965_verify_ucode - determine which instruction image is in SRAM,
 * and verify its contents
 *
 * Runs the fast sparse check against the bootstrap, initialize and
 * runtime images in turn; the first match wins.  If none match, a full
 * comparison against the bootstrap image is dumped for diagnostics.
 * Returns 0 if any image matched, otherwise the full-check error code.
 */
int iwl4965_verify_ucode(struct iwl_priv *priv)
{
	__le32 *image;
	u32 len;
	int ret;

	/* Try bootstrap */
	image = (__le32 *)priv->ucode_boot.v_addr;
	len = priv->ucode_boot.len;
	ret = iwl4965_verify_inst_sparse(priv, image, len);
	if (!ret) {
		IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
		return 0;
	}

	/* Try initialize */
	image = (__le32 *)priv->ucode_init.v_addr;
	len = priv->ucode_init.len;
	ret = iwl4965_verify_inst_sparse(priv, image, len);
	if (!ret) {
		IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
		return 0;
	}

	/* Try runtime/protocol */
	image = (__le32 *)priv->ucode_code.v_addr;
	len = priv->ucode_code.len;
	ret = iwl4965_verify_inst_sparse(priv, image, len);
	if (!ret) {
		IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
		return 0;
	}

	IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");

	/* Since nothing seems to match, show first several data entries in
	 * instruction SRAM, so maybe visual inspection will give a clue.
	 * Selection of bootstrap image (vs. other images) is arbitrary. */
	image = (__le32 *)priv->ucode_boot.v_addr;
	len = priv->ucode_boot.len;
	ret = iwl4965_verify_inst_full(priv, image, len);

	return ret;
}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
deleted file mode 100644
index 86f4fce193e4..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965.c
+++ /dev/null
@@ -1,2183 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/sched.h>
34#include <linux/skbuff.h>
35#include <linux/netdevice.h>
36#include <net/mac80211.h>
37#include <linux/etherdevice.h>
38#include <asm/unaligned.h>
39
40#include "iwl-eeprom.h"
41#include "iwl-dev.h"
42#include "iwl-core.h"
43#include "iwl-io.h"
44#include "iwl-helpers.h"
45#include "iwl-4965-calib.h"
46#include "iwl-sta.h"
47#include "iwl-4965-led.h"
48#include "iwl-4965.h"
49#include "iwl-4965-debugfs.h"
50
51static int iwl4965_send_tx_power(struct iwl_priv *priv);
52static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
53
54/* Highest firmware API version supported */
55#define IWL4965_UCODE_API_MAX 2
56
57/* Lowest firmware API version supported */
58#define IWL4965_UCODE_API_MIN 2
59
60#define IWL4965_FW_PRE "iwlwifi-4965-"
61#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode"
62#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api)
63
/* check contents of special bootstrap uCode SRAM: read back every word
 * just written by iwl4965_load_bsm() and compare against the host copy.
 * Returns 0 on a full match, -EIO on the first mismatch. */
static int iwl4965_verify_bsm(struct iwl_priv *priv)
{
	__le32 *image = priv->ucode_boot.v_addr;
	u32 len = priv->ucode_boot.len;
	u32 reg;
	u32 val;

	IWL_DEBUG_INFO(priv, "Begin verify bsm\n");

	/* verify BSM SRAM contents */
	/* NOTE(review): this read's result is immediately overwritten in
	 * the loop below; looks like a leftover — confirm before removing */
	val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
	for (reg = BSM_SRAM_LOWER_BOUND;
	     reg < BSM_SRAM_LOWER_BOUND + len;
	     reg += sizeof(u32), image++) {
		val = iwl_legacy_read_prph(priv, reg);
		if (val != le32_to_cpu(*image)) {
			IWL_ERR(priv, "BSM uCode verification failed at "
				  "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
				  BSM_SRAM_LOWER_BOUND,
				  reg - BSM_SRAM_LOWER_BOUND, len,
				  val, le32_to_cpu(*image));
			return -EIO;
		}
	}

	IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");

	return 0;
}
94
95/**
96 * iwl4965_load_bsm - Load bootstrap instructions
97 *
98 * BSM operation:
99 *
100 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
101 * in special SRAM that does not power down during RFKILL. When powering back
102 * up after power-saving sleeps (or during initial uCode load), the BSM loads
103 * the bootstrap program into the on-board processor, and starts it.
104 *
105 * The bootstrap program loads (via DMA) instructions and data for a new
106 * program from host DRAM locations indicated by the host driver in the
107 * BSM_DRAM_* registers. Once the new program is loaded, it starts
108 * automatically.
109 *
110 * When initializing the NIC, the host driver points the BSM to the
111 * "initialize" uCode image. This uCode sets up some internal data, then
112 * notifies host via "initialize alive" that it is complete.
113 *
114 * The host then replaces the BSM_DRAM_* pointer values to point to the
115 * normal runtime uCode instructions and a backup uCode data cache buffer
116 * (filled initially with starting data values for the on-board processor),
117 * then triggers the "initialize" uCode to load and launch the runtime uCode,
118 * which begins normal operation.
119 *
120 * When doing a power-save shutdown, runtime uCode saves data SRAM into
121 * the backup data cache in DRAM before SRAM is powered down.
122 *
123 * When powering back up, the BSM loads the bootstrap program. This reloads
124 * the runtime uCode instructions and the backup data cache into SRAM,
125 * and re-launches the runtime uCode from where it left off.
126 */
/*
 * Copy the bootstrap program into BSM SRAM, verify it, trigger an
 * immediate load into instruction SRAM (to prepare for the "initialize"
 * uCode), and arm the BSM for future power-management-triggered reloads.
 * Returns 0 on success, -EINVAL if the image is too large for BSM SRAM,
 * -EIO if verification or the load fails.
 */
static int iwl4965_load_bsm(struct iwl_priv *priv)
{
	__le32 *image = priv->ucode_boot.v_addr;
	u32 len = priv->ucode_boot.len;
	dma_addr_t pinst;
	dma_addr_t pdata;
	u32 inst_len;
	u32 data_len;
	int i;
	u32 done;
	u32 reg_offset;
	int ret;

	IWL_DEBUG_INFO(priv, "Begin load bsm\n");

	priv->ucode_type = UCODE_RT;

	/* make sure bootstrap program is no larger than BSM's SRAM size */
	if (len > IWL49_MAX_BSM_SIZE)
		return -EINVAL;

	/* Tell bootstrap uCode where to find the "Initialize" uCode
	 * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
	 * NOTE: iwl_init_alive_start() will replace these values,
	 * after the "initialize" uCode has run, to point to
	 * runtime/protocol instructions and backup data cache.
	 */
	pinst = priv->ucode_init.p_addr >> 4;
	pdata = priv->ucode_init_data.p_addr >> 4;
	inst_len = priv->ucode_init.len;
	data_len = priv->ucode_init_data.len;

	iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
	iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
	iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
	iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);

	/* Fill BSM memory with bootstrap instructions */
	for (reg_offset = BSM_SRAM_LOWER_BOUND;
	     reg_offset < BSM_SRAM_LOWER_BOUND + len;
	     reg_offset += sizeof(u32), image++)
		_iwl_legacy_write_prph(priv, reg_offset, le32_to_cpu(*image));

	/* Read back and compare what we just wrote */
	ret = iwl4965_verify_bsm(priv);
	if (ret)
		return ret;

	/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
	iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
	iwl_legacy_write_prph(priv,
			BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND);
	iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));

	/* Load bootstrap code into instruction SRAM now,
	 * to prepare to load "initialize" uCode */
	iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);

	/* Wait for load of bootstrap uCode to finish
	 * (poll up to 100 times with a 10 us delay, ~1 ms total) */
	for (i = 0; i < 100; i++) {
		done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
		/* START bit self-clears when the copy is done */
		if (!(done & BSM_WR_CTRL_REG_BIT_START))
			break;
		udelay(10);
	}
	if (i < 100)
		IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
	else {
		IWL_ERR(priv, "BSM write did not complete!\n");
		return -EIO;
	}

	/* Enable future boot loads whenever power management unit triggers it
	 * (e.g. when powering back up after power-save shutdown) */
	iwl_legacy_write_prph(priv,
		BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);


	return 0;
}
206
207/**
208 * iwl4965_set_ucode_ptrs - Set uCode address location
209 *
210 * Tell initialization uCode where to find runtime uCode.
211 *
212 * BSM registers initially contain pointers to initialization uCode.
213 * We need to replace them to load runtime uCode inst and data,
214 * and to save runtime data when powering down.
215 */
216static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
217{
218 dma_addr_t pinst;
219 dma_addr_t pdata;
220 int ret = 0;
221
222 /* bits 35:4 for 4965 */
223 pinst = priv->ucode_code.p_addr >> 4;
224 pdata = priv->ucode_data_backup.p_addr >> 4;
225
226 /* Tell bootstrap uCode where to find image to load */
227 iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
228 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
229 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
230 priv->ucode_data.len);
231
232 /* Inst byte count must be last to set up, bit 31 signals uCode
233 * that all new ptr/size info is in place */
234 iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
235 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
236 IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
237
238 return ret;
239}
240
241/**
242 * iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
243 *
244 * Called after REPLY_ALIVE notification received from "initialize" uCode.
245 *
246 * The 4965 "initialize" ALIVE reply contains calibration data for:
247 * Voltage, temperature, and MIMO tx gain correction, now stored in priv
248 * (3945 does not contain this data).
249 *
250 * Tell "initialize" uCode to go ahead and load the runtime uCode.
251*/
252static void iwl4965_init_alive_start(struct iwl_priv *priv)
253{
254 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
255 * This is a paranoid check, because we would not have gotten the
256 * "initialize" alive if code weren't properly loaded. */
257 if (iwl4965_verify_ucode(priv)) {
258 /* Runtime instruction load was bad;
259 * take it all the way back down so we can try again */
260 IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
261 goto restart;
262 }
263
264 /* Calculate temperature */
265 priv->temperature = iwl4965_hw_get_temperature(priv);
266
267 /* Send pointers to protocol/runtime uCode image ... init code will
268 * load and launch runtime uCode, which will send us another "Alive"
269 * notification. */
270 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
271 if (iwl4965_set_ucode_ptrs(priv)) {
272 /* Runtime instruction load won't happen;
273 * take it all the way back down so we can try again */
274 IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
275 goto restart;
276 }
277 return;
278
279restart:
280 queue_work(priv->workqueue, &priv->restart);
281}
282
283static bool iw4965_is_ht40_channel(__le32 rxon_flags)
284{
285 int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK)
286 >> RXON_FLG_CHANNEL_MODE_POS;
287 return ((chan_mod == CHANNEL_MODE_PURE_40) ||
288 (chan_mod == CHANNEL_MODE_MIXED));
289}
290
291static void iwl4965_nic_config(struct iwl_priv *priv)
292{
293 unsigned long flags;
294 u16 radio_cfg;
295
296 spin_lock_irqsave(&priv->lock, flags);
297
298 radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
299
300 /* write radio config values to register */
301 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
302 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
303 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
304 EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
305 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
306
307 /* set CSR_HW_CONFIG_REG for uCode use */
308 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
309 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
310 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
311
312 priv->calib_info = (struct iwl_eeprom_calib_info *)
313 iwl_legacy_eeprom_query_addr(priv,
314 EEPROM_4965_CALIB_TXPOWER_OFFSET);
315
316 spin_unlock_irqrestore(&priv->lock, flags);
317}
318
319/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
320 * Called after every association, but this runs only once!
321 * ... once chain noise is calibrated the first time, it's good forever. */
322static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
323{
324 struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
325
326 if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
327 iwl_legacy_is_any_associated(priv)) {
328 struct iwl_calib_diff_gain_cmd cmd;
329
330 /* clear data for chain noise calibration algorithm */
331 data->chain_noise_a = 0;
332 data->chain_noise_b = 0;
333 data->chain_noise_c = 0;
334 data->chain_signal_a = 0;
335 data->chain_signal_b = 0;
336 data->chain_signal_c = 0;
337 data->beacon_count = 0;
338
339 memset(&cmd, 0, sizeof(cmd));
340 cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
341 cmd.diff_gain_a = 0;
342 cmd.diff_gain_b = 0;
343 cmd.diff_gain_c = 0;
344 if (iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
345 sizeof(cmd), &cmd))
346 IWL_ERR(priv,
347 "Could not send REPLY_PHY_CALIBRATION_CMD\n");
348 data->state = IWL_CHAIN_NOISE_ACCUMULATE;
349 IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
350 }
351}
352
/* Default auto-correlation and energy-detection thresholds used by the
 * 4965 sensitivity calibration; installed into priv->hw_params.sens by
 * iwl4965_hw_set_hw_params().  All values are initial/limit settings
 * the calibration algorithm works within. */
static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
	.min_nrg_cck = 97,
	.max_nrg_cck = 0, /* not used, set to 0 */

	.auto_corr_min_ofdm = 85,
	.auto_corr_min_ofdm_mrc = 170,
	.auto_corr_min_ofdm_x1 = 105,
	.auto_corr_min_ofdm_mrc_x1 = 220,

	.auto_corr_max_ofdm = 120,
	.auto_corr_max_ofdm_mrc = 210,
	.auto_corr_max_ofdm_x1 = 140,
	.auto_corr_max_ofdm_mrc_x1 = 270,

	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 200,
	.auto_corr_min_cck_mrc = 200,
	.auto_corr_max_cck_mrc = 400,

	.nrg_th_cck = 100,
	.nrg_th_ofdm = 100,

	.barker_corr_th_min = 190,
	.barker_corr_th_min_mrc = 390,
	.nrg_th_cca = 62,
};
379
380static void iwl4965_set_ct_threshold(struct iwl_priv *priv)
381{
382 /* want Kelvin */
383 priv->hw_params.ct_kill_threshold =
384 CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
385}
386
387/**
388 * iwl4965_hw_set_hw_params
389 *
390 * Called when initializing driver
391 */
392static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
393{
394 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
395 priv->cfg->mod_params->num_of_queues <= IWL49_NUM_QUEUES)
396 priv->cfg->base_params->num_of_queues =
397 priv->cfg->mod_params->num_of_queues;
398
399 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
400 priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
401 priv->hw_params.scd_bc_tbls_size =
402 priv->cfg->base_params->num_of_queues *
403 sizeof(struct iwl4965_scd_bc_tbl);
404 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
405 priv->hw_params.max_stations = IWL4965_STATION_COUNT;
406 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL4965_BROADCAST_ID;
407 priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
408 priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE;
409 priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
410 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);
411
412 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
413
414 priv->hw_params.tx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_tx_ant);
415 priv->hw_params.rx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_rx_ant);
416 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
417 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
418
419 iwl4965_set_ct_threshold(priv);
420
421 priv->hw_params.sens = &iwl4965_sensitivity;
422 priv->hw_params.beacon_time_tsf_bits = IWL4965_EXT_BEACON_TIME_POS;
423
424 return 0;
425}
426
427static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
428{
429 s32 sign = 1;
430
431 if (num < 0) {
432 sign = -sign;
433 num = -num;
434 }
435 if (denom < 0) {
436 sign = -sign;
437 denom = -denom;
438 }
439 *res = 1;
440 *res = ((num * 2 + denom) / (denom * 2)) * sign;
441
442 return 1;
443}
444
445/**
446 * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower
447 *
448 * Determines power supply voltage compensation for txpower calculations.
449 * Returns number of 1/2-dB steps to subtract from gain table index,
450 * to compensate for difference between power supply voltage during
451 * factory measurements, vs. current power supply voltage.
452 *
453 * Voltage indication is higher for lower voltage.
454 * Lower voltage requires more gain (lower gain table index).
455 */
456static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
457 s32 current_voltage)
458{
459 s32 comp = 0;
460
461 if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) ||
462 (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage))
463 return 0;
464
465 iwl4965_math_div_round(current_voltage - eeprom_voltage,
466 TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp);
467
468 if (current_voltage > eeprom_voltage)
469 comp *= 2;
470 if ((comp < -2) || (comp > 2))
471 comp = 0;
472
473 return comp;
474}
475
476static s32 iwl4965_get_tx_atten_grp(u16 channel)
477{
478 if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
479 channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
480 return CALIB_CH_GROUP_5;
481
482 if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
483 channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
484 return CALIB_CH_GROUP_1;
485
486 if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
487 channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
488 return CALIB_CH_GROUP_2;
489
490 if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
491 channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
492 return CALIB_CH_GROUP_3;
493
494 if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
495 channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
496 return CALIB_CH_GROUP_4;
497
498 return -EINVAL;
499}
500
501static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
502{
503 s32 b = -1;
504
505 for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
506 if (priv->calib_info->band_info[b].ch_from == 0)
507 continue;
508
509 if ((channel >= priv->calib_info->band_info[b].ch_from)
510 && (channel <= priv->calib_info->band_info[b].ch_to))
511 break;
512 }
513
514 return b;
515}
516
517static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
518{
519 s32 val;
520
521 if (x2 == x1)
522 return y1;
523 else {
524 iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
525 return val + y2;
526 }
527}
528
529/**
530 * iwl4965_interpolate_chan - Interpolate factory measurements for one channel
531 *
532 * Interpolates factory measurements from the two sample channels within a
533 * sub-band, to apply to channel of interest. Interpolation is proportional to
534 * differences in channel frequencies, which is proportional to differences
535 * in channel number.
536 */
537static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
538 struct iwl_eeprom_calib_ch_info *chan_info)
539{
540 s32 s = -1;
541 u32 c;
542 u32 m;
543 const struct iwl_eeprom_calib_measure *m1;
544 const struct iwl_eeprom_calib_measure *m2;
545 struct iwl_eeprom_calib_measure *omeas;
546 u32 ch_i1;
547 u32 ch_i2;
548
549 s = iwl4965_get_sub_band(priv, channel);
550 if (s >= EEPROM_TX_POWER_BANDS) {
551 IWL_ERR(priv, "Tx Power can not find channel %d\n", channel);
552 return -1;
553 }
554
555 ch_i1 = priv->calib_info->band_info[s].ch1.ch_num;
556 ch_i2 = priv->calib_info->band_info[s].ch2.ch_num;
557 chan_info->ch_num = (u8) channel;
558
559 IWL_DEBUG_TXPOWER(priv, "channel %d subband %d factory cal ch %d & %d\n",
560 channel, s, ch_i1, ch_i2);
561
562 for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
563 for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
564 m1 = &(priv->calib_info->band_info[s].ch1.
565 measurements[c][m]);
566 m2 = &(priv->calib_info->band_info[s].ch2.
567 measurements[c][m]);
568 omeas = &(chan_info->measurements[c][m]);
569
570 omeas->actual_pow =
571 (u8) iwl4965_interpolate_value(channel, ch_i1,
572 m1->actual_pow,
573 ch_i2,
574 m2->actual_pow);
575 omeas->gain_idx =
576 (u8) iwl4965_interpolate_value(channel, ch_i1,
577 m1->gain_idx, ch_i2,
578 m2->gain_idx);
579 omeas->temperature =
580 (u8) iwl4965_interpolate_value(channel, ch_i1,
581 m1->temperature,
582 ch_i2,
583 m2->temperature);
584 omeas->pa_det =
585 (s8) iwl4965_interpolate_value(channel, ch_i1,
586 m1->pa_det, ch_i2,
587 m2->pa_det);
588
589 IWL_DEBUG_TXPOWER(priv,
590 "chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
591 m1->actual_pow, m2->actual_pow, omeas->actual_pow);
592 IWL_DEBUG_TXPOWER(priv,
593 "chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
594 m1->gain_idx, m2->gain_idx, omeas->gain_idx);
595 IWL_DEBUG_TXPOWER(priv,
596 "chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
597 m1->pa_det, m2->pa_det, omeas->pa_det);
598 IWL_DEBUG_TXPOWER(priv,
599 "chain %d meas %d T1=%d T2=%d T=%d\n", c, m,
600 m1->temperature, m2->temperature,
601 omeas->temperature);
602 }
603 }
604
605 return 0;
606}
607
/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
 * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates.
 * Indexed by the same rate index used for POWER_TABLE_NUM_ENTRIES in
 * iwl4965_fill_txpower_tbl() (entry 32 is CCK). */
static s32 back_off_table[] = {
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 20 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 20 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 40 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 40 MHz */
	10			/* CCK */
};
617
/* Thermal compensation values for txpower for various frequency ranges ...
 * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust.
 * Indexed by the txatten group returned by iwl4965_get_tx_atten_grp(). */
static struct iwl4965_txpower_comp_entry {
	s32 degrees_per_05db_a;		/* numerator */
	s32 degrees_per_05db_a_denom;	/* denominator */
} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
	{9, 2},			/* group 0 5.2, ch 34-43 */
	{4, 1},			/* group 1 5.2, ch 44-70 */
	{4, 1},			/* group 2 5.2, ch 71-124 */
	{4, 1},			/* group 3 5.2, ch 125-200 */
	{3, 1}			/* group 4 2.4, ch all */
};
630
631static s32 get_min_power_index(s32 rate_power_index, u32 band)
632{
633 if (!band) {
634 if ((rate_power_index & 7) <= 4)
635 return MIN_TX_GAIN_INDEX_52GHZ_EXT;
636 }
637 return MIN_TX_GAIN_INDEX;
638}
639
/* One txpower gain setting: DSP pre-distortion attenuation plus radio
 * gain code, as written into the uCode txpower table. */
struct gain_entry {
	u8 dsp;
	u8 radio;
};

/* Gain tables, indexed as gain_table[band][power_index]: band 1 is the
 * 2.4 GHz table, band 0 the 5.2 GHz table; power_index 0 is the highest
 * txpower, 107 the lowest (iwl4965_fill_txpower_tbl() clamps to 107). */
static const struct gain_entry gain_table[2][108] = {
	/* 5.2GHz power gain index table */
	{
	 {123, 0x3F},		/* highest txpower */
	 {117, 0x3F},
	 {110, 0x3F},
	 {104, 0x3F},
	 {98, 0x3F},
	 {110, 0x3E},
	 {104, 0x3E},
	 {98, 0x3E},
	 {110, 0x3D},
	 {104, 0x3D},
	 {98, 0x3D},
	 {110, 0x3C},
	 {104, 0x3C},
	 {98, 0x3C},
	 {110, 0x3B},
	 {104, 0x3B},
	 {98, 0x3B},
	 {110, 0x3A},
	 {104, 0x3A},
	 {98, 0x3A},
	 {110, 0x39},
	 {104, 0x39},
	 {98, 0x39},
	 {110, 0x38},
	 {104, 0x38},
	 {98, 0x38},
	 {110, 0x37},
	 {104, 0x37},
	 {98, 0x37},
	 {110, 0x36},
	 {104, 0x36},
	 {98, 0x36},
	 {110, 0x35},
	 {104, 0x35},
	 {98, 0x35},
	 {110, 0x34},
	 {104, 0x34},
	 {98, 0x34},
	 {110, 0x33},
	 {104, 0x33},
	 {98, 0x33},
	 {110, 0x32},
	 {104, 0x32},
	 {98, 0x32},
	 {110, 0x31},
	 {104, 0x31},
	 {98, 0x31},
	 {110, 0x30},
	 {104, 0x30},
	 {98, 0x30},
	 {110, 0x25},
	 {104, 0x25},
	 {98, 0x25},
	 {110, 0x24},
	 {104, 0x24},
	 {98, 0x24},
	 {110, 0x23},
	 {104, 0x23},
	 {98, 0x23},
	 {110, 0x22},
	 {104, 0x18},
	 {98, 0x18},
	 {110, 0x17},
	 {104, 0x17},
	 {98, 0x17},
	 {110, 0x16},
	 {104, 0x16},
	 {98, 0x16},
	 {110, 0x15},
	 {104, 0x15},
	 {98, 0x15},
	 {110, 0x14},
	 {104, 0x14},
	 {98, 0x14},
	 {110, 0x13},
	 {104, 0x13},
	 {98, 0x13},
	 {110, 0x12},
	 {104, 0x08},
	 {98, 0x08},
	 {110, 0x07},
	 {104, 0x07},
	 {98, 0x07},
	 {110, 0x06},
	 {104, 0x06},
	 {98, 0x06},
	 {110, 0x05},
	 {104, 0x05},
	 {98, 0x05},
	 {110, 0x04},
	 {104, 0x04},
	 {98, 0x04},
	 {110, 0x03},
	 {104, 0x03},
	 {98, 0x03},
	 {110, 0x02},
	 {104, 0x02},
	 {98, 0x02},
	 {110, 0x01},
	 {104, 0x01},
	 {98, 0x01},
	 {110, 0x00},
	 {104, 0x00},
	 {98, 0x00},
	 {93, 0x00},
	 {88, 0x00},
	 {83, 0x00},
	 {78, 0x00},
	 },
	/* 2.4GHz power gain index table */
	{
	 {110, 0x3f},		/* highest txpower */
	 {104, 0x3f},
	 {98, 0x3f},
	 {110, 0x3e},
	 {104, 0x3e},
	 {98, 0x3e},
	 {110, 0x3d},
	 {104, 0x3d},
	 {98, 0x3d},
	 {110, 0x3c},
	 {104, 0x3c},
	 {98, 0x3c},
	 {110, 0x3b},
	 {104, 0x3b},
	 {98, 0x3b},
	 {110, 0x3a},
	 {104, 0x3a},
	 {98, 0x3a},
	 {110, 0x39},
	 {104, 0x39},
	 {98, 0x39},
	 {110, 0x38},
	 {104, 0x38},
	 {98, 0x38},
	 {110, 0x37},
	 {104, 0x37},
	 {98, 0x37},
	 {110, 0x36},
	 {104, 0x36},
	 {98, 0x36},
	 {110, 0x35},
	 {104, 0x35},
	 {98, 0x35},
	 {110, 0x34},
	 {104, 0x34},
	 {98, 0x34},
	 {110, 0x33},
	 {104, 0x33},
	 {98, 0x33},
	 {110, 0x32},
	 {104, 0x32},
	 {98, 0x32},
	 {110, 0x31},
	 {104, 0x31},
	 {98, 0x31},
	 {110, 0x30},
	 {104, 0x30},
	 {98, 0x30},
	 {110, 0x6},
	 {104, 0x6},
	 {98, 0x6},
	 {110, 0x5},
	 {104, 0x5},
	 {98, 0x5},
	 {110, 0x4},
	 {104, 0x4},
	 {98, 0x4},
	 {110, 0x3},
	 {104, 0x3},
	 {98, 0x3},
	 {110, 0x2},
	 {104, 0x2},
	 {98, 0x2},
	 {110, 0x1},
	 {104, 0x1},
	 {98, 0x1},
	 {110, 0x0},
	 {104, 0x0},
	 {98, 0x0},
	 {97, 0},
	 {96, 0},
	 {95, 0},
	 {94, 0},
	 {93, 0},
	 {92, 0},
	 {91, 0},
	 {90, 0},
	 {89, 0},
	 {88, 0},
	 {87, 0},
	 {86, 0},
	 {85, 0},
	 {84, 0},
	 {83, 0},
	 {82, 0},
	 {81, 0},
	 {80, 0},
	 {79, 0},
	 {78, 0},
	 {77, 0},
	 {76, 0},
	 {75, 0},
	 {74, 0},
	 {73, 0},
	 {72, 0},
	 {71, 0},
	 {70, 0},
	 {69, 0},
	 {68, 0},
	 {67, 0},
	 {66, 0},
	 {65, 0},
	 {64, 0},
	 {63, 0},
	 {62, 0},
	 {61, 0},
	 {60, 0},
	 {59, 0},
	 }
};
869
870static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
871 u8 is_ht40, u8 ctrl_chan_high,
872 struct iwl4965_tx_power_db *tx_power_tbl)
873{
874 u8 saturation_power;
875 s32 target_power;
876 s32 user_target_power;
877 s32 power_limit;
878 s32 current_temp;
879 s32 reg_limit;
880 s32 current_regulatory;
881 s32 txatten_grp = CALIB_CH_GROUP_MAX;
882 int i;
883 int c;
884 const struct iwl_channel_info *ch_info = NULL;
885 struct iwl_eeprom_calib_ch_info ch_eeprom_info;
886 const struct iwl_eeprom_calib_measure *measurement;
887 s16 voltage;
888 s32 init_voltage;
889 s32 voltage_compensation;
890 s32 degrees_per_05db_num;
891 s32 degrees_per_05db_denom;
892 s32 factory_temp;
893 s32 temperature_comp[2];
894 s32 factory_gain_index[2];
895 s32 factory_actual_pwr[2];
896 s32 power_index;
897
898 /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units
899 * are used for indexing into txpower table) */
900 user_target_power = 2 * priv->tx_power_user_lmt;
901
902 /* Get current (RXON) channel, band, width */
903 IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band,
904 is_ht40);
905
906 ch_info = iwl_legacy_get_channel_info(priv, priv->band, channel);
907
908 if (!iwl_legacy_is_channel_valid(ch_info))
909 return -EINVAL;
910
911 /* get txatten group, used to select 1) thermal txpower adjustment
912 * and 2) mimo txpower balance between Tx chains. */
913 txatten_grp = iwl4965_get_tx_atten_grp(channel);
914 if (txatten_grp < 0) {
915 IWL_ERR(priv, "Can't find txatten group for channel %d.\n",
916 channel);
917 return txatten_grp;
918 }
919
920 IWL_DEBUG_TXPOWER(priv, "channel %d belongs to txatten group %d\n",
921 channel, txatten_grp);
922
923 if (is_ht40) {
924 if (ctrl_chan_high)
925 channel -= 2;
926 else
927 channel += 2;
928 }
929
930 /* hardware txpower limits ...
931 * saturation (clipping distortion) txpowers are in half-dBm */
932 if (band)
933 saturation_power = priv->calib_info->saturation_power24;
934 else
935 saturation_power = priv->calib_info->saturation_power52;
936
937 if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
938 saturation_power > IWL_TX_POWER_SATURATION_MAX) {
939 if (band)
940 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
941 else
942 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
943 }
944
945 /* regulatory txpower limits ... reg_limit values are in half-dBm,
946 * max_power_avg values are in dBm, convert * 2 */
947 if (is_ht40)
948 reg_limit = ch_info->ht40_max_power_avg * 2;
949 else
950 reg_limit = ch_info->max_power_avg * 2;
951
952 if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
953 (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
954 if (band)
955 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
956 else
957 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
958 }
959
960 /* Interpolate txpower calibration values for this channel,
961 * based on factory calibration tests on spaced channels. */
962 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
963
964 /* calculate tx gain adjustment based on power supply voltage */
965 voltage = le16_to_cpu(priv->calib_info->voltage);
966 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
967 voltage_compensation =
968 iwl4965_get_voltage_compensation(voltage, init_voltage);
969
970 IWL_DEBUG_TXPOWER(priv, "curr volt %d eeprom volt %d volt comp %d\n",
971 init_voltage,
972 voltage, voltage_compensation);
973
974 /* get current temperature (Celsius) */
975 current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
976 current_temp = min(priv->temperature, IWL_TX_POWER_TEMPERATURE_MAX);
977 current_temp = KELVIN_TO_CELSIUS(current_temp);
978
979 /* select thermal txpower adjustment params, based on channel group
980 * (same frequency group used for mimo txatten adjustment) */
981 degrees_per_05db_num =
982 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
983 degrees_per_05db_denom =
984 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
985
986 /* get per-chain txpower values from factory measurements */
987 for (c = 0; c < 2; c++) {
988 measurement = &ch_eeprom_info.measurements[c][1];
989
990 /* txgain adjustment (in half-dB steps) based on difference
991 * between factory and current temperature */
992 factory_temp = measurement->temperature;
993 iwl4965_math_div_round((current_temp - factory_temp) *
994 degrees_per_05db_denom,
995 degrees_per_05db_num,
996 &temperature_comp[c]);
997
998 factory_gain_index[c] = measurement->gain_idx;
999 factory_actual_pwr[c] = measurement->actual_pow;
1000
1001 IWL_DEBUG_TXPOWER(priv, "chain = %d\n", c);
1002 IWL_DEBUG_TXPOWER(priv, "fctry tmp %d, "
1003 "curr tmp %d, comp %d steps\n",
1004 factory_temp, current_temp,
1005 temperature_comp[c]);
1006
1007 IWL_DEBUG_TXPOWER(priv, "fctry idx %d, fctry pwr %d\n",
1008 factory_gain_index[c],
1009 factory_actual_pwr[c]);
1010 }
1011
1012 /* for each of 33 bit-rates (including 1 for CCK) */
1013 for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
1014 u8 is_mimo_rate;
1015 union iwl4965_tx_power_dual_stream tx_power;
1016
1017 /* for mimo, reduce each chain's txpower by half
1018 * (3dB, 6 steps), so total output power is regulatory
1019 * compliant. */
1020 if (i & 0x8) {
1021 current_regulatory = reg_limit -
1022 IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
1023 is_mimo_rate = 1;
1024 } else {
1025 current_regulatory = reg_limit;
1026 is_mimo_rate = 0;
1027 }
1028
1029 /* find txpower limit, either hardware or regulatory */
1030 power_limit = saturation_power - back_off_table[i];
1031 if (power_limit > current_regulatory)
1032 power_limit = current_regulatory;
1033
1034 /* reduce user's txpower request if necessary
1035 * for this rate on this channel */
1036 target_power = user_target_power;
1037 if (target_power > power_limit)
1038 target_power = power_limit;
1039
1040 IWL_DEBUG_TXPOWER(priv, "rate %d sat %d reg %d usr %d tgt %d\n",
1041 i, saturation_power - back_off_table[i],
1042 current_regulatory, user_target_power,
1043 target_power);
1044
1045 /* for each of 2 Tx chains (radio transmitters) */
1046 for (c = 0; c < 2; c++) {
1047 s32 atten_value;
1048
1049 if (is_mimo_rate)
1050 atten_value =
1051 (s32)le32_to_cpu(priv->card_alive_init.
1052 tx_atten[txatten_grp][c]);
1053 else
1054 atten_value = 0;
1055
1056 /* calculate index; higher index means lower txpower */
1057 power_index = (u8) (factory_gain_index[c] -
1058 (target_power -
1059 factory_actual_pwr[c]) -
1060 temperature_comp[c] -
1061 voltage_compensation +
1062 atten_value);
1063
1064/* IWL_DEBUG_TXPOWER(priv, "calculated txpower index %d\n",
1065 power_index); */
1066
1067 if (power_index < get_min_power_index(i, band))
1068 power_index = get_min_power_index(i, band);
1069
1070 /* adjust 5 GHz index to support negative indexes */
1071 if (!band)
1072 power_index += 9;
1073
1074 /* CCK, rate 32, reduce txpower for CCK */
1075 if (i == POWER_TABLE_CCK_ENTRY)
1076 power_index +=
1077 IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
1078
1079 /* stay within the table! */
1080 if (power_index > 107) {
1081 IWL_WARN(priv, "txpower index %d > 107\n",
1082 power_index);
1083 power_index = 107;
1084 }
1085 if (power_index < 0) {
1086 IWL_WARN(priv, "txpower index %d < 0\n",
1087 power_index);
1088 power_index = 0;
1089 }
1090
1091 /* fill txpower command for this rate/chain */
1092 tx_power.s.radio_tx_gain[c] =
1093 gain_table[band][power_index].radio;
1094 tx_power.s.dsp_predis_atten[c] =
1095 gain_table[band][power_index].dsp;
1096
1097 IWL_DEBUG_TXPOWER(priv, "chain %d mimo %d index %d "
1098 "gain 0x%02x dsp %d\n",
1099 c, atten_value, power_index,
1100 tx_power.s.radio_tx_gain[c],
1101 tx_power.s.dsp_predis_atten[c]);
1102 } /* for each chain */
1103
1104 tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
1105
1106 } /* for each rate */
1107
1108 return 0;
1109}
1110
1111/**
1112 * iwl4965_send_tx_power - Configure the TXPOWER level user limit
1113 *
1114 * Uses the active RXON for channel, band, and characteristics (ht40, high)
1115 * The power limit is taken from priv->tx_power_user_lmt.
1116 */
1117static int iwl4965_send_tx_power(struct iwl_priv *priv)
1118{
1119 struct iwl4965_txpowertable_cmd cmd = { 0 };
1120 int ret;
1121 u8 band = 0;
1122 bool is_ht40 = false;
1123 u8 ctrl_chan_high = 0;
1124 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1125
1126 if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
1127 "TX Power requested while scanning!\n"))
1128 return -EAGAIN;
1129
1130 band = priv->band == IEEE80211_BAND_2GHZ;
1131
1132 is_ht40 = iw4965_is_ht40_channel(ctx->active.flags);
1133
1134 if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
1135 ctrl_chan_high = 1;
1136
1137 cmd.band = band;
1138 cmd.channel = ctx->active.channel;
1139
1140 ret = iwl4965_fill_txpower_tbl(priv, band,
1141 le16_to_cpu(ctx->active.channel),
1142 is_ht40, ctrl_chan_high, &cmd.tx_power);
1143 if (ret)
1144 goto out;
1145
1146 ret = iwl_legacy_send_cmd_pdu(priv,
1147 REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
1148
1149out:
1150 return ret;
1151}
1152
1153static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
1154 struct iwl_rxon_context *ctx)
1155{
1156 int ret = 0;
1157 struct iwl4965_rxon_assoc_cmd rxon_assoc;
1158 const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
1159 const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
1160
1161 if ((rxon1->flags == rxon2->flags) &&
1162 (rxon1->filter_flags == rxon2->filter_flags) &&
1163 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1164 (rxon1->ofdm_ht_single_stream_basic_rates ==
1165 rxon2->ofdm_ht_single_stream_basic_rates) &&
1166 (rxon1->ofdm_ht_dual_stream_basic_rates ==
1167 rxon2->ofdm_ht_dual_stream_basic_rates) &&
1168 (rxon1->rx_chain == rxon2->rx_chain) &&
1169 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1170 IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
1171 return 0;
1172 }
1173
1174 rxon_assoc.flags = ctx->staging.flags;
1175 rxon_assoc.filter_flags = ctx->staging.filter_flags;
1176 rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
1177 rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
1178 rxon_assoc.reserved = 0;
1179 rxon_assoc.ofdm_ht_single_stream_basic_rates =
1180 ctx->staging.ofdm_ht_single_stream_basic_rates;
1181 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
1182 ctx->staging.ofdm_ht_dual_stream_basic_rates;
1183 rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
1184
1185 ret = iwl_legacy_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
1186 sizeof(rxon_assoc), &rxon_assoc, NULL);
1187
1188 return ret;
1189}
1190
/**
 * iwl4965_commit_rxon - commit staging RXON to the hardware
 *
 * Pushes ctx->staging to uCode and mirrors it into ctx->active on
 * success.  Sends only a lightweight RXON_ASSOC when a full RXON is
 * not required.  The command/station-restore ordering below matters:
 * an unassociated RXON clears the uCode station table, so stations and
 * WEP keys must be restored after it completes.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	/* cast away the const for active_rxon in this function */
	struct iwl_legacy_rxon_cmd *active_rxon = (void *)&ctx->active;
	int ret;
	bool new_assoc =
		!!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);

	if (!iwl_legacy_is_alive(priv))
		return -EBUSY;

	if (!ctx->is_active)
		return 0;

	/* always get timestamp with Rx frame */
	ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;

	ret = iwl_legacy_check_rxon_cmd(priv, ctx);
	if (ret) {
		IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
		return -EINVAL;
	}

	/*
	 * receive commit_rxon request
	 * abort any previous channel switch if still in process
	 */
	if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
	    (priv->switch_channel != ctx->staging.channel)) {
		IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
			le16_to_cpu(priv->switch_channel));
		iwl_legacy_chswitch_done(priv, false);
	}

	/* If we don't need to send a full RXON, we can use
	 * iwl_rxon_assoc_cmd which is used to reconfigure filter
	 * and other flags for the current radio configuration. */
	if (!iwl_legacy_full_rxon_required(priv, ctx)) {
		ret = iwl_legacy_send_rxon_assoc(priv, ctx);
		if (ret) {
			IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
			return ret;
		}

		memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
		iwl_legacy_print_rx_config_cmd(priv, ctx);
		/*
		 * We do not commit tx power settings while channel changing,
		 * do it now if tx power changed.
		 */
		iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
		return 0;
	}

	/* If we are currently associated and the new config requires
	 * an RXON_ASSOC and the new config wants the associated mask enabled,
	 * we must clear the associated from the active configuration
	 * before we apply the new config */
	if (iwl_legacy_is_associated_ctx(ctx) && new_assoc) {
		IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;

		ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
				       sizeof(struct iwl_legacy_rxon_cmd),
				       active_rxon);

		/* If the mask clearing failed then we set
		 * active_rxon back to what it was previously */
		if (ret) {
			active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
			IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
			return ret;
		}
		/* unassoc RXON cleared the uCode station table: restore */
		iwl_legacy_clear_ucode_stations(priv, ctx);
		iwl_legacy_restore_stations(priv, ctx);
		ret = iwl4965_restore_default_wep_keys(priv, ctx);
		if (ret) {
			IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
			return ret;
		}
	}

	IWL_DEBUG_INFO(priv, "Sending RXON\n"
		       "* with%s RXON_FILTER_ASSOC_MSK\n"
		       "* channel = %d\n"
		       "* bssid = %pM\n",
		       (new_assoc ? "" : "out"),
		       le16_to_cpu(ctx->staging.channel),
		       ctx->staging.bssid_addr);

	iwl_legacy_set_rxon_hwcrypto(priv, ctx,
				!priv->cfg->mod_params->sw_crypto);

	/* Apply the new configuration
	 * RXON unassoc clears the station table in uCode so restoration of
	 * stations is needed after it (the RXON command) completes
	 */
	if (!new_assoc) {
		ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
			      sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
		if (ret) {
			IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
			return ret;
		}
		IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
		memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
		iwl_legacy_clear_ucode_stations(priv, ctx);
		iwl_legacy_restore_stations(priv, ctx);
		ret = iwl4965_restore_default_wep_keys(priv, ctx);
		if (ret) {
			IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
			return ret;
		}
	}
	if (new_assoc) {
		priv->start_calib = 0;
		/* Apply the new configuration
		 * RXON assoc doesn't clear the station table in uCode,
		 */
		ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
			      sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
		if (ret) {
			IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
			return ret;
		}
		memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
	}
	iwl_legacy_print_rx_config_cmd(priv, ctx);

	iwl4965_init_sensitivity(priv);

	/* If we issue a new RXON command which required a tune then we must
	 * send a new TXPOWER command or we won't be able to Tx any frames */
	ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
	if (ret) {
		IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
		return ret;
	}

	return 0;
}
1332
/**
 * iwl4965_hw_channel_switch - build and send a REPLY_CHANNEL_SWITCH command
 * @priv: driver private data
 * @ch_switch: mac80211 channel-switch request (target channel, count, TSF)
 *
 * Converts the mac80211 channel-switch request into the uCode's
 * channel-switch command: picks the band/HT40 flags from the staging RXON,
 * computes the uCode-time instant at which to switch (beacon-count based,
 * corrected against the TSF), and fills in the per-channel TX power table.
 *
 * Returns 0 on success, -EFAULT for an unknown target channel, or the
 * error from building the txpower table / sending the host command.
 */
static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
			  struct ieee80211_channel_switch *ch_switch)
{
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	int rc;
	u8 band = 0;
	bool is_ht40 = false;
	u8 ctrl_chan_high = 0;
	struct iwl4965_channel_switch_cmd cmd;
	const struct iwl_channel_info *ch_info;
	u32 switch_time_in_usec, ucode_switch_time;
	u16 ch;
	u32 tsf_low;
	u8 switch_count;
	u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
	struct ieee80211_vif *vif = ctx->vif;
	/* uCode band flag: 1 when operating on 2.4 GHz, 0 otherwise */
	band = priv->band == IEEE80211_BAND_2GHZ;

	is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags);

	/* In HT40, note whether the control channel sits above the
	 * extension channel — needed for the txpower table below. */
	if (is_ht40 &&
	    (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
		ctrl_chan_high = 1;

	cmd.band = band;
	cmd.expect_beacon = 0;
	ch = ch_switch->channel->hw_value;
	cmd.channel = cpu_to_le16(ch);
	/* Carry current RXON flags along so uCode keeps the same mode */
	cmd.rxon_flags = ctx->staging.flags;
	cmd.rxon_filter_flags = ctx->staging.filter_flags;
	switch_count = ch_switch->count;
	/* Only the low 32 bits of the TSF matter for the beacon math */
	tsf_low = ch_switch->timestamp & 0x0ffffffff;
	/*
	 * calculate the ucode channel switch time
	 * adding TSF as one of the factor for when to switch
	 */
	if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
		/* Subtract the beacons that already elapsed between the
		 * TSF snapshot and the uCode's notion of "now" */
		if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
		    beacon_interval)) {
			switch_count -= (priv->ucode_beacon_time -
				tsf_low) / beacon_interval;
		} else
			switch_count = 0;
	}
	if (switch_count <= 1)
		/* Switch at (or effectively immediately after) the next
		 * beacon: use the current uCode beacon time as-is */
		cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
	else {
		switch_time_in_usec =
			vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
		ucode_switch_time = iwl_legacy_usecs_to_beacons(priv,
						      switch_time_in_usec,
						      beacon_interval);
		cmd.switch_time = iwl_legacy_add_beacon_time(priv,
					      priv->ucode_beacon_time,
					      ucode_switch_time,
					      beacon_interval);
	}
	IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
		      cmd.switch_time);
	ch_info = iwl_legacy_get_channel_info(priv, priv->band, ch);
	if (ch_info)
		/* On radar (DFS) channels the uCode must wait for a beacon
		 * before transmitting on the new channel */
		cmd.expect_beacon = iwl_legacy_is_channel_radar(ch_info);
	else {
		IWL_ERR(priv, "invalid channel switch from %u to %u\n",
			ctx->active.channel, ch);
		return -EFAULT;
	}

	rc = iwl4965_fill_txpower_tbl(priv, band, ch, is_ht40,
				      ctrl_chan_high, &cmd.tx_power);
	if (rc) {
		IWL_DEBUG_11H(priv, "error:%d fill txpower_tbl\n", rc);
		return rc;
	}

	return iwl_legacy_send_cmd_pdu(priv,
			 REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
}
1411
1412/**
1413 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
1414 */
1415static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
1416 struct iwl_tx_queue *txq,
1417 u16 byte_cnt)
1418{
1419 struct iwl4965_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
1420 int txq_id = txq->q.id;
1421 int write_ptr = txq->q.write_ptr;
1422 int len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
1423 __le16 bc_ent;
1424
1425 WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
1426
1427 bc_ent = cpu_to_le16(len & 0xFFF);
1428 /* Set up byte count within first 256 entries */
1429 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
1430
1431 /* If within first 64 entries, duplicate at end */
1432 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
1433 scd_bc_tbl[txq_id].
1434 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
1435}
1436
1437/**
1438 * iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
1439 * @statistics: Provides the temperature reading from the uCode
1440 *
1441 * A return of <0 indicates bogus data in the statistics
1442 */
1443static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
1444{
1445 s32 temperature;
1446 s32 vt;
1447 s32 R1, R2, R3;
1448 u32 R4;
1449
1450 if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
1451 (priv->_4965.statistics.flag &
1452 STATISTICS_REPLY_FLG_HT40_MODE_MSK)) {
1453 IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n");
1454 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
1455 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
1456 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
1457 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
1458 } else {
1459 IWL_DEBUG_TEMP(priv, "Running temperature calibration\n");
1460 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
1461 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
1462 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
1463 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]);
1464 }
1465
1466 /*
1467 * Temperature is only 23 bits, so sign extend out to 32.
1468 *
1469 * NOTE If we haven't received a statistics notification yet
1470 * with an updated temperature, use R4 provided to us in the
1471 * "initialize" ALIVE response.
1472 */
1473 if (!test_bit(STATUS_TEMPERATURE, &priv->status))
1474 vt = sign_extend32(R4, 23);
1475 else
1476 vt = sign_extend32(le32_to_cpu(priv->_4965.statistics.
1477 general.common.temperature), 23);
1478
1479 IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
1480
1481 if (R3 == R1) {
1482 IWL_ERR(priv, "Calibration conflict R1 == R3\n");
1483 return -1;
1484 }
1485
1486 /* Calculate temperature in degrees Kelvin, adjust by 97%.
1487 * Add offset to center the adjustment around 0 degrees Centigrade. */
1488 temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
1489 temperature /= (R3 - R1);
1490 temperature = (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
1491
1492 IWL_DEBUG_TEMP(priv, "Calibrated temperature: %dK, %dC\n",
1493 temperature, KELVIN_TO_CELSIUS(temperature));
1494
1495 return temperature;
1496}
1497
/* Adjust Txpower only if temperature variance is greater than threshold.
 * Threshold is in Kelvin (equivalently, degrees Celsius of change). */
#define IWL_TEMPERATURE_THRESHOLD   3
1500
1501/**
1502 * iwl4965_is_temp_calib_needed - determines if new calibration is needed
1503 *
1504 * If the temperature changed has changed sufficiently, then a recalibration
1505 * is needed.
1506 *
1507 * Assumes caller will replace priv->last_temperature once calibration
1508 * executed.
1509 */
1510static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
1511{
1512 int temp_diff;
1513
1514 if (!test_bit(STATUS_STATISTICS, &priv->status)) {
1515 IWL_DEBUG_TEMP(priv, "Temperature not updated -- no statistics.\n");
1516 return 0;
1517 }
1518
1519 temp_diff = priv->temperature - priv->last_temperature;
1520
1521 /* get absolute value */
1522 if (temp_diff < 0) {
1523 IWL_DEBUG_POWER(priv, "Getting cooler, delta %d\n", temp_diff);
1524 temp_diff = -temp_diff;
1525 } else if (temp_diff == 0)
1526 IWL_DEBUG_POWER(priv, "Temperature unchanged\n");
1527 else
1528 IWL_DEBUG_POWER(priv, "Getting warmer, delta %d\n", temp_diff);
1529
1530 if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
1531 IWL_DEBUG_POWER(priv, " => thermal txpower calib not needed\n");
1532 return 0;
1533 }
1534
1535 IWL_DEBUG_POWER(priv, " => thermal txpower calib needed\n");
1536
1537 return 1;
1538}
1539
1540static void iwl4965_temperature_calib(struct iwl_priv *priv)
1541{
1542 s32 temp;
1543
1544 temp = iwl4965_hw_get_temperature(priv);
1545 if (IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
1546 return;
1547
1548 if (priv->temperature != temp) {
1549 if (priv->temperature)
1550 IWL_DEBUG_TEMP(priv, "Temperature changed "
1551 "from %dC to %dC\n",
1552 KELVIN_TO_CELSIUS(priv->temperature),
1553 KELVIN_TO_CELSIUS(temp));
1554 else
1555 IWL_DEBUG_TEMP(priv, "Temperature "
1556 "initialized to %dC\n",
1557 KELVIN_TO_CELSIUS(temp));
1558 }
1559
1560 priv->temperature = temp;
1561 set_bit(STATUS_TEMPERATURE, &priv->status);
1562
1563 if (!priv->disable_tx_power_cal &&
1564 unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
1565 iwl4965_is_temp_calib_needed(priv))
1566 queue_work(priv->workqueue, &priv->txpower_work);
1567}
1568
1569static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
1570{
1571 switch (cmd_id) {
1572 case REPLY_RXON:
1573 return (u16) sizeof(struct iwl4965_rxon_cmd);
1574 default:
1575 return len;
1576 }
1577}
1578
1579static u16 iwl4965_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
1580 u8 *data)
1581{
1582 struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
1583 addsta->mode = cmd->mode;
1584 memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
1585 memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
1586 addsta->station_flags = cmd->station_flags;
1587 addsta->station_flags_msk = cmd->station_flags_msk;
1588 addsta->tid_disable_tx = cmd->tid_disable_tx;
1589 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
1590 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
1591 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
1592 addsta->sleep_tx_count = cmd->sleep_tx_count;
1593 addsta->reserved1 = cpu_to_le16(0);
1594 addsta->reserved2 = cpu_to_le16(0);
1595
1596 return (u16)sizeof(struct iwl4965_addsta_cmd);
1597}
1598
/* Extract the scheduler SSN that follows the per-frame status entries in
 * a Tx response: the word at u.status + frame_count, masked to MAX_SN. */
static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
{
	return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
}
1603
1604/**
1605 * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
1606 */
1607static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
1608 struct iwl_ht_agg *agg,
1609 struct iwl4965_tx_resp *tx_resp,
1610 int txq_id, u16 start_idx)
1611{
1612 u16 status;
1613 struct agg_tx_status *frame_status = tx_resp->u.agg_status;
1614 struct ieee80211_tx_info *info = NULL;
1615 struct ieee80211_hdr *hdr = NULL;
1616 u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
1617 int i, sh, idx;
1618 u16 seq;
1619 if (agg->wait_for_ba)
1620 IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
1621
1622 agg->frame_count = tx_resp->frame_count;
1623 agg->start_idx = start_idx;
1624 agg->rate_n_flags = rate_n_flags;
1625 agg->bitmap = 0;
1626
1627 /* num frames attempted by Tx command */
1628 if (agg->frame_count == 1) {
1629 /* Only one frame was attempted; no block-ack will arrive */
1630 status = le16_to_cpu(frame_status[0].status);
1631 idx = start_idx;
1632
1633 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
1634 agg->frame_count, agg->start_idx, idx);
1635
1636 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
1637 info->status.rates[0].count = tx_resp->failure_frame + 1;
1638 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
1639 info->flags |= iwl4965_tx_status_to_mac80211(status);
1640 iwl4965_hwrate_to_tx_control(priv, rate_n_flags, info);
1641
1642 IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
1643 status & 0xff, tx_resp->failure_frame);
1644 IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
1645
1646 agg->wait_for_ba = 0;
1647 } else {
1648 /* Two or more frames were attempted; expect block-ack */
1649 u64 bitmap = 0;
1650 int start = agg->start_idx;
1651
1652 /* Construct bit-map of pending frames within Tx window */
1653 for (i = 0; i < agg->frame_count; i++) {
1654 u16 sc;
1655 status = le16_to_cpu(frame_status[i].status);
1656 seq = le16_to_cpu(frame_status[i].sequence);
1657 idx = SEQ_TO_INDEX(seq);
1658 txq_id = SEQ_TO_QUEUE(seq);
1659
1660 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
1661 AGG_TX_STATE_ABORT_MSK))
1662 continue;
1663
1664 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
1665 agg->frame_count, txq_id, idx);
1666
1667 hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, idx);
1668 if (!hdr) {
1669 IWL_ERR(priv,
1670 "BUG_ON idx doesn't point to valid skb"
1671 " idx=%d, txq_id=%d\n", idx, txq_id);
1672 return -1;
1673 }
1674
1675 sc = le16_to_cpu(hdr->seq_ctrl);
1676 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
1677 IWL_ERR(priv,
1678 "BUG_ON idx doesn't match seq control"
1679 " idx=%d, seq_idx=%d, seq=%d\n",
1680 idx, SEQ_TO_SN(sc), hdr->seq_ctrl);
1681 return -1;
1682 }
1683
1684 IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
1685 i, idx, SEQ_TO_SN(sc));
1686
1687 sh = idx - start;
1688 if (sh > 64) {
1689 sh = (start - idx) + 0xff;
1690 bitmap = bitmap << sh;
1691 sh = 0;
1692 start = idx;
1693 } else if (sh < -64)
1694 sh = 0xff - (start - idx);
1695 else if (sh < 0) {
1696 sh = start - idx;
1697 start = idx;
1698 bitmap = bitmap << sh;
1699 sh = 0;
1700 }
1701 bitmap |= 1ULL << sh;
1702 IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
1703 start, (unsigned long long)bitmap);
1704 }
1705
1706 agg->bitmap = bitmap;
1707 agg->start_idx = start;
1708 IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
1709 agg->frame_count, agg->start_idx,
1710 (unsigned long long)agg->bitmap);
1711
1712 if (bitmap)
1713 agg->wait_for_ba = 1;
1714 }
1715 return 0;
1716}
1717
/*
 * Look up the driver's station table entry for @addr.
 *
 * Broadcast addresses map straight to the BSS context's broadcast
 * station.  In IBSS mode the search skips the fixed low entries
 * (starts at IWL_STA_ID).  Returns the station index, or
 * IWL_INVALID_STATION when the address is unknown or the entry is not
 * yet fully active in the uCode.
 */
static u8 iwl4965_find_station(struct iwl_priv *priv, const u8 *addr)
{
	int i;
	int start = 0;
	int ret = IWL_INVALID_STATION;
	unsigned long flags;

	if ((priv->iw_mode == NL80211_IFTYPE_ADHOC))
		start = IWL_STA_ID;

	if (is_broadcast_ether_addr(addr))
		return priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;

	/* Table walk and the "ready" check below must be atomic w.r.t.
	 * station add/remove, hence the sta_lock */
	spin_lock_irqsave(&priv->sta_lock, flags);
	for (i = start; i < priv->hw_params.max_stations; i++)
		if (priv->stations[i].used &&
		    (!compare_ether_addr(priv->stations[i].sta.sta.addr,
					 addr))) {
			ret = i;
			goto out;
		}

	IWL_DEBUG_ASSOC_LIMIT(priv, "can not find STA %pM total %d\n",
			      addr, priv->num_stations);

 out:
	/*
	 * It may be possible that more commands interacting with stations
	 * arrive before we completed processing the adding of
	 * station
	 */
	if (ret != IWL_INVALID_STATION &&
	    (!(priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) ||
	     ((priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) &&
	      (priv->stations[ret].used & IWL_STA_UCODE_INPROGRESS)))) {
		IWL_ERR(priv, "Requested station info for sta %d before ready.\n",
			ret);
		ret = IWL_INVALID_STATION;
	}
	spin_unlock_irqrestore(&priv->sta_lock, flags);
	return ret;
}
1760
1761static int iwl4965_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
1762{
1763 if (priv->iw_mode == NL80211_IFTYPE_STATION) {
1764 return IWL_AP_ID;
1765 } else {
1766 u8 *da = ieee80211_get_DA(hdr);
1767 return iwl4965_find_station(priv, da);
1768 }
1769}
1770
1771/**
1772 * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
1773 */
1774static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
1775 struct iwl_rx_mem_buffer *rxb)
1776{
1777 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1778 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1779 int txq_id = SEQ_TO_QUEUE(sequence);
1780 int index = SEQ_TO_INDEX(sequence);
1781 struct iwl_tx_queue *txq = &priv->txq[txq_id];
1782 struct ieee80211_hdr *hdr;
1783 struct ieee80211_tx_info *info;
1784 struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
1785 u32 status = le32_to_cpu(tx_resp->u.status);
1786 int uninitialized_var(tid);
1787 int sta_id;
1788 int freed;
1789 u8 *qc = NULL;
1790 unsigned long flags;
1791
1792 if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
1793 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
1794 "is out of range [0-%d] %d %d\n", txq_id,
1795 index, txq->q.n_bd, txq->q.write_ptr,
1796 txq->q.read_ptr);
1797 return;
1798 }
1799
1800 txq->time_stamp = jiffies;
1801 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
1802 memset(&info->status, 0, sizeof(info->status));
1803
1804 hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, index);
1805 if (ieee80211_is_data_qos(hdr->frame_control)) {
1806 qc = ieee80211_get_qos_ctl(hdr);
1807 tid = qc[0] & 0xf;
1808 }
1809
1810 sta_id = iwl4965_get_ra_sta_id(priv, hdr);
1811 if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
1812 IWL_ERR(priv, "Station not known\n");
1813 return;
1814 }
1815
1816 spin_lock_irqsave(&priv->sta_lock, flags);
1817 if (txq->sched_retry) {
1818 const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
1819 struct iwl_ht_agg *agg = NULL;
1820 WARN_ON(!qc);
1821
1822 agg = &priv->stations[sta_id].tid[tid].agg;
1823
1824 iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
1825
1826 /* check if BAR is needed */
1827 if ((tx_resp->frame_count == 1) && !iwl4965_is_tx_success(status))
1828 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1829
1830 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
1831 index = iwl_legacy_queue_dec_wrap(scd_ssn & 0xff,
1832 txq->q.n_bd);
1833 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
1834 "%d index %d\n", scd_ssn , index);
1835 freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
1836 if (qc)
1837 iwl4965_free_tfds_in_queue(priv, sta_id,
1838 tid, freed);
1839
1840 if (priv->mac80211_registered &&
1841 (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark)
1842 && (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
1843 iwl_legacy_wake_queue(priv, txq);
1844 }
1845 } else {
1846 info->status.rates[0].count = tx_resp->failure_frame + 1;
1847 info->flags |= iwl4965_tx_status_to_mac80211(status);
1848 iwl4965_hwrate_to_tx_control(priv,
1849 le32_to_cpu(tx_resp->rate_n_flags),
1850 info);
1851
1852 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) "
1853 "rate_n_flags 0x%x retries %d\n",
1854 txq_id,
1855 iwl4965_get_tx_fail_reason(status), status,
1856 le32_to_cpu(tx_resp->rate_n_flags),
1857 tx_resp->failure_frame);
1858
1859 freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
1860 if (qc && likely(sta_id != IWL_INVALID_STATION))
1861 iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
1862 else if (sta_id == IWL_INVALID_STATION)
1863 IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
1864
1865 if (priv->mac80211_registered &&
1866 (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark))
1867 iwl_legacy_wake_queue(priv, txq);
1868 }
1869 if (qc && likely(sta_id != IWL_INVALID_STATION))
1870 iwl4965_txq_check_empty(priv, sta_id, tid, txq_id);
1871
1872 iwl4965_check_abort_status(priv, tx_resp->frame_count, status);
1873
1874 spin_unlock_irqrestore(&priv->sta_lock, flags);
1875}
1876
/*
 * Handle a BEACON_NOTIFICATION from the uCode: log the beacon Tx status
 * and record whether this station is currently the IBSS beacon manager.
 */
static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
				    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl4965_beacon_notif *beacon = (void *)pkt->u.raw;
	/* __maybe_unused: only consumed by the debug macro below, which
	 * compiles out when debugging is disabled */
	u8 rate __maybe_unused =
		iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
		"tsf:0x%.8x%.8x rate:%d\n",
		le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
		beacon->beacon_notify_hdr.failure_frame,
		le32_to_cpu(beacon->ibss_mgr_status),
		le32_to_cpu(beacon->high_tsf),
		le32_to_cpu(beacon->low_tsf), rate);

	priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
}
1895
/* Set up 4965-specific Rx frame reply handlers; generic handlers are
 * installed elsewhere, these entries override/extend them. */
static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
{
	/* Legacy Rx frames */
	priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx;
	/* Tx response */
	priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
	priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
}
1905
/* 4965 host-command callbacks used by the shared iwlegacy core. */
static struct iwl_hcmd_ops iwl4965_hcmd = {
	.rxon_assoc = iwl4965_send_rxon_assoc,
	.commit_rxon = iwl4965_commit_rxon,
	.set_rxon_chain = iwl4965_set_rxon_chain,
};
1911
1912static void iwl4965_post_scan(struct iwl_priv *priv)
1913{
1914 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1915
1916 /*
1917 * Since setting the RXON may have been deferred while
1918 * performing the scan, fire one off if needed
1919 */
1920 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
1921 iwl_legacy_commit_rxon(priv, ctx);
1922}
1923
/*
 * Post-association hook: push the associated RXON configuration to the
 * device.  The sequence matters: RXON must first be committed unassoc,
 * then timing is sent, then RXON is committed again with the ASSOC
 * filter flag set.  Finally calibration/power management is kicked off.
 */
static void iwl4965_post_associate(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct ieee80211_vif *vif = ctx->vif;
	struct ieee80211_conf *conf = NULL;
	int ret = 0;

	if (!vif || !priv->is_open)
		return;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* Any in-flight scan must finish (or be aborted) first */
	iwl_legacy_scan_cancel_timeout(priv, 200);

	conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);

	/* Commit unassoc so the timing command below is accepted */
	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	iwl_legacy_commit_rxon(priv, ctx);

	ret = iwl_legacy_send_rxon_timing(priv, ctx);
	if (ret)
		IWL_WARN(priv, "RXON timing - "
			    "Attempting to continue.\n");

	ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;

	iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);

	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);

	ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);

	IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
			vif->bss_conf.aid, vif->bss_conf.beacon_int);

	/* Mirror the BSS's preamble/slot settings into the RXON flags */
	if (vif->bss_conf.use_short_preamble)
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;

	if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
		if (vif->bss_conf.use_short_slot)
			ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
	}

	/* Second commit: now with ASSOC set and final flags */
	iwl_legacy_commit_rxon(priv, ctx);

	IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
			vif->bss_conf.aid, ctx->active.bssid_addr);

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		break;
	case NL80211_IFTYPE_ADHOC:
		/* In IBSS we may need to transmit beacons ourselves */
		iwl4965_send_beacon_cmd(priv);
		break;
	default:
		IWL_ERR(priv, "%s Should not be called in %d mode\n",
			  __func__, vif->type);
		break;
	}

	/* the chain noise calibration will enabled PM upon completion
	 * If chain noise has already been run, then we need to enable
	 * power management here */
	if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
		iwl_legacy_power_update_mode(priv, false);

	/* Enable Rx differential gain and sensitivity calibrations */
	iwl4965_chain_noise_reset(priv);
	priv->start_calib = 1;
}
2000
/*
 * Bring up (or refresh) AP mode for the BSS context.  On first bring-up
 * the RXON must be committed unassoc, timing sent, the beacon template
 * uploaded, and only then the assoc RXON committed — the uCode rejects
 * an assoc RXON without a beacon.  A beacon command is (re)sent at the
 * end in all cases.  Caller must hold priv->mutex.
 */
static void iwl4965_config_ap(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct ieee80211_vif *vif = ctx->vif;
	int ret = 0;

	lockdep_assert_held(&priv->mutex);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* The following should be done only at AP bring up */
	if (!iwl_legacy_is_associated_ctx(ctx)) {

		/* RXON - unassoc (to set timing command) */
		ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
		iwl_legacy_commit_rxon(priv, ctx);

		/* RXON Timing */
		ret = iwl_legacy_send_rxon_timing(priv, ctx);
		if (ret)
			IWL_WARN(priv, "RXON timing failed - "
					"Attempting to continue.\n");

		/* AP has all antennas */
		priv->chain_noise_data.active_chains =
			priv->hw_params.valid_rx_ant;
		iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
		if (priv->cfg->ops->hcmd->set_rxon_chain)
			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);

		ctx->staging.assoc_id = 0;

		/* Mirror BSS preamble/slot settings into RXON flags */
		if (vif->bss_conf.use_short_preamble)
			ctx->staging.flags |=
				RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			ctx->staging.flags &=
				~RXON_FLG_SHORT_PREAMBLE_MSK;

		if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
			if (vif->bss_conf.use_short_slot)
				ctx->staging.flags |=
					RXON_FLG_SHORT_SLOT_MSK;
			else
				ctx->staging.flags &=
					~RXON_FLG_SHORT_SLOT_MSK;
		}
		/* need to send beacon cmd before committing assoc RXON! */
		iwl4965_send_beacon_cmd(priv);
		/* restore RXON assoc */
		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		iwl_legacy_commit_rxon(priv, ctx);
	}
	/* Refresh the beacon template (also runs on the bring-up path,
	 * after the assoc RXON is committed) */
	iwl4965_send_beacon_cmd(priv);
}
2057
/* 4965 command-building and scan helper callbacks for the core. */
static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
	.get_hcmd_size = iwl4965_get_hcmd_size,
	.build_addsta_hcmd = iwl4965_build_addsta_hcmd,
	.request_scan = iwl4965_request_scan,
	.post_scan = iwl4965_post_scan,
};
2064
/* 4965 low-level library callbacks: Tx queue management, firmware load,
 * power/temperature handling, EEPROM layout and debugfs hooks. */
static struct iwl_lib_ops iwl4965_lib = {
	.set_hw_params = iwl4965_hw_set_hw_params,
	.txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
	.txq_attach_buf_to_tfd = iwl4965_hw_txq_attach_buf_to_tfd,
	.txq_free_tfd = iwl4965_hw_txq_free_tfd,
	.txq_init = iwl4965_hw_tx_queue_init,
	.rx_handler_setup = iwl4965_rx_handler_setup,
	.is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
	.init_alive_start = iwl4965_init_alive_start,
	.load_ucode = iwl4965_load_bsm,
	.dump_nic_error_log = iwl4965_dump_nic_error_log,
	.dump_fh = iwl4965_dump_fh,
	.set_channel_switch = iwl4965_hw_channel_switch,
	.apm_ops = {
		.init = iwl_legacy_apm_init,
		.config = iwl4965_nic_config,
	},
	.eeprom_ops = {
		/* Offsets of the regulatory channel groups within the
		 * 4965 EEPROM image (5 legacy bands + 2 HT40 groups) */
		.regulatory_bands = {
			EEPROM_REGULATORY_BAND_1_CHANNELS,
			EEPROM_REGULATORY_BAND_2_CHANNELS,
			EEPROM_REGULATORY_BAND_3_CHANNELS,
			EEPROM_REGULATORY_BAND_4_CHANNELS,
			EEPROM_REGULATORY_BAND_5_CHANNELS,
			EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
			EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS
		},
		.acquire_semaphore = iwl4965_eeprom_acquire_semaphore,
		.release_semaphore = iwl4965_eeprom_release_semaphore,
	},
	.send_tx_power	= iwl4965_send_tx_power,
	.update_chain_flags = iwl4965_update_chain_flags,
	.temp_ops = {
		.temperature = iwl4965_temperature_calib,
	},
	.debugfs_ops = {
		.rx_stats_read = iwl4965_ucode_rx_stats_read,
		.tx_stats_read = iwl4965_ucode_tx_stats_read,
		.general_stats_read = iwl4965_ucode_general_stats_read,
	},
};
2106
/* Callbacks invoked by the shared iwlegacy mac80211 glue. */
static const struct iwl_legacy_ops iwl4965_legacy_ops = {
	.post_associate = iwl4965_post_associate,
	.config_ap = iwl4965_config_ap,
	.manage_ibss_station = iwl4965_manage_ibss_station,
	.update_bcast_stations = iwl4965_update_bcast_stations,
};
2113
/* mac80211 callback table for 4965: a mix of 4965-specific handlers and
 * the shared iwl_legacy_* implementations.  Non-static: referenced from
 * the probe path elsewhere in the driver. */
struct ieee80211_ops iwl4965_hw_ops = {
	.tx = iwl4965_mac_tx,
	.start = iwl4965_mac_start,
	.stop = iwl4965_mac_stop,
	.add_interface = iwl_legacy_mac_add_interface,
	.remove_interface = iwl_legacy_mac_remove_interface,
	.change_interface = iwl_legacy_mac_change_interface,
	.config = iwl_legacy_mac_config,
	.configure_filter = iwl4965_configure_filter,
	.set_key = iwl4965_mac_set_key,
	.update_tkip_key = iwl4965_mac_update_tkip_key,
	.conf_tx = iwl_legacy_mac_conf_tx,
	.reset_tsf = iwl_legacy_mac_reset_tsf,
	.bss_info_changed = iwl_legacy_mac_bss_info_changed,
	.ampdu_action = iwl4965_mac_ampdu_action,
	.hw_scan = iwl_legacy_mac_hw_scan,
	.sta_add = iwl4965_mac_sta_add,
	.sta_remove = iwl_legacy_mac_sta_remove,
	.channel_switch = iwl4965_mac_channel_switch,
	.tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
};
2135
/* Aggregate of all 4965 callback tables, wired into iwl4965_cfg below. */
static const struct iwl_ops iwl4965_ops = {
	.lib = &iwl4965_lib,
	.hcmd = &iwl4965_hcmd,
	.utils = &iwl4965_hcmd_utils,
	.led = &iwl4965_led_ops,
	.legacy = &iwl4965_legacy_ops,
	.ieee80211_ops = &iwl4965_hw_ops,
};
2144
/* Hardware/driver tunables common to the 4965 family. */
static struct iwl_base_params iwl4965_base_params = {
	.eeprom_size = IWL4965_EEPROM_IMG_SIZE,
	.num_of_queues = IWL49_NUM_QUEUES,
	.num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
	.pll_cfg_val = 0,
	.set_l0s = true,
	/* 4965 loads its runtime uCode via the bootstrap state machine */
	.use_bsm = true,
	.led_compensation = 61,
	.chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
	.wd_timeout = IWL_DEF_WD_TIMEOUT,
	/* Device reports temperature in Kelvin (see hw_get_temperature) */
	.temperature_kelvin = true,
	.ucode_tracing = true,
	.sensitivity_calib_by_driver = true,
	.chain_noise_calib_by_driver = true,
};
2160
/* Device configuration for the 4965AGN; exported to the PCI probe code. */
struct iwl_cfg iwl4965_cfg = {
	.name = "Intel(R) Wireless WiFi Link 4965AGN",
	.fw_name_pre = IWL4965_FW_PRE,
	.ucode_api_max = IWL4965_UCODE_API_MAX,
	.ucode_api_min = IWL4965_UCODE_API_MIN,
	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
	/* Two Tx chains (A,B), three Rx chains (A,B,C) */
	.valid_tx_ant = ANT_AB,
	.valid_rx_ant = ANT_ABC,
	.eeprom_ver = EEPROM_4965_EEPROM_VERSION,
	.eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
	.ops = &iwl4965_ops,
	.mod_params = &iwl4965_mod_params,
	.base_params = &iwl4965_base_params,
	.led_mode = IWL_LED_BLINK,
	/*
	 * Force use of chains B and C for scan RX on 5 GHz band
	 * because the device has off-channel reception on chain A.
	 */
	.scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
};
2181
2182/* Module firmware */
2183MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.h b/drivers/net/wireless/iwlegacy/iwl-4965.h
deleted file mode 100644
index 01f8163daf16..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965.h
+++ /dev/null
@@ -1,282 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_4965_h__
64#define __iwl_4965_h__
65
66#include "iwl-dev.h"
67
68/* configuration for the _4965 devices */
69extern struct iwl_cfg iwl4965_cfg;
70
71extern struct iwl_mod_params iwl4965_mod_params;
72
73extern struct ieee80211_ops iwl4965_hw_ops;
74
75/* tx queue */
76void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
77 int sta_id, int tid, int freed);
78
79/* RXON */
80void iwl4965_set_rxon_chain(struct iwl_priv *priv,
81 struct iwl_rxon_context *ctx);
82
83/* uCode */
84int iwl4965_verify_ucode(struct iwl_priv *priv);
85
86/* lib */
87void iwl4965_check_abort_status(struct iwl_priv *priv,
88 u8 frame_count, u32 status);
89
90void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
91int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
92int iwl4965_hw_nic_init(struct iwl_priv *priv);
93int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display);
94
95/* rx */
96void iwl4965_rx_queue_restock(struct iwl_priv *priv);
97void iwl4965_rx_replenish(struct iwl_priv *priv);
98void iwl4965_rx_replenish_now(struct iwl_priv *priv);
99void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
100int iwl4965_rxq_stop(struct iwl_priv *priv);
101int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
102void iwl4965_rx_reply_rx(struct iwl_priv *priv,
103 struct iwl_rx_mem_buffer *rxb);
104void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
105 struct iwl_rx_mem_buffer *rxb);
106void iwl4965_rx_handle(struct iwl_priv *priv);
107
108/* tx */
109void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
110int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
111 struct iwl_tx_queue *txq,
112 dma_addr_t addr, u16 len, u8 reset, u8 pad);
113int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
114 struct iwl_tx_queue *txq);
115void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
116 struct ieee80211_tx_info *info);
117int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
118int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
119 struct ieee80211_sta *sta, u16 tid, u16 *ssn);
120int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
121 struct ieee80211_sta *sta, u16 tid);
122int iwl4965_txq_check_empty(struct iwl_priv *priv,
123 int sta_id, u8 tid, int txq_id);
124void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
125 struct iwl_rx_mem_buffer *rxb);
126int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
127void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv);
128int iwl4965_txq_ctx_alloc(struct iwl_priv *priv);
129void iwl4965_txq_ctx_reset(struct iwl_priv *priv);
130void iwl4965_txq_ctx_stop(struct iwl_priv *priv);
131void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask);
132
133/*
134 * Acquire priv->lock before calling this function !
135 */
136void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index);
137/**
138 * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
139 * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
140 * @scd_retry: (1) Indicates queue will be used in aggregation mode
141 *
142 * NOTE: Acquire priv->lock before calling this function !
143 */
144void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
145 struct iwl_tx_queue *txq,
146 int tx_fifo_id, int scd_retry);
147
148static inline u32 iwl4965_tx_status_to_mac80211(u32 status)
149{
150 status &= TX_STATUS_MSK;
151
152 switch (status) {
153 case TX_STATUS_SUCCESS:
154 case TX_STATUS_DIRECT_DONE:
155 return IEEE80211_TX_STAT_ACK;
156 case TX_STATUS_FAIL_DEST_PS:
157 return IEEE80211_TX_STAT_TX_FILTERED;
158 default:
159 return 0;
160 }
161}
162
163static inline bool iwl4965_is_tx_success(u32 status)
164{
165 status &= TX_STATUS_MSK;
166 return (status == TX_STATUS_SUCCESS) ||
167 (status == TX_STATUS_DIRECT_DONE);
168}
169
170u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
171
172/* rx */
173void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
174 struct iwl_rx_mem_buffer *rxb);
175bool iwl4965_good_plcp_health(struct iwl_priv *priv,
176 struct iwl_rx_packet *pkt);
177void iwl4965_rx_statistics(struct iwl_priv *priv,
178 struct iwl_rx_mem_buffer *rxb);
179void iwl4965_reply_statistics(struct iwl_priv *priv,
180 struct iwl_rx_mem_buffer *rxb);
181
182/* scan */
183int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
184
185/* station mgmt */
186int iwl4965_manage_ibss_station(struct iwl_priv *priv,
187 struct ieee80211_vif *vif, bool add);
188
189/* hcmd */
190int iwl4965_send_beacon_cmd(struct iwl_priv *priv);
191
192#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
193const char *iwl4965_get_tx_fail_reason(u32 status);
194#else
195static inline const char *
196iwl4965_get_tx_fail_reason(u32 status) { return ""; }
197#endif
198
199/* station management */
200int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
201 struct iwl_rxon_context *ctx);
202int iwl4965_add_bssid_station(struct iwl_priv *priv,
203 struct iwl_rxon_context *ctx,
204 const u8 *addr, u8 *sta_id_r);
205int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
206 struct iwl_rxon_context *ctx,
207 struct ieee80211_key_conf *key);
208int iwl4965_set_default_wep_key(struct iwl_priv *priv,
209 struct iwl_rxon_context *ctx,
210 struct ieee80211_key_conf *key);
211int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
212 struct iwl_rxon_context *ctx);
213int iwl4965_set_dynamic_key(struct iwl_priv *priv,
214 struct iwl_rxon_context *ctx,
215 struct ieee80211_key_conf *key, u8 sta_id);
216int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
217 struct iwl_rxon_context *ctx,
218 struct ieee80211_key_conf *key, u8 sta_id);
219void iwl4965_update_tkip_key(struct iwl_priv *priv,
220 struct iwl_rxon_context *ctx,
221 struct ieee80211_key_conf *keyconf,
222 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
223int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv,
224 int sta_id, int tid);
225int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
226 int tid, u16 ssn);
227int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
228 int tid);
229void iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv,
230 int sta_id, int cnt);
231int iwl4965_update_bcast_stations(struct iwl_priv *priv);
232
233/* rate */
234static inline u32 iwl4965_ant_idx_to_flags(u8 ant_idx)
235{
236 return BIT(ant_idx) << RATE_MCS_ANT_POS;
237}
238
239static inline u8 iwl4965_hw_get_rate(__le32 rate_n_flags)
240{
241 return le32_to_cpu(rate_n_flags) & 0xFF;
242}
243
244static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u32 flags)
245{
246 return cpu_to_le32(flags|(u32)rate);
247}
248
249/* eeprom */
250void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
251int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv);
252void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv);
253int iwl4965_eeprom_check_version(struct iwl_priv *priv);
254
255/* mac80211 handlers (for 4965) */
256void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
257int iwl4965_mac_start(struct ieee80211_hw *hw);
258void iwl4965_mac_stop(struct ieee80211_hw *hw);
259void iwl4965_configure_filter(struct ieee80211_hw *hw,
260 unsigned int changed_flags,
261 unsigned int *total_flags,
262 u64 multicast);
263int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
264 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
265 struct ieee80211_key_conf *key);
266void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
267 struct ieee80211_vif *vif,
268 struct ieee80211_key_conf *keyconf,
269 struct ieee80211_sta *sta,
270 u32 iv32, u16 *phase1key);
271int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
272 struct ieee80211_vif *vif,
273 enum ieee80211_ampdu_mlme_action action,
274 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
275 u8 buf_size);
276int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
277 struct ieee80211_vif *vif,
278 struct ieee80211_sta *sta);
279void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
280 struct ieee80211_channel_switch *ch_switch);
281
282#endif /* __iwl_4965_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
deleted file mode 100644
index 2bd5659310d7..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-core.c
+++ /dev/null
@@ -1,2661 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/etherdevice.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <net/mac80211.h>
35
36#include "iwl-eeprom.h"
37#include "iwl-dev.h"
38#include "iwl-debug.h"
39#include "iwl-core.h"
40#include "iwl-io.h"
41#include "iwl-power.h"
42#include "iwl-sta.h"
43#include "iwl-helpers.h"
44
45
46MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
47MODULE_VERSION(IWLWIFI_VERSION);
48MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
49MODULE_LICENSE("GPL");
50
51/*
52 * set bt_coex_active to true, uCode will do kill/defer
53 * every time the priority line is asserted (BT is sending signals on the
54 * priority line in the PCIx).
55 * set bt_coex_active to false, uCode will ignore the BT activity and
56 * perform the normal operation
57 *
58 * User might experience transmit issue on some platform due to WiFi/BT
59 * co-exist problem. The possible behaviors are:
60 * Able to scan and finding all the available AP
61 * Not able to associate with any AP
62 * On those platforms, WiFi communication can be restored by set
63 * "bt_coex_active" module parameter to "false"
64 *
65 * default: bt_coex_active = true (BT_COEX_ENABLE)
66 */
67static bool bt_coex_active = true;
68module_param(bt_coex_active, bool, S_IRUGO);
69MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
70
71u32 iwlegacy_debug_level;
72EXPORT_SYMBOL(iwlegacy_debug_level);
73
74const u8 iwlegacy_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
75EXPORT_SYMBOL(iwlegacy_bcast_addr);
76
77
78/* This function both allocates and initializes hw and priv. */
79struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg)
80{
81 struct iwl_priv *priv;
82 /* mac80211 allocates memory for this device instance, including
83 * space for this driver's private structure */
84 struct ieee80211_hw *hw;
85
86 hw = ieee80211_alloc_hw(sizeof(struct iwl_priv),
87 cfg->ops->ieee80211_ops);
88 if (hw == NULL) {
89 pr_err("%s: Can not allocate network device\n",
90 cfg->name);
91 goto out;
92 }
93
94 priv = hw->priv;
95 priv->hw = hw;
96
97out:
98 return hw;
99}
100EXPORT_SYMBOL(iwl_legacy_alloc_all);
101
102#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
103#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
104static void iwl_legacy_init_ht_hw_capab(const struct iwl_priv *priv,
105 struct ieee80211_sta_ht_cap *ht_info,
106 enum ieee80211_band band)
107{
108 u16 max_bit_rate = 0;
109 u8 rx_chains_num = priv->hw_params.rx_chains_num;
110 u8 tx_chains_num = priv->hw_params.tx_chains_num;
111
112 ht_info->cap = 0;
113 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
114
115 ht_info->ht_supported = true;
116
117 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
118 max_bit_rate = MAX_BIT_RATE_20_MHZ;
119 if (priv->hw_params.ht40_channel & BIT(band)) {
120 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
121 ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
122 ht_info->mcs.rx_mask[4] = 0x01;
123 max_bit_rate = MAX_BIT_RATE_40_MHZ;
124 }
125
126 if (priv->cfg->mod_params->amsdu_size_8K)
127 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
128
129 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
130 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
131
132 ht_info->mcs.rx_mask[0] = 0xFF;
133 if (rx_chains_num >= 2)
134 ht_info->mcs.rx_mask[1] = 0xFF;
135 if (rx_chains_num >= 3)
136 ht_info->mcs.rx_mask[2] = 0xFF;
137
138 /* Highest supported Rx data rate */
139 max_bit_rate *= rx_chains_num;
140 WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
141 ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
142
143 /* Tx MCS capabilities */
144 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
145 if (tx_chains_num != rx_chains_num) {
146 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
147 ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
148 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
149 }
150}
151
152/**
153 * iwl_legacy_init_geos - Initialize mac80211's geo/channel info based from eeprom
154 */
155int iwl_legacy_init_geos(struct iwl_priv *priv)
156{
157 struct iwl_channel_info *ch;
158 struct ieee80211_supported_band *sband;
159 struct ieee80211_channel *channels;
160 struct ieee80211_channel *geo_ch;
161 struct ieee80211_rate *rates;
162 int i = 0;
163 s8 max_tx_power = 0;
164
165 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
166 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
167 IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
168 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
169 return 0;
170 }
171
172 channels = kzalloc(sizeof(struct ieee80211_channel) *
173 priv->channel_count, GFP_KERNEL);
174 if (!channels)
175 return -ENOMEM;
176
177 rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
178 GFP_KERNEL);
179 if (!rates) {
180 kfree(channels);
181 return -ENOMEM;
182 }
183
184 /* 5.2GHz channels start after the 2.4GHz channels */
185 sband = &priv->bands[IEEE80211_BAND_5GHZ];
186 sband->channels = &channels[ARRAY_SIZE(iwlegacy_eeprom_band_1)];
187 /* just OFDM */
188 sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
189 sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
190
191 if (priv->cfg->sku & IWL_SKU_N)
192 iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
193 IEEE80211_BAND_5GHZ);
194
195 sband = &priv->bands[IEEE80211_BAND_2GHZ];
196 sband->channels = channels;
197 /* OFDM & CCK */
198 sband->bitrates = rates;
199 sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
200
201 if (priv->cfg->sku & IWL_SKU_N)
202 iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
203 IEEE80211_BAND_2GHZ);
204
205 priv->ieee_channels = channels;
206 priv->ieee_rates = rates;
207
208 for (i = 0; i < priv->channel_count; i++) {
209 ch = &priv->channel_info[i];
210
211 if (!iwl_legacy_is_channel_valid(ch))
212 continue;
213
214 sband = &priv->bands[ch->band];
215
216 geo_ch = &sband->channels[sband->n_channels++];
217
218 geo_ch->center_freq =
219 ieee80211_channel_to_frequency(ch->channel, ch->band);
220 geo_ch->max_power = ch->max_power_avg;
221 geo_ch->max_antenna_gain = 0xff;
222 geo_ch->hw_value = ch->channel;
223
224 if (iwl_legacy_is_channel_valid(ch)) {
225 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
226 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
227
228 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
229 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
230
231 if (ch->flags & EEPROM_CHANNEL_RADAR)
232 geo_ch->flags |= IEEE80211_CHAN_RADAR;
233
234 geo_ch->flags |= ch->ht40_extension_channel;
235
236 if (ch->max_power_avg > max_tx_power)
237 max_tx_power = ch->max_power_avg;
238 } else {
239 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
240 }
241
242 IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
243 ch->channel, geo_ch->center_freq,
244 iwl_legacy_is_channel_a_band(ch) ? "5.2" : "2.4",
245 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
246 "restricted" : "valid",
247 geo_ch->flags);
248 }
249
250 priv->tx_power_device_lmt = max_tx_power;
251 priv->tx_power_user_lmt = max_tx_power;
252 priv->tx_power_next = max_tx_power;
253
254 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
255 priv->cfg->sku & IWL_SKU_A) {
256 IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
257 "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
258 priv->pci_dev->device,
259 priv->pci_dev->subsystem_device);
260 priv->cfg->sku &= ~IWL_SKU_A;
261 }
262
263 IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
264 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
265 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
266
267 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
268
269 return 0;
270}
271EXPORT_SYMBOL(iwl_legacy_init_geos);
272
273/*
274 * iwl_legacy_free_geos - undo allocations in iwl_legacy_init_geos
275 */
276void iwl_legacy_free_geos(struct iwl_priv *priv)
277{
278 kfree(priv->ieee_channels);
279 kfree(priv->ieee_rates);
280 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
281}
282EXPORT_SYMBOL(iwl_legacy_free_geos);
283
284static bool iwl_legacy_is_channel_extension(struct iwl_priv *priv,
285 enum ieee80211_band band,
286 u16 channel, u8 extension_chan_offset)
287{
288 const struct iwl_channel_info *ch_info;
289
290 ch_info = iwl_legacy_get_channel_info(priv, band, channel);
291 if (!iwl_legacy_is_channel_valid(ch_info))
292 return false;
293
294 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
295 return !(ch_info->ht40_extension_channel &
296 IEEE80211_CHAN_NO_HT40PLUS);
297 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
298 return !(ch_info->ht40_extension_channel &
299 IEEE80211_CHAN_NO_HT40MINUS);
300
301 return false;
302}
303
304bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
305 struct iwl_rxon_context *ctx,
306 struct ieee80211_sta_ht_cap *ht_cap)
307{
308 if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
309 return false;
310
311 /*
312 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
313 * the bit will not set if it is pure 40MHz case
314 */
315 if (ht_cap && !ht_cap->ht_supported)
316 return false;
317
318#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
319 if (priv->disable_ht40)
320 return false;
321#endif
322
323 return iwl_legacy_is_channel_extension(priv, priv->band,
324 le16_to_cpu(ctx->staging.channel),
325 ctx->ht.extension_chan_offset);
326}
327EXPORT_SYMBOL(iwl_legacy_is_ht40_tx_allowed);
328
329static u16 iwl_legacy_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
330{
331 u16 new_val;
332 u16 beacon_factor;
333
334 /*
335 * If mac80211 hasn't given us a beacon interval, program
336 * the default into the device.
337 */
338 if (!beacon_val)
339 return DEFAULT_BEACON_INTERVAL;
340
341 /*
342 * If the beacon interval we obtained from the peer
343 * is too large, we'll have to wake up more often
344 * (and in IBSS case, we'll beacon too much)
345 *
346 * For example, if max_beacon_val is 4096, and the
347 * requested beacon interval is 7000, we'll have to
348 * use 3500 to be able to wake up on the beacons.
349 *
350 * This could badly influence beacon detection stats.
351 */
352
353 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
354 new_val = beacon_val / beacon_factor;
355
356 if (!new_val)
357 new_val = max_beacon_val;
358
359 return new_val;
360}
361
362int
363iwl_legacy_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
364{
365 u64 tsf;
366 s32 interval_tm, rem;
367 struct ieee80211_conf *conf = NULL;
368 u16 beacon_int;
369 struct ieee80211_vif *vif = ctx->vif;
370
371 conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
372
373 lockdep_assert_held(&priv->mutex);
374
375 memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
376
377 ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
378 ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
379
380 beacon_int = vif ? vif->bss_conf.beacon_int : 0;
381
382 /*
383 * TODO: For IBSS we need to get atim_window from mac80211,
384 * for now just always use 0
385 */
386 ctx->timing.atim_window = 0;
387
388 beacon_int = iwl_legacy_adjust_beacon_interval(beacon_int,
389 priv->hw_params.max_beacon_itrvl * TIME_UNIT);
390 ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
391
392 tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */
393 interval_tm = beacon_int * TIME_UNIT;
394 rem = do_div(tsf, interval_tm);
395 ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
396
397 ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;
398
399 IWL_DEBUG_ASSOC(priv,
400 "beacon interval %d beacon timer %d beacon tim %d\n",
401 le16_to_cpu(ctx->timing.beacon_interval),
402 le32_to_cpu(ctx->timing.beacon_init_val),
403 le16_to_cpu(ctx->timing.atim_window));
404
405 return iwl_legacy_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
406 sizeof(ctx->timing), &ctx->timing);
407}
408EXPORT_SYMBOL(iwl_legacy_send_rxon_timing);
409
410void
411iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
412 struct iwl_rxon_context *ctx,
413 int hw_decrypt)
414{
415 struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
416
417 if (hw_decrypt)
418 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
419 else
420 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
421
422}
423EXPORT_SYMBOL(iwl_legacy_set_rxon_hwcrypto);
424
425/* validate RXON structure is valid */
426int
427iwl_legacy_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
428{
429 struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
430 bool error = false;
431
432 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
433 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
434 IWL_WARN(priv, "check 2.4G: wrong narrow\n");
435 error = true;
436 }
437 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
438 IWL_WARN(priv, "check 2.4G: wrong radar\n");
439 error = true;
440 }
441 } else {
442 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
443 IWL_WARN(priv, "check 5.2G: not short slot!\n");
444 error = true;
445 }
446 if (rxon->flags & RXON_FLG_CCK_MSK) {
447 IWL_WARN(priv, "check 5.2G: CCK!\n");
448 error = true;
449 }
450 }
451 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
452 IWL_WARN(priv, "mac/bssid mcast!\n");
453 error = true;
454 }
455
456 /* make sure basic rates 6Mbps and 1Mbps are supported */
457 if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
458 (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
459 IWL_WARN(priv, "neither 1 nor 6 are basic\n");
460 error = true;
461 }
462
463 if (le16_to_cpu(rxon->assoc_id) > 2007) {
464 IWL_WARN(priv, "aid > 2007\n");
465 error = true;
466 }
467
468 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
469 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
470 IWL_WARN(priv, "CCK and short slot\n");
471 error = true;
472 }
473
474 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
475 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
476 IWL_WARN(priv, "CCK and auto detect");
477 error = true;
478 }
479
480 if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
481 RXON_FLG_TGG_PROTECT_MSK)) ==
482 RXON_FLG_TGG_PROTECT_MSK) {
483 IWL_WARN(priv, "TGg but no auto-detect\n");
484 error = true;
485 }
486
487 if (error)
488 IWL_WARN(priv, "Tuning to channel %d\n",
489 le16_to_cpu(rxon->channel));
490
491 if (error) {
492 IWL_ERR(priv, "Invalid RXON\n");
493 return -EINVAL;
494 }
495 return 0;
496}
497EXPORT_SYMBOL(iwl_legacy_check_rxon_cmd);
498
499/**
500 * iwl_legacy_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
501 * @priv: staging_rxon is compared to active_rxon
502 *
503 * If the RXON structure is changing enough to require a new tune,
504 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
505 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
506 */
507int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
508 struct iwl_rxon_context *ctx)
509{
510 const struct iwl_legacy_rxon_cmd *staging = &ctx->staging;
511 const struct iwl_legacy_rxon_cmd *active = &ctx->active;
512
513#define CHK(cond) \
514 if ((cond)) { \
515 IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \
516 return 1; \
517 }
518
519#define CHK_NEQ(c1, c2) \
520 if ((c1) != (c2)) { \
521 IWL_DEBUG_INFO(priv, "need full RXON - " \
522 #c1 " != " #c2 " - %d != %d\n", \
523 (c1), (c2)); \
524 return 1; \
525 }
526
527 /* These items are only settable from the full RXON command */
528 CHK(!iwl_legacy_is_associated_ctx(ctx));
529 CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
530 CHK(compare_ether_addr(staging->node_addr, active->node_addr));
531 CHK(compare_ether_addr(staging->wlap_bssid_addr,
532 active->wlap_bssid_addr));
533 CHK_NEQ(staging->dev_type, active->dev_type);
534 CHK_NEQ(staging->channel, active->channel);
535 CHK_NEQ(staging->air_propagation, active->air_propagation);
536 CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
537 active->ofdm_ht_single_stream_basic_rates);
538 CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
539 active->ofdm_ht_dual_stream_basic_rates);
540 CHK_NEQ(staging->assoc_id, active->assoc_id);
541
542 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
543 * be updated with the RXON_ASSOC command -- however only some
544 * flag transitions are allowed using RXON_ASSOC */
545
546 /* Check if we are not switching bands */
547 CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
548 active->flags & RXON_FLG_BAND_24G_MSK);
549
550 /* Check if we are switching association toggle */
551 CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
552 active->filter_flags & RXON_FILTER_ASSOC_MSK);
553
554#undef CHK
555#undef CHK_NEQ
556
557 return 0;
558}
559EXPORT_SYMBOL(iwl_legacy_full_rxon_required);
560
561u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
562 struct iwl_rxon_context *ctx)
563{
564 /*
565 * Assign the lowest rate -- should really get this from
566 * the beacon skb from mac80211.
567 */
568 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
569 return IWL_RATE_1M_PLCP;
570 else
571 return IWL_RATE_6M_PLCP;
572}
573EXPORT_SYMBOL(iwl_legacy_get_lowest_plcp);
574
575static void _iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
576 struct iwl_ht_config *ht_conf,
577 struct iwl_rxon_context *ctx)
578{
579 struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
580
581 if (!ctx->ht.enabled) {
582 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
583 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
584 RXON_FLG_HT40_PROT_MSK |
585 RXON_FLG_HT_PROT_MSK);
586 return;
587 }
588
589 rxon->flags |= cpu_to_le32(ctx->ht.protection <<
590 RXON_FLG_HT_OPERATING_MODE_POS);
591
592 /* Set up channel bandwidth:
593 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
594 /* clear the HT channel mode before set the mode */
595 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
596 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
597 if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, NULL)) {
598 /* pure ht40 */
599 if (ctx->ht.protection ==
600 IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
601 rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
602 /* Note: control channel is opposite of extension channel */
603 switch (ctx->ht.extension_chan_offset) {
604 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
605 rxon->flags &=
606 ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
607 break;
608 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
609 rxon->flags |=
610 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
611 break;
612 }
613 } else {
614 /* Note: control channel is opposite of extension channel */
615 switch (ctx->ht.extension_chan_offset) {
616 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
617 rxon->flags &=
618 ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
619 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
620 break;
621 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
622 rxon->flags |=
623 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
624 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
625 break;
626 case IEEE80211_HT_PARAM_CHA_SEC_NONE:
627 default:
628 /* channel location only valid if in Mixed mode */
629 IWL_ERR(priv,
630 "invalid extension channel offset\n");
631 break;
632 }
633 }
634 } else {
635 rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
636 }
637
638 if (priv->cfg->ops->hcmd->set_rxon_chain)
639 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
640
641 IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
642 "extension channel offset 0x%x\n",
643 le32_to_cpu(rxon->flags), ctx->ht.protection,
644 ctx->ht.extension_chan_offset);
645}
646
647void iwl_legacy_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
648{
649 struct iwl_rxon_context *ctx;
650
651 for_each_context(priv, ctx)
652 _iwl_legacy_set_rxon_ht(priv, ht_conf, ctx);
653}
654EXPORT_SYMBOL(iwl_legacy_set_rxon_ht);
655
656/* Return valid, unused, channel for a passive scan to reset the RF */
657u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
658 enum ieee80211_band band)
659{
660 const struct iwl_channel_info *ch_info;
661 int i;
662 u8 channel = 0;
663 u8 min, max;
664 struct iwl_rxon_context *ctx;
665
666 if (band == IEEE80211_BAND_5GHZ) {
667 min = 14;
668 max = priv->channel_count;
669 } else {
670 min = 0;
671 max = 14;
672 }
673
674 for (i = min; i < max; i++) {
675 bool busy = false;
676
677 for_each_context(priv, ctx) {
678 busy = priv->channel_info[i].channel ==
679 le16_to_cpu(ctx->staging.channel);
680 if (busy)
681 break;
682 }
683
684 if (busy)
685 continue;
686
687 channel = priv->channel_info[i].channel;
688 ch_info = iwl_legacy_get_channel_info(priv, band, channel);
689 if (iwl_legacy_is_channel_valid(ch_info))
690 break;
691 }
692
693 return channel;
694}
695EXPORT_SYMBOL(iwl_legacy_get_single_channel_number);
696
697/**
698 * iwl_legacy_set_rxon_channel - Set the band and channel values in staging RXON
699 * @ch: requested channel as a pointer to struct ieee80211_channel
700
701 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
702 * in the staging RXON flag structure based on the ch->band
703 */
704int
705iwl_legacy_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
706 struct iwl_rxon_context *ctx)
707{
708 enum ieee80211_band band = ch->band;
709 u16 channel = ch->hw_value;
710
711 if ((le16_to_cpu(ctx->staging.channel) == channel) &&
712 (priv->band == band))
713 return 0;
714
715 ctx->staging.channel = cpu_to_le16(channel);
716 if (band == IEEE80211_BAND_5GHZ)
717 ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
718 else
719 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
720
721 priv->band = band;
722
723 IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
724
725 return 0;
726}
727EXPORT_SYMBOL(iwl_legacy_set_rxon_channel);
728
729void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
730 struct iwl_rxon_context *ctx,
731 enum ieee80211_band band,
732 struct ieee80211_vif *vif)
733{
734 if (band == IEEE80211_BAND_5GHZ) {
735 ctx->staging.flags &=
736 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
737 | RXON_FLG_CCK_MSK);
738 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
739 } else {
740 /* Copied from iwl_post_associate() */
741 if (vif && vif->bss_conf.use_short_slot)
742 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
743 else
744 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
745
746 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
747 ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
748 ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
749 }
750}
751EXPORT_SYMBOL(iwl_legacy_set_flags_for_band);
752
753/*
754 * initialize rxon structure with default values from eeprom
755 */
756void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
757 struct iwl_rxon_context *ctx)
758{
759 const struct iwl_channel_info *ch_info;
760
761 memset(&ctx->staging, 0, sizeof(ctx->staging));
762
763 if (!ctx->vif) {
764 ctx->staging.dev_type = ctx->unused_devtype;
765 } else
766 switch (ctx->vif->type) {
767
768 case NL80211_IFTYPE_STATION:
769 ctx->staging.dev_type = ctx->station_devtype;
770 ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
771 break;
772
773 case NL80211_IFTYPE_ADHOC:
774 ctx->staging.dev_type = ctx->ibss_devtype;
775 ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
776 ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
777 RXON_FILTER_ACCEPT_GRP_MSK;
778 break;
779
780 default:
781 IWL_ERR(priv, "Unsupported interface type %d\n",
782 ctx->vif->type);
783 break;
784 }
785
786#if 0
787 /* TODO: Figure out when short_preamble would be set and cache from
788 * that */
789 if (!hw_to_local(priv->hw)->short_preamble)
790 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
791 else
792 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
793#endif
794
795 ch_info = iwl_legacy_get_channel_info(priv, priv->band,
796 le16_to_cpu(ctx->active.channel));
797
798 if (!ch_info)
799 ch_info = &priv->channel_info[0];
800
801 ctx->staging.channel = cpu_to_le16(ch_info->channel);
802 priv->band = ch_info->band;
803
804 iwl_legacy_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
805
806 ctx->staging.ofdm_basic_rates =
807 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
808 ctx->staging.cck_basic_rates =
809 (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
810
811 /* clear both MIX and PURE40 mode flag */
812 ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
813 RXON_FLG_CHANNEL_MODE_PURE_40);
814 if (ctx->vif)
815 memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
816
817 ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
818 ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
819}
820EXPORT_SYMBOL(iwl_legacy_connection_init_rx_config);
821
822void iwl_legacy_set_rate(struct iwl_priv *priv)
823{
824 const struct ieee80211_supported_band *hw = NULL;
825 struct ieee80211_rate *rate;
826 struct iwl_rxon_context *ctx;
827 int i;
828
829 hw = iwl_get_hw_mode(priv, priv->band);
830 if (!hw) {
831 IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
832 return;
833 }
834
835 priv->active_rate = 0;
836
837 for (i = 0; i < hw->n_bitrates; i++) {
838 rate = &(hw->bitrates[i]);
839 if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
840 priv->active_rate |= (1 << rate->hw_value);
841 }
842
843 IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
844
845 for_each_context(priv, ctx) {
846 ctx->staging.cck_basic_rates =
847 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
848
849 ctx->staging.ofdm_basic_rates =
850 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
851 }
852}
853EXPORT_SYMBOL(iwl_legacy_set_rate);
854
855void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success)
856{
857 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
858
859 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
860 return;
861
862 if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
863 ieee80211_chswitch_done(ctx->vif, is_success);
864}
865EXPORT_SYMBOL(iwl_legacy_chswitch_done);
866
867void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
868{
869 struct iwl_rx_packet *pkt = rxb_addr(rxb);
870 struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
871
872 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
873 struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active;
874
875 if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
876 return;
877
878 if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
879 rxon->channel = csa->channel;
880 ctx->staging.channel = csa->channel;
881 IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
882 le16_to_cpu(csa->channel));
883 iwl_legacy_chswitch_done(priv, true);
884 } else {
885 IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
886 le16_to_cpu(csa->channel));
887 iwl_legacy_chswitch_done(priv, false);
888 }
889}
890EXPORT_SYMBOL(iwl_legacy_rx_csa);
891
892#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/*
 * Dump the context's staging RXON command to the debug log, first as a
 * raw hex dump and then field by field (IWL_DL_RADIO level).
 */
void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
			     struct iwl_rxon_context *ctx)
{
	struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;

	IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
	iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
				le16_to_cpu(rxon->channel));
	IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
	IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
				le32_to_cpu(rxon->filter_flags));
	IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
	IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
			rxon->ofdm_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
			rxon->cck_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
	IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
	IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
			le16_to_cpu(rxon->assoc_id));
}
EXPORT_SYMBOL(iwl_legacy_print_rx_config_cmd);
916#endif
/**
 * iwl_legacy_irq_handle_error - called for HW or SW error interrupt from card
 *
 * Marks the firmware as failed, dumps diagnostic state, and queues a
 * restart if the fw_restart module parameter allows it.
 */
void iwl_legacy_irq_handle_error(struct iwl_priv *priv)
{
	/* Set the FW error flag -- cleared on iwl_down */
	set_bit(STATUS_FW_ERROR, &priv->status);

	/* Cancel currently queued command. */
	clear_bit(STATUS_HCMD_ACTIVE, &priv->status);

	IWL_ERR(priv, "Loaded firmware version: %s\n",
		priv->hw->wiphy->fw_version);

	/* dump the error log and, when available, the flow-handler state */
	priv->cfg->ops->lib->dump_nic_error_log(priv);
	if (priv->cfg->ops->lib->dump_fh)
		priv->cfg->ops->lib->dump_fh(priv, NULL, false);
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS)
		iwl_legacy_print_rx_config_cmd(priv,
					&priv->contexts[IWL_RXON_CTX_BSS]);
#endif

	/* release anyone blocked waiting on a host command */
	wake_up(&priv->wait_command_queue);

	/* Keep the restart process from trying to send host
	 * commands by clearing the INIT status bit */
	clear_bit(STATUS_READY, &priv->status);

	if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
			"Restarting adapter due to uCode error.\n");

		/* only restart when the module parameter permits it */
		if (priv->cfg->mod_params->restart_fw)
			queue_work(priv->workqueue, &priv->restart);
	}
}
EXPORT_SYMBOL(iwl_legacy_irq_handle_error);
955
/*
 * Ask the device to halt its busmaster DMA and wait (up to 100 usec)
 * for it to acknowledge.  Returns 0 on success or the non-zero
 * iwl_poll_bit() result on timeout; a timeout is logged but left to
 * the caller to act on.
 */
static int iwl_legacy_apm_stop_master(struct iwl_priv *priv)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	/* wait for the hardware to report the master as disabled */
	ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
			CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret)
		IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(priv, "stop master\n");

	return ret;
}
972
/*
 * Power the NIC down: halt busmaster DMA, soft-reset the whole device,
 * then drop it from the powered-up active (D0A*) state back to the
 * uninitialized (D0U*) state.
 */
void iwl_legacy_apm_stop(struct iwl_priv *priv)
{
	IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");

	/* Stop device's DMA activity */
	iwl_legacy_apm_stop_master(priv);

	/* Reset the entire device */
	iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	/* brief settle delay after asserting the soft reset */
	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
			CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
EXPORT_SYMBOL(iwl_legacy_apm_stop);
993
994
/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
 * NOTE: This does not load uCode nor start the embedded processor
 *
 * Returns 0 on success, or the negative iwl_poll_bit() result if the
 * MAC clock fails to stabilize.
 */
int iwl_legacy_apm_init(struct iwl_priv *priv)
{
	int ret = 0;
	u16 lctl;

	IWL_DEBUG_INFO(priv, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
		CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
		CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_legacy_set_bit(priv, CSR_DBG_HPET_MEM_REG,
		CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 * NOTE: This is no-op for 3945 (non-existent bit)
	 */
	iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	/*
	 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 * costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 * power savings, even without L1.
	 */
	if (priv->cfg->base_params->set_l0s) {
		lctl = iwl_legacy_pcie_link_ctl(priv);
		if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
					PCI_CFG_LINK_CTRL_VAL_L1_EN) {
			/* L1-ASPM enabled; disable(!) L0S */
			iwl_legacy_set_bit(priv, CSR_GIO_REG,
					CSR_GIO_REG_VAL_L0S_ENABLED);
			IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
		} else {
			/* L1-ASPM disabled; enable(!) L0S */
			iwl_legacy_clear_bit(priv, CSR_GIO_REG,
					CSR_GIO_REG_VAL_L0S_ENABLED);
			IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
		}
	}

	/* Configure analog phase-lock-loop before activating to D0A */
	if (priv->cfg->base_params->pll_cfg_val)
		iwl_legacy_set_bit(priv, CSR_ANA_PLL_CFG,
			priv->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_legacy_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_legacy_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(priv, "Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
	 * BSM (Boostrap State Machine) is only in 3945 and 4965.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks. This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	if (priv->cfg->base_params->use_bsm)
		iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
			APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
	else
		iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
			APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	iwl_legacy_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
			APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

out:
	return ret;
}
EXPORT_SYMBOL(iwl_legacy_apm_init);
1107
1108
/*
 * Set the user TX power limit (in dBm; 0 dBm means 1 mW).
 *
 * The request is deferred while scanning or while a RXON change is
 * pending (unless @force); tx_power_next keeps the newest requested
 * value so scan-complete / commit_rxon can apply it later.
 *
 * Returns 0 on success or when deferred; -EINVAL for an out-of-range
 * power, -EOPNOTSUPP when the device lacks a send_tx_power op, -EIO
 * when the RF is not ready, or the send_tx_power error (in which case
 * the previous power setting is restored).
 */
int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
{
	int ret;
	s8 prev_tx_power;
	bool defer;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	lockdep_assert_held(&priv->mutex);

	if (priv->tx_power_user_lmt == tx_power && !force)
		return 0;

	if (!priv->cfg->ops->lib->send_tx_power)
		return -EOPNOTSUPP;

	/* 0 dBm mean 1 milliwatt */
	if (tx_power < 0) {
		IWL_WARN(priv,
			 "Requested user TXPOWER %d below 1 mW.\n",
			 tx_power);
		return -EINVAL;
	}

	if (tx_power > priv->tx_power_device_lmt) {
		IWL_WARN(priv,
			"Requested user TXPOWER %d above upper limit %d.\n",
			 tx_power, priv->tx_power_device_lmt);
		return -EINVAL;
	}

	if (!iwl_legacy_is_ready_rf(priv))
		return -EIO;

	/* scan complete and commit_rxon use tx_power_next value,
	 * it always need to be updated for newest request */
	priv->tx_power_next = tx_power;

	/* do not set tx power when scanning or channel changing */
	defer = test_bit(STATUS_SCANNING, &priv->status) ||
		memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
	if (defer && !force) {
		IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
		return 0;
	}

	prev_tx_power = priv->tx_power_user_lmt;
	priv->tx_power_user_lmt = tx_power;

	ret = priv->cfg->ops->lib->send_tx_power(priv);

	/* if fail to set tx_power, restore the orig. tx power */
	if (ret) {
		priv->tx_power_user_lmt = prev_tx_power;
		priv->tx_power_next = prev_tx_power;
	}
	return ret;
}
EXPORT_SYMBOL(iwl_legacy_set_tx_power);
1167
1168void iwl_legacy_send_bt_config(struct iwl_priv *priv)
1169{
1170 struct iwl_bt_cmd bt_cmd = {
1171 .lead_time = BT_LEAD_TIME_DEF,
1172 .max_kill = BT_MAX_KILL_DEF,
1173 .kill_ack_mask = 0,
1174 .kill_cts_mask = 0,
1175 };
1176
1177 if (!bt_coex_active)
1178 bt_cmd.flags = BT_COEX_DISABLE;
1179 else
1180 bt_cmd.flags = BT_COEX_ENABLE;
1181
1182 IWL_DEBUG_INFO(priv, "BT coex %s\n",
1183 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
1184
1185 if (iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1186 sizeof(struct iwl_bt_cmd), &bt_cmd))
1187 IWL_ERR(priv, "failed to send BT Coex Config\n");
1188}
1189EXPORT_SYMBOL(iwl_legacy_send_bt_config);
1190
1191int iwl_legacy_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
1192{
1193 struct iwl_statistics_cmd statistics_cmd = {
1194 .configuration_flags =
1195 clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
1196 };
1197
1198 if (flags & CMD_ASYNC)
1199 return iwl_legacy_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
1200 sizeof(struct iwl_statistics_cmd),
1201 &statistics_cmd, NULL);
1202 else
1203 return iwl_legacy_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
1204 sizeof(struct iwl_statistics_cmd),
1205 &statistics_cmd);
1206}
1207EXPORT_SYMBOL(iwl_legacy_send_statistics_request);
1208
/* Log the uCode's power-management sleep notification (debug builds). */
void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
			   struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_sleep_notification *notif = &pkt->u.sleep_notif;

	IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
		     notif->pm_sleep_mode, notif->pm_wakeup_src);
#endif
}
EXPORT_SYMBOL(iwl_legacy_rx_pm_sleep_notif);
1220
1221void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
1222 struct iwl_rx_mem_buffer *rxb)
1223{
1224 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1225 u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
1226 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
1227 "notification for %s:\n", len,
1228 iwl_legacy_get_cmd_string(pkt->hdr.cmd));
1229 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
1230}
1231EXPORT_SYMBOL(iwl_legacy_rx_pm_debug_statistics_notif);
1232
/*
 * Handle an error reply packet: log the failing command name/id, its
 * sequence number and the accompanying error details.
 */
void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
			struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
		"seq 0x%04X ser 0x%08X\n",
		le32_to_cpu(pkt->u.err_resp.error_type),
		iwl_legacy_get_cmd_string(pkt->u.err_resp.cmd_id),
		pkt->u.err_resp.cmd_id,
		le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
		le32_to_cpu(pkt->u.err_resp.error_info));
}
EXPORT_SYMBOL(iwl_legacy_rx_reply_error);
1247
/* Reset all interrupt counters kept in priv->isr_stats. */
void iwl_legacy_clear_isr_stats(struct iwl_priv *priv)
{
	memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
}
1252
1253int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw,
1254 struct ieee80211_vif *vif, u16 queue,
1255 const struct ieee80211_tx_queue_params *params)
1256{
1257 struct iwl_priv *priv = hw->priv;
1258 struct iwl_rxon_context *ctx;
1259 unsigned long flags;
1260 int q;
1261
1262 IWL_DEBUG_MAC80211(priv, "enter\n");
1263
1264 if (!iwl_legacy_is_ready_rf(priv)) {
1265 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
1266 return -EIO;
1267 }
1268
1269 if (queue >= AC_NUM) {
1270 IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
1271 return 0;
1272 }
1273
1274 q = AC_NUM - 1 - queue;
1275
1276 spin_lock_irqsave(&priv->lock, flags);
1277
1278 for_each_context(priv, ctx) {
1279 ctx->qos_data.def_qos_parm.ac[q].cw_min =
1280 cpu_to_le16(params->cw_min);
1281 ctx->qos_data.def_qos_parm.ac[q].cw_max =
1282 cpu_to_le16(params->cw_max);
1283 ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
1284 ctx->qos_data.def_qos_parm.ac[q].edca_txop =
1285 cpu_to_le16((params->txop * 32));
1286
1287 ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
1288 }
1289
1290 spin_unlock_irqrestore(&priv->lock, flags);
1291
1292 IWL_DEBUG_MAC80211(priv, "leave\n");
1293 return 0;
1294}
1295EXPORT_SYMBOL(iwl_legacy_mac_conf_tx);
1296
/*
 * mac80211 tx_last_beacon callback: report whether this device is the
 * current IBSS manager (i.e. responsible for beaconing).
 */
int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;

	return priv->ibss_manager == IWL_IBSS_MANAGER;
}
EXPORT_SYMBOL_GPL(iwl_legacy_mac_tx_last_beacon);
1304
1305static int
1306iwl_legacy_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1307{
1308 iwl_legacy_connection_init_rx_config(priv, ctx);
1309
1310 if (priv->cfg->ops->hcmd->set_rxon_chain)
1311 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
1312
1313 return iwl_legacy_commit_rxon(priv, ctx);
1314}
1315
1316static int iwl_legacy_setup_interface(struct iwl_priv *priv,
1317 struct iwl_rxon_context *ctx)
1318{
1319 struct ieee80211_vif *vif = ctx->vif;
1320 int err;
1321
1322 lockdep_assert_held(&priv->mutex);
1323
1324 /*
1325 * This variable will be correct only when there's just
1326 * a single context, but all code using it is for hardware
1327 * that supports only one context.
1328 */
1329 priv->iw_mode = vif->type;
1330
1331 ctx->is_active = true;
1332
1333 err = iwl_legacy_set_mode(priv, ctx);
1334 if (err) {
1335 if (!ctx->always_active)
1336 ctx->is_active = false;
1337 return err;
1338 }
1339
1340 return 0;
1341}
1342
1343int
1344iwl_legacy_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1345{
1346 struct iwl_priv *priv = hw->priv;
1347 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1348 struct iwl_rxon_context *tmp, *ctx = NULL;
1349 int err;
1350
1351 IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
1352 vif->type, vif->addr);
1353
1354 mutex_lock(&priv->mutex);
1355
1356 if (!iwl_legacy_is_ready_rf(priv)) {
1357 IWL_WARN(priv, "Try to add interface when device not ready\n");
1358 err = -EINVAL;
1359 goto out;
1360 }
1361
1362 for_each_context(priv, tmp) {
1363 u32 possible_modes =
1364 tmp->interface_modes | tmp->exclusive_interface_modes;
1365
1366 if (tmp->vif) {
1367 /* check if this busy context is exclusive */
1368 if (tmp->exclusive_interface_modes &
1369 BIT(tmp->vif->type)) {
1370 err = -EINVAL;
1371 goto out;
1372 }
1373 continue;
1374 }
1375
1376 if (!(possible_modes & BIT(vif->type)))
1377 continue;
1378
1379 /* have maybe usable context w/o interface */
1380 ctx = tmp;
1381 break;
1382 }
1383
1384 if (!ctx) {
1385 err = -EOPNOTSUPP;
1386 goto out;
1387 }
1388
1389 vif_priv->ctx = ctx;
1390 ctx->vif = vif;
1391
1392 err = iwl_legacy_setup_interface(priv, ctx);
1393 if (!err)
1394 goto out;
1395
1396 ctx->vif = NULL;
1397 priv->iw_mode = NL80211_IFTYPE_STATION;
1398 out:
1399 mutex_unlock(&priv->mutex);
1400
1401 IWL_DEBUG_MAC80211(priv, "leave\n");
1402 return err;
1403}
1404EXPORT_SYMBOL(iwl_legacy_mac_add_interface);
1405
1406static void iwl_legacy_teardown_interface(struct iwl_priv *priv,
1407 struct ieee80211_vif *vif,
1408 bool mode_change)
1409{
1410 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
1411
1412 lockdep_assert_held(&priv->mutex);
1413
1414 if (priv->scan_vif == vif) {
1415 iwl_legacy_scan_cancel_timeout(priv, 200);
1416 iwl_legacy_force_scan_end(priv);
1417 }
1418
1419 if (!mode_change) {
1420 iwl_legacy_set_mode(priv, ctx);
1421 if (!ctx->always_active)
1422 ctx->is_active = false;
1423 }
1424}
1425
1426void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
1427 struct ieee80211_vif *vif)
1428{
1429 struct iwl_priv *priv = hw->priv;
1430 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
1431
1432 IWL_DEBUG_MAC80211(priv, "enter\n");
1433
1434 mutex_lock(&priv->mutex);
1435
1436 WARN_ON(ctx->vif != vif);
1437 ctx->vif = NULL;
1438
1439 iwl_legacy_teardown_interface(priv, vif, false);
1440
1441 memset(priv->bssid, 0, ETH_ALEN);
1442 mutex_unlock(&priv->mutex);
1443
1444 IWL_DEBUG_MAC80211(priv, "leave\n");
1445
1446}
1447EXPORT_SYMBOL(iwl_legacy_mac_remove_interface);
1448
1449int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv)
1450{
1451 if (!priv->txq)
1452 priv->txq = kzalloc(
1453 sizeof(struct iwl_tx_queue) *
1454 priv->cfg->base_params->num_of_queues,
1455 GFP_KERNEL);
1456 if (!priv->txq) {
1457 IWL_ERR(priv, "Not enough memory for txq\n");
1458 return -ENOMEM;
1459 }
1460 return 0;
1461}
1462EXPORT_SYMBOL(iwl_legacy_alloc_txq_mem);
1463
/* Free the TX queue array allocated by iwl_legacy_alloc_txq_mem(). */
void iwl_legacy_txq_mem(struct iwl_priv *priv)
{
	kfree(priv->txq);
	priv->txq = NULL;	/* guard against double free / stale use */
}
EXPORT_SYMBOL(iwl_legacy_txq_mem);
1470
1471#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
1472
1473#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
1474
/* Rewind both debug traffic rings and wipe any captured frame headers. */
void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
{
	priv->tx_traffic_idx = 0;
	priv->rx_traffic_idx = 0;
	if (priv->tx_traffic)
		memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
	if (priv->rx_traffic)
		memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
}
1484
1485int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
1486{
1487 u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
1488
1489 if (iwlegacy_debug_level & IWL_DL_TX) {
1490 if (!priv->tx_traffic) {
1491 priv->tx_traffic =
1492 kzalloc(traffic_size, GFP_KERNEL);
1493 if (!priv->tx_traffic)
1494 return -ENOMEM;
1495 }
1496 }
1497 if (iwlegacy_debug_level & IWL_DL_RX) {
1498 if (!priv->rx_traffic) {
1499 priv->rx_traffic =
1500 kzalloc(traffic_size, GFP_KERNEL);
1501 if (!priv->rx_traffic)
1502 return -ENOMEM;
1503 }
1504 }
1505 iwl_legacy_reset_traffic_log(priv);
1506 return 0;
1507}
1508EXPORT_SYMBOL(iwl_legacy_alloc_traffic_mem);
1509
1510void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
1511{
1512 kfree(priv->tx_traffic);
1513 priv->tx_traffic = NULL;
1514
1515 kfree(priv->rx_traffic);
1516 priv->rx_traffic = NULL;
1517}
1518EXPORT_SYMBOL(iwl_legacy_free_traffic_mem);
1519
1520void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
1521 u16 length, struct ieee80211_hdr *header)
1522{
1523 __le16 fc;
1524 u16 len;
1525
1526 if (likely(!(iwlegacy_debug_level & IWL_DL_TX)))
1527 return;
1528
1529 if (!priv->tx_traffic)
1530 return;
1531
1532 fc = header->frame_control;
1533 if (ieee80211_is_data(fc)) {
1534 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
1535 ? IWL_TRAFFIC_ENTRY_SIZE : length;
1536 memcpy((priv->tx_traffic +
1537 (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
1538 header, len);
1539 priv->tx_traffic_idx =
1540 (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1541 }
1542}
1543EXPORT_SYMBOL(iwl_legacy_dbg_log_tx_data_frame);
1544
1545void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
1546 u16 length, struct ieee80211_hdr *header)
1547{
1548 __le16 fc;
1549 u16 len;
1550
1551 if (likely(!(iwlegacy_debug_level & IWL_DL_RX)))
1552 return;
1553
1554 if (!priv->rx_traffic)
1555 return;
1556
1557 fc = header->frame_control;
1558 if (ieee80211_is_data(fc)) {
1559 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
1560 ? IWL_TRAFFIC_ENTRY_SIZE : length;
1561 memcpy((priv->rx_traffic +
1562 (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
1563 header, len);
1564 priv->rx_traffic_idx =
1565 (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1566 }
1567}
1568EXPORT_SYMBOL(iwl_legacy_dbg_log_rx_data_frame);
1569
/* Map a MANAGEMENT_* frame-type index to its name for debug output
 * (IWL_CMD expands each entry to a case returning the stringified name). */
const char *iwl_legacy_get_mgmt_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(MANAGEMENT_ASSOC_REQ);
	IWL_CMD(MANAGEMENT_ASSOC_RESP);
	IWL_CMD(MANAGEMENT_REASSOC_REQ);
	IWL_CMD(MANAGEMENT_REASSOC_RESP);
	IWL_CMD(MANAGEMENT_PROBE_REQ);
	IWL_CMD(MANAGEMENT_PROBE_RESP);
	IWL_CMD(MANAGEMENT_BEACON);
	IWL_CMD(MANAGEMENT_ATIM);
	IWL_CMD(MANAGEMENT_DISASSOC);
	IWL_CMD(MANAGEMENT_AUTH);
	IWL_CMD(MANAGEMENT_DEAUTH);
	IWL_CMD(MANAGEMENT_ACTION);
	default:
		return "UNKNOWN";

	}
}
1590
/* Map a CONTROL_* frame-type index to its name for debug output. */
const char *iwl_legacy_get_ctrl_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(CONTROL_BACK_REQ);
	IWL_CMD(CONTROL_BACK);
	IWL_CMD(CONTROL_PSPOLL);
	IWL_CMD(CONTROL_RTS);
	IWL_CMD(CONTROL_CTS);
	IWL_CMD(CONTROL_ACK);
	IWL_CMD(CONTROL_CFEND);
	IWL_CMD(CONTROL_CFENDACK);
	default:
		return "UNKNOWN";

	}
}
1607
1608void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv)
1609{
1610 memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
1611 memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
1612}
1613
/*
 * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is defined, the
 * iwl_legacy_update_stats function will
 * record all the MGMT, CTRL and DATA packets for both TX and RX passes.
 * Use debugfs to display the tx/rx statistics.
 * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is not defined, then no MGMT and CTRL
 * information will be recorded, but DATA packets still will be recorded
 * because iwl_led.c needs to control the LED blinking based on the
 * number of tx and rx data packets.
 *
 */
/*
 * Classify one frame by its frame-control field and bump the matching
 * management/control counter — or the data packet/byte counters — in
 * either the TX or RX statistics block.
 */
void
iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
{
	struct traffic_stats *stats;

	if (is_tx)
		stats = &priv->tx_stats;
	else
		stats = &priv->rx_stats;

	if (ieee80211_is_mgmt(fc)) {
		/* count by management subtype */
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
			stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
			stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
			stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
			stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
			stats->mgmt[MANAGEMENT_PROBE_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
			stats->mgmt[MANAGEMENT_PROBE_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_BEACON):
			stats->mgmt[MANAGEMENT_BEACON]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ATIM):
			stats->mgmt[MANAGEMENT_ATIM]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
			stats->mgmt[MANAGEMENT_DISASSOC]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_AUTH):
			stats->mgmt[MANAGEMENT_AUTH]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
			stats->mgmt[MANAGEMENT_DEAUTH]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ACTION):
			stats->mgmt[MANAGEMENT_ACTION]++;
			break;
		}
	} else if (ieee80211_is_ctl(fc)) {
		/* count by control subtype */
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
			stats->ctrl[CONTROL_BACK_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_BACK):
			stats->ctrl[CONTROL_BACK]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
			stats->ctrl[CONTROL_PSPOLL]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_RTS):
			stats->ctrl[CONTROL_RTS]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CTS):
			stats->ctrl[CONTROL_CTS]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ACK):
			stats->ctrl[CONTROL_ACK]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CFEND):
			stats->ctrl[CONTROL_CFEND]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
			stats->ctrl[CONTROL_CFENDACK]++;
			break;
		}
	} else {
		/* data */
		stats->data_cnt++;
		stats->data_bytes += len;
	}
}
EXPORT_SYMBOL(iwl_legacy_update_stats);
1708#endif
1709
1710int iwl_legacy_force_reset(struct iwl_priv *priv, bool external)
1711{
1712 struct iwl_force_reset *force_reset;
1713
1714 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1715 return -EINVAL;
1716
1717 force_reset = &priv->force_reset;
1718 force_reset->reset_request_count++;
1719 if (!external) {
1720 if (force_reset->last_force_reset_jiffies &&
1721 time_after(force_reset->last_force_reset_jiffies +
1722 force_reset->reset_duration, jiffies)) {
1723 IWL_DEBUG_INFO(priv, "force reset rejected\n");
1724 force_reset->reset_reject_count++;
1725 return -EAGAIN;
1726 }
1727 }
1728 force_reset->reset_success_count++;
1729 force_reset->last_force_reset_jiffies = jiffies;
1730
1731 /*
1732 * if the request is from external(ex: debugfs),
1733 * then always perform the request in regardless the module
1734 * parameter setting
1735 * if the request is from internal (uCode error or driver
1736 * detect failure), then fw_restart module parameter
1737 * need to be check before performing firmware reload
1738 */
1739
1740 if (!external && !priv->cfg->mod_params->restart_fw) {
1741 IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
1742 "module parameter setting\n");
1743 return 0;
1744 }
1745
1746 IWL_ERR(priv, "On demand firmware reload\n");
1747
1748 /* Set the FW error flag -- cleared on iwl_down */
1749 set_bit(STATUS_FW_ERROR, &priv->status);
1750 wake_up(&priv->wait_command_queue);
1751 /*
1752 * Keep the restart process from trying to send host
1753 * commands by clearing the INIT status bit
1754 */
1755 clear_bit(STATUS_READY, &priv->status);
1756 queue_work(priv->workqueue, &priv->restart);
1757
1758 return 0;
1759}
1760
1761int
1762iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
1763 struct ieee80211_vif *vif,
1764 enum nl80211_iftype newtype, bool newp2p)
1765{
1766 struct iwl_priv *priv = hw->priv;
1767 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
1768 struct iwl_rxon_context *tmp;
1769 u32 interface_modes;
1770 int err;
1771
1772 newtype = ieee80211_iftype_p2p(newtype, newp2p);
1773
1774 mutex_lock(&priv->mutex);
1775
1776 if (!ctx->vif || !iwl_legacy_is_ready_rf(priv)) {
1777 /*
1778 * Huh? But wait ... this can maybe happen when
1779 * we're in the middle of a firmware restart!
1780 */
1781 err = -EBUSY;
1782 goto out;
1783 }
1784
1785 interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
1786
1787 if (!(interface_modes & BIT(newtype))) {
1788 err = -EBUSY;
1789 goto out;
1790 }
1791
1792 if (ctx->exclusive_interface_modes & BIT(newtype)) {
1793 for_each_context(priv, tmp) {
1794 if (ctx == tmp)
1795 continue;
1796
1797 if (!tmp->vif)
1798 continue;
1799
1800 /*
1801 * The current mode switch would be exclusive, but
1802 * another context is active ... refuse the switch.
1803 */
1804 err = -EBUSY;
1805 goto out;
1806 }
1807 }
1808
1809 /* success */
1810 iwl_legacy_teardown_interface(priv, vif, true);
1811 vif->type = newtype;
1812 vif->p2p = newp2p;
1813 err = iwl_legacy_setup_interface(priv, ctx);
1814 WARN_ON(err);
1815 /*
1816 * We've switched internally, but submitting to the
1817 * device may have failed for some reason. Mask this
1818 * error, because otherwise mac80211 will not switch
1819 * (and set the interface type back) and we'll be
1820 * out of sync with it.
1821 */
1822 err = 0;
1823
1824 out:
1825 mutex_unlock(&priv->mutex);
1826 return err;
1827}
1828EXPORT_SYMBOL(iwl_legacy_mac_change_interface);
1829
1830/*
1831 * On every watchdog tick we check (latest) time stamp. If it does not
1832 * change during timeout period and queue is not empty we reset firmware.
1833 */
1834static int iwl_legacy_check_stuck_queue(struct iwl_priv *priv, int cnt)
1835{
1836 struct iwl_tx_queue *txq = &priv->txq[cnt];
1837 struct iwl_queue *q = &txq->q;
1838 unsigned long timeout;
1839 int ret;
1840
1841 if (q->read_ptr == q->write_ptr) {
1842 txq->time_stamp = jiffies;
1843 return 0;
1844 }
1845
1846 timeout = txq->time_stamp +
1847 msecs_to_jiffies(priv->cfg->base_params->wd_timeout);
1848
1849 if (time_after(jiffies, timeout)) {
1850 IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
1851 q->id, priv->cfg->base_params->wd_timeout);
1852 ret = iwl_legacy_force_reset(priv, false);
1853 return (ret == -EAGAIN) ? 0 : 1;
1854 }
1855
1856 return 0;
1857}
1858
/*
 * Making the watchdog tick a quarter of the timeout assures we will
 * discover a hung queue between timeout and 1.25*timeout
 */
1863#define IWL_WD_TICK(timeout) ((timeout) / 4)
1864
/*
 * Watchdog timer callback: we check each tx queue for a hang and, if
 * one is hung, we reset the firmware. If everything is fine just rearm
 * the timer.
 */
1869void iwl_legacy_bg_watchdog(unsigned long data)
1870{
1871 struct iwl_priv *priv = (struct iwl_priv *)data;
1872 int cnt;
1873 unsigned long timeout;
1874
1875 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1876 return;
1877
1878 timeout = priv->cfg->base_params->wd_timeout;
1879 if (timeout == 0)
1880 return;
1881
1882 /* monitor and check for stuck cmd queue */
1883 if (iwl_legacy_check_stuck_queue(priv, priv->cmd_queue))
1884 return;
1885
1886 /* monitor and check for other stuck queues */
1887 if (iwl_legacy_is_any_associated(priv)) {
1888 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
1889 /* skip as we already checked the command queue */
1890 if (cnt == priv->cmd_queue)
1891 continue;
1892 if (iwl_legacy_check_stuck_queue(priv, cnt))
1893 return;
1894 }
1895 }
1896
1897 mod_timer(&priv->watchdog, jiffies +
1898 msecs_to_jiffies(IWL_WD_TICK(timeout)));
1899}
1900EXPORT_SYMBOL(iwl_legacy_bg_watchdog);
1901
1902void iwl_legacy_setup_watchdog(struct iwl_priv *priv)
1903{
1904 unsigned int timeout = priv->cfg->base_params->wd_timeout;
1905
1906 if (timeout)
1907 mod_timer(&priv->watchdog,
1908 jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
1909 else
1910 del_timer(&priv->watchdog);
1911}
1912EXPORT_SYMBOL(iwl_legacy_setup_watchdog);
1913
1914/*
1915 * extended beacon time format
1916 * time in usec will be changed into a 32-bit value in extended:internal format
1917 * the extended part is the beacon counts
1918 * the internal part is the time in usec within one beacon interval
1919 */
/*
 * Convert @usec into the extended beacon-time format: the high bits
 * hold the number of whole beacon intervals, the low
 * beacon_time_tsf_bits hold the usec remainder within the current
 * interval.  Returns 0 when either @usec or the interval is zero.
 */
u32
iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
					u32 usec, u32 beacon_interval)
{
	u32 quot;
	u32 rem;
	u32 interval = beacon_interval * TIME_UNIT;

	if (!interval || !usec)
		return 0;

	/* beacon count, masked to the width of the extended (high) field */
	quot = (usec / interval) &
		(iwl_legacy_beacon_time_mask_high(priv,
		priv->hw_params.beacon_time_tsf_bits) >>
		priv->hw_params.beacon_time_tsf_bits);
	/* usec remainder, masked to the internal (low) field */
	rem = (usec % interval) & iwl_legacy_beacon_time_mask_low(priv,
				priv->hw_params.beacon_time_tsf_bits);

	/* pack: [beacon count | usec-in-interval] */
	return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
}
EXPORT_SYMBOL(iwl_legacy_usecs_to_beacons);
1941
1942/* base is usually what we get from ucode with each received frame,
1943 * the same as HW timer counter counting down
1944 */
/*
 * Add @addon to @base, both in extended beacon-time format.
 * NOTE(review): base appears to be a down-counting HW timer value
 * (see comment above) — the low parts are therefore combined by
 * subtraction, borrowing one beacon interval from the high part
 * when base's remainder is not larger than addon's.
 */
__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
			u32 addon, u32 beacon_interval)
{
	/* low parts: usec remainder within a beacon interval */
	u32 base_low = base & iwl_legacy_beacon_time_mask_low(priv,
					priv->hw_params.beacon_time_tsf_bits);
	u32 addon_low = addon & iwl_legacy_beacon_time_mask_low(priv,
					priv->hw_params.beacon_time_tsf_bits);
	u32 interval = beacon_interval * TIME_UNIT;
	/* high parts (beacon counts) are summed directly */
	u32 res = (base & iwl_legacy_beacon_time_mask_high(priv,
				priv->hw_params.beacon_time_tsf_bits)) +
				(addon & iwl_legacy_beacon_time_mask_high(priv,
				priv->hw_params.beacon_time_tsf_bits));

	if (base_low > addon_low)
		res += base_low - addon_low;
	else if (base_low < addon_low) {
		/* borrow a full interval and bump the beacon count */
		res += interval + base_low - addon_low;
		res += (1 << priv->hw_params.beacon_time_tsf_bits);
	} else
		res += (1 << priv->hw_params.beacon_time_tsf_bits);

	return cpu_to_le32(res);
}
EXPORT_SYMBOL(iwl_legacy_add_beacon_time);
1969
1970#ifdef CONFIG_PM
1971
int iwl_legacy_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct iwl_priv *priv = pci_get_drvdata(pdev);

	/*
	 * On system suspend mac80211 calls iwl_mac_stop() first, but
	 * that path has no knowledge of who the caller is and so never
	 * calls apm_ops.stop() to stop the DMA operation.  Stop the
	 * APM here to make sure DMA is quiesced before sleeping.
	 */
	iwl_legacy_apm_stop(priv);

	return 0;
}
EXPORT_SYMBOL(iwl_legacy_pci_suspend);
1989
1990int iwl_legacy_pci_resume(struct device *device)
1991{
1992 struct pci_dev *pdev = to_pci_dev(device);
1993 struct iwl_priv *priv = pci_get_drvdata(pdev);
1994 bool hw_rfkill = false;
1995
1996 /*
1997 * We disable the RETRY_TIMEOUT register (0x41) to keep
1998 * PCI Tx retries from interfering with C3 CPU state.
1999 */
2000 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
2001
2002 iwl_legacy_enable_interrupts(priv);
2003
2004 if (!(iwl_read32(priv, CSR_GP_CNTRL) &
2005 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
2006 hw_rfkill = true;
2007
2008 if (hw_rfkill)
2009 set_bit(STATUS_RF_KILL_HW, &priv->status);
2010 else
2011 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2012
2013 wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);
2014
2015 return 0;
2016}
2017EXPORT_SYMBOL(iwl_legacy_pci_resume);
2018
/*
 * PM callbacks shared by the legacy drivers: every sleep transition
 * maps onto the common PCI suspend/resume handlers above.
 */
const struct dev_pm_ops iwl_legacy_pm_ops = {
	.suspend = iwl_legacy_pci_suspend,
	.resume = iwl_legacy_pci_resume,
	.freeze = iwl_legacy_pci_suspend,
	.thaw = iwl_legacy_pci_resume,
	.poweroff = iwl_legacy_pci_suspend,
	.restore = iwl_legacy_pci_resume,
};
EXPORT_SYMBOL(iwl_legacy_pm_ops);
2028
2029#endif /* CONFIG_PM */
2030
2031static void
2032iwl_legacy_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
2033{
2034 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2035 return;
2036
2037 if (!ctx->is_active)
2038 return;
2039
2040 ctx->qos_data.def_qos_parm.qos_flags = 0;
2041
2042 if (ctx->qos_data.qos_active)
2043 ctx->qos_data.def_qos_parm.qos_flags |=
2044 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
2045
2046 if (ctx->ht.enabled)
2047 ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
2048
2049 IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
2050 ctx->qos_data.qos_active,
2051 ctx->qos_data.def_qos_parm.qos_flags);
2052
2053 iwl_legacy_send_cmd_pdu_async(priv, ctx->qos_cmd,
2054 sizeof(struct iwl_qosparam_cmd),
2055 &ctx->qos_data.def_qos_parm, NULL);
2056}
2057
2058/**
2059 * iwl_legacy_mac_config - mac80211 config callback
2060 */
int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
{
	struct iwl_priv *priv = hw->priv;
	const struct iwl_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = conf->channel;
	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
	struct iwl_rxon_context *ctx;
	unsigned long flags = 0;
	int ret = 0;
	u16 ch;
	int scan_active = 0;
	/* tracks per-context HT enable changes so QoS is refreshed below */
	bool ht_changed[NUM_IWL_RXON_CTX] = {};

	if (WARN_ON(!priv->cfg->ops->legacy))
		return -EOPNOTSUPP;

	mutex_lock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
					channel->hw_value, changed);

	if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
		scan_active = 1;
		IWL_DEBUG_MAC80211(priv, "scan active\n");
	}

	if (changed & (IEEE80211_CONF_CHANGE_SMPS |
			IEEE80211_CONF_CHANGE_CHANNEL)) {
		/* mac80211 uses static for non-HT which is what we want */
		priv->current_ht_config.smps = conf->smps_mode;

		/*
		 * Recalculate chain counts.
		 *
		 * If monitor mode is enabled then mac80211 will
		 * set up the SM PS mode to OFF if an HT channel is
		 * configured.
		 */
		if (priv->cfg->ops->hcmd->set_rxon_chain)
			for_each_context(priv, ctx)
				priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
	}

	/* during scanning mac80211 will delay channel setting until
	 * scan finish with changed = 0
	 */
	if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
		if (scan_active)
			goto set_ch_out;

		/* reject channels that are invalid for this hardware */
		ch = channel->hw_value;
		ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
		if (!iwl_legacy_is_channel_valid(ch_info)) {
			IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
			ret = -EINVAL;
			goto set_ch_out;
		}

		if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
		    !iwl_legacy_is_channel_ibss(ch_info)) {
			IWL_DEBUG_MAC80211(priv, "leave - not IBSS channel\n");
			ret = -EINVAL;
			goto set_ch_out;
		}

		/* per-context channel/HT staging is updated under priv->lock */
		spin_lock_irqsave(&priv->lock, flags);

		for_each_context(priv, ctx) {
			/* Configure HT40 channels */
			if (ctx->ht.enabled != conf_is_ht(conf)) {
				ctx->ht.enabled = conf_is_ht(conf);
				ht_changed[ctx->ctxid] = true;
			}
			if (ctx->ht.enabled) {
				if (conf_is_ht40_minus(conf)) {
					ctx->ht.extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_BELOW;
					ctx->ht.is_40mhz = true;
				} else if (conf_is_ht40_plus(conf)) {
					ctx->ht.extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
					ctx->ht.is_40mhz = true;
				} else {
					ctx->ht.extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_NONE;
					ctx->ht.is_40mhz = false;
				}
			} else
				ctx->ht.is_40mhz = false;

			/*
			 * Default to no protection. Protection mode will
			 * later be set from BSS config in iwl_ht_conf
			 */
			ctx->ht.protection =
					IEEE80211_HT_OP_MODE_PROTECTION_NONE;

			/* if we are switching from ht to 2.4 clear flags
			 * from any ht related info since 2.4 does not
			 * support ht */
			if ((le16_to_cpu(ctx->staging.channel) != ch))
				ctx->staging.flags = 0;

			iwl_legacy_set_rxon_channel(priv, channel, ctx);
			iwl_legacy_set_rxon_ht(priv, ht_conf);

			iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
						      ctx->vif);
		}

		spin_unlock_irqrestore(&priv->lock, flags);

		if (priv->cfg->ops->legacy->update_bcast_stations)
			ret =
			priv->cfg->ops->legacy->update_bcast_stations(priv);

 set_ch_out:
		/* The list of supported rates and rate mask can be different
		 * for each band; since the band may have changed, reset
		 * the rate mask to what mac80211 lists */
		iwl_legacy_set_rate(priv);
	}

	if (changed & (IEEE80211_CONF_CHANGE_PS |
			IEEE80211_CONF_CHANGE_IDLE)) {
		ret = iwl_legacy_power_update_mode(priv, false);
		if (ret)
			IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
			priv->tx_power_user_lmt, conf->power_level);

		iwl_legacy_set_tx_power(priv, conf->power_level, false);
	}

	if (!iwl_legacy_is_ready(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
		goto out;
	}

	if (scan_active)
		goto out;

	/* commit only contexts whose staging RXON actually changed */
	for_each_context(priv, ctx) {
		if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
			iwl_legacy_commit_rxon(priv, ctx);
		else
			IWL_DEBUG_INFO(priv,
				"Not re-sending same RXON configuration.\n");
		if (ht_changed[ctx->ctxid])
			iwl_legacy_update_qos(priv, ctx);
	}

out:
	IWL_DEBUG_MAC80211(priv, "leave\n");
	mutex_unlock(&priv->mutex);
	return ret;
}
EXPORT_SYMBOL(iwl_legacy_mac_config);
2223
2224void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw,
2225 struct ieee80211_vif *vif)
2226{
2227 struct iwl_priv *priv = hw->priv;
2228 unsigned long flags;
2229 /* IBSS can only be the IWL_RXON_CTX_BSS context */
2230 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2231
2232 if (WARN_ON(!priv->cfg->ops->legacy))
2233 return;
2234
2235 mutex_lock(&priv->mutex);
2236 IWL_DEBUG_MAC80211(priv, "enter\n");
2237
2238 spin_lock_irqsave(&priv->lock, flags);
2239 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
2240 spin_unlock_irqrestore(&priv->lock, flags);
2241
2242 spin_lock_irqsave(&priv->lock, flags);
2243
2244 /* new association get rid of ibss beacon skb */
2245 if (priv->beacon_skb)
2246 dev_kfree_skb(priv->beacon_skb);
2247
2248 priv->beacon_skb = NULL;
2249
2250 priv->timestamp = 0;
2251
2252 spin_unlock_irqrestore(&priv->lock, flags);
2253
2254 iwl_legacy_scan_cancel_timeout(priv, 100);
2255 if (!iwl_legacy_is_ready_rf(priv)) {
2256 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2257 mutex_unlock(&priv->mutex);
2258 return;
2259 }
2260
2261 /* we are restarting association process
2262 * clear RXON_FILTER_ASSOC_MSK bit
2263 */
2264 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2265 iwl_legacy_commit_rxon(priv, ctx);
2266
2267 iwl_legacy_set_rate(priv);
2268
2269 mutex_unlock(&priv->mutex);
2270
2271 IWL_DEBUG_MAC80211(priv, "leave\n");
2272}
2273EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
2274
2275static void iwl_legacy_ht_conf(struct iwl_priv *priv,
2276 struct ieee80211_vif *vif)
2277{
2278 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
2279 struct ieee80211_sta *sta;
2280 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
2281 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
2282
2283 IWL_DEBUG_ASSOC(priv, "enter:\n");
2284
2285 if (!ctx->ht.enabled)
2286 return;
2287
2288 ctx->ht.protection =
2289 bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
2290 ctx->ht.non_gf_sta_present =
2291 !!(bss_conf->ht_operation_mode &
2292 IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
2293
2294 ht_conf->single_chain_sufficient = false;
2295
2296 switch (vif->type) {
2297 case NL80211_IFTYPE_STATION:
2298 rcu_read_lock();
2299 sta = ieee80211_find_sta(vif, bss_conf->bssid);
2300 if (sta) {
2301 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2302 int maxstreams;
2303
2304 maxstreams = (ht_cap->mcs.tx_params &
2305 IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
2306 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
2307 maxstreams += 1;
2308
2309 if ((ht_cap->mcs.rx_mask[1] == 0) &&
2310 (ht_cap->mcs.rx_mask[2] == 0))
2311 ht_conf->single_chain_sufficient = true;
2312 if (maxstreams <= 1)
2313 ht_conf->single_chain_sufficient = true;
2314 } else {
2315 /*
2316 * If at all, this can only happen through a race
2317 * when the AP disconnects us while we're still
2318 * setting up the connection, in that case mac80211
2319 * will soon tell us about that.
2320 */
2321 ht_conf->single_chain_sufficient = true;
2322 }
2323 rcu_read_unlock();
2324 break;
2325 case NL80211_IFTYPE_ADHOC:
2326 ht_conf->single_chain_sufficient = true;
2327 break;
2328 default:
2329 break;
2330 }
2331
2332 IWL_DEBUG_ASSOC(priv, "leave\n");
2333}
2334
2335static inline void iwl_legacy_set_no_assoc(struct iwl_priv *priv,
2336 struct ieee80211_vif *vif)
2337{
2338 struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
2339
2340 /*
2341 * inform the ucode that there is no longer an
2342 * association and that no more packets should be
2343 * sent
2344 */
2345 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2346 ctx->staging.assoc_id = 0;
2347 iwl_legacy_commit_rxon(priv, ctx);
2348}
2349
2350static void iwl_legacy_beacon_update(struct ieee80211_hw *hw,
2351 struct ieee80211_vif *vif)
2352{
2353 struct iwl_priv *priv = hw->priv;
2354 unsigned long flags;
2355 __le64 timestamp;
2356 struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
2357
2358 if (!skb)
2359 return;
2360
2361 IWL_DEBUG_MAC80211(priv, "enter\n");
2362
2363 lockdep_assert_held(&priv->mutex);
2364
2365 if (!priv->beacon_ctx) {
2366 IWL_ERR(priv, "update beacon but no beacon context!\n");
2367 dev_kfree_skb(skb);
2368 return;
2369 }
2370
2371 spin_lock_irqsave(&priv->lock, flags);
2372
2373 if (priv->beacon_skb)
2374 dev_kfree_skb(priv->beacon_skb);
2375
2376 priv->beacon_skb = skb;
2377
2378 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
2379 priv->timestamp = le64_to_cpu(timestamp);
2380
2381 IWL_DEBUG_MAC80211(priv, "leave\n");
2382 spin_unlock_irqrestore(&priv->lock, flags);
2383
2384 if (!iwl_legacy_is_ready_rf(priv)) {
2385 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
2386 return;
2387 }
2388
2389 priv->cfg->ops->legacy->post_associate(priv);
2390}
2391
/*
 * mac80211 bss_info_changed callback: apply BSS configuration changes
 * (QoS, beaconing, BSSID, ERP, HT, association, IBSS membership) to the
 * RXON staging area and push them to the device where required.
 */
void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif,
				     struct ieee80211_bss_conf *bss_conf,
				     u32 changes)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
	int ret;

	if (WARN_ON(!priv->cfg->ops->legacy))
		return;

	IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);

	mutex_lock(&priv->mutex);

	if (!iwl_legacy_is_alive(priv)) {
		mutex_unlock(&priv->mutex);
		return;
	}

	if (changes & BSS_CHANGED_QOS) {
		unsigned long flags;

		/* qos_active is read under priv->lock by the QoS update */
		spin_lock_irqsave(&priv->lock, flags);
		ctx->qos_data.qos_active = bss_conf->qos;
		iwl_legacy_update_qos(priv, ctx);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		/*
		 * the add_interface code must make sure we only ever
		 * have a single interface that could be beaconing at
		 * any time.
		 */
		if (vif->bss_conf.enable_beacon)
			priv->beacon_ctx = ctx;
		else
			priv->beacon_ctx = NULL;
	}

	if (changes & BSS_CHANGED_BSSID) {
		IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);

		/*
		 * If there is currently a HW scan going on in the
		 * background then we need to cancel it else the RXON
		 * below/in post_associate will fail.
		 */
		if (iwl_legacy_scan_cancel_timeout(priv, 100)) {
			IWL_WARN(priv,
				"Aborted scan still in progress after 100ms\n");
			IWL_DEBUG_MAC80211(priv,
				"leaving - scan abort failed.\n");
			mutex_unlock(&priv->mutex);
			return;
		}

		/* mac80211 only sets assoc when in STATION mode */
		if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
			memcpy(ctx->staging.bssid_addr,
			       bss_conf->bssid, ETH_ALEN);

			/* currently needed in a few places */
			memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
		} else {
			/* not associated: drop the association filter */
			ctx->staging.filter_flags &=
						~RXON_FILTER_ASSOC_MSK;
		}

	}

	/*
	 * This needs to be after setting the BSSID in case
	 * mac80211 decides to do both changes at once because
	 * it will invoke post_associate.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
		iwl_legacy_beacon_update(hw, vif);

	if (changes & BSS_CHANGED_ERP_PREAMBLE) {
		IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
				   bss_conf->use_short_preamble);
		if (bss_conf->use_short_preamble)
			ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	}

	if (changes & BSS_CHANGED_ERP_CTS_PROT) {
		IWL_DEBUG_MAC80211(priv,
			"ERP_CTS %d\n", bss_conf->use_cts_prot);
		/* TGG protection only applies outside the 5 GHz band */
		if (bss_conf->use_cts_prot &&
			(priv->band != IEEE80211_BAND_5GHZ))
			ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
		if (bss_conf->use_cts_prot)
			ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
		else
			ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
	}

	if (changes & BSS_CHANGED_BASIC_RATES) {
		/* XXX use this information
		 *
		 * To do that, remove code from iwl_legacy_set_rate() and put something
		 * like this here:
		 *
		if (A-band)
			ctx->staging.ofdm_basic_rates =
				bss_conf->basic_rates;
		else
			ctx->staging.ofdm_basic_rates =
				bss_conf->basic_rates >> 4;
			ctx->staging.cck_basic_rates =
				bss_conf->basic_rates & 0xF;
		 */
	}

	if (changes & BSS_CHANGED_HT) {
		iwl_legacy_ht_conf(priv, vif);

		if (priv->cfg->ops->hcmd->set_rxon_chain)
			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
	}

	if (changes & BSS_CHANGED_ASSOC) {
		IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
		if (bss_conf->assoc) {
			priv->timestamp = bss_conf->timestamp;

			if (!iwl_legacy_is_rfkill(priv))
				priv->cfg->ops->legacy->post_associate(priv);
		} else
			iwl_legacy_set_no_assoc(priv, vif);
	}

	if (changes && iwl_legacy_is_associated_ctx(ctx) && bss_conf->aid) {
		IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
				   changes);
		ret = iwl_legacy_send_rxon_assoc(priv, ctx);
		if (!ret) {
			/* Sync active_rxon with latest change. */
			memcpy((void *)&ctx->active,
				&ctx->staging,
				sizeof(struct iwl_legacy_rxon_cmd));
		}
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		if (vif->bss_conf.enable_beacon) {
			memcpy(ctx->staging.bssid_addr,
			       bss_conf->bssid, ETH_ALEN);
			memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
			priv->cfg->ops->legacy->config_ap(priv);
		} else
			iwl_legacy_set_no_assoc(priv, vif);
	}

	if (changes & BSS_CHANGED_IBSS) {
		ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
							bss_conf->ibss_joined);
		if (ret)
			IWL_ERR(priv, "failed to %s IBSS station %pM\n",
				bss_conf->ibss_joined ? "add" : "remove",
				bss_conf->bssid);
	}

	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
2567
/*
 * Shared interrupt handler: masks device interrupts, reads the pending
 * CSR_INT/CSR_FH_INT_STATUS bits, and schedules the irq tasklet to do
 * the actual servicing.  Returns IRQ_NONE when the interrupt was not
 * ours (shared IRQ line) so interrupts are re-enabled immediately.
 */
irqreturn_t iwl_legacy_isr(int irq, void *data)
{
	struct iwl_priv *priv = data;
	u32 inta, inta_mask;
	u32 inta_fh;
	unsigned long flags;
	if (!priv)
		return IRQ_NONE;

	spin_lock_irqsave(&priv->lock, flags);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
	iwl_write32(priv, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(priv, CSR_INT);
	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta && !inta_fh) {
		IWL_DEBUG_ISR(priv,
			"Ignore interrupt, inta == 0, inta_fh == 0\n");
		goto none;
	}

	/* all-ones / 0xa5a5a5a0 patterns indicate the device vanished */
	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

	IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
		      inta, inta_mask, inta_fh);

	/* the scheduler interrupt bit is not serviced here */
	inta &= ~CSR_INT_BIT_SCD;

	/* iwl_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta || inta_fh))
		tasklet_schedule(&priv->irq_tasklet);

unplugged:
	spin_unlock_irqrestore(&priv->lock, flags);
	return IRQ_HANDLED;

none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &priv->status))
		iwl_legacy_enable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
	return IRQ_NONE;
}
EXPORT_SYMBOL(iwl_legacy_isr);
2628
/*
 * iwl_legacy_tx_cmd_protection: Set RTS/CTS protection flags.  This is
 * the only tx-command protection logic shared by 3945 and 4965.
 */
2633void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
2634 struct ieee80211_tx_info *info,
2635 __le16 fc, __le32 *tx_flags)
2636{
2637 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
2638 *tx_flags |= TX_CMD_FLG_RTS_MSK;
2639 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2640 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2641
2642 if (!ieee80211_is_mgmt(fc))
2643 return;
2644
2645 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
2646 case cpu_to_le16(IEEE80211_STYPE_AUTH):
2647 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2648 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
2649 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
2650 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2651 *tx_flags |= TX_CMD_FLG_CTS_MSK;
2652 break;
2653 }
2654 } else if (info->control.rates[0].flags &
2655 IEEE80211_TX_RC_USE_CTS_PROTECT) {
2656 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2657 *tx_flags |= TX_CMD_FLG_CTS_MSK;
2658 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2659 }
2660}
2661EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.h b/drivers/net/wireless/iwlegacy/iwl-core.h
deleted file mode 100644
index d1271fe07d4b..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-core.h
+++ /dev/null
@@ -1,636 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_legacy_core_h__
64#define __iwl_legacy_core_h__
65
66/************************
67 * forward declarations *
68 ************************/
69struct iwl_host_cmd;
70struct iwl_cmd;
71
72
73#define IWLWIFI_VERSION "in-tree:"
74#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
75#define DRV_AUTHOR "<ilw@linux.intel.com>"
76
77#define IWL_PCI_DEVICE(dev, subdev, cfg) \
78 .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
79 .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
80 .driver_data = (kernel_ulong_t)&(cfg)
81
82#define TIME_UNIT 1024
83
84#define IWL_SKU_G 0x1
85#define IWL_SKU_A 0x2
86#define IWL_SKU_N 0x8
87
88#define IWL_CMD(x) case x: return #x
89
90struct iwl_hcmd_ops {
91 int (*rxon_assoc)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
92 int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
93 void (*set_rxon_chain)(struct iwl_priv *priv,
94 struct iwl_rxon_context *ctx);
95};
96
97struct iwl_hcmd_utils_ops {
98 u16 (*get_hcmd_size)(u8 cmd_id, u16 len);
99 u16 (*build_addsta_hcmd)(const struct iwl_legacy_addsta_cmd *cmd,
100 u8 *data);
101 int (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
102 void (*post_scan)(struct iwl_priv *priv);
103};
104
105struct iwl_apm_ops {
106 int (*init)(struct iwl_priv *priv);
107 void (*config)(struct iwl_priv *priv);
108};
109
110struct iwl_debugfs_ops {
111 ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
112 size_t count, loff_t *ppos);
113 ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf,
114 size_t count, loff_t *ppos);
115 ssize_t (*general_stats_read)(struct file *file, char __user *user_buf,
116 size_t count, loff_t *ppos);
117};
118
119struct iwl_temp_ops {
120 void (*temperature)(struct iwl_priv *priv);
121};
122
123struct iwl_lib_ops {
124 /* set hw dependent parameters */
125 int (*set_hw_params)(struct iwl_priv *priv);
126 /* Handling TX */
127 void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv,
128 struct iwl_tx_queue *txq,
129 u16 byte_cnt);
130 int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv,
131 struct iwl_tx_queue *txq,
132 dma_addr_t addr,
133 u16 len, u8 reset, u8 pad);
134 void (*txq_free_tfd)(struct iwl_priv *priv,
135 struct iwl_tx_queue *txq);
136 int (*txq_init)(struct iwl_priv *priv,
137 struct iwl_tx_queue *txq);
138 /* setup Rx handler */
139 void (*rx_handler_setup)(struct iwl_priv *priv);
140 /* alive notification after init uCode load */
141 void (*init_alive_start)(struct iwl_priv *priv);
142 /* check validity of rtc data address */
143 int (*is_valid_rtc_data_addr)(u32 addr);
144 /* 1st ucode load */
145 int (*load_ucode)(struct iwl_priv *priv);
146
147 void (*dump_nic_error_log)(struct iwl_priv *priv);
148 int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display);
149 int (*set_channel_switch)(struct iwl_priv *priv,
150 struct ieee80211_channel_switch *ch_switch);
151 /* power management */
152 struct iwl_apm_ops apm_ops;
153
154 /* power */
155 int (*send_tx_power) (struct iwl_priv *priv);
156 void (*update_chain_flags)(struct iwl_priv *priv);
157
158 /* eeprom operations (as defined in iwl-eeprom.h) */
159 struct iwl_eeprom_ops eeprom_ops;
160
161 /* temperature */
162 struct iwl_temp_ops temp_ops;
163
164 struct iwl_debugfs_ops debugfs_ops;
165
166};
167
168struct iwl_led_ops {
169 int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
170};
171
172struct iwl_legacy_ops {
173 void (*post_associate)(struct iwl_priv *priv);
174 void (*config_ap)(struct iwl_priv *priv);
175 /* station management */
176 int (*update_bcast_stations)(struct iwl_priv *priv);
177 int (*manage_ibss_station)(struct iwl_priv *priv,
178 struct ieee80211_vif *vif, bool add);
179};
180
181struct iwl_ops {
182 const struct iwl_lib_ops *lib;
183 const struct iwl_hcmd_ops *hcmd;
184 const struct iwl_hcmd_utils_ops *utils;
185 const struct iwl_led_ops *led;
186 const struct iwl_nic_ops *nic;
187 const struct iwl_legacy_ops *legacy;
188 const struct ieee80211_ops *ieee80211_ops;
189};
190
191struct iwl_mod_params {
192 int sw_crypto; /* def: 0 = using hardware encryption */
193 int disable_hw_scan; /* def: 0 = use h/w scan */
194 int num_of_queues; /* def: HW dependent */
195 int disable_11n; /* def: 0 = 11n capabilities enabled */
196 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
197 int antenna; /* def: 0 = both antennas (use diversity) */
198 int restart_fw; /* def: 1 = restart firmware */
199};
200
201/*
202 * @led_compensation: compensate on the led on/off time per HW according
203 * to the deviation to achieve the desired led frequency.
204 * The detail algorithm is described in iwl-led.c
205 * @chain_noise_num_beacons: number of beacons used to compute chain noise
206 * @wd_timeout: TX queues watchdog timeout
207 * @temperature_kelvin: temperature report by uCode in kelvin
208 * @ucode_tracing: support ucode continuous tracing
209 * @sensitivity_calib_by_driver: driver has the capability to perform
210 * sensitivity calibration operation
211 * @chain_noise_calib_by_driver: driver has the capability to perform
212 * chain noise calibration operation
213 */
214struct iwl_base_params {
215 int eeprom_size;
216 int num_of_queues; /* def: HW dependent */
217 int num_of_ampdu_queues;/* def: HW dependent */
218 /* for iwl_legacy_apm_init() */
219 u32 pll_cfg_val;
220 bool set_l0s;
221 bool use_bsm;
222
223 u16 led_compensation;
224 int chain_noise_num_beacons;
225 unsigned int wd_timeout;
226 bool temperature_kelvin;
227 const bool ucode_tracing;
228 const bool sensitivity_calib_by_driver;
229 const bool chain_noise_calib_by_driver;
230};
231
232/**
233 * struct iwl_cfg
234 * @fw_name_pre: Firmware filename prefix. The api version and extension
235 * (.ucode) will be added to filename before loading from disk. The
236 * filename is constructed as fw_name_pre<api>.ucode.
237 * @ucode_api_max: Highest version of uCode API supported by driver.
238 * @ucode_api_min: Lowest version of uCode API supported by driver.
239 * @scan_antennas: available antenna for scan operation
240 * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
241 *
242 * We enable the driver to be backward compatible wrt API version. The
243 * driver specifies which APIs it supports (with @ucode_api_max being the
244 * highest and @ucode_api_min the lowest). Firmware will only be loaded if
245 * it has a supported API version. The firmware's API version will be
246 * stored in @iwl_priv, enabling the driver to make runtime changes based
247 * on firmware version used.
248 *
249 * For example,
250 * if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
251 * Driver interacts with Firmware API version >= 2.
252 * } else {
253 * Driver interacts with Firmware API version 1.
254 * }
255 *
256 * The ideal usage of this infrastructure is to treat a new ucode API
257 * release as a new hardware revision. That is, through utilizing the
258 * iwl_hcmd_utils_ops etc. we accommodate different command structures
259 * and flows between hardware versions as well as their API
260 * versions.
261 *
262 */
263struct iwl_cfg {
264 /* params specific to an individual device within a device family */
265 const char *name;
266 const char *fw_name_pre;
267 const unsigned int ucode_api_max;
268 const unsigned int ucode_api_min;
269 u8 valid_tx_ant;
270 u8 valid_rx_ant;
271 unsigned int sku;
272 u16 eeprom_ver;
273 u16 eeprom_calib_ver;
274 const struct iwl_ops *ops;
275 /* module based parameters which can be set from modprobe cmd */
276 const struct iwl_mod_params *mod_params;
277 /* params not likely to change within a device family */
278 struct iwl_base_params *base_params;
279 /* params likely to change within a device family */
280 u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
281 enum iwl_led_mode led_mode;
282};
283
284/***************************
285 * L i b *
286 ***************************/
287
288struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg);
289int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw,
290 struct ieee80211_vif *vif, u16 queue,
291 const struct ieee80211_tx_queue_params *params);
292int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw);
293void iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
294 struct iwl_rxon_context *ctx,
295 int hw_decrypt);
296int iwl_legacy_check_rxon_cmd(struct iwl_priv *priv,
297 struct iwl_rxon_context *ctx);
298int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
299 struct iwl_rxon_context *ctx);
300int iwl_legacy_set_rxon_channel(struct iwl_priv *priv,
301 struct ieee80211_channel *ch,
302 struct iwl_rxon_context *ctx);
303void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
304 struct iwl_rxon_context *ctx,
305 enum ieee80211_band band,
306 struct ieee80211_vif *vif);
307u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
308 enum ieee80211_band band);
309void iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
310 struct iwl_ht_config *ht_conf);
311bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
312 struct iwl_rxon_context *ctx,
313 struct ieee80211_sta_ht_cap *ht_cap);
314void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
315 struct iwl_rxon_context *ctx);
316void iwl_legacy_set_rate(struct iwl_priv *priv);
317int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
318 struct ieee80211_hdr *hdr,
319 u32 decrypt_res,
320 struct ieee80211_rx_status *stats);
321void iwl_legacy_irq_handle_error(struct iwl_priv *priv);
322int iwl_legacy_mac_add_interface(struct ieee80211_hw *hw,
323 struct ieee80211_vif *vif);
324void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
325 struct ieee80211_vif *vif);
326int iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
327 struct ieee80211_vif *vif,
328 enum nl80211_iftype newtype, bool newp2p);
329int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv);
330void iwl_legacy_txq_mem(struct iwl_priv *priv);
331
332#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
333int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv);
334void iwl_legacy_free_traffic_mem(struct iwl_priv *priv);
335void iwl_legacy_reset_traffic_log(struct iwl_priv *priv);
336void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
337 u16 length, struct ieee80211_hdr *header);
338void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
339 u16 length, struct ieee80211_hdr *header);
340const char *iwl_legacy_get_mgmt_string(int cmd);
341const char *iwl_legacy_get_ctrl_string(int cmd);
342void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv);
343void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc,
344 u16 len);
345#else
346static inline int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
347{
348 return 0;
349}
350static inline void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
351{
352}
353static inline void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
354{
355}
356static inline void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
357 u16 length, struct ieee80211_hdr *header)
358{
359}
360static inline void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
361 u16 length, struct ieee80211_hdr *header)
362{
363}
364static inline void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx,
365 __le16 fc, u16 len)
366{
367}
368#endif
369/*****************************************************
370 * RX handlers.
371 * **************************************************/
372void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
373 struct iwl_rx_mem_buffer *rxb);
374void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
375 struct iwl_rx_mem_buffer *rxb);
376void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
377 struct iwl_rx_mem_buffer *rxb);
378
379/*****************************************************
380* RX
381******************************************************/
382void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv);
383void iwl_legacy_cmd_queue_free(struct iwl_priv *priv);
384int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv);
385void iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
386 struct iwl_rx_queue *q);
387int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q);
388void iwl_legacy_tx_cmd_complete(struct iwl_priv *priv,
389 struct iwl_rx_mem_buffer *rxb);
390/* Handlers */
391void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
392 struct iwl_rx_mem_buffer *rxb);
393void iwl_legacy_recover_from_statistics(struct iwl_priv *priv,
394 struct iwl_rx_packet *pkt);
395void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success);
396void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
397
398/* TX helpers */
399
400/*****************************************************
401* TX
402******************************************************/
403void iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv,
404 struct iwl_tx_queue *txq);
405int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
406 int slots_num, u32 txq_id);
407void iwl_legacy_tx_queue_reset(struct iwl_priv *priv,
408 struct iwl_tx_queue *txq,
409 int slots_num, u32 txq_id);
410void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id);
411void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id);
412void iwl_legacy_setup_watchdog(struct iwl_priv *priv);
413/*****************************************************
414 * TX power
415 ****************************************************/
416int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
417
418/*******************************************************************************
419 * Rate
420 ******************************************************************************/
421
422u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
423 struct iwl_rxon_context *ctx);
424
425/*******************************************************************************
426 * Scanning
427 ******************************************************************************/
428void iwl_legacy_init_scan_params(struct iwl_priv *priv);
429int iwl_legacy_scan_cancel(struct iwl_priv *priv);
430int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
431void iwl_legacy_force_scan_end(struct iwl_priv *priv);
432int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
433 struct ieee80211_vif *vif,
434 struct cfg80211_scan_request *req);
435void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv);
436int iwl_legacy_force_reset(struct iwl_priv *priv, bool external);
437u16 iwl_legacy_fill_probe_req(struct iwl_priv *priv,
438 struct ieee80211_mgmt *frame,
439 const u8 *ta, const u8 *ie, int ie_len, int left);
440void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv);
441u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
442 enum ieee80211_band band,
443 u8 n_probes);
444u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
445 enum ieee80211_band band,
446 struct ieee80211_vif *vif);
447void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv);
448void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv);
449
450/* For faster active scanning, scan will move to the next channel if fewer than
451 * PLCP_QUIET_THRESH packets are heard on this channel within
452 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
453 * time if it's a quiet channel (nothing responded to our probe, and there's
454 * no other traffic).
455 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
456#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
457#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
458
459#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
460
461/*****************************************************
462 * S e n d i n g H o s t C o m m a n d s *
463 *****************************************************/
464
465const char *iwl_legacy_get_cmd_string(u8 cmd);
466int __must_check iwl_legacy_send_cmd_sync(struct iwl_priv *priv,
467 struct iwl_host_cmd *cmd);
468int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
469int __must_check iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id,
470 u16 len, const void *data);
471int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
472 const void *data,
473 void (*callback)(struct iwl_priv *priv,
474 struct iwl_device_cmd *cmd,
475 struct iwl_rx_packet *pkt));
476
477int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
478
479
480/*****************************************************
481 * PCI *
482 *****************************************************/
483
/*
 * Read the PCIe Link Control register (PCI_EXP_LNKCTL) of the device,
 * e.g. to inspect the current ASPM state.
 *
 * NOTE(review): pci_pcie_cap() returns 0 when the device has no PCIe
 * capability; 'pos' is not checked here, so in that case the config read
 * would target a bogus offset. Presumably all supported iwlegacy devices
 * are PCIe, so pos is always valid — confirm. The return value of
 * pci_read_config_word() is also ignored.
 */
static inline u16 iwl_legacy_pcie_link_ctl(struct iwl_priv *priv)
{
	int pos;
	u16 pci_lnk_ctl;
	pos = pci_pcie_cap(priv->pci_dev);
	pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
	return pci_lnk_ctl;
}
492
493void iwl_legacy_bg_watchdog(unsigned long data);
494u32 iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
495 u32 usec, u32 beacon_interval);
496__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
497 u32 addon, u32 beacon_interval);
498
499#ifdef CONFIG_PM
500int iwl_legacy_pci_suspend(struct device *device);
501int iwl_legacy_pci_resume(struct device *device);
502extern const struct dev_pm_ops iwl_legacy_pm_ops;
503
504#define IWL_LEGACY_PM_OPS (&iwl_legacy_pm_ops)
505
506#else /* !CONFIG_PM */
507
508#define IWL_LEGACY_PM_OPS NULL
509
510#endif /* !CONFIG_PM */
511
512/*****************************************************
513* Error Handling Debugging
514******************************************************/
515void iwl4965_dump_nic_error_log(struct iwl_priv *priv);
516#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
517void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
518 struct iwl_rxon_context *ctx);
519#else
520static inline void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
521 struct iwl_rxon_context *ctx)
522{
523}
524#endif
525
526void iwl_legacy_clear_isr_stats(struct iwl_priv *priv);
527
528/*****************************************************
529* GEOS
530******************************************************/
531int iwl_legacy_init_geos(struct iwl_priv *priv);
532void iwl_legacy_free_geos(struct iwl_priv *priv);
533
534/*************** DRIVER STATUS FUNCTIONS *****/
535
536#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
537/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
538#define STATUS_INT_ENABLED 2
539#define STATUS_RF_KILL_HW 3
540#define STATUS_CT_KILL 4
541#define STATUS_INIT 5
542#define STATUS_ALIVE 6
543#define STATUS_READY 7
544#define STATUS_TEMPERATURE 8
545#define STATUS_GEO_CONFIGURED 9
546#define STATUS_EXIT_PENDING 10
547#define STATUS_STATISTICS 12
548#define STATUS_SCANNING 13
549#define STATUS_SCAN_ABORTING 14
550#define STATUS_SCAN_HW 15
551#define STATUS_POWER_PMI 16
552#define STATUS_FW_ERROR 17
553#define STATUS_CHANNEL_SWITCH_PENDING 18
554
555static inline int iwl_legacy_is_ready(struct iwl_priv *priv)
556{
557 /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
558 * set but EXIT_PENDING is not */
559 return test_bit(STATUS_READY, &priv->status) &&
560 test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
561 !test_bit(STATUS_EXIT_PENDING, &priv->status);
562}
563
/* True once the uCode has reported ALIVE. */
static inline int iwl_legacy_is_alive(struct iwl_priv *priv)
{
	return test_bit(STATUS_ALIVE, &priv->status);
}

/* True while the driver is in its init sequence. */
static inline int iwl_legacy_is_init(struct iwl_priv *priv)
{
	return test_bit(STATUS_INIT, &priv->status);
}

/* True when the hardware RF-kill switch is asserted. */
static inline int iwl_legacy_is_rfkill_hw(struct iwl_priv *priv)
{
	return test_bit(STATUS_RF_KILL_HW, &priv->status);
}

/* Overall RF-kill state; only the hardware switch exists for legacy HW. */
static inline int iwl_legacy_is_rfkill(struct iwl_priv *priv)
{
	return iwl_legacy_is_rfkill_hw(priv);
}

/* True when the device is in thermal (CT) kill. */
static inline int iwl_legacy_is_ctkill(struct iwl_priv *priv)
{
	return test_bit(STATUS_CT_KILL, &priv->status);
}
588
static inline int iwl_legacy_is_ready_rf(struct iwl_priv *priv)
{
	/* Ready for RF operations: not RF-killed and the adapter is ready */
	return iwl_legacy_is_rfkill(priv) ? 0 : iwl_legacy_is_ready(priv);
}
597
598extern void iwl_legacy_send_bt_config(struct iwl_priv *priv);
599extern int iwl_legacy_send_statistics_request(struct iwl_priv *priv,
600 u8 flags, bool clear);
601void iwl_legacy_apm_stop(struct iwl_priv *priv);
602int iwl_legacy_apm_init(struct iwl_priv *priv);
603
604int iwl_legacy_send_rxon_timing(struct iwl_priv *priv,
605 struct iwl_rxon_context *ctx);
/* Dispatch RXON_ASSOC to the HW-specific host-command implementation. */
static inline int iwl_legacy_send_rxon_assoc(struct iwl_priv *priv,
			struct iwl_rxon_context *ctx)
{
	return priv->cfg->ops->hcmd->rxon_assoc(priv, ctx);
}
/* Commit the staging RXON via the HW-specific implementation. */
static inline int iwl_legacy_commit_rxon(struct iwl_priv *priv,
			struct iwl_rxon_context *ctx)
{
	return priv->cfg->ops->hcmd->commit_rxon(priv, ctx);
}
/* Look up the mac80211 supported-band descriptor for @band (may be NULL
 * if the band was not registered). */
static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
			struct iwl_priv *priv, enum ieee80211_band band)
{
	return priv->hw->wiphy->bands[band];
}
621
622/* mac80211 handlers */
623int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed);
624void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw,
625 struct ieee80211_vif *vif);
626void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
627 struct ieee80211_vif *vif,
628 struct ieee80211_bss_conf *bss_conf,
629 u32 changes);
630void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
631 struct ieee80211_tx_info *info,
632 __le16 fc, __le32 *tx_flags);
633
634irqreturn_t iwl_legacy_isr(int irq, void *data);
635
636#endif /* __iwl_legacy_core_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-debug.h b/drivers/net/wireless/iwlegacy/iwl-debug.h
deleted file mode 100644
index ae13112701bf..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-debug.h
+++ /dev/null
@@ -1,198 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#ifndef __iwl_legacy_debug_h__
30#define __iwl_legacy_debug_h__
31
32struct iwl_priv;
33extern u32 iwlegacy_debug_level;
34
35#define IWL_ERR(p, f, a...) dev_err(&((p)->pci_dev->dev), f, ## a)
36#define IWL_WARN(p, f, a...) dev_warn(&((p)->pci_dev->dev), f, ## a)
37#define IWL_INFO(p, f, a...) dev_info(&((p)->pci_dev->dev), f, ## a)
38#define IWL_CRIT(p, f, a...) dev_crit(&((p)->pci_dev->dev), f, ## a)
39
40#define iwl_print_hex_error(priv, p, len) \
41do { \
42 print_hex_dump(KERN_ERR, "iwl data: ", \
43 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
44} while (0)
45
46#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
47#define IWL_DEBUG(__priv, level, fmt, args...) \
48do { \
49 if (iwl_legacy_get_debug_level(__priv) & (level)) \
50 dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
51 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
52 __func__ , ## args); \
53} while (0)
54
55#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) \
56do { \
57 if ((iwl_legacy_get_debug_level(__priv) & (level)) && net_ratelimit()) \
58 dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
59 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
60 __func__ , ## args); \
61} while (0)
62
63#define iwl_print_hex_dump(priv, level, p, len) \
64do { \
65 if (iwl_legacy_get_debug_level(priv) & level) \
66 print_hex_dump(KERN_DEBUG, "iwl data: ", \
67 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
68} while (0)
69
70#else
71#define IWL_DEBUG(__priv, level, fmt, args...)
72#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
73static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
74 const void *p, u32 len)
75{}
76#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
77
78#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
79int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name);
80void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv);
81#else
82static inline int
83iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
84{
85 return 0;
86}
87static inline void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
88{
89}
90#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
91
92/*
93 * To use the debug system:
94 *
95 * If you are defining a new debug classification, simply add it to the #define
96 * list here in the form of
97 *
98 * #define IWL_DL_xxxx VALUE
99 *
100 * where xxxx should be the name of the classification (for example, WEP).
101 *
102 * You then need to either add a IWL_xxxx_DEBUG() macro definition for your
103 * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want
104 * to send output to that classification.
105 *
106 * The active debug levels can be accessed via files
107 *
108 * /sys/module/iwl4965/parameters/debug{50}
109 * /sys/module/iwl3945/parameters/debug
110 * /sys/class/net/wlan0/device/debug_level
111 *
112 * when CONFIG_IWLWIFI_LEGACY_DEBUG=y.
113 */
114
115/* 0x0000000F - 0x00000001 */
116#define IWL_DL_INFO (1 << 0)
117#define IWL_DL_MAC80211 (1 << 1)
118#define IWL_DL_HCMD (1 << 2)
119#define IWL_DL_STATE (1 << 3)
120/* 0x000000F0 - 0x00000010 */
121#define IWL_DL_MACDUMP (1 << 4)
122#define IWL_DL_HCMD_DUMP (1 << 5)
123#define IWL_DL_EEPROM (1 << 6)
124#define IWL_DL_RADIO (1 << 7)
125/* 0x00000F00 - 0x00000100 */
126#define IWL_DL_POWER (1 << 8)
127#define IWL_DL_TEMP (1 << 9)
128#define IWL_DL_NOTIF (1 << 10)
129#define IWL_DL_SCAN (1 << 11)
130/* 0x0000F000 - 0x00001000 */
131#define IWL_DL_ASSOC (1 << 12)
132#define IWL_DL_DROP (1 << 13)
133#define IWL_DL_TXPOWER (1 << 14)
134#define IWL_DL_AP (1 << 15)
135/* 0x000F0000 - 0x00010000 */
136#define IWL_DL_FW (1 << 16)
137#define IWL_DL_RF_KILL (1 << 17)
138#define IWL_DL_FW_ERRORS (1 << 18)
139#define IWL_DL_LED (1 << 19)
140/* 0x00F00000 - 0x00100000 */
141#define IWL_DL_RATE (1 << 20)
142#define IWL_DL_CALIB (1 << 21)
143#define IWL_DL_WEP (1 << 22)
144#define IWL_DL_TX (1 << 23)
145/* 0x0F000000 - 0x01000000 */
146#define IWL_DL_RX (1 << 24)
147#define IWL_DL_ISR (1 << 25)
148#define IWL_DL_HT (1 << 26)
149#define IWL_DL_IO (1 << 27)
150/* 0xF0000000 - 0x10000000 */
151#define IWL_DL_11H (1 << 28)
152#define IWL_DL_STATS (1 << 29)
153#define IWL_DL_TX_REPLY (1 << 30)
154#define IWL_DL_QOS (1 << 31)
155
156#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
157#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
158#define IWL_DEBUG_MACDUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a)
159#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
160#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
161#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
162#define IWL_DEBUG_TX(p, f, a...) IWL_DEBUG(p, IWL_DL_TX, f, ## a)
163#define IWL_DEBUG_ISR(p, f, a...) IWL_DEBUG(p, IWL_DL_ISR, f, ## a)
164#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
165#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
166#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
167#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
168#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
169#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
170#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
171#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
172#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
173#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \
174 IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
175#define IWL_DEBUG_AP(p, f, a...) IWL_DEBUG(p, IWL_DL_AP, f, ## a)
176#define IWL_DEBUG_TXPOWER(p, f, a...) IWL_DEBUG(p, IWL_DL_TXPOWER, f, ## a)
177#define IWL_DEBUG_IO(p, f, a...) IWL_DEBUG(p, IWL_DL_IO, f, ## a)
178#define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a)
179#define IWL_DEBUG_RATE_LIMIT(p, f, a...) \
180 IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a)
181#define IWL_DEBUG_NOTIF(p, f, a...) IWL_DEBUG(p, IWL_DL_NOTIF, f, ## a)
182#define IWL_DEBUG_ASSOC(p, f, a...) \
183 IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
184#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) \
185 IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
186#define IWL_DEBUG_HT(p, f, a...) IWL_DEBUG(p, IWL_DL_HT, f, ## a)
187#define IWL_DEBUG_STATS(p, f, a...) IWL_DEBUG(p, IWL_DL_STATS, f, ## a)
188#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \
189 IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
190#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
191#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
192 IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
193#define IWL_DEBUG_QOS(p, f, a...) IWL_DEBUG(p, IWL_DL_QOS, f, ## a)
194#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
195#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
196#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
197
198#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-debugfs.c
deleted file mode 100644
index 996996a71657..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-debugfs.c
+++ /dev/null
@@ -1,1313 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#include <linux/ieee80211.h>
29#include <net/mac80211.h>
30
31
32#include "iwl-dev.h"
33#include "iwl-debug.h"
34#include "iwl-core.h"
35#include "iwl-io.h"
36
/* create and remove of files */

/*
 * debugfs creation helpers.  Each creates one entry under @parent and
 * jumps to a local "err" label on failure.  DEBUGFS_ADD_FILE expects a
 * "struct iwl_priv *priv" in scope to use as the file's private data and
 * an iwl_legacy_dbgfs_<name>_ops file_operations struct (see the
 * DEBUGFS_*_FILE_OPS generators below).
 */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, priv,		\
			 &iwl_legacy_dbgfs_##name##_ops))		\
		goto err;						\
} while (0)

#define DEBUGFS_ADD_BOOL(name, parent, ptr) do {			\
	struct dentry *__tmp;						\
	__tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR,		\
				    parent, ptr);			\
	if (IS_ERR(__tmp) || !__tmp)					\
		goto err;						\
} while (0)

#define DEBUGFS_ADD_X32(name, parent, ptr) do {				\
	struct dentry *__tmp;						\
	__tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR,		\
				   parent, ptr);			\
	if (IS_ERR(__tmp) || !__tmp)					\
		goto err;						\
} while (0)
59
/* file operation */

/*
 * Forward-declaration generators for the per-file read/write handlers.
 * Used by the DEBUGFS_*_FILE_OPS macros below so the file_operations
 * structs can be defined before the handler bodies appear.
 */
#define DEBUGFS_READ_FUNC(name)						\
static ssize_t iwl_legacy_dbgfs_##name##_read(struct file *file,	\
					char __user *user_buf,		\
					size_t count, loff_t *ppos);

#define DEBUGFS_WRITE_FUNC(name)					\
static ssize_t iwl_legacy_dbgfs_##name##_write(struct file *file,	\
					const char __user *user_buf,	\
					size_t count, loff_t *ppos);
71
/*
 * Generic debugfs open: copy the iwl_priv pointer (stored in i_private
 * when the file was created) into file->private_data so every read/write
 * handler can retrieve it.  Never fails.
 */
static int
iwl_legacy_dbgfs_open_file_generic(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
78
/*
 * Generators for the file_operations of read-only, write-only and
 * read-write debugfs entries.  All share the generic open handler above
 * and the standard llseek implementation.
 */
#define DEBUGFS_READ_FILE_OPS(name)					\
	DEBUGFS_READ_FUNC(name);					\
static const struct file_operations iwl_legacy_dbgfs_##name##_ops = {	\
	.read = iwl_legacy_dbgfs_##name##_read,				\
	.open = iwl_legacy_dbgfs_open_file_generic,			\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_legacy_dbgfs_##name##_ops = {	\
	.write = iwl_legacy_dbgfs_##name##_write,			\
	.open = iwl_legacy_dbgfs_open_file_generic,			\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
	DEBUGFS_READ_FUNC(name);					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_legacy_dbgfs_##name##_ops = {	\
	.write = iwl_legacy_dbgfs_##name##_write,			\
	.read = iwl_legacy_dbgfs_##name##_read,				\
	.open = iwl_legacy_dbgfs_open_file_generic,			\
	.llseek = generic_file_llseek,					\
};
104
/*
 * Read handler for "tx_statistics": dumps the driver-maintained TX frame
 * counters -- one line per management and per control frame subtype, then
 * the data frame count and total byte count.
 */
static ssize_t iwl_legacy_dbgfs_tx_statistics_read(struct file *file,
						char __user *user_buf,
						size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	char *buf;
	int pos = 0;

	int cnt;
	ssize_t ret;
	/* ~50 chars per counter line plus slack for the fixed headers */
	const size_t bufsz = 100 +
		sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
	for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\t%25s\t\t: %u\n",
				 iwl_legacy_get_mgmt_string(cnt),
				 priv->tx_stats.mgmt[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
	for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\t%25s\t\t: %u\n",
				 iwl_legacy_get_ctrl_string(cnt),
				 priv->tx_stats.ctrl[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
	pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
			 priv->tx_stats.data_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
			 priv->tx_stats.data_bytes);
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
143
144static ssize_t
145iwl_legacy_dbgfs_clear_traffic_statistics_write(struct file *file,
146 const char __user *user_buf,
147 size_t count, loff_t *ppos)
148{
149 struct iwl_priv *priv = file->private_data;
150 u32 clear_flag;
151 char buf[8];
152 int buf_size;
153
154 memset(buf, 0, sizeof(buf));
155 buf_size = min(count, sizeof(buf) - 1);
156 if (copy_from_user(buf, user_buf, buf_size))
157 return -EFAULT;
158 if (sscanf(buf, "%x", &clear_flag) != 1)
159 return -EFAULT;
160 iwl_legacy_clear_traffic_stats(priv);
161
162 return count;
163}
164
/*
 * Read handler for "rx_statistics": same layout as the TX counterpart but
 * reporting the driver-maintained RX frame counters.
 */
static ssize_t iwl_legacy_dbgfs_rx_statistics_read(struct file *file,
						char __user *user_buf,
						size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	char *buf;
	int pos = 0;
	int cnt;
	ssize_t ret;
	/* ~50 chars per counter line plus slack for the fixed headers */
	const size_t bufsz = 100 +
		sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
	for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\t%25s\t\t: %u\n",
				 iwl_legacy_get_mgmt_string(cnt),
				 priv->rx_stats.mgmt[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
	for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\t%25s\t\t: %u\n",
				 iwl_legacy_get_ctrl_string(cnt),
				 priv->rx_stats.ctrl[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
	pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
			 priv->rx_stats.data_cnt);
	pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
			 priv->rx_stats.data_bytes);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
204
/*
 * Masks for trimming a 32-bit SRAM word down to its 1-3 valid low bytes
 * when the dump length is not a multiple of 4 (see the sram read handler).
 *
 * Fix: the original definitions carried a trailing semicolon inside the
 * macro body, which only compiled by accident because every use site was
 * a bare "val &= MASK;" statement (expanding to a harmless empty extra
 * statement).  Using the macros in any expression context would have been
 * a syntax error.  The semicolons are removed; all existing uses expand
 * identically.
 */
#define BYTE1_MASK 0x000000ff
#define BYTE2_MASK 0x0000ffff
#define BYTE3_MASK 0x00ffffff
/*
 * Read handler for "sram": dumps a window of device target memory as
 * 32-bit words, four words per output line.  The window (offset/length)
 * is chosen through the matching write handler; if unset, the entire
 * data segment of the currently loaded ucode image is dumped starting at
 * 0x800000.
 */
static ssize_t iwl_legacy_dbgfs_sram_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	u32 val;
	char *buf;
	ssize_t ret;
	int i;
	int pos = 0;
	struct iwl_priv *priv = file->private_data;
	size_t bufsz;

	/* default is to dump the entire data segment */
	if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
		priv->dbgfs_sram_offset = 0x800000;
		if (priv->ucode_type == UCODE_INIT)
			priv->dbgfs_sram_len = priv->ucode_init_data.len;
		else
			priv->dbgfs_sram_len = priv->ucode_data.len;
	}
	/* 10 output chars per dumped word ("0x%08x ") plus header slack */
	bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10;
	buf = kmalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
			priv->dbgfs_sram_len);
	pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
			priv->dbgfs_sram_offset);
	/* i counts remaining bytes; reads walk forward from the offset */
	for (i = priv->dbgfs_sram_len; i > 0; i -= 4) {
		val = iwl_legacy_read_targ_mem(priv, priv->dbgfs_sram_offset + \
					priv->dbgfs_sram_len - i);
		/* final word may be only partially inside the window:
		 * mask off the bytes beyond the requested length */
		if (i < 4) {
			switch (i) {
			case 1:
				val &= BYTE1_MASK;
				break;
			case 2:
				val &= BYTE2_MASK;
				break;
			case 3:
				val &= BYTE3_MASK;
				break;
			}
		}
		if (!(i % 16))
			pos += scnprintf(buf + pos, bufsz - pos, "\n");
		pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
262
263static ssize_t iwl_legacy_dbgfs_sram_write(struct file *file,
264 const char __user *user_buf,
265 size_t count, loff_t *ppos)
266{
267 struct iwl_priv *priv = file->private_data;
268 char buf[64];
269 int buf_size;
270 u32 offset, len;
271
272 memset(buf, 0, sizeof(buf));
273 buf_size = min(count, sizeof(buf) - 1);
274 if (copy_from_user(buf, user_buf, buf_size))
275 return -EFAULT;
276
277 if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
278 priv->dbgfs_sram_offset = offset;
279 priv->dbgfs_sram_len = len;
280 } else {
281 priv->dbgfs_sram_offset = 0;
282 priv->dbgfs_sram_len = 0;
283 }
284
285 return count;
286}
287
/*
 * Read handler for "stations": lists every in-use station table entry
 * with its MAC address, station flags, and per-TID aggregation state
 * (sequence number, TX queue, frame/TFD counts, BA bitmap).
 */
static ssize_t
iwl_legacy_dbgfs_stations_read(struct file *file, char __user *user_buf,
			size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	struct iwl_station_entry *station;
	int max_sta = priv->hw_params.max_stations;
	char *buf;
	int i, j, pos = 0;
	ssize_t ret;
	/* Add 30 for initial string */
	const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations);

	buf = kmalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
			priv->num_stations);

	for (i = 0; i < max_sta; i++) {
		station = &priv->stations[i];
		if (!station->used)
			continue;
		pos += scnprintf(buf + pos, bufsz - pos,
				 "station %d - addr: %pM, flags: %#x\n",
				 i, station->sta.sta.addr,
				 station->sta.station_flags_msk);
		pos += scnprintf(buf + pos, bufsz - pos,
				"TID\tseq_num\ttxq_id\tframes\ttfds\t");
		pos += scnprintf(buf + pos, bufsz - pos,
				"start_idx\tbitmap\t\t\trate_n_flags\n");

		for (j = 0; j < MAX_TID_COUNT; j++) {
			pos += scnprintf(buf + pos, bufsz - pos,
				"%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
				j, station->tid[j].seq_number,
				station->tid[j].agg.txq_id,
				station->tid[j].agg.frame_count,
				station->tid[j].tfds_in_queue,
				station->tid[j].agg.start_idx,
				station->tid[j].agg.bitmap,
				station->tid[j].agg.rate_n_flags);

			if (station->tid[j].agg.wait_for_ba)
				pos += scnprintf(buf + pos, bufsz - pos,
						 " - waitforba");
			pos += scnprintf(buf + pos, bufsz - pos, "\n");
		}

		pos += scnprintf(buf + pos, bufsz - pos, "\n");
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
345
/*
 * Read handler for "nvm": hex-dumps the cached EEPROM image, 16 bytes per
 * line, preceded by the EEPROM version.  Fails with -ENODATA if the
 * configured EEPROM size is not a multiple of 16 and -ENOMEM if the cache
 * was never filled.
 */
static ssize_t iwl_legacy_dbgfs_nvm_read(struct file *file,
				       char __user *user_buf,
				       size_t count,
				       loff_t *ppos)
{
	ssize_t ret;
	struct iwl_priv *priv = file->private_data;
	int pos = 0, ofs = 0, buf_size = 0;
	const u8 *ptr;
	char *buf;
	u16 eeprom_ver;
	size_t eeprom_len = priv->cfg->base_params->eeprom_size;
	buf_size = 4 * eeprom_len + 256;

	if (eeprom_len % 16) {
		IWL_ERR(priv, "NVM size is not multiple of 16.\n");
		return -ENODATA;
	}

	ptr = priv->eeprom;
	if (!ptr) {
		IWL_ERR(priv, "Invalid EEPROM memory\n");
		return -ENOMEM;
	}

	/* 4 characters for byte 0xYY */
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(priv, "Can not allocate Buffer\n");
		return -ENOMEM;
	}
	eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
	pos += scnprintf(buf + pos, buf_size - pos, "EEPROM "
			"version: 0x%x\n", eeprom_ver);
	for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
		pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
		hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
				   buf_size - pos, 0);
		/* hex_dump_to_buffer NUL-terminates; advance past it */
		pos += strlen(buf + pos);
		if (buf_size - pos > 0)
			buf[pos++] = '\n';
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
393
/*
 * Read handler for "channels": lists every channel of the 2.4GHz and
 * 5.2GHz bands with its max TX power and radar/IBSS/passive-scan
 * restrictions.  Returns -EAGAIN until geography has been configured.
 */
static ssize_t
iwl_legacy_dbgfs_channels_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	struct ieee80211_channel *channels = NULL;
	const struct ieee80211_supported_band *supp_band = NULL;
	int pos = 0, i, bufsz = PAGE_SIZE;
	char *buf;
	ssize_t ret;

	if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(priv, "Can not allocate Buffer\n");
		return -ENOMEM;
	}

	supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
	if (supp_band) {
		channels = supp_band->channels;

		pos += scnprintf(buf + pos, bufsz - pos,
				"Displaying %d channels in 2.4GHz band 802.11bg):\n",
				supp_band->n_channels);

		for (i = 0; i < supp_band->n_channels; i++)
			pos += scnprintf(buf + pos, bufsz - pos,
				"%d: %ddBm: BSS%s%s, %s.\n",
				channels[i].hw_value,
				channels[i].max_power,
				channels[i].flags & IEEE80211_CHAN_RADAR ?
				" (IEEE 802.11h required)" : "",
				((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
				|| (channels[i].flags &
				IEEE80211_CHAN_RADAR)) ? "" :
				", IBSS",
				channels[i].flags &
				IEEE80211_CHAN_PASSIVE_SCAN ?
				"passive only" : "active/passive");
	}
	supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
	if (supp_band) {
		channels = supp_band->channels;

		pos += scnprintf(buf + pos, bufsz - pos,
				"Displaying %d channels in 5.2GHz band (802.11a)\n",
				supp_band->n_channels);

		for (i = 0; i < supp_band->n_channels; i++)
			pos += scnprintf(buf + pos, bufsz - pos,
				"%d: %ddBm: BSS%s%s, %s.\n",
				channels[i].hw_value,
				channels[i].max_power,
				channels[i].flags & IEEE80211_CHAN_RADAR ?
				" (IEEE 802.11h required)" : "",
				((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
				|| (channels[i].flags &
				IEEE80211_CHAN_RADAR)) ? "" :
				", IBSS",
				channels[i].flags &
				IEEE80211_CHAN_PASSIVE_SCAN ?
				"passive only" : "active/passive");
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
464
/*
 * Read handler for "status": prints one line per driver STATUS_* bit
 * showing whether it is currently set in priv->status.
 */
static ssize_t iwl_legacy_dbgfs_status_read(struct file *file,
						char __user *user_buf,
						size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	char buf[512];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
		test_bit(STATUS_HCMD_ACTIVE, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
		test_bit(STATUS_INT_ENABLED, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
		test_bit(STATUS_RF_KILL_HW, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
		test_bit(STATUS_CT_KILL, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
		test_bit(STATUS_INIT, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
		test_bit(STATUS_ALIVE, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
		test_bit(STATUS_READY, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n",
		test_bit(STATUS_TEMPERATURE, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
		test_bit(STATUS_GEO_CONFIGURED, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
		test_bit(STATUS_EXIT_PENDING, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
		test_bit(STATUS_STATISTICS, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
		test_bit(STATUS_SCANNING, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
		test_bit(STATUS_SCAN_ABORTING, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
		test_bit(STATUS_SCAN_HW, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
		test_bit(STATUS_POWER_PMI, &priv->status));
	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
		test_bit(STATUS_FW_ERROR, &priv->status));
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
508
/*
 * Read handler for "interrupt": reports the accumulated interrupt
 * statistics (HW/SW errors, rfkill, wakeup, RX handler hit counts per
 * command, TX/FH and unhandled interrupts).
 */
static ssize_t iwl_legacy_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	int pos = 0;
	int cnt = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(priv, "Can not allocate Buffer\n");
		return -ENOMEM;
	}

	pos += scnprintf(buf + pos, bufsz - pos,
			"Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
		priv->isr_stats.hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
		priv->isr_stats.sw);
	if (priv->isr_stats.sw || priv->isr_stats.hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
			"\tLast Restarting Code:  0x%X\n",
			priv->isr_stats.err_code);
	}
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
		priv->isr_stats.sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
		priv->isr_stats.alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
		"HW RF KILL switch toggled:\t %u\n",
		priv->isr_stats.rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
		priv->isr_stats.ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
		priv->isr_stats.wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
		"Rx command responses:\t\t %u\n",
		priv->isr_stats.rx);
	/* only list commands that were actually seen */
	for (cnt = 0; cnt < REPLY_MAX; cnt++) {
		if (priv->isr_stats.rx_handlers[cnt] > 0)
			pos += scnprintf(buf + pos, bufsz - pos,
				"\tRx handler[%36s]:\t\t %u\n",
				iwl_legacy_get_cmd_string(cnt),
				priv->isr_stats.rx_handlers[cnt]);
	}

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
		priv->isr_stats.tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
		priv->isr_stats.unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
575
576static ssize_t iwl_legacy_dbgfs_interrupt_write(struct file *file,
577 const char __user *user_buf,
578 size_t count, loff_t *ppos)
579{
580 struct iwl_priv *priv = file->private_data;
581 char buf[8];
582 int buf_size;
583 u32 reset_flag;
584
585 memset(buf, 0, sizeof(buf));
586 buf_size = min(count, sizeof(buf) - 1);
587 if (copy_from_user(buf, user_buf, buf_size))
588 return -EFAULT;
589 if (sscanf(buf, "%x", &reset_flag) != 1)
590 return -EFAULT;
591 if (reset_flag == 0)
592 iwl_legacy_clear_isr_stats(priv);
593
594 return count;
595}
596
/*
 * Read handler for "qos": for each RXON context, prints the EDCA
 * parameters (cw_min/cw_max/aifsn/txop) of every access category.
 */
static ssize_t
iwl_legacy_dbgfs_qos_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	struct iwl_rxon_context *ctx;
	int pos = 0, i;
	char buf[256 * NUM_IWL_RXON_CTX];
	const size_t bufsz = sizeof(buf);

	for_each_context(priv, ctx) {
		pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
				 ctx->ctxid);
		for (i = 0; i < AC_NUM; i++) {
			pos += scnprintf(buf + pos, bufsz - pos,
				"\tcw_min\tcw_max\taifsn\ttxop\n");
			pos += scnprintf(buf + pos, bufsz - pos,
				"AC[%d]\t%u\t%u\t%u\t%u\n", i,
				ctx->qos_data.def_qos_parm.ac[i].cw_min,
				ctx->qos_data.def_qos_parm.ac[i].cw_max,
				ctx->qos_data.def_qos_parm.ac[i].aifsn,
				ctx->qos_data.def_qos_parm.ac[i].edca_txop);
		}
		pos += scnprintf(buf + pos, bufsz - pos, "\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
624
625static ssize_t iwl_legacy_dbgfs_disable_ht40_write(struct file *file,
626 const char __user *user_buf,
627 size_t count, loff_t *ppos)
628{
629 struct iwl_priv *priv = file->private_data;
630 char buf[8];
631 int buf_size;
632 int ht40;
633
634 memset(buf, 0, sizeof(buf));
635 buf_size = min(count, sizeof(buf) - 1);
636 if (copy_from_user(buf, user_buf, buf_size))
637 return -EFAULT;
638 if (sscanf(buf, "%d", &ht40) != 1)
639 return -EFAULT;
640 if (!iwl_legacy_is_any_associated(priv))
641 priv->disable_ht40 = ht40 ? true : false;
642 else {
643 IWL_ERR(priv, "Sta associated with AP - "
644 "Change to 40MHz channel support is not allowed\n");
645 return -EINVAL;
646 }
647
648 return count;
649}
650
651static ssize_t iwl_legacy_dbgfs_disable_ht40_read(struct file *file,
652 char __user *user_buf,
653 size_t count, loff_t *ppos)
654{
655 struct iwl_priv *priv = file->private_data;
656 char buf[100];
657 int pos = 0;
658 const size_t bufsz = sizeof(buf);
659
660 pos += scnprintf(buf + pos, bufsz - pos,
661 "11n 40MHz Mode: %s\n",
662 priv->disable_ht40 ? "Disabled" : "Enabled");
663 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
664}
665
/* Instantiate prototypes and file_operations for the entries above. */
DEBUGFS_READ_WRITE_FILE_OPS(sram);
DEBUGFS_READ_FILE_OPS(nvm);
DEBUGFS_READ_FILE_OPS(stations);
DEBUGFS_READ_FILE_OPS(channels);
DEBUGFS_READ_FILE_OPS(status);
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(qos);
DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
674
/*
 * Read handler for "traffic_log": dumps every TX queue's read/write
 * pointers, the RX queue pointers, and -- when the corresponding
 * IWL_DL_TX/IWL_DL_RX debug bits are set and capture buffers exist --
 * hex dumps of the captured TX and RX traffic rings.
 */
static ssize_t iwl_legacy_dbgfs_traffic_log_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	int pos = 0, ofs = 0;
	int cnt = 0, entry;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	struct iwl_rx_queue *rxq = &priv->rxq;
	char *buf;
	int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
		(priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
	const u8 *ptr;
	ssize_t ret;

	if (!priv->txq) {
		IWL_ERR(priv, "txq not ready\n");
		return -EAGAIN;
	}
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(priv, "Can not allocate buffer\n");
		return -ENOMEM;
	}
	pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
	for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
		txq = &priv->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				"q[%d]: read_ptr: %u, write_ptr: %u\n",
				cnt, q->read_ptr, q->write_ptr);
	}
	if (priv->tx_traffic && (iwlegacy_debug_level & IWL_DL_TX)) {
		ptr = priv->tx_traffic;
		pos += scnprintf(buf + pos, bufsz - pos,
				"Tx Traffic idx: %u\n", priv->tx_traffic_idx);
		for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
			for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
			     entry++,  ofs += 16) {
				pos += scnprintf(buf + pos, bufsz - pos,
						"0x%.4x ", ofs);
				hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
						   buf + pos, bufsz - pos, 0);
				pos += strlen(buf + pos);
				if (bufsz - pos > 0)
					buf[pos++] = '\n';
			}
		}
	}

	pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
	pos += scnprintf(buf + pos, bufsz - pos,
			"read: %u, write: %u\n",
			 rxq->read, rxq->write);

	if (priv->rx_traffic && (iwlegacy_debug_level & IWL_DL_RX)) {
		ptr = priv->rx_traffic;
		pos += scnprintf(buf + pos, bufsz - pos,
				"Rx Traffic idx: %u\n", priv->rx_traffic_idx);
		for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
			for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
			     entry++,  ofs += 16) {
				pos += scnprintf(buf + pos, bufsz - pos,
						"0x%.4x ", ofs);
				hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
						   buf + pos, bufsz - pos, 0);
				pos += strlen(buf + pos);
				if (bufsz - pos > 0)
					buf[pos++] = '\n';
			}
		}
	}

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
753
754static ssize_t iwl_legacy_dbgfs_traffic_log_write(struct file *file,
755 const char __user *user_buf,
756 size_t count, loff_t *ppos)
757{
758 struct iwl_priv *priv = file->private_data;
759 char buf[8];
760 int buf_size;
761 int traffic_log;
762
763 memset(buf, 0, sizeof(buf));
764 buf_size = min(count, sizeof(buf) - 1);
765 if (copy_from_user(buf, user_buf, buf_size))
766 return -EFAULT;
767 if (sscanf(buf, "%d", &traffic_log) != 1)
768 return -EFAULT;
769 if (traffic_log == 0)
770 iwl_legacy_reset_traffic_log(priv);
771
772 return count;
773}
774
/*
 * Read handler for "tx_queue": one line per HW TX queue with its
 * read/write pointers, stopped state and swq_id decomposition; the first
 * four queues (the ACs) additionally report their stop counts.
 */
static ssize_t iwl_legacy_dbgfs_tx_queue_read(struct file *file,
						char __user *user_buf,
						size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	const size_t bufsz = sizeof(char) * 64 *
				priv->cfg->base_params->num_of_queues;

	if (!priv->txq) {
		IWL_ERR(priv, "txq not ready\n");
		return -EAGAIN;
	}
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
		txq = &priv->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				"hwq %.2d: read=%u write=%u stop=%d"
				" swq_id=%#.2x (ac %d/hwq %d)\n",
				cnt, q->read_ptr, q->write_ptr,
				!!test_bit(cnt, priv->queue_stopped),
				txq->swq_id, txq->swq_id & 3,
				(txq->swq_id >> 2) & 0x1f);
		if (cnt >= 4)
			continue;
		/* for the ACs, display the stop count too */
		pos += scnprintf(buf + pos, bufsz - pos,
				"        stop-count: %d\n",
				atomic_read(&priv->queue_stop_count[cnt]));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
818
819static ssize_t iwl_legacy_dbgfs_rx_queue_read(struct file *file,
820 char __user *user_buf,
821 size_t count, loff_t *ppos) {
822
823 struct iwl_priv *priv = file->private_data;
824 struct iwl_rx_queue *rxq = &priv->rxq;
825 char buf[256];
826 int pos = 0;
827 const size_t bufsz = sizeof(buf);
828
829 pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
830 rxq->read);
831 pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
832 rxq->write);
833 pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
834 rxq->free_count);
835 if (rxq->rb_stts) {
836 pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
837 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
838 } else {
839 pos += scnprintf(buf + pos, bufsz - pos,
840 "closed_rb_num: Not Allocated\n");
841 }
842 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
843}
844
/* Delegate "ucode_rx_stats" reads to the device-specific handler
 * installed in priv->cfg->ops->lib->debugfs_ops. */
static ssize_t iwl_legacy_dbgfs_ucode_rx_stats_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	return priv->cfg->ops->lib->debugfs_ops.rx_stats_read(file,
			user_buf, count, ppos);
}
853
/* Delegate "ucode_tx_stats" reads to the device-specific handler
 * installed in priv->cfg->ops->lib->debugfs_ops. */
static ssize_t iwl_legacy_dbgfs_ucode_tx_stats_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	return priv->cfg->ops->lib->debugfs_ops.tx_stats_read(file,
			user_buf, count, ppos);
}
862
/* Delegate "ucode_general_stats" reads to the device-specific handler
 * installed in priv->cfg->ops->lib->debugfs_ops. */
static ssize_t iwl_legacy_dbgfs_ucode_general_stats_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	return priv->cfg->ops->lib->debugfs_ops.general_stats_read(file,
			user_buf, count, ppos);
}
871
/*
 * Read handler for "sensitivity": dumps the current RX sensitivity
 * calibration state (auto-correlation thresholds, false-alarm and bad-PLCP
 * counters, energy/silence tracking) from priv->sensitivity_data.
 */
static ssize_t iwl_legacy_dbgfs_sensitivity_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	int pos = 0;
	int cnt = 0;
	char *buf;
	int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100;
	ssize_t ret;
	struct iwl_sensitivity_data *data;

	data = &priv->sensitivity_data;
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(priv, "Can not allocate Buffer\n");
		return -ENOMEM;
	}

	pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
			data->auto_corr_ofdm);
	pos += scnprintf(buf + pos, bufsz - pos,
			"auto_corr_ofdm_mrc:\t\t %u\n",
			data->auto_corr_ofdm_mrc);
	pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
			data->auto_corr_ofdm_x1);
	pos += scnprintf(buf + pos, bufsz - pos,
			"auto_corr_ofdm_mrc_x1:\t\t %u\n",
			data->auto_corr_ofdm_mrc_x1);
	pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
			data->auto_corr_cck);
	pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
			data->auto_corr_cck_mrc);
	pos += scnprintf(buf + pos, bufsz - pos,
			"last_bad_plcp_cnt_ofdm:\t\t %u\n",
			data->last_bad_plcp_cnt_ofdm);
	pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
			data->last_fa_cnt_ofdm);
	pos += scnprintf(buf + pos, bufsz - pos,
			"last_bad_plcp_cnt_cck:\t\t %u\n",
			data->last_bad_plcp_cnt_cck);
	pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
			data->last_fa_cnt_cck);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
			data->nrg_curr_state);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
			data->nrg_prev_state);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
	for (cnt = 0; cnt < 10; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos, " %u",
				data->nrg_value[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
	for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos, " %u",
				data->nrg_silence_rssi[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
			data->nrg_silence_ref);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
			data->nrg_energy_idx);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
			data->nrg_silence_idx);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
			data->nrg_th_cck);
	pos += scnprintf(buf + pos, bufsz - pos,
			"nrg_auto_corr_silence_diff:\t %u\n",
			data->nrg_auto_corr_silence_diff);
	pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
			data->num_in_cck_no_fa);
	pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
			data->nrg_th_ofdm);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
951
952
/*
 * Read handler for "chain_noise": dumps the chain-noise calibration state
 * (per-chain noise/signal accumulators, disconnect flags, delta gain
 * codes) from priv->chain_noise_data.
 */
static ssize_t iwl_legacy_dbgfs_chain_noise_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos) {

	struct iwl_priv *priv = file->private_data;
	int pos = 0;
	int cnt = 0;
	char *buf;
	int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100;
	ssize_t ret;
	struct iwl_chain_noise_data *data;

	data = &priv->chain_noise_data;
	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(priv, "Can not allocate Buffer\n");
		return -ENOMEM;
	}

	pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
			data->active_chains);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
			data->chain_noise_a);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
			data->chain_noise_b);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
			data->chain_noise_c);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
			data->chain_signal_a);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
			data->chain_signal_b);
	pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
			data->chain_signal_c);
	pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
			data->beacon_count);

	pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
	for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos, " %u",
				data->disconn_array[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");
	pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
	for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
		pos += scnprintf(buf + pos, bufsz - pos, " %u",
				data->delta_gain_code[cnt]);
	}
	pos += scnprintf(buf + pos, bufsz - pos, "\n");
	pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
			data->radio_write);
	pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
			data->state);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
1010
1011static ssize_t iwl_legacy_dbgfs_power_save_status_read(struct file *file,
1012 char __user *user_buf,
1013 size_t count, loff_t *ppos)
1014{
1015 struct iwl_priv *priv = file->private_data;
1016 char buf[60];
1017 int pos = 0;
1018 const size_t bufsz = sizeof(buf);
1019 u32 pwrsave_status;
1020
1021 pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) &
1022 CSR_GP_REG_POWER_SAVE_STATUS_MSK;
1023
1024 pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
1025 pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
1026 (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
1027 (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
1028 (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
1029 "error");
1030
1031 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1032}
1033
1034static ssize_t iwl_legacy_dbgfs_clear_ucode_statistics_write(struct file *file,
1035 const char __user *user_buf,
1036 size_t count, loff_t *ppos)
1037{
1038 struct iwl_priv *priv = file->private_data;
1039 char buf[8];
1040 int buf_size;
1041 int clear;
1042
1043 memset(buf, 0, sizeof(buf));
1044 buf_size = min(count, sizeof(buf) - 1);
1045 if (copy_from_user(buf, user_buf, buf_size))
1046 return -EFAULT;
1047 if (sscanf(buf, "%d", &clear) != 1)
1048 return -EFAULT;
1049
1050 /* make request to uCode to retrieve statistics information */
1051 mutex_lock(&priv->mutex);
1052 iwl_legacy_send_statistics_request(priv, CMD_SYNC, true);
1053 mutex_unlock(&priv->mutex);
1054
1055 return count;
1056}
1057
1058static ssize_t iwl_legacy_dbgfs_rxon_flags_read(struct file *file,
1059 char __user *user_buf,
1060 size_t count, loff_t *ppos) {
1061
1062 struct iwl_priv *priv = file->private_data;
1063 int len = 0;
1064 char buf[20];
1065
1066 len = sprintf(buf, "0x%04X\n",
1067 le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags));
1068 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1069}
1070
1071static ssize_t iwl_legacy_dbgfs_rxon_filter_flags_read(struct file *file,
1072 char __user *user_buf,
1073 size_t count, loff_t *ppos) {
1074
1075 struct iwl_priv *priv = file->private_data;
1076 int len = 0;
1077 char buf[20];
1078
1079 len = sprintf(buf, "0x%04X\n",
1080 le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags));
1081 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1082}
1083
1084static ssize_t iwl_legacy_dbgfs_fh_reg_read(struct file *file,
1085 char __user *user_buf,
1086 size_t count, loff_t *ppos)
1087{
1088 struct iwl_priv *priv = file->private_data;
1089 char *buf;
1090 int pos = 0;
1091 ssize_t ret = -EFAULT;
1092
1093 if (priv->cfg->ops->lib->dump_fh) {
1094 ret = pos = priv->cfg->ops->lib->dump_fh(priv, &buf, true);
1095 if (buf) {
1096 ret = simple_read_from_buffer(user_buf,
1097 count, ppos, buf, pos);
1098 kfree(buf);
1099 }
1100 }
1101
1102 return ret;
1103}
1104
1105static ssize_t iwl_legacy_dbgfs_missed_beacon_read(struct file *file,
1106 char __user *user_buf,
1107 size_t count, loff_t *ppos) {
1108
1109 struct iwl_priv *priv = file->private_data;
1110 int pos = 0;
1111 char buf[12];
1112 const size_t bufsz = sizeof(buf);
1113
1114 pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
1115 priv->missed_beacon_threshold);
1116
1117 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1118}
1119
1120static ssize_t iwl_legacy_dbgfs_missed_beacon_write(struct file *file,
1121 const char __user *user_buf,
1122 size_t count, loff_t *ppos)
1123{
1124 struct iwl_priv *priv = file->private_data;
1125 char buf[8];
1126 int buf_size;
1127 int missed;
1128
1129 memset(buf, 0, sizeof(buf));
1130 buf_size = min(count, sizeof(buf) - 1);
1131 if (copy_from_user(buf, user_buf, buf_size))
1132 return -EFAULT;
1133 if (sscanf(buf, "%d", &missed) != 1)
1134 return -EINVAL;
1135
1136 if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN ||
1137 missed > IWL_MISSED_BEACON_THRESHOLD_MAX)
1138 priv->missed_beacon_threshold =
1139 IWL_MISSED_BEACON_THRESHOLD_DEF;
1140 else
1141 priv->missed_beacon_threshold = missed;
1142
1143 return count;
1144}
1145
1146static ssize_t iwl_legacy_dbgfs_force_reset_read(struct file *file,
1147 char __user *user_buf,
1148 size_t count, loff_t *ppos) {
1149
1150 struct iwl_priv *priv = file->private_data;
1151 int pos = 0;
1152 char buf[300];
1153 const size_t bufsz = sizeof(buf);
1154 struct iwl_force_reset *force_reset;
1155
1156 force_reset = &priv->force_reset;
1157
1158 pos += scnprintf(buf + pos, bufsz - pos,
1159 "\tnumber of reset request: %d\n",
1160 force_reset->reset_request_count);
1161 pos += scnprintf(buf + pos, bufsz - pos,
1162 "\tnumber of reset request success: %d\n",
1163 force_reset->reset_success_count);
1164 pos += scnprintf(buf + pos, bufsz - pos,
1165 "\tnumber of reset request reject: %d\n",
1166 force_reset->reset_reject_count);
1167 pos += scnprintf(buf + pos, bufsz - pos,
1168 "\treset duration: %lu\n",
1169 force_reset->reset_duration);
1170
1171 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1172}
1173
1174static ssize_t iwl_legacy_dbgfs_force_reset_write(struct file *file,
1175 const char __user *user_buf,
1176 size_t count, loff_t *ppos) {
1177
1178 int ret;
1179 struct iwl_priv *priv = file->private_data;
1180
1181 ret = iwl_legacy_force_reset(priv, true);
1182
1183 return ret ? ret : count;
1184}
1185
1186static ssize_t iwl_legacy_dbgfs_wd_timeout_write(struct file *file,
1187 const char __user *user_buf,
1188 size_t count, loff_t *ppos) {
1189
1190 struct iwl_priv *priv = file->private_data;
1191 char buf[8];
1192 int buf_size;
1193 int timeout;
1194
1195 memset(buf, 0, sizeof(buf));
1196 buf_size = min(count, sizeof(buf) - 1);
1197 if (copy_from_user(buf, user_buf, buf_size))
1198 return -EFAULT;
1199 if (sscanf(buf, "%d", &timeout) != 1)
1200 return -EINVAL;
1201 if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
1202 timeout = IWL_DEF_WD_TIMEOUT;
1203
1204 priv->cfg->base_params->wd_timeout = timeout;
1205 iwl_legacy_setup_watchdog(priv);
1206 return count;
1207}
1208
/*
 * file_operations tables for the debugfs entries registered in
 * iwl_legacy_dbgfs_register() below. READ/WRITE indicates which
 * iwl_legacy_dbgfs_<name>_{read,write} handlers above are wired up.
 */
DEBUGFS_READ_FILE_OPS(rx_statistics);
DEBUGFS_READ_FILE_OPS(tx_statistics);
DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
DEBUGFS_READ_FILE_OPS(ucode_general_stats);
DEBUGFS_READ_FILE_OPS(sensitivity);
DEBUGFS_READ_FILE_OPS(chain_noise);
DEBUGFS_READ_FILE_OPS(power_save_status);
DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
DEBUGFS_READ_FILE_OPS(rxon_flags);
DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
DEBUGFS_WRITE_FILE_OPS(wd_timeout);
1228
1229/*
1230 * Create the debugfs files and directories
1231 *
1232 */
1233int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
1234{
1235 struct dentry *phyd = priv->hw->wiphy->debugfsdir;
1236 struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
1237
1238 dir_drv = debugfs_create_dir(name, phyd);
1239 if (!dir_drv)
1240 return -ENOMEM;
1241
1242 priv->debugfs_dir = dir_drv;
1243
1244 dir_data = debugfs_create_dir("data", dir_drv);
1245 if (!dir_data)
1246 goto err;
1247 dir_rf = debugfs_create_dir("rf", dir_drv);
1248 if (!dir_rf)
1249 goto err;
1250 dir_debug = debugfs_create_dir("debug", dir_drv);
1251 if (!dir_debug)
1252 goto err;
1253
1254 DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
1255 DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
1256 DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
1257 DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
1258 DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
1259 DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
1260 DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
1261 DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
1262 DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
1263 DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
1264 DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
1265 DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
1266 DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
1267 DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
1268 DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
1269 DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
1270 DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
1271 DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
1272 DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
1273 DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
1274 DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
1275 DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
1276
1277 if (priv->cfg->base_params->sensitivity_calib_by_driver)
1278 DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
1279 if (priv->cfg->base_params->chain_noise_calib_by_driver)
1280 DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
1281 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
1282 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
1283 DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
1284 if (priv->cfg->base_params->sensitivity_calib_by_driver)
1285 DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
1286 &priv->disable_sens_cal);
1287 if (priv->cfg->base_params->chain_noise_calib_by_driver)
1288 DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
1289 &priv->disable_chain_noise_cal);
1290 DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf,
1291 &priv->disable_tx_power_cal);
1292 return 0;
1293
1294err:
1295 IWL_ERR(priv, "Can't create the debugfs directory\n");
1296 iwl_legacy_dbgfs_unregister(priv);
1297 return -ENOMEM;
1298}
1299EXPORT_SYMBOL(iwl_legacy_dbgfs_register);
1300
1301/**
1302 * Remove the debugfs files and directories
1303 *
1304 */
1305void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
1306{
1307 if (!priv->debugfs_dir)
1308 return;
1309
1310 debugfs_remove_recursive(priv->debugfs_dir);
1311 priv->debugfs_dir = NULL;
1312}
1313EXPORT_SYMBOL(iwl_legacy_dbgfs_unregister);
diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h
deleted file mode 100644
index 9c786edf56fd..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-dev.h
+++ /dev/null
@@ -1,1364 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26/*
27 * Please use this file (iwl-dev.h) for driver implementation definitions.
28 * Please use iwl-commands.h for uCode API definitions.
29 * Please use iwl-4965-hw.h for hardware-related definitions.
30 */
31
32#ifndef __iwl_legacy_dev_h__
33#define __iwl_legacy_dev_h__
34
35#include <linux/interrupt.h>
36#include <linux/pci.h> /* for struct pci_device_id */
37#include <linux/kernel.h>
38#include <linux/leds.h>
39#include <linux/wait.h>
40#include <net/ieee80211_radiotap.h>
41
42#include "iwl-eeprom.h"
43#include "iwl-csr.h"
44#include "iwl-prph.h"
45#include "iwl-fh.h"
46#include "iwl-debug.h"
47#include "iwl-4965-hw.h"
48#include "iwl-3945-hw.h"
49#include "iwl-led.h"
50#include "iwl-power.h"
51#include "iwl-legacy-rs.h"
52
53struct iwl_tx_queue;
54
55/* CT-KILL constants */
56#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
57
58/* Default noise level to report when noise measurement is not available.
59 * This may be because we're:
60 * 1) Not associated (4965, no beacon statistics being sent to driver)
61 * 2) Scanning (noise measurement does not apply to associated channel)
62 * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
63 * Use default noise value of -127 ... this is below the range of measurable
64 * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
65 * Also, -127 works better than 0 when averaging frames with/without
66 * noise info (e.g. averaging might be done in app); measured dBm values are
67 * always negative ... using a negative value as the default keeps all
68 * averages within an s8's (used in some apps) range of negative values. */
69#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
70
71/*
72 * RTS threshold here is total size [2347] minus 4 FCS bytes
73 * Per spec:
74 * a value of 0 means RTS on all data/management packets
75 * a value > max MSDU size means no RTS
76 * else RTS for data/management frames where MPDU is larger
77 * than RTS value.
78 */
79#define DEFAULT_RTS_THRESHOLD 2347U
80#define MIN_RTS_THRESHOLD 0U
81#define MAX_RTS_THRESHOLD 2347U
82#define MAX_MSDU_SIZE 2304U
83#define MAX_MPDU_SIZE 2346U
84#define DEFAULT_BEACON_INTERVAL 100U
85#define DEFAULT_SHORT_RETRY_LIMIT 7U
86#define DEFAULT_LONG_RETRY_LIMIT 4U
87
88struct iwl_rx_mem_buffer {
89 dma_addr_t page_dma;
90 struct page *page;
91 struct list_head list;
92};
93
94#define rxb_addr(r) page_address(r->page)
95
96/* defined below */
97struct iwl_device_cmd;
98
99struct iwl_cmd_meta {
100 /* only for SYNC commands, iff the reply skb is wanted */
101 struct iwl_host_cmd *source;
102 /*
103 * only for ASYNC commands
104 * (which is somewhat stupid -- look at iwl-sta.c for instance
105 * which duplicates a bunch of code because the callback isn't
106 * invoked for SYNC commands, if it were and its result passed
107 * through it would be simpler...)
108 */
109 void (*callback)(struct iwl_priv *priv,
110 struct iwl_device_cmd *cmd,
111 struct iwl_rx_packet *pkt);
112
113 /* The CMD_SIZE_HUGE flag bit indicates that the command
114 * structure is stored at the end of the shared queue memory. */
115 u32 flags;
116
117 DEFINE_DMA_UNMAP_ADDR(mapping);
118 DEFINE_DMA_UNMAP_LEN(len);
119};
120
121/*
122 * Generic queue structure
123 *
124 * Contains common data for Rx and Tx queues
125 */
126struct iwl_queue {
127 int n_bd; /* number of BDs in this queue */
128 int write_ptr; /* 1-st empty entry (index) host_w*/
129 int read_ptr; /* last used entry (index) host_r*/
130 /* use for monitoring and recovering the stuck queue */
131 dma_addr_t dma_addr; /* physical addr for BD's */
132 int n_window; /* safe queue window */
133 u32 id;
134 int low_mark; /* low watermark, resume queue if free
135 * space more than this */
136 int high_mark; /* high watermark, stop queue if free
137 * space less than this */
138};
139
140/* One for each TFD */
141struct iwl_tx_info {
142 struct sk_buff *skb;
143 struct iwl_rxon_context *ctx;
144};
145
/**
 * struct iwl_tx_queue - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: base of circular buffer of TFDs
 * @cmd: array of command/TX buffer pointers
 * @meta: array of meta data for each command/tx buffer
 * @txb: array of per-TFD driver data
 * @time_stamp: time (in jiffies) of last read_ptr change
 * @need_update: indicates need to update read/write index
 * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
 * @active: queue is in use
 * @swq_id: software queue id -- NOTE(review): exact encoding not visible
 *	in this file; confirm against the queue-mapping code
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 */
#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

struct iwl_tx_queue {
	struct iwl_queue q;
	void *tfds;
	struct iwl_device_cmd **cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_tx_info *txb;
	unsigned long time_stamp;
	u8 need_update;
	u8 sched_retry;
	u8 active;
	u8 swq_id;
};
176
177#define IWL_NUM_SCAN_RATES (2)
178
179struct iwl4965_channel_tgd_info {
180 u8 type;
181 s8 max_power;
182};
183
184struct iwl4965_channel_tgh_info {
185 s64 last_radar_time;
186};
187
188#define IWL4965_MAX_RATE (33)
189
190struct iwl3945_clip_group {
191 /* maximum power level to prevent clipping for each rate, derived by
192 * us from this band's saturation power in EEPROM */
193 const s8 clip_powers[IWL_MAX_RATES];
194};
195
196/* current Tx power values to use, one for each rate for each channel.
197 * requested power is limited by:
198 * -- regulatory EEPROM limits for this channel
199 * -- hardware capabilities (clip-powers)
200 * -- spectrum management
201 * -- user preference (e.g. iwconfig)
202 * when requested power is set, base power index must also be set. */
203struct iwl3945_channel_power_info {
204 struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
205 s8 power_table_index; /* actual (compenst'd) index into gain table */
206 s8 base_power_index; /* gain index for power at factory temp. */
207 s8 requested_power; /* power (dBm) requested for this chnl/rate */
208};
209
210/* current scan Tx power values to use, one for each scan rate for each
211 * channel. */
212struct iwl3945_scan_power_info {
213 struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
214 s8 power_table_index; /* actual (compenst'd) index into gain table */
215 s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */
216};
217
218/*
219 * One for each channel, holds all channel setup data
220 * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
221 * with one another!
222 */
223struct iwl_channel_info {
224 struct iwl4965_channel_tgd_info tgd;
225 struct iwl4965_channel_tgh_info tgh;
226 struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */
227 struct iwl_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for
228 * HT40 channel */
229
230 u8 channel; /* channel number */
231 u8 flags; /* flags copied from EEPROM */
232 s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
233 s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */
234 s8 min_power; /* always 0 */
235 s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */
236
237 u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */
238 u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */
239 enum ieee80211_band band;
240
241 /* HT40 channel info */
242 s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
243 u8 ht40_flags; /* flags copied from EEPROM */
244 u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
245
246 /* Radio/DSP gain settings for each "normal" data Tx rate.
247 * These include, in addition to RF and DSP gain, a few fields for
248 * remembering/modifying gain settings (indexes). */
249 struct iwl3945_channel_power_info power_info[IWL4965_MAX_RATE];
250
251 /* Radio/DSP gain settings for each scan rate, for directed scans. */
252 struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
253};
254
255#define IWL_TX_FIFO_BK 0 /* shared */
256#define IWL_TX_FIFO_BE 1
257#define IWL_TX_FIFO_VI 2 /* shared */
258#define IWL_TX_FIFO_VO 3
259#define IWL_TX_FIFO_UNUSED -1
260
261/* Minimum number of queues. MAX_NUM is defined in hw specific files.
262 * Set the minimum to accommodate the 4 standard TX queues, 1 command
263 * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
264#define IWL_MIN_NUM_QUEUES 10
265
266#define IWL_DEFAULT_CMD_QUEUE_NUM 4
267
268#define IEEE80211_DATA_LEN 2304
269#define IEEE80211_4ADDR_LEN 30
270#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
271#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
272
273struct iwl_frame {
274 union {
275 struct ieee80211_hdr frame;
276 struct iwl_tx_beacon_cmd beacon;
277 u8 raw[IEEE80211_FRAME_LEN];
278 u8 cmd[360];
279 } u;
280 struct list_head list;
281};
282
283#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
284#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
285#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
286
287enum {
288 CMD_SYNC = 0,
289 CMD_SIZE_NORMAL = 0,
290 CMD_NO_SKB = 0,
291 CMD_SIZE_HUGE = (1 << 0),
292 CMD_ASYNC = (1 << 1),
293 CMD_WANT_SKB = (1 << 2),
294 CMD_MAPPED = (1 << 3),
295};
296
297#define DEF_CMD_PAYLOAD_SIZE 320
298
299/**
300 * struct iwl_device_cmd
301 *
302 * For allocation of the command and tx queues, this establishes the overall
303 * size of the largest command we send to uCode, except for a scan command
304 * (which is relatively huge; space is allocated separately).
305 */
306struct iwl_device_cmd {
307 struct iwl_cmd_header hdr; /* uCode API */
308 union {
309 u32 flags;
310 u8 val8;
311 u16 val16;
312 u32 val32;
313 struct iwl_tx_cmd tx;
314 u8 payload[DEF_CMD_PAYLOAD_SIZE];
315 } __packed cmd;
316} __packed;
317
318#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
319
320
321struct iwl_host_cmd {
322 const void *data;
323 unsigned long reply_page;
324 void (*callback)(struct iwl_priv *priv,
325 struct iwl_device_cmd *cmd,
326 struct iwl_rx_packet *pkt);
327 u32 flags;
328 u16 len;
329 u8 id;
330};
331
332#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
333#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
334#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
335
336/**
337 * struct iwl_rx_queue - Rx queue
338 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
339 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
340 * @read: Shared index to newest available Rx buffer
341 * @write: Shared index to oldest written Rx packet
342 * @free_count: Number of pre-allocated buffers in rx_free
343 * @rx_free: list of free SKBs for use
344 * @rx_used: List of Rx buffers with no SKB
345 * @need_update: flag to indicate we need to update read/write index
346 * @rb_stts: driver's pointer to receive buffer status
347 * @rb_stts_dma: bus address of receive buffer status
348 *
349 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
350 */
351struct iwl_rx_queue {
352 __le32 *bd;
353 dma_addr_t bd_dma;
354 struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
355 struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
356 u32 read;
357 u32 write;
358 u32 free_count;
359 u32 write_actual;
360 struct list_head rx_free;
361 struct list_head rx_used;
362 int need_update;
363 struct iwl_rb_status *rb_stts;
364 dma_addr_t rb_stts_dma;
365 spinlock_t lock;
366};
367
368#define IWL_SUPPORTED_RATES_IE_LEN 8
369
370#define MAX_TID_COUNT 9
371
372#define IWL_INVALID_RATE 0xFF
373#define IWL_INVALID_VALUE -1
374
/**
 * struct iwl_ht_agg -- aggregation status while waiting for block-ack
 * @txq_id: Tx queue used for Tx attempt
 * @frame_count: # frames attempted by Tx command
 * @wait_for_ba: Expect block-ack before next Tx reply
 * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx window
 * @bitmap: 64-bit bitmap, one bit for each frame pending ACK in Tx window
 * @rate_n_flags: Rate at which Tx was attempted
 * @state: aggregation state machine (IWL_AGG_OFF/IWL_AGG_ON/
 *	IWL_EMPTYING_HW_QUEUE_ADDBA/IWL_EMPTYING_HW_QUEUE_DELBA)
 *
 * If REPLY_TX indicates that aggregation was attempted, driver must wait
 * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info
 * until block ack arrives.
 */
struct iwl_ht_agg {
	u16 txq_id;
	u16 frame_count;
	u16 wait_for_ba;
	u16 start_idx;
	u64 bitmap;
	u32 rate_n_flags;
#define IWL_AGG_OFF 0
#define IWL_AGG_ON 1
#define IWL_EMPTYING_HW_QUEUE_ADDBA 2
#define IWL_EMPTYING_HW_QUEUE_DELBA 3
	u8 state;
};
402
403
404struct iwl_tid_data {
405 u16 seq_number; /* 4965 only */
406 u16 tfds_in_queue;
407 struct iwl_ht_agg agg;
408};
409
410struct iwl_hw_key {
411 u32 cipher;
412 int keylen;
413 u8 keyidx;
414 u8 key[32];
415};
416
417union iwl_ht_rate_supp {
418 u16 rates;
419 struct {
420 u8 siso_rate;
421 u8 mimo_rate;
422 };
423};
424
425#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0)
426#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1)
427#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2)
428#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3)
429#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K
430#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K
431#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K
432
433/*
434 * Maximal MPDU density for TX aggregation
435 * 4 - 2us density
436 * 5 - 4us density
437 * 6 - 8us density
438 * 7 - 16us density
439 */
440#define CFG_HT_MPDU_DENSITY_2USEC (0x4)
441#define CFG_HT_MPDU_DENSITY_4USEC (0x5)
442#define CFG_HT_MPDU_DENSITY_8USEC (0x6)
443#define CFG_HT_MPDU_DENSITY_16USEC (0x7)
444#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
445#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
446#define CFG_HT_MPDU_DENSITY_MIN (0x1)
447
448struct iwl_ht_config {
449 bool single_chain_sufficient;
450 enum ieee80211_smps_mode smps; /* current smps mode */
451};
452
453/* QoS structures */
454struct iwl_qos_info {
455 int qos_active;
456 struct iwl_qosparam_cmd def_qos_parm;
457};
458
459/*
460 * Structure should be accessed with sta_lock held. When station addition
461 * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only
462 * the commands (iwl_legacy_addsta_cmd and iwl_link_quality_cmd) without
463 * sta_lock held.
464 */
465struct iwl_station_entry {
466 struct iwl_legacy_addsta_cmd sta;
467 struct iwl_tid_data tid[MAX_TID_COUNT];
468 u8 used, ctxid;
469 struct iwl_hw_key keyinfo;
470 struct iwl_link_quality_cmd *lq;
471};
472
473struct iwl_station_priv_common {
474 struct iwl_rxon_context *ctx;
475 u8 sta_id;
476};
477
478/*
479 * iwl_station_priv: Driver's private station information
480 *
481 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
482 * in the structure for use by driver. This structure is places in that
483 * space.
484 *
485 * The common struct MUST be first because it is shared between
486 * 3945 and 4965!
487 */
488struct iwl_station_priv {
489 struct iwl_station_priv_common common;
490 struct iwl_lq_sta lq_sta;
491 atomic_t pending_frames;
492 bool client;
493 bool asleep;
494};
495
496/**
497 * struct iwl_vif_priv - driver's private per-interface information
498 *
499 * When mac80211 allocates a virtual interface, it can allocate
500 * space for us to put data into.
501 */
502struct iwl_vif_priv {
503 struct iwl_rxon_context *ctx;
504 u8 ibss_bssid_sta_id;
505};
506
507/* one for each uCode image (inst/data, boot/init/runtime) */
508struct fw_desc {
509 void *v_addr; /* access by driver */
510 dma_addr_t p_addr; /* access by card's busmaster DMA */
511 u32 len; /* bytes */
512};
513
514/* uCode file layout */
515struct iwl_ucode_header {
516 __le32 ver; /* major/minor/API/serial */
517 struct {
518 __le32 inst_size; /* bytes of runtime code */
519 __le32 data_size; /* bytes of runtime data */
520 __le32 init_size; /* bytes of init code */
521 __le32 init_data_size; /* bytes of init data */
522 __le32 boot_size; /* bytes of bootstrap code */
523 u8 data[0]; /* in same order as sizes */
524 } v1;
525};
526
527struct iwl4965_ibss_seq {
528 u8 mac[ETH_ALEN];
529 u16 seq_num;
530 u16 frag_num;
531 unsigned long packet_time;
532 struct list_head list;
533};
534
535struct iwl_sensitivity_ranges {
536 u16 min_nrg_cck;
537 u16 max_nrg_cck;
538
539 u16 nrg_th_cck;
540 u16 nrg_th_ofdm;
541
542 u16 auto_corr_min_ofdm;
543 u16 auto_corr_min_ofdm_mrc;
544 u16 auto_corr_min_ofdm_x1;
545 u16 auto_corr_min_ofdm_mrc_x1;
546
547 u16 auto_corr_max_ofdm;
548 u16 auto_corr_max_ofdm_mrc;
549 u16 auto_corr_max_ofdm_x1;
550 u16 auto_corr_max_ofdm_mrc_x1;
551
552 u16 auto_corr_max_cck;
553 u16 auto_corr_max_cck_mrc;
554 u16 auto_corr_min_cck;
555 u16 auto_corr_min_cck_mrc;
556
557 u16 barker_corr_th_min;
558 u16 barker_corr_th_min_mrc;
559 u16 nrg_th_cca;
560};
561
562
563#define KELVIN_TO_CELSIUS(x) ((x)-273)
564#define CELSIUS_TO_KELVIN(x) ((x)+273)
565
566
/**
 * struct iwl_hw_params
 * @max_txq_num: Max # Tx queues supported
 * @dma_chnl_num: Number of Tx DMA/FIFO channels
 * @scd_bc_tbls_size: size of scheduler byte count tables
 * @tfd_size: TFD size
 * @tx_chains_num: Number of TX chains
 * @rx_chains_num: Number of RX chains
 * @valid_tx_ant: usable TX antennas
 * @valid_rx_ant: usable RX antennas
 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
 * @max_rxq_log: Log-base-2 of max_rxq_size
 * @rx_page_order: Rx buffer page order
 * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
 * @max_stations: max number of station-table entries
 * @ht40_channel: bitmask of bands where 40MHz width is possible,
 *	e.g. BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ)
 *	(NOTE(review): original comment listed the 5GHZ bit twice;
 *	2.4GHz bit presumably intended -- confirm)
 * @max_beacon_itrvl: max beacon interval, in units of 1024 ms
 * @max_inst_size: max uCode runtime-instruction size
 * @max_data_size: max uCode runtime-data size
 * @max_bsm_size: max bootstrap-state-machine image size
 * @ct_kill_threshold: temperature threshold, in hw-dependent units
 * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
 * @sens: range of sensitivity values (struct iwl_sensitivity_ranges)
 */
struct iwl_hw_params {
	u8 max_txq_num;
	u8 dma_chnl_num;
	u16 scd_bc_tbls_size;
	u32 tfd_size;
	u8 tx_chains_num;
	u8 rx_chains_num;
	u8 valid_tx_ant;
	u8 valid_rx_ant;
	u16 max_rxq_size;
	u16 max_rxq_log;
	u32 rx_page_order;
	u32 rx_wrt_ptr_reg;
	u8 max_stations;
	u8 ht40_channel;
	u8 max_beacon_itrvl;	/* in 1024 ms */
	u32 max_inst_size;
	u32 max_data_size;
	u32 max_bsm_size;
	u32 ct_kill_threshold; /* value in hw-dependent units */
	u16 beacon_time_tsf_bits;
	const struct iwl_sensitivity_ranges *sens;
};
611
612
613/******************************************************************************
614 *
615 * Functions implemented in core module which are forward declared here
616 * for use by iwl-[4-5].c
617 *
618 * NOTE: The implementation of these functions are not hardware specific
619 * which is why they are in the core module files.
620 *
621 * Naming convention --
622 * iwl_ <-- Is part of iwlwifi
623 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
624 * iwl4965_bg_ <-- Called from work queue context
625 * iwl4965_mac_ <-- mac80211 callback
626 *
627 ****************************************************************************/
628extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
629extern const u8 iwlegacy_bcast_addr[ETH_ALEN];
630extern int iwl_legacy_queue_space(const struct iwl_queue *q);
631static inline int iwl_legacy_queue_used(const struct iwl_queue *q, int i)
632{
633 return q->write_ptr >= q->read_ptr ?
634 (i >= q->read_ptr && i < q->write_ptr) :
635 !(i < q->read_ptr && i >= q->write_ptr);
636}
637
638
639static inline u8 iwl_legacy_get_cmd_index(struct iwl_queue *q, u32 index,
640 int is_huge)
641{
642 /*
643 * This is for init calibration result and scan command which
644 * required buffer > TFD_MAX_PAYLOAD_SIZE,
645 * the big buffer at end of command array
646 */
647 if (is_huge)
648 return q->n_window; /* must be power of 2 */
649
650 /* Otherwise, use normal size buffers */
651 return index & (q->n_window - 1);
652}
653
654
/* A DMA-coherent buffer: bus address, CPU address and length. */
struct iwl_dma_ptr {
	dma_addr_t dma;		/* bus (DMA) address */
	void *addr;		/* CPU virtual address */
	size_t size;		/* buffer size in bytes */
};
660
/* HT operation mode selectors */
#define IWL_OPERATION_MODE_AUTO 0
#define IWL_OPERATION_MODE_HT_ONLY 1
#define IWL_OPERATION_MODE_MIXED 2
#define IWL_OPERATION_MODE_20MHZ 3

/* per-frame sizes in bytes -- presumably used for Tx byte-count accounting;
 * TODO confirm against the Tx path */
#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/* sentinel meaning "no valid voltage reading" -- TODO confirm semantics */
#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000

/* Sensitivity and chain noise calibration */
#define INITIALIZATION_VALUE 0xFFFF
#define IWL4965_CAL_NUM_BEACONS 20	/* beacons per calibration round (4965) */
#define IWL_CAL_NUM_BEACONS 16		/* beacons per calibration round */
#define MAXIMUM_ALLOWED_PATHLOSS 15

#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3

/* acceptable false-alarm windows; counts outside [MIN, MAX] trigger
 * threshold adjustment (see enum iwl4965_false_alarm_state) */
#define MAX_FA_OFDM 50
#define MIN_FA_OFDM 5
#define MAX_FA_CCK 50
#define MIN_FA_CCK 5

#define AUTO_CORR_STEP_OFDM 1

#define AUTO_CORR_STEP_CCK 3
#define AUTO_CORR_MAX_TH_CCK 160

#define NRG_DIFF 2
#define NRG_STEP_CCK 2
#define NRG_MARGIN 8
#define MAX_NUMBER_CCK_NO_FA 100

#define AUTO_CORR_CCK_MIN_VAL_DEF (125)

/* Rx chain indices used by the chain-noise calibration */
#define CHAIN_A 0
#define CHAIN_B 1
#define CHAIN_C 2
#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
#define ALL_BAND_FILTER 0xFF00
#define IN_BAND_FILTER 0xFF
#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF

#define NRG_NUM_PREV_STAT_L 20	/* depth of the silence-RSSI history buffer */
#define NUM_RX_CHAINS 3
706
/* Verdict when comparing a false-alarm count against its allowed window. */
enum iwl4965_false_alarm_state {
	IWL_FA_TOO_MANY = 0,
	IWL_FA_TOO_FEW = 1,
	IWL_FA_GOOD_RANGE = 2,
};
712
/* Progress of the chain-noise (differential Rx gain) calibration;
 * stored in iwl_chain_noise_data.state. */
enum iwl4965_chain_noise_state {
	IWL_CHAIN_NOISE_ALIVE = 0,  /* must be 0 */
	IWL_CHAIN_NOISE_ACCUMULATE,
	IWL_CHAIN_NOISE_CALIBRATED,
	IWL_CHAIN_NOISE_DONE,
};
719
/* Whether a runtime calibration is enabled. */
enum iwl4965_calib_enabled_state {
	IWL_CALIB_DISABLED = 0,  /* must be 0 */
	IWL_CALIB_ENABLED = 1,
};
724
/*
 * enum iwl_calib
 * defines the order in which results of initial calibrations
 * should be sent to the runtime uCode.
 * No entries are defined for the legacy devices, so the enum only
 * provides IWL_CALIB_MAX, which sizes iwl_priv.calib_results[].
 */
enum iwl_calib {
	IWL_CALIB_MAX,
};
733
/* Opaque calibration results */
struct iwl_calib_result {
	void *buf;	/* result payload (opaque to this layer) */
	size_t buf_len;	/* length of buf in bytes */
};
739
/* Which uCode image is currently loaded/running. */
enum ucode_type {
	UCODE_NONE = 0,
	UCODE_INIT,	/* initialization image */
	UCODE_RT	/* runtime image */
};
745
/* Sensitivity calib data */
struct iwl_sensitivity_data {
	/* current auto-correlation thresholds (OFDM and CCK, plain and
	 * MRC variants) */
	u32 auto_corr_ofdm;
	u32 auto_corr_ofdm_mrc;
	u32 auto_corr_ofdm_x1;
	u32 auto_corr_ofdm_mrc_x1;
	u32 auto_corr_cck;
	u32 auto_corr_cck_mrc;

	/* counter snapshots from the previous round, used to derive
	 * per-period deltas */
	u32 last_bad_plcp_cnt_ofdm;
	u32 last_fa_cnt_ofdm;
	u32 last_bad_plcp_cnt_cck;
	u32 last_fa_cnt_cck;

	/* energy-threshold (NRG) tracking state */
	u32 nrg_curr_state;
	u32 nrg_prev_state;
	u32 nrg_value[10];
	u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L];	/* silence RSSI history */
	u32 nrg_silence_ref;
	u32 nrg_energy_idx;	/* ring index into nrg_value[] */
	u32 nrg_silence_idx;	/* ring index into nrg_silence_rssi[] */
	u32 nrg_th_cck;		/* current CCK energy threshold */
	s32 nrg_auto_corr_silence_diff;
	u32 num_in_cck_no_fa;	/* periods with no CCK false alarms */
	u32 nrg_th_ofdm;	/* current OFDM energy threshold */

	u16 barker_corr_th_min;
	u16 barker_corr_th_min_mrc;
	u16 nrg_th_cca;
};
776
/* Chain noise (differential Rx gain) calib data */
struct iwl_chain_noise_data {
	u32 active_chains;	/* bitmask of chains considered active */
	/* accumulated per-chain noise and signal measurements */
	u32 chain_noise_a;
	u32 chain_noise_b;
	u32 chain_noise_c;
	u32 chain_signal_a;
	u32 chain_signal_b;
	u32 chain_signal_c;
	u16 beacon_count;	/* beacons accumulated this round */
	u8 disconn_array[NUM_RX_CHAINS];	/* per-chain disconnected flags */
	u8 delta_gain_code[NUM_RX_CHAINS];	/* per-chain gain delta codes */
	u8 radio_write;		/* gain codes written flag -- TODO confirm */
	u8 state;		/* see enum iwl4965_chain_noise_state */
};
792
/* EEPROM hardware-semaphore acquisition limits */
#define EEPROM_SEM_TIMEOUT 10		/* milliseconds */
#define EEPROM_SEM_RETRY_LIMIT 1000	/* number of attempts (not time) */

/* dimensions of the debugfs tx/rx traffic log ring */
#define IWL_TRAFFIC_ENTRIES (256)
#define IWL_TRAFFIC_ENTRY_SIZE (64)

/* bits of iwl_priv.measurement_status (spectrum measurement) */
enum {
	MEASUREMENT_READY = (1 << 0),
	MEASUREMENT_ACTIVE = (1 << 1),
};
803
/* interrupt statistics */
struct isr_statistics {
	u32 hw;			/* hardware error interrupts */
	u32 sw;			/* software (uCode) error interrupts */
	u32 err_code;		/* last reported error code -- TODO confirm */
	u32 sch;		/* scheduler interrupts */
	u32 alive;		/* alive notifications */
	u32 rfkill;		/* RF-kill interrupts */
	u32 ctkill;		/* critical-temperature kill interrupts */
	u32 wakeup;		/* wakeup interrupts */
	u32 rx;			/* Rx interrupts */
	u32 rx_handlers[REPLY_MAX];	/* per-notification handler invocations */
	u32 tx;			/* Tx interrupts */
	u32 unhandled;		/* interrupts with no registered handler */
};
819
/* management statistics */
/* Indices into traffic_stats.mgmt[], one per management frame subtype. */
enum iwl_mgmt_stats {
	MANAGEMENT_ASSOC_REQ = 0,
	MANAGEMENT_ASSOC_RESP,
	MANAGEMENT_REASSOC_REQ,
	MANAGEMENT_REASSOC_RESP,
	MANAGEMENT_PROBE_REQ,
	MANAGEMENT_PROBE_RESP,
	MANAGEMENT_BEACON,
	MANAGEMENT_ATIM,
	MANAGEMENT_DISASSOC,
	MANAGEMENT_AUTH,
	MANAGEMENT_DEAUTH,
	MANAGEMENT_ACTION,
	MANAGEMENT_MAX,		/* array-size sentinel */
};
/* control statistics */
/* Indices into traffic_stats.ctrl[], one per control frame subtype. */
enum iwl_ctrl_stats {
	CONTROL_BACK_REQ = 0,
	CONTROL_BACK,
	CONTROL_PSPOLL,
	CONTROL_RTS,
	CONTROL_CTS,
	CONTROL_ACK,
	CONTROL_CFEND,
	CONTROL_CFENDACK,
	CONTROL_MAX,		/* array-size sentinel */
};
848
/* Per-direction frame counters; only maintained with debugfs built in. */
struct traffic_stats {
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
	u32 mgmt[MANAGEMENT_MAX];	/* per management-subtype counts */
	u32 ctrl[CONTROL_MAX];		/* per control-subtype counts */
	u32 data_cnt;			/* number of data frames */
	u64 data_bytes;			/* total bytes of data frames */
#endif
};
857
858/*
859 * host interrupt timeout value
860 * used with setting interrupt coalescing timer
861 * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
862 *
863 * default interrupt coalescing timer is 64 x 32 = 2048 usecs
864 * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
865 */
866#define IWL_HOST_INT_TIMEOUT_MAX (0xFF)
867#define IWL_HOST_INT_TIMEOUT_DEF (0x40)
868#define IWL_HOST_INT_TIMEOUT_MIN (0x0)
869#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
870#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
871#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
872
873#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
874
875/* TX queue watchdog timeouts in mSecs */
876#define IWL_DEF_WD_TIMEOUT (2000)
877#define IWL_LONG_WD_TIMEOUT (10000)
878#define IWL_MAX_WD_TIMEOUT (120000)
879
/* Bookkeeping for forced firmware reset requests. */
struct iwl_force_reset {
	int reset_request_count;	/* total requests seen */
	int reset_success_count;	/* requests carried out */
	int reset_reject_count;		/* requests not carried out */
	unsigned long reset_duration;	/* presumably min jiffies between
					 * resets -- TODO confirm */
	unsigned long last_force_reset_jiffies;	/* time of last reset */
};
887
888/* extend beacon time format bit shifting */
889/*
890 * for _3945 devices
891 * bits 31:24 - extended
892 * bits 23:0 - interval
893 */
894#define IWL3945_EXT_BEACON_TIME_POS 24
895/*
896 * for _4965 devices
897 * bits 31:22 - extended
898 * bits 21:0 - interval
899 */
900#define IWL4965_EXT_BEACON_TIME_POS 22
901
/* RXON context identifiers; the legacy devices only have the BSS context. */
enum iwl_rxon_context_id {
	IWL_RXON_CTX_BSS,

	NUM_IWL_RXON_CTX	/* number of contexts, array-size sentinel */
};
907
/*
 * struct iwl_rxon_context - per-interface RXON state
 *
 * Bundles the RXON command images (committed "active" vs. editable
 * "staging"), timing, QoS, WEP key bookkeeping and HT state for one
 * virtual interface.
 */
struct iwl_rxon_context {
	struct ieee80211_vif *vif;

	/* per-AC mappings to Tx FIFO and Tx queue numbers */
	const u8 *ac_to_fifo;
	const u8 *ac_to_queue;
	u8 mcast_queue;

	/*
	 * We could use the vif to indicate active, but we
	 * also need it to be active during disabling when
	 * we already removed the vif for type setting.
	 */
	bool always_active, is_active;

	bool ht_need_multiple_chains;

	enum iwl_rxon_context_id ctxid;

	/* allowed / exclusive interface modes for this context */
	u32 interface_modes, exclusive_interface_modes;
	/* device-type values for each supported interface mode */
	u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;

	/*
	 * We declare this const so it can only be
	 * changed via explicit cast within the
	 * routines that actually update the physical
	 * hardware.
	 */
	const struct iwl_legacy_rxon_cmd active;
	struct iwl_legacy_rxon_cmd staging;

	struct iwl_rxon_time_cmd timing;

	struct iwl_qos_info qos_data;

	u8 bcast_sta_id, ap_sta_id;

	/* command IDs used to program this context -- TODO confirm */
	u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
	u8 qos_cmd;
	u8 wep_key_cmd;

	struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
	u8 key_mapping_keys;

	__le32 station_flags;

	/* HT operating state for this context */
	struct {
		bool non_gf_sta_present;
		u8 protection;
		bool enabled, is_40mhz;
		u8 extension_chan_offset;
	} ht;
};
960
/*
 * struct iwl_priv - top-level per-device driver state
 *
 * One instance per adapter, shared by the 3945 and 4965 drivers;
 * device-family-specific state lives in the anonymous _3945/_4965
 * union near the end.  Locking: if both ->lock and ->sta_lock are
 * needed, ->lock must be acquired first (see the sta_lock comment).
 */
struct iwl_priv {

	/* ieee device used by generic ieee processing code */
	struct ieee80211_hw *hw;
	struct ieee80211_channel *ieee_channels;
	struct ieee80211_rate *ieee_rates;
	struct iwl_cfg *cfg;

	/* temporary frame storage list */
	struct list_head free_frames;
	int frames_count;

	enum ieee80211_band band;
	int alloc_rxb_page;	/* count of outstanding Rx buffer pages */

	/* dispatch table: one handler per notification/reply ID */
	void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
				       struct iwl_rx_mem_buffer *rxb);

	struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];

	/* spectrum measurement report caching */
	struct iwl_spectrum_notification measure_report;
	u8 measurement_status;	/* MEASUREMENT_READY / MEASUREMENT_ACTIVE bits */

	/* ucode beacon time */
	u32 ucode_beacon_time;
	int missed_beacon_threshold;

	/* track IBSS manager (last beacon) status */
	u32 ibss_manager;

	/* force reset */
	struct iwl_force_reset force_reset;

	/* we allocate array of iwl_channel_info for NIC's valid channels.
	 * Access via channel # using indirect index array */
	struct iwl_channel_info *channel_info;	/* channel info array */
	u8 channel_count;	/* # of channels */

	/* thermal calibration */
	s32 temperature;	/* degrees Kelvin */
	s32 last_temperature;

	/* init calibration results */
	struct iwl_calib_result calib_results[IWL_CALIB_MAX];

	/* Scan related variables */
	unsigned long scan_start;
	unsigned long scan_start_tsf;
	void *scan_cmd;
	enum ieee80211_band scan_band;
	struct cfg80211_scan_request *scan_request;
	struct ieee80211_vif *scan_vif;
	u8 scan_tx_ant[IEEE80211_NUM_BANDS];
	u8 mgmt_tx_ant;

	/* spinlock */
	spinlock_t lock;	/* protect general shared data */
	spinlock_t hcmd_lock;	/* protect hcmd */
	spinlock_t reg_lock;	/* protect hw register access */
	struct mutex mutex;

	/* basic pci-network driver stuff */
	struct pci_dev *pci_dev;

	/* pci hardware address support */
	void __iomem *hw_base;
	u32 hw_rev;
	u32 hw_wa_rev;
	u8 rev_id;

	/* microcode/device supports multiple contexts */
	u8 valid_contexts;	/* bitmask keyed by iwl_rxon_context_id */

	/* command queue number */
	u8 cmd_queue;

	/* max number of station keys */
	u8 sta_key_max_num;

	/* EEPROM MAC addresses */
	struct mac_address addresses[1];

	/* uCode images, save to reload in case of failure */
	int fw_index;			/* firmware we're trying to load */
	u32 ucode_ver;			/* version of ucode, copy of
					   iwl_ucode.ver */
	struct fw_desc ucode_code;	/* runtime inst */
	struct fw_desc ucode_data;	/* runtime data original */
	struct fw_desc ucode_data_backup;	/* runtime data save/restore */
	struct fw_desc ucode_init;	/* initialization inst */
	struct fw_desc ucode_init_data;	/* initialization data */
	struct fw_desc ucode_boot;	/* bootstrap inst */
	enum ucode_type ucode_type;
	u8 ucode_write_complete;	/* the image write is complete */
	char firmware_name[25];

	struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];

	__le16 switch_channel;

	/* 1st responses from initialize and runtime uCode images.
	 * _4965's initialize alive response contains some calibration data. */
	struct iwl_init_alive_resp card_alive_init;
	struct iwl_alive_resp card_alive;

	u16 active_rate;

	u8 start_calib;
	struct iwl_sensitivity_data sensitivity_data;
	struct iwl_chain_noise_data chain_noise_data;
	__le16 sensitivity_tbl[HD_TABLE_SIZE];

	struct iwl_ht_config current_ht_config;

	/* Rate scaling data */
	u8 retry_rate;

	wait_queue_head_t wait_command_queue;

	int activity_timer_active;

	/* Rx and Tx DMA processing queues */
	struct iwl_rx_queue rxq;
	struct iwl_tx_queue *txq;
	unsigned long txq_ctx_active_msk;	/* bit per active Tx queue */
	struct iwl_dma_ptr kw;	/* keep warm address */
	struct iwl_dma_ptr scd_bc_tbls;

	u32 scd_base_addr;	/* scheduler sram base address */

	unsigned long status;

	/* counts mgmt, ctl, and data packets */
	struct traffic_stats tx_stats;
	struct traffic_stats rx_stats;

	/* counts interrupts */
	struct isr_statistics isr_stats;

	struct iwl_power_mgr power_data;

	/* context information */
	u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */

	/* station table variables */

	/* Note: if lock and sta_lock are needed, lock must be acquired first */
	spinlock_t sta_lock;
	int num_stations;
	struct iwl_station_entry stations[IWL_STATION_COUNT];
	unsigned long ucode_key_table;

	/* queue refcounts */
#define IWL_MAX_HW_QUEUES 32
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
	/* for each AC */
	atomic_t queue_stop_count[4];

	/* Indication if ieee80211_ops->open has been called */
	u8 is_open;

	u8 mac80211_registered;

	/* eeprom -- this is in the card's little endian byte order */
	u8 *eeprom;
	struct iwl_eeprom_calib_info *calib_info;

	enum nl80211_iftype iw_mode;

	/* Last Rx'd beacon timestamp */
	u64 timestamp;

	/* device-family-specific state; only one member is ever used */
	union {
#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
		struct {
			void *shared_virt;
			dma_addr_t shared_phys;

			struct delayed_work thermal_periodic;
			struct delayed_work rfkill_poll;

			struct iwl3945_notif_statistics statistics;
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
			struct iwl3945_notif_statistics accum_statistics;
			struct iwl3945_notif_statistics delta_statistics;
			struct iwl3945_notif_statistics max_delta;
#endif

			u32 sta_supp_rates;
			int last_rx_rssi; /* From Rx packet statistics */

			/* Rx'd packet timing information */
			u32 last_beacon_time;
			u64 last_tsf;

			/*
			 * each calibration channel group in the
			 * EEPROM has a derived clip setting for
			 * each rate.
			 */
			const struct iwl3945_clip_group clip_groups[5];

		} _3945;
#endif
#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
		struct {
			struct iwl_rx_phy_res last_phy_res;
			bool last_phy_res_valid;

			struct completion firmware_loading_complete;

			/*
			 * chain noise reset and gain commands are the
			 * two extra calibration commands follows the standard
			 * phy calibration commands
			 */
			u8 phy_calib_chain_noise_reset_cmd;
			u8 phy_calib_chain_noise_gain_cmd;

			struct iwl_notif_statistics statistics;
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
			struct iwl_notif_statistics accum_statistics;
			struct iwl_notif_statistics delta_statistics;
			struct iwl_notif_statistics max_delta;
#endif

		} _4965;
#endif
	};

	struct iwl_hw_params hw_params;

	u32 inta_mask;

	struct workqueue_struct *workqueue;

	struct work_struct restart;
	struct work_struct scan_completed;
	struct work_struct rx_replenish;
	struct work_struct abort_scan;

	struct iwl_rxon_context *beacon_ctx;
	struct sk_buff *beacon_skb;

	struct work_struct tx_flush;

	struct tasklet_struct irq_tasklet;

	struct delayed_work init_alive_start;
	struct delayed_work alive_start;
	struct delayed_work scan_check;

	/* TX Power */
	s8 tx_power_user_lmt;
	s8 tx_power_device_lmt;
	s8 tx_power_next;


#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	/* debugging info */
	u32 debug_level; /* per device debugging will override global
			    iwlegacy_debug_level if set */
#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
	/* debugfs */
	u16 tx_traffic_idx;
	u16 rx_traffic_idx;
	u8 *tx_traffic;
	u8 *rx_traffic;
	struct dentry *debugfs_dir;
	u32 dbgfs_sram_offset, dbgfs_sram_len;
	bool disable_ht40;
#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */

	struct work_struct txpower_work;
	u32 disable_sens_cal;
	u32 disable_chain_noise_cal;
	u32 disable_tx_power_cal;
	struct work_struct run_time_calib_work;
	struct timer_list statistics_periodic;
	struct timer_list watchdog;
	bool hw_ready;

	struct led_classdev led;
	unsigned long blink_on, blink_off;
	bool led_registered;
}; /*iwl_priv */
1249
/* Mark Tx queue @txq_id active in the context bitmask (atomic set_bit). */
static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
{
	set_bit(txq_id, &priv->txq_ctx_active_msk);
}
1254
/* Mark Tx queue @txq_id inactive in the context bitmask (atomic clear_bit). */
static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
{
	clear_bit(txq_id, &priv->txq_ctx_active_msk);
}
1259
1260#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1261/*
1262 * iwl_legacy_get_debug_level: Return active debug level for device
1263 *
1264 * Using sysfs it is possible to set per device debug level. This debug
1265 * level will be used if set, otherwise the global debug level which can be
1266 * set via module parameter is used.
1267 */
1268static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
1269{
1270 if (priv->debug_level)
1271 return priv->debug_level;
1272 else
1273 return iwlegacy_debug_level;
1274}
1275#else
1276static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
1277{
1278 return iwlegacy_debug_level;
1279}
1280#endif
1281
1282
1283static inline struct ieee80211_hdr *
1284iwl_legacy_tx_queue_get_hdr(struct iwl_priv *priv,
1285 int txq_id, int idx)
1286{
1287 if (priv->txq[txq_id].txb[idx].skb)
1288 return (struct ieee80211_hdr *)priv->txq[txq_id].
1289 txb[idx].skb->data;
1290 return NULL;
1291}
1292
1293static inline struct iwl_rxon_context *
1294iwl_legacy_rxon_ctx_from_vif(struct ieee80211_vif *vif)
1295{
1296 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1297
1298 return vif_priv->ctx;
1299}
1300
/*
 * Iterate over every RXON context whose bit is set in
 * priv->valid_contexts (i.e. supported by this device/microcode).
 */
#define for_each_context(priv, ctx)				\
	for (ctx = &priv->contexts[IWL_RXON_CTX_BSS];		\
	     ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++)	\
		if (priv->valid_contexts & BIT(ctx->ctxid))
1305
1306static inline int iwl_legacy_is_associated(struct iwl_priv *priv,
1307 enum iwl_rxon_context_id ctxid)
1308{
1309 return (priv->contexts[ctxid].active.filter_flags &
1310 RXON_FILTER_ASSOC_MSK) ? 1 : 0;
1311}
1312
/* True when the (only) BSS context is currently associated. */
static inline int iwl_legacy_is_any_associated(struct iwl_priv *priv)
{
	return iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
}
1317
1318static inline int iwl_legacy_is_associated_ctx(struct iwl_rxon_context *ctx)
1319{
1320 return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
1321}
1322
1323static inline int iwl_legacy_is_channel_valid(const struct iwl_channel_info *ch_info)
1324{
1325 if (ch_info == NULL)
1326 return 0;
1327 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
1328}
1329
1330static inline int iwl_legacy_is_channel_radar(const struct iwl_channel_info *ch_info)
1331{
1332 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
1333}
1334
1335static inline u8 iwl_legacy_is_channel_a_band(const struct iwl_channel_info *ch_info)
1336{
1337 return ch_info->band == IEEE80211_BAND_5GHZ;
1338}
1339
1340static inline int
1341iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch)
1342{
1343 return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
1344}
1345
1346static inline int
1347iwl_legacy_is_channel_ibss(const struct iwl_channel_info *ch)
1348{
1349 return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0;
1350}
1351
/* Free an Rx buffer page given as a struct page pointer and keep the
 * outstanding Rx page counter in sync. */
static inline void
__iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page)
{
	__free_pages(page, priv->hw_params.rx_page_order);
	priv->alloc_rxb_page--;	/* one fewer Rx page outstanding */
}
1358
/* As __iwl_legacy_free_pages(), but takes the page as a virtual address
 * (unsigned long), matching the kernel free_pages() interface. */
static inline void iwl_legacy_free_pages(struct iwl_priv *priv, unsigned long page)
{
	free_pages(page, priv->hw_params.rx_page_order);
	priv->alloc_rxb_page--;	/* one fewer Rx page outstanding */
}
1364#endif /* __iwl_legacy_dev_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.c b/drivers/net/wireless/iwlegacy/iwl-devtrace.c
deleted file mode 100644
index acec99197ce0..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-devtrace.c
+++ /dev/null
@@ -1,42 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/module.h>
28
29/* sparse doesn't like tracepoint macros */
30#ifndef __CHECKER__
31#include "iwl-dev.h"
32
33#define CREATE_TRACE_POINTS
34#include "iwl-devtrace.h"
35
36EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite8);
37EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ioread32);
38EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite32);
39EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_rx);
40EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_tx);
41EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_error);
42#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.h b/drivers/net/wireless/iwlegacy/iwl-devtrace.h
deleted file mode 100644
index a443725ba6be..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-devtrace.h
+++ /dev/null
@@ -1,210 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#if !defined(__IWLWIFI_LEGACY_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
28#define __IWLWIFI_LEGACY_DEVICE_TRACE
29
30#include <linux/tracepoint.h>
31
32#if !defined(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) || defined(__CHECKER__)
33#undef TRACE_EVENT
34#define TRACE_EVENT(name, proto, ...) \
35static inline void trace_ ## name(proto) {}
36#endif
37
38
39#define PRIV_ENTRY __field(struct iwl_priv *, priv)
40#define PRIV_ASSIGN (__entry->priv = priv)
41
42#undef TRACE_SYSTEM
43#define TRACE_SYSTEM iwlwifi_legacy_io
44
45TRACE_EVENT(iwlwifi_legacy_dev_ioread32,
46 TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
47 TP_ARGS(priv, offs, val),
48 TP_STRUCT__entry(
49 PRIV_ENTRY
50 __field(u32, offs)
51 __field(u32, val)
52 ),
53 TP_fast_assign(
54 PRIV_ASSIGN;
55 __entry->offs = offs;
56 __entry->val = val;
57 ),
58 TP_printk("[%p] read io[%#x] = %#x", __entry->priv,
59 __entry->offs, __entry->val)
60);
61
62TRACE_EVENT(iwlwifi_legacy_dev_iowrite8,
63 TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val),
64 TP_ARGS(priv, offs, val),
65 TP_STRUCT__entry(
66 PRIV_ENTRY
67 __field(u32, offs)
68 __field(u8, val)
69 ),
70 TP_fast_assign(
71 PRIV_ASSIGN;
72 __entry->offs = offs;
73 __entry->val = val;
74 ),
75 TP_printk("[%p] write io[%#x] = %#x)", __entry->priv,
76 __entry->offs, __entry->val)
77);
78
79TRACE_EVENT(iwlwifi_legacy_dev_iowrite32,
80 TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
81 TP_ARGS(priv, offs, val),
82 TP_STRUCT__entry(
83 PRIV_ENTRY
84 __field(u32, offs)
85 __field(u32, val)
86 ),
87 TP_fast_assign(
88 PRIV_ASSIGN;
89 __entry->offs = offs;
90 __entry->val = val;
91 ),
92 TP_printk("[%p] write io[%#x] = %#x)", __entry->priv,
93 __entry->offs, __entry->val)
94);
95
96#undef TRACE_SYSTEM
97#define TRACE_SYSTEM iwlwifi_legacy_ucode
98
99#undef TRACE_SYSTEM
100#define TRACE_SYSTEM iwlwifi
101
102TRACE_EVENT(iwlwifi_legacy_dev_hcmd,
103 TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags),
104 TP_ARGS(priv, hcmd, len, flags),
105 TP_STRUCT__entry(
106 PRIV_ENTRY
107 __dynamic_array(u8, hcmd, len)
108 __field(u32, flags)
109 ),
110 TP_fast_assign(
111 PRIV_ASSIGN;
112 memcpy(__get_dynamic_array(hcmd), hcmd, len);
113 __entry->flags = flags;
114 ),
115 TP_printk("[%p] hcmd %#.2x (%ssync)",
116 __entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0],
117 __entry->flags & CMD_ASYNC ? "a" : "")
118);
119
120TRACE_EVENT(iwlwifi_legacy_dev_rx,
121 TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len),
122 TP_ARGS(priv, rxbuf, len),
123 TP_STRUCT__entry(
124 PRIV_ENTRY
125 __dynamic_array(u8, rxbuf, len)
126 ),
127 TP_fast_assign(
128 PRIV_ASSIGN;
129 memcpy(__get_dynamic_array(rxbuf), rxbuf, len);
130 ),
131 TP_printk("[%p] RX cmd %#.2x",
132 __entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4])
133);
134
135TRACE_EVENT(iwlwifi_legacy_dev_tx,
136 TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen,
137 void *buf0, size_t buf0_len,
138 void *buf1, size_t buf1_len),
139 TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
140 TP_STRUCT__entry(
141 PRIV_ENTRY
142
143 __field(size_t, framelen)
144 __dynamic_array(u8, tfd, tfdlen)
145
146 /*
147 * Do not insert between or below these items,
148 * we want to keep the frame together (except
149 * for the possible padding).
150 */
151 __dynamic_array(u8, buf0, buf0_len)
152 __dynamic_array(u8, buf1, buf1_len)
153 ),
154 TP_fast_assign(
155 PRIV_ASSIGN;
156 __entry->framelen = buf0_len + buf1_len;
157 memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
158 memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
159 memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
160 ),
161 TP_printk("[%p] TX %.2x (%zu bytes)",
162 __entry->priv,
163 ((u8 *)__get_dynamic_array(buf0))[0],
164 __entry->framelen)
165);
166
167TRACE_EVENT(iwlwifi_legacy_dev_ucode_error,
168 TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time,
169 u32 data1, u32 data2, u32 line, u32 blink1,
170 u32 blink2, u32 ilink1, u32 ilink2),
171 TP_ARGS(priv, desc, time, data1, data2, line,
172 blink1, blink2, ilink1, ilink2),
173 TP_STRUCT__entry(
174 PRIV_ENTRY
175 __field(u32, desc)
176 __field(u32, time)
177 __field(u32, data1)
178 __field(u32, data2)
179 __field(u32, line)
180 __field(u32, blink1)
181 __field(u32, blink2)
182 __field(u32, ilink1)
183 __field(u32, ilink2)
184 ),
185 TP_fast_assign(
186 PRIV_ASSIGN;
187 __entry->desc = desc;
188 __entry->time = time;
189 __entry->data1 = data1;
190 __entry->data2 = data2;
191 __entry->line = line;
192 __entry->blink1 = blink1;
193 __entry->blink2 = blink2;
194 __entry->ilink1 = ilink1;
195 __entry->ilink2 = ilink2;
196 ),
197 TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, "
198 "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X",
199 __entry->priv, __entry->desc, __entry->time, __entry->data1,
200 __entry->data2, __entry->line, __entry->blink1,
201 __entry->blink2, __entry->ilink1, __entry->ilink2)
202);
203
204#endif /* __IWLWIFI_DEVICE_TRACE */
205
206#undef TRACE_INCLUDE_PATH
207#define TRACE_INCLUDE_PATH .
208#undef TRACE_INCLUDE_FILE
209#define TRACE_INCLUDE_FILE iwl-devtrace
210#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
deleted file mode 100644
index 5bf3f49b74ab..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-eeprom.c
+++ /dev/null
@@ -1,553 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63
64#include <linux/kernel.h>
65#include <linux/module.h>
66#include <linux/slab.h>
67#include <linux/init.h>
68
69#include <net/mac80211.h>
70
71#include "iwl-commands.h"
72#include "iwl-dev.h"
73#include "iwl-core.h"
74#include "iwl-debug.h"
75#include "iwl-eeprom.h"
76#include "iwl-io.h"
77
78/************************** EEPROM BANDS ****************************
79 *
80 * The iwlegacy_eeprom_band definitions below provide the mapping from the
81 * EEPROM contents to the specific channel number supported for each
82 * band.
83 *
84 * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
85 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
86 * The specific geography and calibration information for that channel
87 * is contained in the eeprom map itself.
88 *
89 * During init, we copy the eeprom information and channel map
90 * information into priv->channel_info_24/52 and priv->channel_map_24/52
91 *
92 * channel_map_24/52 provides the index in the channel_info array for a
93 * given channel. We have to have two separate maps as there is channel
94 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
95 * band_2
96 *
97 * A value of 0xff stored in the channel_map indicates that the channel
98 * is not supported by the hardware at all.
99 *
100 * A value of 0xfe in the channel_map indicates that the channel is not
101 * valid for Tx with the current hardware. This means that
102 * while the system can tune and receive on a given channel, it may not
103 * be able to associate or transmit any frames on that
104 * channel. There is no corresponding channel information for that
105 * entry.
106 *
107 *********************************************************************/
108
109/* 2.4 GHz */
110const u8 iwlegacy_eeprom_band_1[14] = {
111 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
112};
113
114/* 5.2 GHz bands */
115static const u8 iwlegacy_eeprom_band_2[] = { /* 4915-5080MHz */
116 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
117};
118
119static const u8 iwlegacy_eeprom_band_3[] = { /* 5170-5320MHz */
120 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
121};
122
123static const u8 iwlegacy_eeprom_band_4[] = { /* 5500-5700MHz */
124 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
125};
126
127static const u8 iwlegacy_eeprom_band_5[] = { /* 5725-5825MHz */
128 145, 149, 153, 157, 161, 165
129};
130
131static const u8 iwlegacy_eeprom_band_6[] = { /* 2.4 ht40 channel */
132 1, 2, 3, 4, 5, 6, 7
133};
134
135static const u8 iwlegacy_eeprom_band_7[] = { /* 5.2 ht40 channel */
136 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
137};
138
139/******************************************************************************
140 *
141 * EEPROM related functions
142 *
143******************************************************************************/
144
145static int iwl_legacy_eeprom_verify_signature(struct iwl_priv *priv)
146{
147 u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
148 int ret = 0;
149
150 IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
151 switch (gp) {
152 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
153 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
154 break;
155 default:
156 IWL_ERR(priv, "bad EEPROM signature,"
157 "EEPROM_GP=0x%08x\n", gp);
158 ret = -ENOENT;
159 break;
160 }
161 return ret;
162}
163
164const u8
165*iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
166{
167 BUG_ON(offset >= priv->cfg->base_params->eeprom_size);
168 return &priv->eeprom[offset];
169}
170EXPORT_SYMBOL(iwl_legacy_eeprom_query_addr);
171
172u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset)
173{
174 if (!priv->eeprom)
175 return 0;
176 return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
177}
178EXPORT_SYMBOL(iwl_legacy_eeprom_query16);
179
180/**
181 * iwl_legacy_eeprom_init - read EEPROM contents
182 *
183 * Load the EEPROM contents from adapter into priv->eeprom
184 *
185 * NOTE: This routine uses the non-debug IO access functions.
186 */
187int iwl_legacy_eeprom_init(struct iwl_priv *priv)
188{
189 __le16 *e;
190 u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
191 int sz;
192 int ret;
193 u16 addr;
194
195 /* allocate eeprom */
196 sz = priv->cfg->base_params->eeprom_size;
197 IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
198 priv->eeprom = kzalloc(sz, GFP_KERNEL);
199 if (!priv->eeprom) {
200 ret = -ENOMEM;
201 goto alloc_err;
202 }
203 e = (__le16 *)priv->eeprom;
204
205 priv->cfg->ops->lib->apm_ops.init(priv);
206
207 ret = iwl_legacy_eeprom_verify_signature(priv);
208 if (ret < 0) {
209 IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
210 ret = -ENOENT;
211 goto err;
212 }
213
214 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
215 ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv);
216 if (ret < 0) {
217 IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
218 ret = -ENOENT;
219 goto err;
220 }
221
222 /* eeprom is an array of 16bit values */
223 for (addr = 0; addr < sz; addr += sizeof(u16)) {
224 u32 r;
225
226 _iwl_legacy_write32(priv, CSR_EEPROM_REG,
227 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
228
229 ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
230 CSR_EEPROM_REG_READ_VALID_MSK,
231 CSR_EEPROM_REG_READ_VALID_MSK,
232 IWL_EEPROM_ACCESS_TIMEOUT);
233 if (ret < 0) {
234 IWL_ERR(priv, "Time out reading EEPROM[%d]\n",
235 addr);
236 goto done;
237 }
238 r = _iwl_legacy_read_direct32(priv, CSR_EEPROM_REG);
239 e[addr / 2] = cpu_to_le16(r >> 16);
240 }
241
242 IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
243 "EEPROM",
244 iwl_legacy_eeprom_query16(priv, EEPROM_VERSION));
245
246 ret = 0;
247done:
248 priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv);
249
250err:
251 if (ret)
252 iwl_legacy_eeprom_free(priv);
253 /* Reset chip to save power until we load uCode during "up". */
254 iwl_legacy_apm_stop(priv);
255alloc_err:
256 return ret;
257}
258EXPORT_SYMBOL(iwl_legacy_eeprom_init);
259
260void iwl_legacy_eeprom_free(struct iwl_priv *priv)
261{
262 kfree(priv->eeprom);
263 priv->eeprom = NULL;
264}
265EXPORT_SYMBOL(iwl_legacy_eeprom_free);
266
267static void iwl_legacy_init_band_reference(const struct iwl_priv *priv,
268 int eep_band, int *eeprom_ch_count,
269 const struct iwl_eeprom_channel **eeprom_ch_info,
270 const u8 **eeprom_ch_index)
271{
272 u32 offset = priv->cfg->ops->lib->
273 eeprom_ops.regulatory_bands[eep_band - 1];
274 switch (eep_band) {
275 case 1: /* 2.4GHz band */
276 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_1);
277 *eeprom_ch_info = (struct iwl_eeprom_channel *)
278 iwl_legacy_eeprom_query_addr(priv, offset);
279 *eeprom_ch_index = iwlegacy_eeprom_band_1;
280 break;
281 case 2: /* 4.9GHz band */
282 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_2);
283 *eeprom_ch_info = (struct iwl_eeprom_channel *)
284 iwl_legacy_eeprom_query_addr(priv, offset);
285 *eeprom_ch_index = iwlegacy_eeprom_band_2;
286 break;
287 case 3: /* 5.2GHz band */
288 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_3);
289 *eeprom_ch_info = (struct iwl_eeprom_channel *)
290 iwl_legacy_eeprom_query_addr(priv, offset);
291 *eeprom_ch_index = iwlegacy_eeprom_band_3;
292 break;
293 case 4: /* 5.5GHz band */
294 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_4);
295 *eeprom_ch_info = (struct iwl_eeprom_channel *)
296 iwl_legacy_eeprom_query_addr(priv, offset);
297 *eeprom_ch_index = iwlegacy_eeprom_band_4;
298 break;
299 case 5: /* 5.7GHz band */
300 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_5);
301 *eeprom_ch_info = (struct iwl_eeprom_channel *)
302 iwl_legacy_eeprom_query_addr(priv, offset);
303 *eeprom_ch_index = iwlegacy_eeprom_band_5;
304 break;
305 case 6: /* 2.4GHz ht40 channels */
306 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_6);
307 *eeprom_ch_info = (struct iwl_eeprom_channel *)
308 iwl_legacy_eeprom_query_addr(priv, offset);
309 *eeprom_ch_index = iwlegacy_eeprom_band_6;
310 break;
311 case 7: /* 5 GHz ht40 channels */
312 *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_7);
313 *eeprom_ch_info = (struct iwl_eeprom_channel *)
314 iwl_legacy_eeprom_query_addr(priv, offset);
315 *eeprom_ch_index = iwlegacy_eeprom_band_7;
316 break;
317 default:
318 BUG();
319 }
320}
321
322#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
323 ? # x " " : "")
324/**
325 * iwl_legacy_mod_ht40_chan_info - Copy ht40 channel info into driver's priv.
326 *
327 * Does not set up a command, or touch hardware.
328 */
329static int iwl_legacy_mod_ht40_chan_info(struct iwl_priv *priv,
330 enum ieee80211_band band, u16 channel,
331 const struct iwl_eeprom_channel *eeprom_ch,
332 u8 clear_ht40_extension_channel)
333{
334 struct iwl_channel_info *ch_info;
335
336 ch_info = (struct iwl_channel_info *)
337 iwl_legacy_get_channel_info(priv, band, channel);
338
339 if (!iwl_legacy_is_channel_valid(ch_info))
340 return -1;
341
342 IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
343 " Ad-Hoc %ssupported\n",
344 ch_info->channel,
345 iwl_legacy_is_channel_a_band(ch_info) ?
346 "5.2" : "2.4",
347 CHECK_AND_PRINT(IBSS),
348 CHECK_AND_PRINT(ACTIVE),
349 CHECK_AND_PRINT(RADAR),
350 CHECK_AND_PRINT(WIDE),
351 CHECK_AND_PRINT(DFS),
352 eeprom_ch->flags,
353 eeprom_ch->max_power_avg,
354 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
355 && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
356 "" : "not ");
357
358 ch_info->ht40_eeprom = *eeprom_ch;
359 ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
360 ch_info->ht40_flags = eeprom_ch->flags;
361 if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
362 ch_info->ht40_extension_channel &=
363 ~clear_ht40_extension_channel;
364
365 return 0;
366}
367
368#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
369 ? # x " " : "")
370
371/**
372 * iwl_legacy_init_channel_map - Set up driver's info for all possible channels
373 */
374int iwl_legacy_init_channel_map(struct iwl_priv *priv)
375{
376 int eeprom_ch_count = 0;
377 const u8 *eeprom_ch_index = NULL;
378 const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
379 int band, ch;
380 struct iwl_channel_info *ch_info;
381
382 if (priv->channel_count) {
383 IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n");
384 return 0;
385 }
386
387 IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n");
388
389 priv->channel_count =
390 ARRAY_SIZE(iwlegacy_eeprom_band_1) +
391 ARRAY_SIZE(iwlegacy_eeprom_band_2) +
392 ARRAY_SIZE(iwlegacy_eeprom_band_3) +
393 ARRAY_SIZE(iwlegacy_eeprom_band_4) +
394 ARRAY_SIZE(iwlegacy_eeprom_band_5);
395
396 IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n",
397 priv->channel_count);
398
399 priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
400 priv->channel_count, GFP_KERNEL);
401 if (!priv->channel_info) {
402 IWL_ERR(priv, "Could not allocate channel_info\n");
403 priv->channel_count = 0;
404 return -ENOMEM;
405 }
406
407 ch_info = priv->channel_info;
408
409 /* Loop through the 5 EEPROM bands adding them in order to the
410 * channel map we maintain (that contains additional information than
411 * what just in the EEPROM) */
412 for (band = 1; band <= 5; band++) {
413
414 iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
415 &eeprom_ch_info, &eeprom_ch_index);
416
417 /* Loop through each band adding each of the channels */
418 for (ch = 0; ch < eeprom_ch_count; ch++) {
419 ch_info->channel = eeprom_ch_index[ch];
420 ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
421 IEEE80211_BAND_5GHZ;
422
423 /* permanently store EEPROM's channel regulatory flags
424 * and max power in channel info database. */
425 ch_info->eeprom = eeprom_ch_info[ch];
426
427 /* Copy the run-time flags so they are there even on
428 * invalid channels */
429 ch_info->flags = eeprom_ch_info[ch].flags;
430 /* First write that ht40 is not enabled, and then enable
431 * one by one */
432 ch_info->ht40_extension_channel =
433 IEEE80211_CHAN_NO_HT40;
434
435 if (!(iwl_legacy_is_channel_valid(ch_info))) {
436 IWL_DEBUG_EEPROM(priv,
437 "Ch. %d Flags %x [%sGHz] - "
438 "No traffic\n",
439 ch_info->channel,
440 ch_info->flags,
441 iwl_legacy_is_channel_a_band(ch_info) ?
442 "5.2" : "2.4");
443 ch_info++;
444 continue;
445 }
446
447 /* Initialize regulatory-based run-time data */
448 ch_info->max_power_avg = ch_info->curr_txpow =
449 eeprom_ch_info[ch].max_power_avg;
450 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
451 ch_info->min_power = 0;
452
453 IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] "
454 "%s%s%s%s%s%s(0x%02x %ddBm):"
455 " Ad-Hoc %ssupported\n",
456 ch_info->channel,
457 iwl_legacy_is_channel_a_band(ch_info) ?
458 "5.2" : "2.4",
459 CHECK_AND_PRINT_I(VALID),
460 CHECK_AND_PRINT_I(IBSS),
461 CHECK_AND_PRINT_I(ACTIVE),
462 CHECK_AND_PRINT_I(RADAR),
463 CHECK_AND_PRINT_I(WIDE),
464 CHECK_AND_PRINT_I(DFS),
465 eeprom_ch_info[ch].flags,
466 eeprom_ch_info[ch].max_power_avg,
467 ((eeprom_ch_info[ch].
468 flags & EEPROM_CHANNEL_IBSS)
469 && !(eeprom_ch_info[ch].
470 flags & EEPROM_CHANNEL_RADAR))
471 ? "" : "not ");
472
473 ch_info++;
474 }
475 }
476
477 /* Check if we do have HT40 channels */
478 if (priv->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
479 EEPROM_REGULATORY_BAND_NO_HT40 &&
480 priv->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
481 EEPROM_REGULATORY_BAND_NO_HT40)
482 return 0;
483
484 /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
485 for (band = 6; band <= 7; band++) {
486 enum ieee80211_band ieeeband;
487
488 iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
489 &eeprom_ch_info, &eeprom_ch_index);
490
491 /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
492 ieeeband =
493 (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
494
495 /* Loop through each band adding each of the channels */
496 for (ch = 0; ch < eeprom_ch_count; ch++) {
497 /* Set up driver's info for lower half */
498 iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
499 eeprom_ch_index[ch],
500 &eeprom_ch_info[ch],
501 IEEE80211_CHAN_NO_HT40PLUS);
502
503 /* Set up driver's info for upper half */
504 iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
505 eeprom_ch_index[ch] + 4,
506 &eeprom_ch_info[ch],
507 IEEE80211_CHAN_NO_HT40MINUS);
508 }
509 }
510
511 return 0;
512}
513EXPORT_SYMBOL(iwl_legacy_init_channel_map);
514
515/*
516 * iwl_legacy_free_channel_map - undo allocations in iwl_legacy_init_channel_map
517 */
518void iwl_legacy_free_channel_map(struct iwl_priv *priv)
519{
520 kfree(priv->channel_info);
521 priv->channel_count = 0;
522}
523EXPORT_SYMBOL(iwl_legacy_free_channel_map);
524
525/**
526 * iwl_legacy_get_channel_info - Find driver's private channel info
527 *
528 * Based on band and channel number.
529 */
530const struct
531iwl_channel_info *iwl_legacy_get_channel_info(const struct iwl_priv *priv,
532 enum ieee80211_band band, u16 channel)
533{
534 int i;
535
536 switch (band) {
537 case IEEE80211_BAND_5GHZ:
538 for (i = 14; i < priv->channel_count; i++) {
539 if (priv->channel_info[i].channel == channel)
540 return &priv->channel_info[i];
541 }
542 break;
543 case IEEE80211_BAND_2GHZ:
544 if (channel >= 1 && channel <= 14)
545 return &priv->channel_info[channel - 1];
546 break;
547 default:
548 BUG();
549 }
550
551 return NULL;
552}
553EXPORT_SYMBOL(iwl_legacy_get_channel_info);
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.h b/drivers/net/wireless/iwlegacy/iwl-eeprom.h
deleted file mode 100644
index c59c81002022..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-eeprom.h
+++ /dev/null
@@ -1,344 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_legacy_eeprom_h__
64#define __iwl_legacy_eeprom_h__
65
66#include <net/mac80211.h>
67
68struct iwl_priv;
69
70/*
71 * EEPROM access time values:
72 *
73 * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
74 * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
75 * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
76 * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
77 */
78#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
79
80#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
81#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
82
83
84/*
85 * Regulatory channel usage flags in EEPROM struct iwl4965_eeprom_channel.flags.
86 *
87 * IBSS and/or AP operation is allowed *only* on those channels with
88 * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
89 * RADAR detection is not supported by the 4965 driver, but is a
90 * requirement for establishing a new network for legal operation on channels
91 * requiring RADAR detection or restricting ACTIVE scanning.
92 *
93 * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
94 * It only indicates that 20 MHz channel use is supported; HT40 channel
95 * usage is indicated by a separate set of regulatory flags for each
96 * HT40 channel pair.
97 *
98 * NOTE: Using a channel inappropriately will result in a uCode error!
99 */
100#define IWL_NUM_TX_CALIB_GROUPS 5
101enum {
102 EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
103 EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
104 /* Bit 2 Reserved */
105 EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
106 EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
107 EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
108 /* Bit 6 Reserved (was Narrow Channel) */
109 EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
110};
111
112/* SKU Capabilities */
113/* 3945 only */
114#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
115#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
116
117/* *regulatory* channel data format in eeprom, one for each channel.
118 * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
119struct iwl_eeprom_channel {
120 u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
121 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
122} __packed;
123
124/* 3945 Specific */
125#define EEPROM_3945_EEPROM_VERSION (0x2f)
126
127/* 4965 has two radio transmitters (and 3 radio receivers) */
128#define EEPROM_TX_POWER_TX_CHAINS (2)
129
130/* 4965 has room for up to 8 sets of txpower calibration data */
131#define EEPROM_TX_POWER_BANDS (8)
132
133/* 4965 factory calibration measures txpower gain settings for
134 * each of 3 target output levels */
135#define EEPROM_TX_POWER_MEASUREMENTS (3)
136
137/* 4965 Specific */
138/* 4965 driver does not work with txpower calibration version < 5 */
139#define EEPROM_4965_TX_POWER_VERSION (5)
140#define EEPROM_4965_EEPROM_VERSION (0x2f)
141#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */
142#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */
143#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */
144#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */
145
146/* 2.4 GHz */
147extern const u8 iwlegacy_eeprom_band_1[14];
148
149/*
150 * factory calibration data for one txpower level, on one channel,
151 * measured on one of the 2 tx chains (radio transmitter and associated
152 * antenna). EEPROM contains:
153 *
154 * 1) Temperature (degrees Celsius) of device when measurement was made.
155 *
156 * 2) Gain table index used to achieve the target measurement power.
157 * This refers to the "well-known" gain tables (see iwl-4965-hw.h).
158 *
159 * 3) Actual measured output power, in half-dBm ("34" = 17 dBm).
160 *
161 * 4) RF power amplifier detector level measurement (not used).
162 */
163struct iwl_eeprom_calib_measure {
164 u8 temperature; /* Device temperature (Celsius) */
165 u8 gain_idx; /* Index into gain table */
166 u8 actual_pow; /* Measured RF output power, half-dBm */
167 s8 pa_det; /* Power amp detector level (not used) */
168} __packed;
169
170
171/*
172 * measurement set for one channel. EEPROM contains:
173 *
174 * 1) Channel number measured
175 *
176 * 2) Measurements for each of 3 power levels for each of 2 radio transmitters
177 * (a.k.a. "tx chains") (6 measurements altogether)
178 */
179struct iwl_eeprom_calib_ch_info {
180 u8 ch_num;
181 struct iwl_eeprom_calib_measure
182 measurements[EEPROM_TX_POWER_TX_CHAINS]
183 [EEPROM_TX_POWER_MEASUREMENTS];
184} __packed;
185
186/*
187 * txpower subband info.
188 *
189 * For each frequency subband, EEPROM contains the following:
190 *
191 * 1) First and last channels within range of the subband. "0" values
192 * indicate that this sample set is not being used.
193 *
194 * 2) Sample measurement sets for 2 channels close to the range endpoints.
195 */
196struct iwl_eeprom_calib_subband_info {
197 u8 ch_from; /* channel number of lowest channel in subband */
198 u8 ch_to; /* channel number of highest channel in subband */
199 struct iwl_eeprom_calib_ch_info ch1;
200 struct iwl_eeprom_calib_ch_info ch2;
201} __packed;
202
203
204/*
205 * txpower calibration info. EEPROM contains:
206 *
207 * 1) Factory-measured saturation power levels (maximum levels at which
208 * tx power amplifier can output a signal without too much distortion).
209 * There is one level for 2.4 GHz band and one for 5 GHz band. These
210 * values apply to all channels within each of the bands.
211 *
212 * 2) Factory-measured power supply voltage level. This is assumed to be
213 * constant (i.e. same value applies to all channels/bands) while the
214 * factory measurements are being made.
215 *
216 * 3) Up to 8 sets of factory-measured txpower calibration values.
217 * These are for different frequency ranges, since txpower gain
218 * characteristics of the analog radio circuitry vary with frequency.
219 *
220 * Not all sets need to be filled with data;
221 * struct iwl_eeprom_calib_subband_info contains range of channels
222 * (0 if unused) for each set of data.
223 */
224struct iwl_eeprom_calib_info {
225 u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
226 u8 saturation_power52; /* half-dBm */
227 __le16 voltage; /* signed */
228 struct iwl_eeprom_calib_subband_info
229 band_info[EEPROM_TX_POWER_BANDS];
230} __packed;
231
232
233/* General */
234#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
235#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
236#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
237#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
238#define EEPROM_VERSION (2*0x44) /* 2 bytes */
239#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
240#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
241#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
242#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
243#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
244
245/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
246#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
247#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
248#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
249#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
250#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
251#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
252
253#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0
254#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1
255
256/*
257 * Per-channel regulatory data.
258 *
259 * Each channel that *might* be supported by iwl has a fixed location
260 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
261 * txpower (MSB).
262 *
263 * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz)
264 * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
265 *
266 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
267 */
268#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
269#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
270#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
271
272/*
273 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
274 * 5.0 GHz channels 7, 8, 11, 12, 16
275 * (4915-5080MHz) (none of these is ever supported)
276 */
277#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
278#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
279
280/*
281 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
282 * (5170-5320MHz)
283 */
284#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
285#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
286
287/*
288 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
289 * (5500-5700MHz)
290 */
291#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
292#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
293
294/*
295 * 5.7 GHz channels 145, 149, 153, 157, 161, 165
296 * (5725-5825MHz)
297 */
298#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
299#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
300
301/*
302 * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
303 *
304 * The channel listed is the center of the lower 20 MHz half of the channel.
305 * The overall center frequency is actually 2 channels (10 MHz) above that,
306 * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
307 * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
308 * and the overall HT40 channel width centers on channel 3.
309 *
310 * NOTE: The RXON command uses 20 MHz channel numbers to specify the
311 * control channel to which to tune. RXON also specifies whether the
312 * control channel is the upper or lower half of a HT40 channel.
313 *
314 * NOTE: 4965 does not support HT40 channels on 2.4 GHz.
315 */
316#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */
317
318/*
319 * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
320 * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
321 */
322#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */
323
324#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
325
326struct iwl_eeprom_ops {
327 const u32 regulatory_bands[7];
328 int (*acquire_semaphore) (struct iwl_priv *priv);
329 void (*release_semaphore) (struct iwl_priv *priv);
330};
331
332
333int iwl_legacy_eeprom_init(struct iwl_priv *priv);
334void iwl_legacy_eeprom_free(struct iwl_priv *priv);
335const u8 *iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv,
336 size_t offset);
337u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset);
338int iwl_legacy_init_channel_map(struct iwl_priv *priv);
339void iwl_legacy_free_channel_map(struct iwl_priv *priv);
340const struct iwl_channel_info *iwl_legacy_get_channel_info(
341 const struct iwl_priv *priv,
342 enum ieee80211_band band, u16 channel);
343
344#endif /* __iwl_legacy_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-fh.h b/drivers/net/wireless/iwlegacy/iwl-fh.h
deleted file mode 100644
index 6e6091816e36..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-fh.h
+++ /dev/null
@@ -1,513 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_legacy_fh_h__
64#define __iwl_legacy_fh_h__
65
66/****************************/
67/* Flow Handler Definitions */
68/****************************/
69
70/**
71 * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
72 * Addresses are offsets from device's PCI hardware base address.
73 */
74#define FH_MEM_LOWER_BOUND (0x1000)
75#define FH_MEM_UPPER_BOUND (0x2000)
76
77/**
78 * Keep-Warm (KW) buffer base address.
79 *
80 * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
81 * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
82 * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host
83 * from going into a power-savings mode that would cause higher DRAM latency,
84 * and possible data over/under-runs, before all Tx/Rx is complete.
85 *
86 * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
87 * of the buffer, which must be 4K aligned. Once this is set up, the 4965
88 * automatically invokes keep-warm accesses when normal accesses might not
89 * be sufficient to maintain fast DRAM response.
90 *
91 * Bit fields:
92 * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
93 */
94#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C)
95
96
97/**
98 * TFD Circular Buffers Base (CBBC) addresses
99 *
100 * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
101 * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
102 * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
103 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
104 * aligned (address bits 0-7 must be 0).
105 *
106 * Bit fields in each pointer register:
107 * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
108 */
109#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
110#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
111
112/* Find TFD CB base pointer for given queue (range 0-15). */
113#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
114
115
116/**
117 * Rx SRAM Control and Status Registers (RSCSR)
118 *
119 * These registers provide handshake between driver and 4965 for the Rx queue
120 * (this queue handles *all* command responses, notifications, Rx data, etc.
121 * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx
122 * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
123 * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
124 * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
125 * mapping between RBDs and RBs.
126 *
127 * Driver must allocate host DRAM memory for the following, and set the
128 * physical address of each into 4965 registers:
129 *
130 * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
131 * entries (although any power of 2, up to 4096, is selectable by driver).
132 * Each entry (1 dword) points to a receive buffer (RB) of consistent size
133 * (typically 4K, although 8K or 16K are also selectable by driver).
134 * Driver sets up RB size and number of RBDs in the CB via Rx config
135 * register FH_MEM_RCSR_CHNL0_CONFIG_REG.
136 *
137 * Bit fields within one RBD:
138 * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
139 *
140 * Driver sets physical address [35:8] of base of RBD circular buffer
141 * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
142 *
143 * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
144 * (RBs) have been filled, via a "write pointer", actually the index of
145 * the RB's corresponding RBD within the circular buffer. Driver sets
146 * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
147 *
148 * Bit fields in lower dword of Rx status buffer (upper dword not used
149 * by driver; see struct iwl4965_shared, val0):
150 * 31-12: Not used by driver
151 * 11- 0: Index of last filled Rx buffer descriptor
152 * (4965 writes, driver reads this value)
153 *
154 * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
155 * enter pointers to these RBs into contiguous RBD circular buffer entries,
156 * and update the 4965's "write" index register,
157 * FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
158 *
159 * This "write" index corresponds to the *next* RBD that the driver will make
160 * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
161 * the circular buffer. This value should initially be 0 (before preparing any
162 * RBs), should be 8 after preparing the first 8 RBs (for example), and must
163 * wrap back to 0 at the end of the circular buffer (but don't wrap before
164 * "read" index has advanced past 1! See below).
165 * NOTE: 4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
166 *
167 * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
168 * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
169 * to tell the driver the index of the latest filled RBD. The driver must
170 * read this "read" index from DRAM after receiving an Rx interrupt from 4965.
171 *
172 * The driver must also internally keep track of a third index, which is the
173 * next RBD to process. When receiving an Rx interrupt, driver should process
174 * all filled but unprocessed RBs up to, but not including, the RB
175 * corresponding to the "read" index. For example, if "read" index becomes "1",
176 * driver may process the RB pointed to by RBD 0. Depending on volume of
177 * traffic, there may be many RBs to process.
178 *
179 * If read index == write index, 4965 thinks there is no room to put new data.
180 * Due to this, the maximum number of filled RBs is 255, instead of 256. To
181 * be safe, make sure that there is a gap of at least 2 RBDs between "write"
182 * and "read" indexes; that is, make sure that there are no more than 254
183 * buffers waiting to be filled.
184 */
185#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0)
186#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
187#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND)
188
189/**
190 * Physical base address of 8-byte Rx Status buffer.
191 * Bit fields:
192 * 31-0: Rx status buffer physical base address [35:4], must 16-byte aligned.
193 */
194#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0)
195
196/**
197 * Physical base address of Rx Buffer Descriptor Circular Buffer.
198 * Bit fields:
199 * 27-0: RBD CD physical base address [35:8], must be 256-byte aligned.
200 */
201#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004)
202
203/**
204 * Rx write pointer (index, really!).
205 * Bit fields:
206 * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
207 * NOTE: For 256-entry circular buffer, use only bits [7:0].
208 */
209#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008)
210#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
211
212
213/**
214 * Rx Config/Status Registers (RCSR)
215 * Rx Config Reg for channel 0 (only channel used)
216 *
217 * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
218 * normal operation (see bit fields).
219 *
220 * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
221 * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for
222 * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
223 *
224 * Bit fields:
225 * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
226 * '10' operate normally
227 * 29-24: reserved
228 * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
229 * min "5" for 32 RBDs, max "12" for 4096 RBDs.
230 * 19-18: reserved
231 * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
232 * '10' 12K, '11' 16K.
233 * 15-14: reserved
234 * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
235 * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
236 * typical value 0x10 (about 1/2 msec)
237 * 3- 0: reserved
238 */
239#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
240#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0)
241#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND)
242
243#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0)
244
245#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
246#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bits 12 */
247#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
248#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */
249#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
250#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/
251
252#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
253#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
254#define RX_RB_TIMEOUT (0x10)
255
256#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
257#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
258#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
259
260#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
261#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
262#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
263#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
264
265#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004)
266#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
267#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
268
269#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */
270
271/**
272 * Rx Shared Status Registers (RSSR)
273 *
274 * After stopping Rx DMA channel (writing 0 to
275 * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
276 * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
277 *
278 * Bit fields:
279 * 24: 1 = Channel 0 is idle
280 *
281 * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
282 * contain default values that should not be altered by the driver.
283 */
284#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40)
285#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
286
287#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND)
288#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004)
289#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
290 (FH_MEM_RSSR_LOWER_BOUND + 0x008)
291
292#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
293
294#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
295
296/* TFDB Area - TFDs buffer table */
297#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
298#define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900)
299#define FH_TFDIB_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x958)
300#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
301#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
302
303/**
304 * Transmit DMA Channel Control/Status Registers (TCSR)
305 *
306 * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
307 * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
308 * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
309 *
310 * To use a Tx DMA channel, driver must initialize its
311 * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
312 *
313 * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
314 * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL
315 *
316 * All other bits should be 0.
317 *
318 * Bit fields:
319 * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
320 * '10' operate normally
321 * 29- 4: Reserved, set to "0"
322 * 3: Enable internal DMA requests (1, normal operation), disable (0)
323 * 2- 0: Reserved, set to "0"
324 */
325#define FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
326#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60)
327
328/* Find Control/Status reg for given Tx DMA/FIFO channel */
329#define FH49_TCSR_CHNL_NUM (7)
330#define FH50_TCSR_CHNL_NUM (8)
331
332/* TCSR: tx_config register values */
333#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
334 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl))
335#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
336 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
337#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
338 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
339
340#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
341#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001)
342
343#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000)
344#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008)
345
346#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
347#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
348#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
349
350#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
351#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000)
352#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000)
353
354#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
355#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
356#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
357
358#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
359#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
360#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
361
362#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
363#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
364
365/**
366 * Tx Shared Status Registers (TSSR)
367 *
368 * After stopping Tx DMA channel (writing 0 to
369 * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
370 * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
371 * (channel's buffers empty | no pending requests).
372 *
373 * Bit fields:
374 * 31-24: 1 = Channel buffers empty (channel 7:0)
375 * 23-16: 1 = No pending requests (channel 7:0)
376 */
377#define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0)
378#define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0)
379
380#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010)
381
382/**
383 * Bit fields for TSSR(Tx Shared Status & Control) error status register:
384 * 31: Indicates an address error when accessed to internal memory
385 * uCode/driver must write "1" in order to clear this flag
386 * 30: Indicates that Host did not send the expected number of dwords to FH
387 * uCode/driver must write "1" in order to clear this flag
388 * 16-9:Each status bit is for one channel. Indicates that an (Error) ActDMA
389 * command was received from the scheduler while the TRB was already full
390 * with previous command
391 * uCode/driver must write "1" in order to clear this flag
392 * 7-0: Each status bit indicates a channel's TxCredit error. When an error
393 * bit is set, it indicates that the FH has received a full indication
394 * from the RTC TxFIFO and the current value of the TxCredit counter was
395 * not equal to zero. This mean that the credit mechanism was not
396 * synchronized to the TxFIFO status
397 * uCode/driver must write "1" in order to clear this flag
398 */
399#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018)
400
401#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
402
403/* Tx service channels */
404#define FH_SRVC_CHNL (9)
405#define FH_SRVC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9C8)
406#define FH_SRVC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
407#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
408 (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
409
410#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98)
411/* Instruct FH to increment the retry count of a packet when
412 * it is brought from the memory to TX-FIFO
413 */
414#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
415
416#define RX_QUEUE_SIZE 256
417#define RX_QUEUE_MASK 255
418#define RX_QUEUE_SIZE_LOG 8
419
420/*
421 * RX related structures and functions
422 */
423#define RX_FREE_BUFFERS 64
424#define RX_LOW_WATERMARK 8
425
426/* Size of one Rx buffer in host DRAM */
427#define IWL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
428#define IWL_RX_BUF_SIZE_4K (4 * 1024)
429#define IWL_RX_BUF_SIZE_8K (8 * 1024)
430
431/**
432 * struct iwl_rb_status - reseve buffer status
433 * host memory mapped FH registers
434 * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
435 * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
436 * @finished_rb_num [0:11] - Indicates the index of the current RB
437 * in which the last frame was written to
438 * @finished_fr_num [0:11] - Indicates the index of the RX Frame
439 * which was transferred
440 */
441struct iwl_rb_status {
442 __le16 closed_rb_num;
443 __le16 closed_fr_num;
444 __le16 finished_rb_num;
445 __le16 finished_fr_nam;
446 __le32 __unused; /* 3945 only */
447} __packed;
448
449
450#define TFD_QUEUE_SIZE_MAX (256)
451#define TFD_QUEUE_SIZE_BC_DUP (64)
452#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
453#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
454#define IWL_NUM_OF_TBS 20
455
456static inline u8 iwl_legacy_get_dma_hi_addr(dma_addr_t addr)
457{
458 return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
459}
460/**
461 * struct iwl_tfd_tb transmit buffer descriptor within transmit frame descriptor
462 *
463 * This structure contains dma address and length of transmission address
464 *
465 * @lo: low [31:0] portion of the dma address of TX buffer
466 * every even is unaligned on 16 bit boundary
467 * @hi_n_len 0-3 [35:32] portion of dma
468 * 4-15 length of the tx buffer
469 */
470struct iwl_tfd_tb {
471 __le32 lo;
472 __le16 hi_n_len;
473} __packed;
474
475/**
476 * struct iwl_tfd
477 *
478 * Transmit Frame Descriptor (TFD)
479 *
480 * @ __reserved1[3] reserved
481 * @ num_tbs 0-4 number of active tbs
482 * 5 reserved
483 * 6-7 padding (not used)
484 * @ tbs[20] transmit frame buffer descriptors
485 * @ __pad padding
486 *
487 * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
488 * Both driver and device share these circular buffers, each of which must be
489 * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
490 *
491 * Driver must indicate the physical address of the base of each
492 * circular buffer via the FH_MEM_CBBC_QUEUE registers.
493 *
494 * Each TFD contains pointer/size information for up to 20 data buffers
495 * in host DRAM. These buffers collectively contain the (one) frame described
496 * by the TFD. Each buffer must be a single contiguous block of memory within
497 * itself, but buffers may be scattered in host DRAM. Each buffer has max size
498 * of (4K - 4). The concatenates all of a TFD's buffers into a single
499 * Tx frame, up to 8 KBytes in size.
500 *
501 * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
502 */
503struct iwl_tfd {
504 u8 __reserved1[3];
505 u8 num_tbs;
506 struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
507 __le32 __pad;
508} __packed;
509
510/* Keep Warm Size */
511#define IWL_KW_SIZE 0x1000 /* 4k */
512
513#endif /* !__iwl_legacy_fh_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
deleted file mode 100644
index ce1fc9feb61f..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-hcmd.c
+++ /dev/null
@@ -1,271 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/sched.h>
32#include <net/mac80211.h>
33
34#include "iwl-dev.h"
35#include "iwl-debug.h"
36#include "iwl-eeprom.h"
37#include "iwl-core.h"
38
39
40const char *iwl_legacy_get_cmd_string(u8 cmd)
41{
42 switch (cmd) {
43 IWL_CMD(REPLY_ALIVE);
44 IWL_CMD(REPLY_ERROR);
45 IWL_CMD(REPLY_RXON);
46 IWL_CMD(REPLY_RXON_ASSOC);
47 IWL_CMD(REPLY_QOS_PARAM);
48 IWL_CMD(REPLY_RXON_TIMING);
49 IWL_CMD(REPLY_ADD_STA);
50 IWL_CMD(REPLY_REMOVE_STA);
51 IWL_CMD(REPLY_WEPKEY);
52 IWL_CMD(REPLY_3945_RX);
53 IWL_CMD(REPLY_TX);
54 IWL_CMD(REPLY_RATE_SCALE);
55 IWL_CMD(REPLY_LEDS_CMD);
56 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
57 IWL_CMD(REPLY_CHANNEL_SWITCH);
58 IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
59 IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
60 IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
61 IWL_CMD(POWER_TABLE_CMD);
62 IWL_CMD(PM_SLEEP_NOTIFICATION);
63 IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
64 IWL_CMD(REPLY_SCAN_CMD);
65 IWL_CMD(REPLY_SCAN_ABORT_CMD);
66 IWL_CMD(SCAN_START_NOTIFICATION);
67 IWL_CMD(SCAN_RESULTS_NOTIFICATION);
68 IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
69 IWL_CMD(BEACON_NOTIFICATION);
70 IWL_CMD(REPLY_TX_BEACON);
71 IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
72 IWL_CMD(REPLY_BT_CONFIG);
73 IWL_CMD(REPLY_STATISTICS_CMD);
74 IWL_CMD(STATISTICS_NOTIFICATION);
75 IWL_CMD(CARD_STATE_NOTIFICATION);
76 IWL_CMD(MISSED_BEACONS_NOTIFICATION);
77 IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
78 IWL_CMD(SENSITIVITY_CMD);
79 IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
80 IWL_CMD(REPLY_RX_PHY_CMD);
81 IWL_CMD(REPLY_RX_MPDU_CMD);
82 IWL_CMD(REPLY_RX);
83 IWL_CMD(REPLY_COMPRESSED_BA);
84 default:
85 return "UNKNOWN";
86
87 }
88}
89EXPORT_SYMBOL(iwl_legacy_get_cmd_string);
90
91#define HOST_COMPLETE_TIMEOUT (HZ / 2)
92
93static void iwl_legacy_generic_cmd_callback(struct iwl_priv *priv,
94 struct iwl_device_cmd *cmd,
95 struct iwl_rx_packet *pkt)
96{
97 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
98 IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
99 iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
100 return;
101 }
102
103#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
104 switch (cmd->hdr.cmd) {
105 case REPLY_TX_LINK_QUALITY_CMD:
106 case SENSITIVITY_CMD:
107 IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
108 iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
109 break;
110 default:
111 IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
112 iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
113 }
114#endif
115}
116
117static int
118iwl_legacy_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
119{
120 int ret;
121
122 BUG_ON(!(cmd->flags & CMD_ASYNC));
123
124 /* An asynchronous command can not expect an SKB to be set. */
125 BUG_ON(cmd->flags & CMD_WANT_SKB);
126
127 /* Assign a generic callback if one is not provided */
128 if (!cmd->callback)
129 cmd->callback = iwl_legacy_generic_cmd_callback;
130
131 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
132 return -EBUSY;
133
134 ret = iwl_legacy_enqueue_hcmd(priv, cmd);
135 if (ret < 0) {
136 IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
137 iwl_legacy_get_cmd_string(cmd->id), ret);
138 return ret;
139 }
140 return 0;
141}
142
143int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
144{
145 int cmd_idx;
146 int ret;
147
148 lockdep_assert_held(&priv->mutex);
149
150 BUG_ON(cmd->flags & CMD_ASYNC);
151
152 /* A synchronous command can not have a callback set. */
153 BUG_ON(cmd->callback);
154
155 IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
156 iwl_legacy_get_cmd_string(cmd->id));
157
158 set_bit(STATUS_HCMD_ACTIVE, &priv->status);
159 IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
160 iwl_legacy_get_cmd_string(cmd->id));
161
162 cmd_idx = iwl_legacy_enqueue_hcmd(priv, cmd);
163 if (cmd_idx < 0) {
164 ret = cmd_idx;
165 IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
166 iwl_legacy_get_cmd_string(cmd->id), ret);
167 goto out;
168 }
169
170 ret = wait_event_timeout(priv->wait_command_queue,
171 !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
172 HOST_COMPLETE_TIMEOUT);
173 if (!ret) {
174 if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
175 IWL_ERR(priv,
176 "Error sending %s: time out after %dms.\n",
177 iwl_legacy_get_cmd_string(cmd->id),
178 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
179
180 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
181 IWL_DEBUG_INFO(priv,
182 "Clearing HCMD_ACTIVE for command %s\n",
183 iwl_legacy_get_cmd_string(cmd->id));
184 ret = -ETIMEDOUT;
185 goto cancel;
186 }
187 }
188
189 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
190 IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
191 iwl_legacy_get_cmd_string(cmd->id));
192 ret = -ECANCELED;
193 goto fail;
194 }
195 if (test_bit(STATUS_FW_ERROR, &priv->status)) {
196 IWL_ERR(priv, "Command %s failed: FW Error\n",
197 iwl_legacy_get_cmd_string(cmd->id));
198 ret = -EIO;
199 goto fail;
200 }
201 if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
202 IWL_ERR(priv, "Error: Response NULL in '%s'\n",
203 iwl_legacy_get_cmd_string(cmd->id));
204 ret = -EIO;
205 goto cancel;
206 }
207
208 ret = 0;
209 goto out;
210
211cancel:
212 if (cmd->flags & CMD_WANT_SKB) {
213 /*
214 * Cancel the CMD_WANT_SKB flag for the cmd in the
215 * TX cmd queue. Otherwise in case the cmd comes
216 * in later, it will possibly set an invalid
217 * address (cmd->meta.source).
218 */
219 priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
220 ~CMD_WANT_SKB;
221 }
222fail:
223 if (cmd->reply_page) {
224 iwl_legacy_free_pages(priv, cmd->reply_page);
225 cmd->reply_page = 0;
226 }
227out:
228 return ret;
229}
230EXPORT_SYMBOL(iwl_legacy_send_cmd_sync);
231
232int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
233{
234 if (cmd->flags & CMD_ASYNC)
235 return iwl_legacy_send_cmd_async(priv, cmd);
236
237 return iwl_legacy_send_cmd_sync(priv, cmd);
238}
239EXPORT_SYMBOL(iwl_legacy_send_cmd);
240
241int
242iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
243{
244 struct iwl_host_cmd cmd = {
245 .id = id,
246 .len = len,
247 .data = data,
248 };
249
250 return iwl_legacy_send_cmd_sync(priv, &cmd);
251}
252EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu);
253
254int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv,
255 u8 id, u16 len, const void *data,
256 void (*callback)(struct iwl_priv *priv,
257 struct iwl_device_cmd *cmd,
258 struct iwl_rx_packet *pkt))
259{
260 struct iwl_host_cmd cmd = {
261 .id = id,
262 .len = len,
263 .data = data,
264 };
265
266 cmd.flags |= CMD_ASYNC;
267 cmd.callback = callback;
268
269 return iwl_legacy_send_cmd_async(priv, &cmd);
270}
271EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu_async);
diff --git a/drivers/net/wireless/iwlegacy/iwl-helpers.h b/drivers/net/wireless/iwlegacy/iwl-helpers.h
deleted file mode 100644
index 5cf23eaecbbb..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-helpers.h
+++ /dev/null
@@ -1,196 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#ifndef __iwl_legacy_helpers_h__
31#define __iwl_legacy_helpers_h__
32
33#include <linux/ctype.h>
34#include <net/mac80211.h>
35
36#include "iwl-io.h"
37
38#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
39
40
41static inline struct ieee80211_conf *iwl_legacy_ieee80211_get_hw_conf(
42 struct ieee80211_hw *hw)
43{
44 return &hw->conf;
45}
46
47/**
48 * iwl_legacy_queue_inc_wrap - increment queue index, wrap back to beginning
49 * @index -- current index
50 * @n_bd -- total number of entries in queue (must be power of 2)
51 */
52static inline int iwl_legacy_queue_inc_wrap(int index, int n_bd)
53{
54 return ++index & (n_bd - 1);
55}
56
57/**
58 * iwl_legacy_queue_dec_wrap - decrement queue index, wrap back to end
59 * @index -- current index
60 * @n_bd -- total number of entries in queue (must be power of 2)
61 */
62static inline int iwl_legacy_queue_dec_wrap(int index, int n_bd)
63{
64 return --index & (n_bd - 1);
65}
66
67/* TODO: Move fw_desc functions to iwl-pci.ko */
68static inline void iwl_legacy_free_fw_desc(struct pci_dev *pci_dev,
69 struct fw_desc *desc)
70{
71 if (desc->v_addr)
72 dma_free_coherent(&pci_dev->dev, desc->len,
73 desc->v_addr, desc->p_addr);
74 desc->v_addr = NULL;
75 desc->len = 0;
76}
77
78static inline int iwl_legacy_alloc_fw_desc(struct pci_dev *pci_dev,
79 struct fw_desc *desc)
80{
81 if (!desc->len) {
82 desc->v_addr = NULL;
83 return -EINVAL;
84 }
85
86 desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
87 &desc->p_addr, GFP_KERNEL);
88 return (desc->v_addr != NULL) ? 0 : -ENOMEM;
89}
90
91/*
92 * we have 8 bits used like this:
93 *
94 * 7 6 5 4 3 2 1 0
95 * | | | | | | | |
96 * | | | | | | +-+-------- AC queue (0-3)
97 * | | | | | |
98 * | +-+-+-+-+------------ HW queue ID
99 * |
100 * +---------------------- unused
101 */
102static inline void
103iwl_legacy_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
104{
105 BUG_ON(ac > 3); /* only have 2 bits */
106 BUG_ON(hwq > 31); /* only use 5 bits */
107
108 txq->swq_id = (hwq << 2) | ac;
109}
110
111static inline void iwl_legacy_wake_queue(struct iwl_priv *priv,
112 struct iwl_tx_queue *txq)
113{
114 u8 queue = txq->swq_id;
115 u8 ac = queue & 3;
116 u8 hwq = (queue >> 2) & 0x1f;
117
118 if (test_and_clear_bit(hwq, priv->queue_stopped))
119 if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
120 ieee80211_wake_queue(priv->hw, ac);
121}
122
123static inline void iwl_legacy_stop_queue(struct iwl_priv *priv,
124 struct iwl_tx_queue *txq)
125{
126 u8 queue = txq->swq_id;
127 u8 ac = queue & 3;
128 u8 hwq = (queue >> 2) & 0x1f;
129
130 if (!test_and_set_bit(hwq, priv->queue_stopped))
131 if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
132 ieee80211_stop_queue(priv->hw, ac);
133}
134
135#ifdef ieee80211_stop_queue
136#undef ieee80211_stop_queue
137#endif
138
139#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
140
141#ifdef ieee80211_wake_queue
142#undef ieee80211_wake_queue
143#endif
144
145#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
146
147static inline void iwl_legacy_disable_interrupts(struct iwl_priv *priv)
148{
149 clear_bit(STATUS_INT_ENABLED, &priv->status);
150
151 /* disable interrupts from uCode/NIC to host */
152 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
153
154 /* acknowledge/clear/reset any interrupts still pending
155 * from uCode or flow handler (Rx/Tx DMA) */
156 iwl_write32(priv, CSR_INT, 0xffffffff);
157 iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
158 IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
159}
160
161static inline void iwl_legacy_enable_rfkill_int(struct iwl_priv *priv)
162{
163 IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n");
164 iwl_write32(priv, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
165}
166
167static inline void iwl_legacy_enable_interrupts(struct iwl_priv *priv)
168{
169 IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
170 set_bit(STATUS_INT_ENABLED, &priv->status);
171 iwl_write32(priv, CSR_INT_MASK, priv->inta_mask);
172}
173
174/**
175 * iwl_legacy_beacon_time_mask_low - mask of lower 32 bit of beacon time
176 * @priv -- pointer to iwl_priv data structure
177 * @tsf_bits -- number of bits need to shift for masking)
178 */
179static inline u32 iwl_legacy_beacon_time_mask_low(struct iwl_priv *priv,
180 u16 tsf_bits)
181{
182 return (1 << tsf_bits) - 1;
183}
184
185/**
186 * iwl_legacy_beacon_time_mask_high - mask of higher 32 bit of beacon time
187 * @priv -- pointer to iwl_priv data structure
188 * @tsf_bits -- number of bits need to shift for masking)
189 */
190static inline u32 iwl_legacy_beacon_time_mask_high(struct iwl_priv *priv,
191 u16 tsf_bits)
192{
193 return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
194}
195
196#endif /* __iwl_legacy_helpers_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-io.h b/drivers/net/wireless/iwlegacy/iwl-io.h
deleted file mode 100644
index 5cc5d342914f..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-io.h
+++ /dev/null
@@ -1,545 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#ifndef __iwl_legacy_io_h__
30#define __iwl_legacy_io_h__
31
32#include <linux/io.h>
33
34#include "iwl-dev.h"
35#include "iwl-debug.h"
36#include "iwl-devtrace.h"
37
38/*
39 * IO, register, and NIC memory access functions
40 *
41 * NOTE on naming convention and macro usage for these
42 *
43 * A single _ prefix before a an access function means that no state
44 * check or debug information is printed when that function is called.
45 *
46 * A double __ prefix before an access function means that state is checked
47 * and the current line number and caller function name are printed in addition
48 * to any other debug output.
49 *
50 * The non-prefixed name is the #define that maps the caller into a
51 * #define that provides the caller's name and __LINE__ to the double
52 * prefix version.
53 *
54 * If you wish to call the function without any debug or state checking,
55 * you should use the single _ prefix version (as is used by dependent IO
56 * routines, for example _iwl_legacy_read_direct32 calls the non-check version of
57 * _iwl_legacy_read32.)
58 *
59 * These declarations are *extremely* useful in quickly isolating code deltas
60 * which result in misconfiguration of the hardware I/O. In combination with
61 * git-bisect and the IO debug level you can quickly determine the specific
62 * commit which breaks the IO sequence to the hardware.
63 *
64 */
65
66static inline void _iwl_legacy_write8(struct iwl_priv *priv, u32 ofs, u8 val)
67{
68 trace_iwlwifi_legacy_dev_iowrite8(priv, ofs, val);
69 iowrite8(val, priv->hw_base + ofs);
70}
71
72#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
73static inline void
74__iwl_legacy_write8(const char *f, u32 l, struct iwl_priv *priv,
75 u32 ofs, u8 val)
76{
77 IWL_DEBUG_IO(priv, "write8(0x%08X, 0x%02X) - %s %d\n", ofs, val, f, l);
78 _iwl_legacy_write8(priv, ofs, val);
79}
80#define iwl_write8(priv, ofs, val) \
81 __iwl_legacy_write8(__FILE__, __LINE__, priv, ofs, val)
82#else
83#define iwl_write8(priv, ofs, val) _iwl_legacy_write8(priv, ofs, val)
84#endif
85
86
87static inline void _iwl_legacy_write32(struct iwl_priv *priv, u32 ofs, u32 val)
88{
89 trace_iwlwifi_legacy_dev_iowrite32(priv, ofs, val);
90 iowrite32(val, priv->hw_base + ofs);
91}
92
93#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
94static inline void
95__iwl_legacy_write32(const char *f, u32 l, struct iwl_priv *priv,
96 u32 ofs, u32 val)
97{
98 IWL_DEBUG_IO(priv, "write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l);
99 _iwl_legacy_write32(priv, ofs, val);
100}
101#define iwl_write32(priv, ofs, val) \
102 __iwl_legacy_write32(__FILE__, __LINE__, priv, ofs, val)
103#else
104#define iwl_write32(priv, ofs, val) _iwl_legacy_write32(priv, ofs, val)
105#endif
106
107static inline u32 _iwl_legacy_read32(struct iwl_priv *priv, u32 ofs)
108{
109 u32 val = ioread32(priv->hw_base + ofs);
110 trace_iwlwifi_legacy_dev_ioread32(priv, ofs, val);
111 return val;
112}
113
114#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
115static inline u32
116__iwl_legacy_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs)
117{
118 IWL_DEBUG_IO(priv, "read_direct32(0x%08X) - %s %d\n", ofs, f, l);
119 return _iwl_legacy_read32(priv, ofs);
120}
121#define iwl_read32(priv, ofs) __iwl_legacy_read32(__FILE__, __LINE__, priv, ofs)
122#else
123#define iwl_read32(p, o) _iwl_legacy_read32(p, o)
124#endif
125
126#define IWL_POLL_INTERVAL 10 /* microseconds */
127static inline int
128_iwl_legacy_poll_bit(struct iwl_priv *priv, u32 addr,
129 u32 bits, u32 mask, int timeout)
130{
131 int t = 0;
132
133 do {
134 if ((_iwl_legacy_read32(priv, addr) & mask) == (bits & mask))
135 return t;
136 udelay(IWL_POLL_INTERVAL);
137 t += IWL_POLL_INTERVAL;
138 } while (t < timeout);
139
140 return -ETIMEDOUT;
141}
142#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
143static inline int __iwl_legacy_poll_bit(const char *f, u32 l,
144 struct iwl_priv *priv, u32 addr,
145 u32 bits, u32 mask, int timeout)
146{
147 int ret = _iwl_legacy_poll_bit(priv, addr, bits, mask, timeout);
148 IWL_DEBUG_IO(priv, "poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n",
149 addr, bits, mask,
150 unlikely(ret == -ETIMEDOUT) ? "timeout" : "", f, l);
151 return ret;
152}
153#define iwl_poll_bit(priv, addr, bits, mask, timeout) \
154 __iwl_legacy_poll_bit(__FILE__, __LINE__, priv, addr, \
155 bits, mask, timeout)
156#else
157#define iwl_poll_bit(p, a, b, m, t) _iwl_legacy_poll_bit(p, a, b, m, t)
158#endif
159
160static inline void _iwl_legacy_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
161{
162 _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) | mask);
163}
164#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
165static inline void __iwl_legacy_set_bit(const char *f, u32 l,
166 struct iwl_priv *priv, u32 reg, u32 mask)
167{
168 u32 val = _iwl_legacy_read32(priv, reg) | mask;
169 IWL_DEBUG_IO(priv, "set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg,
170 mask, val);
171 _iwl_legacy_write32(priv, reg, val);
172}
173static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
174{
175 unsigned long reg_flags;
176
177 spin_lock_irqsave(&p->reg_lock, reg_flags);
178 __iwl_legacy_set_bit(__FILE__, __LINE__, p, r, m);
179 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
180}
181#else
182static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
183{
184 unsigned long reg_flags;
185
186 spin_lock_irqsave(&p->reg_lock, reg_flags);
187 _iwl_legacy_set_bit(p, r, m);
188 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
189}
190#endif
191
192static inline void
193_iwl_legacy_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
194{
195 _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) & ~mask);
196}
197#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
198static inline void
199__iwl_legacy_clear_bit(const char *f, u32 l,
200 struct iwl_priv *priv, u32 reg, u32 mask)
201{
202 u32 val = _iwl_legacy_read32(priv, reg) & ~mask;
203 IWL_DEBUG_IO(priv, "clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
204 _iwl_legacy_write32(priv, reg, val);
205}
206static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
207{
208 unsigned long reg_flags;
209
210 spin_lock_irqsave(&p->reg_lock, reg_flags);
211 __iwl_legacy_clear_bit(__FILE__, __LINE__, p, r, m);
212 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
213}
214#else
215static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
216{
217 unsigned long reg_flags;
218
219 spin_lock_irqsave(&p->reg_lock, reg_flags);
220 _iwl_legacy_clear_bit(p, r, m);
221 spin_unlock_irqrestore(&p->reg_lock, reg_flags);
222}
223#endif
224
225static inline int _iwl_legacy_grab_nic_access(struct iwl_priv *priv)
226{
227 int ret;
228 u32 val;
229
230 /* this bit wakes up the NIC */
231 _iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
232 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
233
234 /*
235 * These bits say the device is running, and should keep running for
236 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
237 * but they do not indicate that embedded SRAM is restored yet;
238 * 3945 and 4965 have volatile SRAM, and must save/restore contents
239 * to/from host DRAM when sleeping/waking for power-saving.
240 * Each direction takes approximately 1/4 millisecond; with this
241 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
242 * series of register accesses are expected (e.g. reading Event Log),
243 * to keep device from sleeping.
244 *
245 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
246 * SRAM is okay/restored. We don't check that here because this call
247 * is just for hardware register access; but GP1 MAC_SLEEP check is a
248 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
249 *
250 */
251 ret = _iwl_legacy_poll_bit(priv, CSR_GP_CNTRL,
252 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
253 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
254 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
255 if (ret < 0) {
256 val = _iwl_legacy_read32(priv, CSR_GP_CNTRL);
257 IWL_ERR(priv,
258 "MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val);
259 _iwl_legacy_write32(priv, CSR_RESET,
260 CSR_RESET_REG_FLAG_FORCE_NMI);
261 return -EIO;
262 }
263
264 return 0;
265}
266
267#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
268static inline int __iwl_legacy_grab_nic_access(const char *f, u32 l,
269 struct iwl_priv *priv)
270{
271 IWL_DEBUG_IO(priv, "grabbing nic access - %s %d\n", f, l);
272 return _iwl_legacy_grab_nic_access(priv);
273}
274#define iwl_grab_nic_access(priv) \
275 __iwl_legacy_grab_nic_access(__FILE__, __LINE__, priv)
276#else
277#define iwl_grab_nic_access(priv) \
278 _iwl_legacy_grab_nic_access(priv)
279#endif
280
281static inline void _iwl_legacy_release_nic_access(struct iwl_priv *priv)
282{
283 _iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
284 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
285}
286#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
287static inline void __iwl_legacy_release_nic_access(const char *f, u32 l,
288 struct iwl_priv *priv)
289{
290
291 IWL_DEBUG_IO(priv, "releasing nic access - %s %d\n", f, l);
292 _iwl_legacy_release_nic_access(priv);
293}
294#define iwl_release_nic_access(priv) \
295 __iwl_legacy_release_nic_access(__FILE__, __LINE__, priv)
296#else
297#define iwl_release_nic_access(priv) \
298 _iwl_legacy_release_nic_access(priv)
299#endif
300
301static inline u32 _iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
302{
303 return _iwl_legacy_read32(priv, reg);
304}
305#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
306static inline u32 __iwl_legacy_read_direct32(const char *f, u32 l,
307 struct iwl_priv *priv, u32 reg)
308{
309 u32 value = _iwl_legacy_read_direct32(priv, reg);
310 IWL_DEBUG_IO(priv,
311 "read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value,
312 f, l);
313 return value;
314}
315static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
316{
317 u32 value;
318 unsigned long reg_flags;
319
320 spin_lock_irqsave(&priv->reg_lock, reg_flags);
321 iwl_grab_nic_access(priv);
322 value = __iwl_legacy_read_direct32(__FILE__, __LINE__, priv, reg);
323 iwl_release_nic_access(priv);
324 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
325 return value;
326}
327
328#else
329static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
330{
331 u32 value;
332 unsigned long reg_flags;
333
334 spin_lock_irqsave(&priv->reg_lock, reg_flags);
335 iwl_grab_nic_access(priv);
336 value = _iwl_legacy_read_direct32(priv, reg);
337 iwl_release_nic_access(priv);
338 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
339 return value;
340
341}
342#endif
343
344static inline void _iwl_legacy_write_direct32(struct iwl_priv *priv,
345 u32 reg, u32 value)
346{
347 _iwl_legacy_write32(priv, reg, value);
348}
349static inline void
350iwl_legacy_write_direct32(struct iwl_priv *priv, u32 reg, u32 value)
351{
352 unsigned long reg_flags;
353
354 spin_lock_irqsave(&priv->reg_lock, reg_flags);
355 if (!iwl_grab_nic_access(priv)) {
356 _iwl_legacy_write_direct32(priv, reg, value);
357 iwl_release_nic_access(priv);
358 }
359 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
360}
361
362static inline void iwl_legacy_write_reg_buf(struct iwl_priv *priv,
363 u32 reg, u32 len, u32 *values)
364{
365 u32 count = sizeof(u32);
366
367 if ((priv != NULL) && (values != NULL)) {
368 for (; 0 < len; len -= count, reg += count, values++)
369 iwl_legacy_write_direct32(priv, reg, *values);
370 }
371}
372
373static inline int _iwl_legacy_poll_direct_bit(struct iwl_priv *priv, u32 addr,
374 u32 mask, int timeout)
375{
376 int t = 0;
377
378 do {
379 if ((iwl_legacy_read_direct32(priv, addr) & mask) == mask)
380 return t;
381 udelay(IWL_POLL_INTERVAL);
382 t += IWL_POLL_INTERVAL;
383 } while (t < timeout);
384
385 return -ETIMEDOUT;
386}
387
388#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
389static inline int __iwl_legacy_poll_direct_bit(const char *f, u32 l,
390 struct iwl_priv *priv,
391 u32 addr, u32 mask, int timeout)
392{
393 int ret = _iwl_legacy_poll_direct_bit(priv, addr, mask, timeout);
394
395 if (unlikely(ret == -ETIMEDOUT))
396 IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) - "
397 "timedout - %s %d\n", addr, mask, f, l);
398 else
399 IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) = 0x%08X "
400 "- %s %d\n", addr, mask, ret, f, l);
401 return ret;
402}
403#define iwl_poll_direct_bit(priv, addr, mask, timeout) \
404__iwl_legacy_poll_direct_bit(__FILE__, __LINE__, priv, addr, mask, timeout)
405#else
406#define iwl_poll_direct_bit _iwl_legacy_poll_direct_bit
407#endif
408
409static inline u32 _iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
410{
411 _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
412 rmb();
413 return _iwl_legacy_read_direct32(priv, HBUS_TARG_PRPH_RDAT);
414}
415static inline u32 iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
416{
417 unsigned long reg_flags;
418 u32 val;
419
420 spin_lock_irqsave(&priv->reg_lock, reg_flags);
421 iwl_grab_nic_access(priv);
422 val = _iwl_legacy_read_prph(priv, reg);
423 iwl_release_nic_access(priv);
424 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
425 return val;
426}
427
428static inline void _iwl_legacy_write_prph(struct iwl_priv *priv,
429 u32 addr, u32 val)
430{
431 _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WADDR,
432 ((addr & 0x0000FFFF) | (3 << 24)));
433 wmb();
434 _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val);
435}
436
437static inline void
438iwl_legacy_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
439{
440 unsigned long reg_flags;
441
442 spin_lock_irqsave(&priv->reg_lock, reg_flags);
443 if (!iwl_grab_nic_access(priv)) {
444 _iwl_legacy_write_prph(priv, addr, val);
445 iwl_release_nic_access(priv);
446 }
447 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
448}
449
450#define _iwl_legacy_set_bits_prph(priv, reg, mask) \
451_iwl_legacy_write_prph(priv, reg, (_iwl_legacy_read_prph(priv, reg) | mask))
452
453static inline void
454iwl_legacy_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
455{
456 unsigned long reg_flags;
457
458 spin_lock_irqsave(&priv->reg_lock, reg_flags);
459 iwl_grab_nic_access(priv);
460 _iwl_legacy_set_bits_prph(priv, reg, mask);
461 iwl_release_nic_access(priv);
462 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
463}
464
465#define _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask) \
466_iwl_legacy_write_prph(priv, reg, \
467 ((_iwl_legacy_read_prph(priv, reg) & mask) | bits))
468
469static inline void iwl_legacy_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
470 u32 bits, u32 mask)
471{
472 unsigned long reg_flags;
473
474 spin_lock_irqsave(&priv->reg_lock, reg_flags);
475 iwl_grab_nic_access(priv);
476 _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask);
477 iwl_release_nic_access(priv);
478 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
479}
480
481static inline void iwl_legacy_clear_bits_prph(struct iwl_priv
482 *priv, u32 reg, u32 mask)
483{
484 unsigned long reg_flags;
485 u32 val;
486
487 spin_lock_irqsave(&priv->reg_lock, reg_flags);
488 iwl_grab_nic_access(priv);
489 val = _iwl_legacy_read_prph(priv, reg);
490 _iwl_legacy_write_prph(priv, reg, (val & ~mask));
491 iwl_release_nic_access(priv);
492 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
493}
494
495static inline u32 iwl_legacy_read_targ_mem(struct iwl_priv *priv, u32 addr)
496{
497 unsigned long reg_flags;
498 u32 value;
499
500 spin_lock_irqsave(&priv->reg_lock, reg_flags);
501 iwl_grab_nic_access(priv);
502
503 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
504 rmb();
505 value = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
506
507 iwl_release_nic_access(priv);
508 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
509 return value;
510}
511
512static inline void
513iwl_legacy_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val)
514{
515 unsigned long reg_flags;
516
517 spin_lock_irqsave(&priv->reg_lock, reg_flags);
518 if (!iwl_grab_nic_access(priv)) {
519 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
520 wmb();
521 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WDAT, val);
522 iwl_release_nic_access(priv);
523 }
524 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
525}
526
527static inline void
528iwl_legacy_write_targ_mem_buf(struct iwl_priv *priv, u32 addr,
529 u32 len, u32 *values)
530{
531 unsigned long reg_flags;
532
533 spin_lock_irqsave(&priv->reg_lock, reg_flags);
534 if (!iwl_grab_nic_access(priv)) {
535 _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
536 wmb();
537 for (; 0 < len; len -= sizeof(u32), values++)
538 _iwl_legacy_write_direct32(priv,
539 HBUS_TARG_MEM_WDAT, *values);
540
541 iwl_release_nic_access(priv);
542 }
543 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
544}
545#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.c b/drivers/net/wireless/iwlegacy/iwl-led.c
deleted file mode 100644
index dc568a474c5d..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-led.c
+++ /dev/null
@@ -1,205 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33#include <linux/delay.h>
34#include <linux/skbuff.h>
35#include <linux/netdevice.h>
36#include <net/mac80211.h>
37#include <linux/etherdevice.h>
38#include <asm/unaligned.h>
39
40#include "iwl-dev.h"
41#include "iwl-core.h"
42#include "iwl-io.h"
43
44/* default: IWL_LED_BLINK(0) using blinking index table */
45static int led_mode;
46module_param(led_mode, int, S_IRUGO);
47MODULE_PARM_DESC(led_mode, "0=system default, "
48 "1=On(RF On)/Off(RF Off), 2=blinking");
49
50/* Throughput OFF time(ms) ON time (ms)
51 * >300 25 25
52 * >200 to 300 40 40
53 * >100 to 200 55 55
54 * >70 to 100 65 65
55 * >50 to 70 75 75
56 * >20 to 50 85 85
57 * >10 to 20 95 95
58 * >5 to 10 110 110
59 * >1 to 5 130 130
60 * >0 to 1 167 167
61 * <=0 SOLID ON
62 */
63static const struct ieee80211_tpt_blink iwl_blink[] = {
64 { .throughput = 0, .blink_time = 334 },
65 { .throughput = 1 * 1024 - 1, .blink_time = 260 },
66 { .throughput = 5 * 1024 - 1, .blink_time = 220 },
67 { .throughput = 10 * 1024 - 1, .blink_time = 190 },
68 { .throughput = 20 * 1024 - 1, .blink_time = 170 },
69 { .throughput = 50 * 1024 - 1, .blink_time = 150 },
70 { .throughput = 70 * 1024 - 1, .blink_time = 130 },
71 { .throughput = 100 * 1024 - 1, .blink_time = 110 },
72 { .throughput = 200 * 1024 - 1, .blink_time = 80 },
73 { .throughput = 300 * 1024 - 1, .blink_time = 50 },
74};
75
76/*
77 * Adjust led blink rate to compensate on a MAC Clock difference on every HW
78 * Led blink rate analysis showed an average deviation of 0% on 3945,
79 * 5% on 4965 HW.
80 * Need to compensate on the led on/off time per HW according to the deviation
81 * to achieve the desired led frequency
82 * The calculation is: (100-averageDeviation)/100 * blinkTime
83 * For code efficiency the calculation will be:
84 * compensation = (100 - averageDeviation) * 64 / 100
85 * NewBlinkTime = (compensation * BlinkTime) / 64
86 */
87static inline u8 iwl_legacy_blink_compensation(struct iwl_priv *priv,
88 u8 time, u16 compensation)
89{
90 if (!compensation) {
91 IWL_ERR(priv, "undefined blink compensation: "
92 "use pre-defined blinking time\n");
93 return time;
94 }
95
96 return (u8)((time * compensation) >> 6);
97}
98
99/* Set led pattern command */
100static int iwl_legacy_led_cmd(struct iwl_priv *priv,
101 unsigned long on,
102 unsigned long off)
103{
104 struct iwl_led_cmd led_cmd = {
105 .id = IWL_LED_LINK,
106 .interval = IWL_DEF_LED_INTRVL
107 };
108 int ret;
109
110 if (!test_bit(STATUS_READY, &priv->status))
111 return -EBUSY;
112
113 if (priv->blink_on == on && priv->blink_off == off)
114 return 0;
115
116 if (off == 0) {
117 /* led is SOLID_ON */
118 on = IWL_LED_SOLID;
119 }
120
121 IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
122 priv->cfg->base_params->led_compensation);
123 led_cmd.on = iwl_legacy_blink_compensation(priv, on,
124 priv->cfg->base_params->led_compensation);
125 led_cmd.off = iwl_legacy_blink_compensation(priv, off,
126 priv->cfg->base_params->led_compensation);
127
128 ret = priv->cfg->ops->led->cmd(priv, &led_cmd);
129 if (!ret) {
130 priv->blink_on = on;
131 priv->blink_off = off;
132 }
133 return ret;
134}
135
136static void iwl_legacy_led_brightness_set(struct led_classdev *led_cdev,
137 enum led_brightness brightness)
138{
139 struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
140 unsigned long on = 0;
141
142 if (brightness > 0)
143 on = IWL_LED_SOLID;
144
145 iwl_legacy_led_cmd(priv, on, 0);
146}
147
148static int iwl_legacy_led_blink_set(struct led_classdev *led_cdev,
149 unsigned long *delay_on,
150 unsigned long *delay_off)
151{
152 struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
153
154 return iwl_legacy_led_cmd(priv, *delay_on, *delay_off);
155}
156
157void iwl_legacy_leds_init(struct iwl_priv *priv)
158{
159 int mode = led_mode;
160 int ret;
161
162 if (mode == IWL_LED_DEFAULT)
163 mode = priv->cfg->led_mode;
164
165 priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
166 wiphy_name(priv->hw->wiphy));
167 priv->led.brightness_set = iwl_legacy_led_brightness_set;
168 priv->led.blink_set = iwl_legacy_led_blink_set;
169 priv->led.max_brightness = 1;
170
171 switch (mode) {
172 case IWL_LED_DEFAULT:
173 WARN_ON(1);
174 break;
175 case IWL_LED_BLINK:
176 priv->led.default_trigger =
177 ieee80211_create_tpt_led_trigger(priv->hw,
178 IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
179 iwl_blink, ARRAY_SIZE(iwl_blink));
180 break;
181 case IWL_LED_RF_STATE:
182 priv->led.default_trigger =
183 ieee80211_get_radio_led_name(priv->hw);
184 break;
185 }
186
187 ret = led_classdev_register(&priv->pci_dev->dev, &priv->led);
188 if (ret) {
189 kfree(priv->led.name);
190 return;
191 }
192
193 priv->led_registered = true;
194}
195EXPORT_SYMBOL(iwl_legacy_leds_init);
196
197void iwl_legacy_leds_exit(struct iwl_priv *priv)
198{
199 if (!priv->led_registered)
200 return;
201
202 led_classdev_unregister(&priv->led);
203 kfree(priv->led.name);
204}
205EXPORT_SYMBOL(iwl_legacy_leds_exit);
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.h b/drivers/net/wireless/iwlegacy/iwl-led.h
deleted file mode 100644
index f0791f70f79d..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-led.h
+++ /dev/null
@@ -1,56 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_legacy_leds_h__
28#define __iwl_legacy_leds_h__
29
30
31struct iwl_priv;
32
33#define IWL_LED_SOLID 11
34#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
35
36#define IWL_LED_ACTIVITY (0<<1)
37#define IWL_LED_LINK (1<<1)
38
39/*
40 * LED mode
41 * IWL_LED_DEFAULT: use device default
42 * IWL_LED_RF_STATE: turn LED on/off based on RF state
43 * LED ON = RF ON
44 * LED OFF = RF OFF
45 * IWL_LED_BLINK: adjust led blink rate based on blink table
46 */
47enum iwl_led_mode {
48 IWL_LED_DEFAULT,
49 IWL_LED_RF_STATE,
50 IWL_LED_BLINK,
51};
52
53void iwl_legacy_leds_init(struct iwl_priv *priv);
54void iwl_legacy_leds_exit(struct iwl_priv *priv);
55
56#endif /* __iwl_legacy_leds_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
deleted file mode 100644
index 38647e481eb0..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
+++ /dev/null
@@ -1,456 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_legacy_rs_h__
28#define __iwl_legacy_rs_h__
29
30struct iwl_rate_info {
31 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
32 u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
33 u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
34 u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
35 u8 prev_ieee; /* previous rate in IEEE speeds */
36 u8 next_ieee; /* next rate in IEEE speeds */
37 u8 prev_rs; /* previous rate used in rs algo */
38 u8 next_rs; /* next rate used in rs algo */
39 u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
40 u8 next_rs_tgg; /* next rate used in TGG rs algo */
41};
42
43struct iwl3945_rate_info {
44 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
45 u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
46 u8 prev_ieee; /* previous rate in IEEE speeds */
47 u8 next_ieee; /* next rate in IEEE speeds */
48 u8 prev_rs; /* previous rate used in rs algo */
49 u8 next_rs; /* next rate used in rs algo */
50 u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
51 u8 next_rs_tgg; /* next rate used in TGG rs algo */
52 u8 table_rs_index; /* index in rate scale table cmd */
53 u8 prev_table_rs; /* prev in rate table cmd */
54};
55
56
57/*
58 * These serve as indexes into
59 * struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
60 */
61enum {
62 IWL_RATE_1M_INDEX = 0,
63 IWL_RATE_2M_INDEX,
64 IWL_RATE_5M_INDEX,
65 IWL_RATE_11M_INDEX,
66 IWL_RATE_6M_INDEX,
67 IWL_RATE_9M_INDEX,
68 IWL_RATE_12M_INDEX,
69 IWL_RATE_18M_INDEX,
70 IWL_RATE_24M_INDEX,
71 IWL_RATE_36M_INDEX,
72 IWL_RATE_48M_INDEX,
73 IWL_RATE_54M_INDEX,
74 IWL_RATE_60M_INDEX,
75 IWL_RATE_COUNT,
76 IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1, /* Excluding 60M */
77 IWL_RATE_COUNT_3945 = IWL_RATE_COUNT - 1,
78 IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
79 IWL_RATE_INVALID = IWL_RATE_COUNT,
80};
81
82enum {
83 IWL_RATE_6M_INDEX_TABLE = 0,
84 IWL_RATE_9M_INDEX_TABLE,
85 IWL_RATE_12M_INDEX_TABLE,
86 IWL_RATE_18M_INDEX_TABLE,
87 IWL_RATE_24M_INDEX_TABLE,
88 IWL_RATE_36M_INDEX_TABLE,
89 IWL_RATE_48M_INDEX_TABLE,
90 IWL_RATE_54M_INDEX_TABLE,
91 IWL_RATE_1M_INDEX_TABLE,
92 IWL_RATE_2M_INDEX_TABLE,
93 IWL_RATE_5M_INDEX_TABLE,
94 IWL_RATE_11M_INDEX_TABLE,
95 IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
96};
97
98enum {
99 IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
100 IWL39_LAST_OFDM_RATE = IWL_RATE_54M_INDEX,
101 IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
102 IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
103 IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
104};
105
106/* #define vs. enum to keep from defaulting to 'large integer' */
107#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX)
108#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX)
109#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX)
110#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX)
111#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX)
112#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX)
113#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX)
114#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX)
115#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX)
116#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX)
117#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX)
118#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX)
119#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
120
121/* uCode API values for legacy bit rates, both OFDM and CCK */
122enum {
123 IWL_RATE_6M_PLCP = 13,
124 IWL_RATE_9M_PLCP = 15,
125 IWL_RATE_12M_PLCP = 5,
126 IWL_RATE_18M_PLCP = 7,
127 IWL_RATE_24M_PLCP = 9,
128 IWL_RATE_36M_PLCP = 11,
129 IWL_RATE_48M_PLCP = 1,
130 IWL_RATE_54M_PLCP = 3,
131 IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/
132 IWL_RATE_1M_PLCP = 10,
133 IWL_RATE_2M_PLCP = 20,
134 IWL_RATE_5M_PLCP = 55,
135 IWL_RATE_11M_PLCP = 110,
136 /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/
137};
138
139/* uCode API values for OFDM high-throughput (HT) bit rates */
140enum {
141 IWL_RATE_SISO_6M_PLCP = 0,
142 IWL_RATE_SISO_12M_PLCP = 1,
143 IWL_RATE_SISO_18M_PLCP = 2,
144 IWL_RATE_SISO_24M_PLCP = 3,
145 IWL_RATE_SISO_36M_PLCP = 4,
146 IWL_RATE_SISO_48M_PLCP = 5,
147 IWL_RATE_SISO_54M_PLCP = 6,
148 IWL_RATE_SISO_60M_PLCP = 7,
149 IWL_RATE_MIMO2_6M_PLCP = 0x8,
150 IWL_RATE_MIMO2_12M_PLCP = 0x9,
151 IWL_RATE_MIMO2_18M_PLCP = 0xa,
152 IWL_RATE_MIMO2_24M_PLCP = 0xb,
153 IWL_RATE_MIMO2_36M_PLCP = 0xc,
154 IWL_RATE_MIMO2_48M_PLCP = 0xd,
155 IWL_RATE_MIMO2_54M_PLCP = 0xe,
156 IWL_RATE_MIMO2_60M_PLCP = 0xf,
157 IWL_RATE_SISO_INVM_PLCP,
158 IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
159};
160
161/* MAC header values for bit rates */
162enum {
163 IWL_RATE_6M_IEEE = 12,
164 IWL_RATE_9M_IEEE = 18,
165 IWL_RATE_12M_IEEE = 24,
166 IWL_RATE_18M_IEEE = 36,
167 IWL_RATE_24M_IEEE = 48,
168 IWL_RATE_36M_IEEE = 72,
169 IWL_RATE_48M_IEEE = 96,
170 IWL_RATE_54M_IEEE = 108,
171 IWL_RATE_60M_IEEE = 120,
172 IWL_RATE_1M_IEEE = 2,
173 IWL_RATE_2M_IEEE = 4,
174 IWL_RATE_5M_IEEE = 11,
175 IWL_RATE_11M_IEEE = 22,
176};
177
178#define IWL_CCK_BASIC_RATES_MASK \
179 (IWL_RATE_1M_MASK | \
180 IWL_RATE_2M_MASK)
181
182#define IWL_CCK_RATES_MASK \
183 (IWL_CCK_BASIC_RATES_MASK | \
184 IWL_RATE_5M_MASK | \
185 IWL_RATE_11M_MASK)
186
187#define IWL_OFDM_BASIC_RATES_MASK \
188 (IWL_RATE_6M_MASK | \
189 IWL_RATE_12M_MASK | \
190 IWL_RATE_24M_MASK)
191
192#define IWL_OFDM_RATES_MASK \
193 (IWL_OFDM_BASIC_RATES_MASK | \
194 IWL_RATE_9M_MASK | \
195 IWL_RATE_18M_MASK | \
196 IWL_RATE_36M_MASK | \
197 IWL_RATE_48M_MASK | \
198 IWL_RATE_54M_MASK)
199
200#define IWL_BASIC_RATES_MASK \
201 (IWL_OFDM_BASIC_RATES_MASK | \
202 IWL_CCK_BASIC_RATES_MASK)
203
204#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
205#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1)
206
207#define IWL_INVALID_VALUE -1
208
209#define IWL_MIN_RSSI_VAL -100
210#define IWL_MAX_RSSI_VAL 0
211
212/* These values specify how many Tx frame attempts before
213 * searching for a new modulation mode */
214#define IWL_LEGACY_FAILURE_LIMIT 160
215#define IWL_LEGACY_SUCCESS_LIMIT 480
216#define IWL_LEGACY_TABLE_COUNT 160
217
218#define IWL_NONE_LEGACY_FAILURE_LIMIT 400
219#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500
220#define IWL_NONE_LEGACY_TABLE_COUNT 1500
221
222/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
223#define IWL_RS_GOOD_RATIO 12800 /* 100% */
224#define IWL_RATE_SCALE_SWITCH 10880 /* 85% */
225#define IWL_RATE_HIGH_TH 10880 /* 85% */
226#define IWL_RATE_INCREASE_TH 6400 /* 50% */
227#define IWL_RATE_DECREASE_TH 1920 /* 15% */
228
229/* possible actions when in legacy mode */
230#define IWL_LEGACY_SWITCH_ANTENNA1 0
231#define IWL_LEGACY_SWITCH_ANTENNA2 1
232#define IWL_LEGACY_SWITCH_SISO 2
233#define IWL_LEGACY_SWITCH_MIMO2_AB 3
234#define IWL_LEGACY_SWITCH_MIMO2_AC 4
235#define IWL_LEGACY_SWITCH_MIMO2_BC 5
236
237/* possible actions when in siso mode */
238#define IWL_SISO_SWITCH_ANTENNA1 0
239#define IWL_SISO_SWITCH_ANTENNA2 1
240#define IWL_SISO_SWITCH_MIMO2_AB 2
241#define IWL_SISO_SWITCH_MIMO2_AC 3
242#define IWL_SISO_SWITCH_MIMO2_BC 4
243#define IWL_SISO_SWITCH_GI 5
244
245/* possible actions when in mimo mode */
246#define IWL_MIMO2_SWITCH_ANTENNA1 0
247#define IWL_MIMO2_SWITCH_ANTENNA2 1
248#define IWL_MIMO2_SWITCH_SISO_A 2
249#define IWL_MIMO2_SWITCH_SISO_B 3
250#define IWL_MIMO2_SWITCH_SISO_C 4
251#define IWL_MIMO2_SWITCH_GI 5
252
253#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI
254
255#define IWL_ACTION_LIMIT 3 /* # possible actions */
256
257#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
258
259/* load per tid defines for A-MPDU activation */
260#define IWL_AGG_TPT_THREHOLD 0
261#define IWL_AGG_LOAD_THRESHOLD 10
262#define IWL_AGG_ALL_TID 0xff
263#define TID_QUEUE_CELL_SPACING 50 /*mS */
264#define TID_QUEUE_MAX_SIZE 20
265#define TID_ROUND_VALUE 5 /* mS */
266#define TID_MAX_LOAD_COUNT 8
267
268#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
269#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
270
271extern const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
272
273enum iwl_table_type {
274 LQ_NONE,
275 LQ_G, /* legacy types */
276 LQ_A,
277 LQ_SISO, /* high-throughput types */
278 LQ_MIMO2,
279 LQ_MAX,
280};
281
282#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
283#define is_siso(tbl) ((tbl) == LQ_SISO)
284#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
285#define is_mimo(tbl) (is_mimo2(tbl))
286#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
287#define is_a_band(tbl) ((tbl) == LQ_A)
288#define is_g_and(tbl) ((tbl) == LQ_G)
289
290#define ANT_NONE 0x0
291#define ANT_A BIT(0)
292#define ANT_B BIT(1)
293#define ANT_AB (ANT_A | ANT_B)
294#define ANT_C BIT(2)
295#define ANT_AC (ANT_A | ANT_C)
296#define ANT_BC (ANT_B | ANT_C)
297#define ANT_ABC (ANT_AB | ANT_C)
298
299#define IWL_MAX_MCS_DISPLAY_SIZE 12
300
301struct iwl_rate_mcs_info {
302 char mbps[IWL_MAX_MCS_DISPLAY_SIZE];
303 char mcs[IWL_MAX_MCS_DISPLAY_SIZE];
304};
305
306/**
307 * struct iwl_rate_scale_data -- tx success history for one rate
308 */
309struct iwl_rate_scale_data {
310 u64 data; /* bitmap of successful frames */
311 s32 success_counter; /* number of frames successful */
312 s32 success_ratio; /* per-cent * 128 */
313 s32 counter; /* number of frames attempted */
314 s32 average_tpt; /* success ratio * expected throughput */
315 unsigned long stamp;
316};
317
/**
 * struct iwl_scale_tbl_info -- tx params and success history for all rates
 *
 * There are two of these in struct iwl_lq_sta,
 * one for "active", and one for "search".
 */
struct iwl_scale_tbl_info {
	enum iwl_table_type lq_type;
	u8 ant_type;
	u8 is_SGI;	/* 1 = short guard interval */
	u8 is_ht40;	/* 1 = 40 MHz channel width */
	u8 is_dup;	/* 1 = duplicated data streams */
	u8 action;	/* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
	u8 max_search;	/* maximum number of tables we can search */
	s32 *expected_tpt;	/* throughput metrics; expected_tpt_G, etc. */
	u32 current_rate;  /* rate_n_flags, uCode API format */
	struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
};
336
337struct iwl_traffic_load {
338 unsigned long time_stamp; /* age of the oldest statistics */
339 u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
340 * slice */
341 u32 total; /* total num of packets during the
342 * last TID_MAX_TIME_DIFF */
343 u8 queue_count; /* number of queues that has
344 * been used since the last cleanup */
345 u8 head; /* start of the circular buffer */
346};
347
348/**
349 * struct iwl_lq_sta -- driver's rate scaling private structure
350 *
351 * Pointer to this gets passed back and forth between driver and mac80211.
352 */
353struct iwl_lq_sta {
354 u8 active_tbl; /* index of active table, range 0-1 */
355 u8 enable_counter; /* indicates HT mode */
356 u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
357 u8 search_better_tbl; /* 1: currently trying alternate mode */
358 s32 last_tpt;
359
360 /* The following determine when to search for a new mode */
361 u32 table_count_limit;
362 u32 max_failure_limit; /* # failed frames before new search */
363 u32 max_success_limit; /* # successful frames before new search */
364 u32 table_count;
365 u32 total_failed; /* total failed frames, any/all rates */
366 u32 total_success; /* total successful frames, any/all rates */
367 u64 flush_timer; /* time staying in mode before new search */
368
369 u8 action_counter; /* # mode-switch actions tried */
370 u8 is_green;
371 u8 is_dup;
372 enum ieee80211_band band;
373
374 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
375 u32 supp_rates;
376 u16 active_legacy_rate;
377 u16 active_siso_rate;
378 u16 active_mimo2_rate;
379 s8 max_rate_idx; /* Max rate set by user */
380 u8 missed_rate_counter;
381
382 struct iwl_link_quality_cmd lq;
383 struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
384 struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
385 u8 tx_agg_tid_en;
386#ifdef CONFIG_MAC80211_DEBUGFS
387 struct dentry *rs_sta_dbgfs_scale_table_file;
388 struct dentry *rs_sta_dbgfs_stats_table_file;
389 struct dentry *rs_sta_dbgfs_rate_scale_data_file;
390 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
391 u32 dbg_fixed_rate;
392#endif
393 struct iwl_priv *drv;
394
395 /* used to be in sta_info */
396 int last_txrate_idx;
397 /* last tx rate_n_flags */
398 u32 last_rate_n_flags;
399 /* packets destined for this STA are aggregated */
400 u8 is_agg;
401};
402
403static inline u8 iwl4965_num_of_ant(u8 mask)
404{
405 return !!((mask) & ANT_A) +
406 !!((mask) & ANT_B) +
407 !!((mask) & ANT_C);
408}
409
410static inline u8 iwl4965_first_antenna(u8 mask)
411{
412 if (mask & ANT_A)
413 return ANT_A;
414 if (mask & ANT_B)
415 return ANT_B;
416 return ANT_C;
417}
418
419
420/**
421 * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info
422 *
423 * The specific throughput table used is based on the type of network
424 * the associated with, including A, B, G, and G w/ TGG protection
425 */
426extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
427
428/* Initialize station's rate scaling information after adding station */
429extern void iwl4965_rs_rate_init(struct iwl_priv *priv,
430 struct ieee80211_sta *sta, u8 sta_id);
431extern void iwl3945_rs_rate_init(struct iwl_priv *priv,
432 struct ieee80211_sta *sta, u8 sta_id);
433
434/**
435 * iwl_rate_control_register - Register the rate control algorithm callbacks
436 *
437 * Since the rate control algorithm is hardware specific, there is no need
438 * or reason to place it as a stand alone module. The driver can call
439 * iwl_rate_control_register in order to register the rate control callbacks
440 * with the mac80211 subsystem. This should be performed prior to calling
441 * ieee80211_register_hw
442 *
443 */
444extern int iwl4965_rate_control_register(void);
445extern int iwl3945_rate_control_register(void);
446
447/**
448 * iwl_rate_control_unregister - Unregister the rate control callbacks
449 *
450 * This should be called after calling ieee80211_unregister_hw, but before
451 * the driver is unloaded.
452 */
453extern void iwl4965_rate_control_unregister(void);
454extern void iwl3945_rate_control_unregister(void);
455
456#endif /* __iwl_legacy_rs_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.c b/drivers/net/wireless/iwlegacy/iwl-power.c
deleted file mode 100644
index 903ef0d6d6cb..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-power.c
+++ /dev/null
@@ -1,165 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/slab.h>
33#include <linux/init.h>
34
35#include <net/mac80211.h>
36
37#include "iwl-eeprom.h"
38#include "iwl-dev.h"
39#include "iwl-core.h"
40#include "iwl-io.h"
41#include "iwl-commands.h"
42#include "iwl-debug.h"
43#include "iwl-power.h"
44
45/*
46 * Setting power level allows the card to go to sleep when not busy.
47 *
48 * We calculate a sleep command based on the required latency, which
49 * we get from mac80211. In order to handle thermal throttling, we can
50 * also use pre-defined power levels.
51 */
52
53/*
54 * This defines the old power levels. They are still used by default
55 * (level 1) and for thermal throttle (levels 3 through 5)
56 */
57
/* A power table command paired with the number of DTIM intervals the
 * device may skip while operating at that power level. */
struct iwl_power_vec_entry {
	struct iwl_powertable_cmd cmd;
	u8 no_dtim;	/* number of skip dtim */
};
62
/*
 * Fill @cmd for CAM ("continuously awake mode"): all fields zeroed so
 * the device never sleeps, with only the PCI power-management flag
 * carried over from the current power configuration.
 */
static void iwl_legacy_power_sleep_cam_cmd(struct iwl_priv *priv,
				    struct iwl_powertable_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));

	if (priv->power_data.pci_pm)
		cmd->flags |= IWL_POWER_PCI_PM_MSK;

	IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
}
73
/*
 * Send the POWER_TABLE_CMD to the uCode, logging the full contents of
 * the command at power-debug level first.  Returns the result of the
 * command submission.
 */
static int
iwl_legacy_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
{
	IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
	IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags);
	IWL_DEBUG_POWER(priv, "Tx timeout = %u\n",
				le32_to_cpu(cmd->tx_data_timeout));
	IWL_DEBUG_POWER(priv, "Rx timeout = %u\n",
				le32_to_cpu(cmd->rx_data_timeout));
	IWL_DEBUG_POWER(priv,
		"Sleep interval vector = { %d , %d , %d , %d , %d }\n",
			le32_to_cpu(cmd->sleep_interval[0]),
			le32_to_cpu(cmd->sleep_interval[1]),
			le32_to_cpu(cmd->sleep_interval[2]),
			le32_to_cpu(cmd->sleep_interval[3]),
			le32_to_cpu(cmd->sleep_interval[4]));

	return iwl_legacy_send_cmd_pdu(priv, POWER_TABLE_CMD,
				sizeof(struct iwl_powertable_cmd), cmd);
}
94
95int
96iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
97 bool force)
98{
99 int ret;
100 bool update_chains;
101
102 lockdep_assert_held(&priv->mutex);
103
104 /* Don't update the RX chain when chain noise calibration is running */
105 update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
106 priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
107
108 if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
109 return 0;
110
111 if (!iwl_legacy_is_ready_rf(priv))
112 return -EIO;
113
114 /* scan complete use sleep_power_next, need to be updated */
115 memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
116 if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
117 IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
118 return 0;
119 }
120
121 if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
122 set_bit(STATUS_POWER_PMI, &priv->status);
123
124 ret = iwl_legacy_set_power(priv, cmd);
125 if (!ret) {
126 if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
127 clear_bit(STATUS_POWER_PMI, &priv->status);
128
129 if (priv->cfg->ops->lib->update_chain_flags && update_chains)
130 priv->cfg->ops->lib->update_chain_flags(priv);
131 else if (priv->cfg->ops->lib->update_chain_flags)
132 IWL_DEBUG_POWER(priv,
133 "Cannot update the power, chain noise "
134 "calibration running: %d\n",
135 priv->chain_noise_data.state);
136
137 memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
138 } else
139 IWL_ERR(priv, "set power fail, ret = %d", ret);
140
141 return ret;
142}
143
144int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force)
145{
146 struct iwl_powertable_cmd cmd;
147
148 iwl_legacy_power_sleep_cam_cmd(priv, &cmd);
149 return iwl_legacy_power_set_mode(priv, &cmd, force);
150}
151EXPORT_SYMBOL(iwl_legacy_power_update_mode);
152
/* initialize to default */
void iwl_legacy_power_initialize(struct iwl_priv *priv)
{
	u16 lctl = iwl_legacy_pcie_link_ctl(priv);

	/* PCI power management is only allowed when L0s is disabled */
	priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);

	/* -1: no debugfs override of the sleep level */
	priv->power_data.debug_sleep_level_override = -1;

	/* no sleep command has been sent to the device yet */
	memset(&priv->power_data.sleep_cmd, 0,
		sizeof(priv->power_data.sleep_cmd));
}
EXPORT_SYMBOL(iwl_legacy_power_initialize);
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.h b/drivers/net/wireless/iwlegacy/iwl-power.h
deleted file mode 100644
index d30b36acdc4a..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-power.h
+++ /dev/null
@@ -1,55 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#ifndef __iwl_legacy_power_setting_h__
29#define __iwl_legacy_power_setting_h__
30
31#include "iwl-commands.h"
32
33enum iwl_power_level {
34 IWL_POWER_INDEX_1,
35 IWL_POWER_INDEX_2,
36 IWL_POWER_INDEX_3,
37 IWL_POWER_INDEX_4,
38 IWL_POWER_INDEX_5,
39 IWL_POWER_NUM
40};
41
42struct iwl_power_mgr {
43 struct iwl_powertable_cmd sleep_cmd;
44 struct iwl_powertable_cmd sleep_cmd_next;
45 int debug_sleep_level_override;
46 bool pci_pm;
47};
48
49int
50iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
51 bool force);
52int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force);
53void iwl_legacy_power_initialize(struct iwl_priv *priv);
54
55#endif /* __iwl_legacy_power_setting_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-rx.c b/drivers/net/wireless/iwlegacy/iwl-rx.c
deleted file mode 100644
index 9b5d0abe8be9..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-rx.c
+++ /dev/null
@@ -1,281 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/etherdevice.h>
31#include <linux/slab.h>
32#include <net/mac80211.h>
33#include <asm/unaligned.h>
34#include "iwl-eeprom.h"
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38#include "iwl-io.h"
39#include "iwl-helpers.h"
40/************************** RX-FUNCTIONS ****************************/
41/*
42 * Rx theory of operation
43 *
44 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
45 * each of which point to Receive Buffers to be filled by the NIC. These get
46 * used not only for Rx frames, but for any command response or notification
47 * from the NIC. The driver and NIC manage the Rx buffers by means
48 * of indexes into the circular buffer.
49 *
50 * Rx Queue Indexes
51 * The host/firmware share two index registers for managing the Rx buffers.
52 *
53 * The READ index maps to the first position that the firmware may be writing
54 * to -- the driver can read up to (but not including) this position and get
55 * good data.
56 * The READ index is managed by the firmware once the card is enabled.
57 *
58 * The WRITE index maps to the last position the driver has read from -- the
59 * position preceding WRITE is the last slot the firmware can place a packet.
60 *
61 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
62 * WRITE = READ.
63 *
64 * During initialization, the host sets up the READ queue position to the first
65 * INDEX position, and WRITE to the last (READ - 1 wrapped)
66 *
67 * When the firmware places a packet in a buffer, it will advance the READ index
68 * and fire the RX interrupt. The driver can then query the READ index and
69 * process as many packets as possible, moving the WRITE index forward as it
70 * resets the Rx queue buffers with new memory.
71 *
72 * The management in the driver is as follows:
73 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
74 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
75 * to replenish the iwl->rxq->rx_free.
76 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
77 * iwl->rxq is replenished and the READ INDEX is updated (updating the
78 * 'processed' and 'read' driver indexes as well)
79 * + A received packet is processed and handed to the kernel network stack,
80 * detached from the iwl->rxq. The driver 'processed' index is updated.
81 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
82 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
83 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
84 * were enough free buffers and RX_STALLED is set it is cleared.
85 *
86 *
87 * Driver sequence:
88 *
89 * iwl_legacy_rx_queue_alloc() Allocates rx_free
90 * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
91 * iwl_rx_queue_restock
92 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
93 * queue, updates firmware pointers, and updates
94 * the WRITE index. If insufficient rx_free buffers
95 * are available, schedules iwl_rx_replenish
96 *
97 * -- enable interrupts --
98 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
99 * READ INDEX, detaching the SKB from the pool.
100 * Moves the packet buffer from queue to rx_used.
101 * Calls iwl_rx_queue_restock to refill any empty
102 * slots.
103 * ...
104 *
105 */
106
107/**
108 * iwl_legacy_rx_queue_space - Return number of free slots available in queue.
109 */
110int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q)
111{
112 int s = q->read - q->write;
113 if (s <= 0)
114 s += RX_QUEUE_SIZE;
115 /* keep some buffer to not confuse full and empty queue */
116 s -= 2;
117 if (s < 0)
118 s = 0;
119 return s;
120}
121EXPORT_SYMBOL(iwl_legacy_rx_queue_space);
122
/**
 * iwl_legacy_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 *
 * Tells the device how far the driver has replenished the RX queue.
 * If power-saving is active and the MAC is asleep, a wakeup is
 * requested via CSR_GP_CNTRL instead of writing the pointer;
 * need_update stays set so the write is retried later.
 */
void
iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
					struct iwl_rx_queue *q)
{
	unsigned long flags;
	u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
	u32 reg;

	spin_lock_irqsave(&q->lock, flags);

	if (q->need_update == 0)
		goto exit_unlock;

	/* If power-saving is in use, make sure device is awake */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(priv,
				"Rx queue requesting wakeup,"
				" GP1 = 0x%x\n", reg);
			iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
				CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			/* need_update left set: retry once awake */
			goto exit_unlock;
		}

		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
				q->write_actual);

	/* Else device is assumed to be awake */
	} else {
		/* Device expects a multiple of 8 */
		q->write_actual = (q->write & ~0x7);
		iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
			q->write_actual);
	}

	q->need_update = 0;

 exit_unlock:
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(iwl_legacy_rx_queue_update_write_ptr);
170
171int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv)
172{
173 struct iwl_rx_queue *rxq = &priv->rxq;
174 struct device *dev = &priv->pci_dev->dev;
175 int i;
176
177 spin_lock_init(&rxq->lock);
178 INIT_LIST_HEAD(&rxq->rx_free);
179 INIT_LIST_HEAD(&rxq->rx_used);
180
181 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
182 rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
183 GFP_KERNEL);
184 if (!rxq->bd)
185 goto err_bd;
186
187 rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
188 &rxq->rb_stts_dma, GFP_KERNEL);
189 if (!rxq->rb_stts)
190 goto err_rb;
191
192 /* Fill the rx_used queue with _all_ of the Rx buffers */
193 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
194 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
195
196 /* Set us so that we have processed and used all buffers, but have
197 * not restocked the Rx queue with fresh buffers */
198 rxq->read = rxq->write = 0;
199 rxq->write_actual = 0;
200 rxq->free_count = 0;
201 rxq->need_update = 0;
202 return 0;
203
204err_rb:
205 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
206 rxq->bd_dma);
207err_bd:
208 return -ENOMEM;
209}
210EXPORT_SYMBOL(iwl_legacy_rx_queue_alloc);
211
212
/*
 * Handle a SPECTRUM_MEASURE_NOTIFICATION from the uCode: the "start"
 * event carries no data, while the completion report is copied into
 * priv->measure_report and flagged ready for later retrieval.
 */
void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
					struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);

	/* state == 0 marks the start of the measurement; nothing to save */
	if (!report->state) {
		IWL_DEBUG_11H(priv,
			"Spectrum Measure Notification: Start\n");
		return;
	}

	memcpy(&priv->measure_report, report, sizeof(*report));
	priv->measurement_status |= MEASUREMENT_READY;
}
EXPORT_SYMBOL(iwl_legacy_rx_spectrum_measure_notif);
229
/*
 * returns non-zero if packet should be dropped
 *
 * Translates the uCode decryption result into mac80211 RX status
 * flags, or rejects the frame when in-place decryption destroyed it.
 */
int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
			struct ieee80211_hdr *hdr,
			u32 decrypt_res,
			struct ieee80211_rx_status *stats)
{
	u16 fc = le16_to_cpu(hdr->frame_control);

	/*
	 * All contexts have the same setting here due to it being
	 * a module parameter, so OK to check any context.
	 */
	if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
						RXON_FILTER_DIS_DECRYPT_MSK)
		return 0;

	/* unprotected frames carry no decryption status to translate */
	if (!(fc & IEEE80211_FCTL_PROTECTED))
		return 0;

	IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
	case RX_RES_STATUS_SEC_TYPE_TKIP:
		/* The uCode has got a bad phase 1 Key, pushes the packet.
		 * Decryption will be done in SW. */
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_KEY_TTAK)
			break;
		/* fall through */

	case RX_RES_STATUS_SEC_TYPE_WEP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_ICV_MIC) {
			/* bad ICV, the packet is destroyed since the
			 * decryption is inplace, drop it */
			IWL_DEBUG_RX(priv, "Packet destroyed\n");
			return -1;
		}
		/* fall through */
	case RX_RES_STATUS_SEC_TYPE_CCMP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_DECRYPT_OK) {
			IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
			stats->flag |= RX_FLAG_DECRYPTED;
		}
		break;

	default:
		break;
	}
	return 0;
}
EXPORT_SYMBOL(iwl_legacy_set_decrypted_flag);
diff --git a/drivers/net/wireless/iwlegacy/iwl-scan.c b/drivers/net/wireless/iwlegacy/iwl-scan.c
deleted file mode 100644
index a6b5222fc59e..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-scan.c
+++ /dev/null
@@ -1,549 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#include <linux/slab.h>
29#include <linux/types.h>
30#include <linux/etherdevice.h>
31#include <net/mac80211.h>
32
33#include "iwl-eeprom.h"
34#include "iwl-dev.h"
35#include "iwl-core.h"
36#include "iwl-sta.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39
40/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
41 * sending probe req. This should be set long enough to hear probe responses
42 * from more than one AP. */
43#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
44#define IWL_ACTIVE_DWELL_TIME_52 (20)
45
46#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
47#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
48
49/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
50 * Must be set longer than active dwell time.
51 * For the most reliable scan, set > AP beacon interval (typically 100msec). */
52#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
53#define IWL_PASSIVE_DWELL_TIME_52 (10)
54#define IWL_PASSIVE_DWELL_BASE (100)
55#define IWL_CHANNEL_TUNE_TIME 5
56
57static int iwl_legacy_send_scan_abort(struct iwl_priv *priv)
58{
59 int ret;
60 struct iwl_rx_packet *pkt;
61 struct iwl_host_cmd cmd = {
62 .id = REPLY_SCAN_ABORT_CMD,
63 .flags = CMD_WANT_SKB,
64 };
65
66 /* Exit instantly with error when device is not ready
67 * to receive scan abort command or it does not perform
68 * hardware scan currently */
69 if (!test_bit(STATUS_READY, &priv->status) ||
70 !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
71 !test_bit(STATUS_SCAN_HW, &priv->status) ||
72 test_bit(STATUS_FW_ERROR, &priv->status) ||
73 test_bit(STATUS_EXIT_PENDING, &priv->status))
74 return -EIO;
75
76 ret = iwl_legacy_send_cmd_sync(priv, &cmd);
77 if (ret)
78 return ret;
79
80 pkt = (struct iwl_rx_packet *)cmd.reply_page;
81 if (pkt->u.status != CAN_ABORT_STATUS) {
82 /* The scan abort will return 1 for success or
83 * 2 for "failure". A failure condition can be
84 * due to simply not being in an active scan which
85 * can occur if we send the scan abort before we
86 * the microcode has notified us that a scan is
87 * completed. */
88 IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", pkt->u.status);
89 ret = -EIO;
90 }
91
92 iwl_legacy_free_pages(priv, cmd.reply_page);
93 return ret;
94}
95
96static void iwl_legacy_complete_scan(struct iwl_priv *priv, bool aborted)
97{
98 /* check if scan was requested from mac80211 */
99 if (priv->scan_request) {
100 IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n");
101 ieee80211_scan_completed(priv->hw, aborted);
102 }
103
104 priv->scan_vif = NULL;
105 priv->scan_request = NULL;
106}
107
/*
 * iwl_legacy_force_scan_end - unconditionally end the current scan
 *
 * Clears all scan-related status bits and reports the scan to mac80211 as
 * aborted.  Used when the firmware cannot be relied on to deliver a
 * scan-complete notification.  Caller must hold priv->mutex.
 */
void iwl_legacy_force_scan_end(struct iwl_priv *priv)
{
	lockdep_assert_held(&priv->mutex);

	if (!test_bit(STATUS_SCANNING, &priv->status)) {
		IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
		return;
	}

	IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
	clear_bit(STATUS_SCANNING, &priv->status);
	clear_bit(STATUS_SCAN_HW, &priv->status);
	clear_bit(STATUS_SCAN_ABORTING, &priv->status);
	/* true => the scan is reported to mac80211 as aborted */
	iwl_legacy_complete_scan(priv, true);
}
123
124static void iwl_legacy_do_scan_abort(struct iwl_priv *priv)
125{
126 int ret;
127
128 lockdep_assert_held(&priv->mutex);
129
130 if (!test_bit(STATUS_SCANNING, &priv->status)) {
131 IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
132 return;
133 }
134
135 if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
136 IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
137 return;
138 }
139
140 ret = iwl_legacy_send_scan_abort(priv);
141 if (ret) {
142 IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret);
143 iwl_legacy_force_scan_end(priv);
144 } else
145 IWL_DEBUG_SCAN(priv, "Successfully send scan abort\n");
146}
147
/**
 * iwl_legacy_scan_cancel - Cancel any currently executing HW scan
 *
 * The actual abort is performed asynchronously by the abort_scan work
 * item; this function only queues that work and always returns 0.
 */
int iwl_legacy_scan_cancel(struct iwl_priv *priv)
{
	IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
	queue_work(priv->workqueue, &priv->abort_scan);
	return 0;
}
EXPORT_SYMBOL(iwl_legacy_scan_cancel);
158
159/**
160 * iwl_legacy_scan_cancel_timeout - Cancel any currently executing HW scan
161 * @ms: amount of time to wait (in milliseconds) for scan to abort
162 *
163 */
164int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
165{
166 unsigned long timeout = jiffies + msecs_to_jiffies(ms);
167
168 lockdep_assert_held(&priv->mutex);
169
170 IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");
171
172 iwl_legacy_do_scan_abort(priv);
173
174 while (time_before_eq(jiffies, timeout)) {
175 if (!test_bit(STATUS_SCAN_HW, &priv->status))
176 break;
177 msleep(20);
178 }
179
180 return test_bit(STATUS_SCAN_HW, &priv->status);
181}
182EXPORT_SYMBOL(iwl_legacy_scan_cancel_timeout);
183
/* Service response to REPLY_SCAN_CMD (0x80) */
static void iwl_legacy_rx_reply_scan(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	/* Debug-only handler: the notification carries no state the
	 * driver acts on, we merely log the reported status. */
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_scanreq_notification *notif =
	    (struct iwl_scanreq_notification *)pkt->u.raw;

	IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
#endif
}
196
197/* Service SCAN_START_NOTIFICATION (0x82) */
198static void iwl_legacy_rx_scan_start_notif(struct iwl_priv *priv,
199 struct iwl_rx_mem_buffer *rxb)
200{
201 struct iwl_rx_packet *pkt = rxb_addr(rxb);
202 struct iwl_scanstart_notification *notif =
203 (struct iwl_scanstart_notification *)pkt->u.raw;
204 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
205 IWL_DEBUG_SCAN(priv, "Scan start: "
206 "%d [802.11%s] "
207 "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
208 notif->channel,
209 notif->band ? "bg" : "a",
210 le32_to_cpu(notif->tsf_high),
211 le32_to_cpu(notif->tsf_low),
212 notif->status, notif->beacon_timer);
213}
214
/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
static void iwl_legacy_rx_scan_results_notif(struct iwl_priv *priv,
				      struct iwl_rx_mem_buffer *rxb)
{
	/* Debug-only handler: logs per-channel scan results, including the
	 * time elapsed since the scan start TSF recorded earlier. */
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_scanresults_notification *notif =
	    (struct iwl_scanresults_notification *)pkt->u.raw;

	IWL_DEBUG_SCAN(priv, "Scan ch.res: "
		       "%d [802.11%s] "
		       "(TSF: 0x%08X:%08X) - %d "
		       "elapsed=%lu usec\n",
		       notif->channel,
		       notif->band ? "bg" : "a",
		       le32_to_cpu(notif->tsf_high),
		       le32_to_cpu(notif->tsf_low),
		       le32_to_cpu(notif->statistics[0]),
		       le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
#endif
}
236
/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
static void iwl_legacy_rx_scan_complete_notif(struct iwl_priv *priv,
				       struct iwl_rx_mem_buffer *rxb)
{

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
#endif

	/* NOTE(review): scan_notif is referenced outside the #ifdef above;
	 * this presumably builds with debugging compiled out only because
	 * IWL_DEBUG_SCAN then discards its arguments during preprocessing
	 * — confirm before restructuring. */
	IWL_DEBUG_SCAN(priv,
			"Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
		       scan_notif->scanned_channels,
		       scan_notif->tsf_low,
		       scan_notif->tsf_high, scan_notif->status);

	/* The HW is no longer scanning */
	clear_bit(STATUS_SCAN_HW, &priv->status);

	IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
		       (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
		       jiffies_to_msecs(jiffies - priv->scan_start));

	/* The mac80211 completion and deferred power/TX-power settings are
	 * handled in process context by the scan_completed work item. */
	queue_work(priv->workqueue, &priv->scan_completed);
}
262
263void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv)
264{
265 /* scan handlers */
266 priv->rx_handlers[REPLY_SCAN_CMD] = iwl_legacy_rx_reply_scan;
267 priv->rx_handlers[SCAN_START_NOTIFICATION] =
268 iwl_legacy_rx_scan_start_notif;
269 priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
270 iwl_legacy_rx_scan_results_notif;
271 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
272 iwl_legacy_rx_scan_complete_notif;
273}
274EXPORT_SYMBOL(iwl_legacy_setup_rx_scan_handlers);
275
276inline u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
277 enum ieee80211_band band,
278 u8 n_probes)
279{
280 if (band == IEEE80211_BAND_5GHZ)
281 return IWL_ACTIVE_DWELL_TIME_52 +
282 IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
283 else
284 return IWL_ACTIVE_DWELL_TIME_24 +
285 IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
286}
287EXPORT_SYMBOL(iwl_legacy_get_active_dwell_time);
288
/*
 * iwl_legacy_get_passive_dwell_time - per-channel passive dwell time (msec)
 *
 * Starts from a band-specific default (base dwell plus per-band extension)
 * and, when associated, clamps it so scanning does not make us miss beacons.
 */
u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
			       enum ieee80211_band band,
			       struct ieee80211_vif *vif)
{
	struct iwl_rxon_context *ctx;
	u16 passive = (band == IEEE80211_BAND_2GHZ) ?
	    IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
	    IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;

	if (iwl_legacy_is_any_associated(priv)) {
		/*
		 * If we're associated, we clamp the maximum passive
		 * dwell time to be 98% of the smallest beacon interval
		 * (minus 2 * channel tune time)
		 */
		for_each_context(priv, ctx) {
			u16 value;

			if (!iwl_legacy_is_associated_ctx(ctx))
				continue;
			/* Unknown (0) or over-large beacon intervals fall
			 * back to the base dwell before clamping. */
			value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
			if ((value > IWL_PASSIVE_DWELL_BASE) || !value)
				value = IWL_PASSIVE_DWELL_BASE;
			/* NOTE(review): for very small beacon intervals
			 * (98% of value below ~11) this u16 subtraction
			 * would wrap; presumably beacon_int is always large
			 * enough — confirm against mac80211 limits. */
			value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
			passive = min(value, passive);
		}
	}

	return passive;
}
EXPORT_SYMBOL(iwl_legacy_get_passive_dwell_time);
320
321void iwl_legacy_init_scan_params(struct iwl_priv *priv)
322{
323 u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
324 if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
325 priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
326 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
327 priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
328}
329EXPORT_SYMBOL(iwl_legacy_init_scan_params);
330
331static int iwl_legacy_scan_initiate(struct iwl_priv *priv,
332 struct ieee80211_vif *vif)
333{
334 int ret;
335
336 lockdep_assert_held(&priv->mutex);
337
338 if (WARN_ON(!priv->cfg->ops->utils->request_scan))
339 return -EOPNOTSUPP;
340
341 cancel_delayed_work(&priv->scan_check);
342
343 if (!iwl_legacy_is_ready_rf(priv)) {
344 IWL_WARN(priv, "Request scan called when driver not ready.\n");
345 return -EIO;
346 }
347
348 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
349 IWL_DEBUG_SCAN(priv,
350 "Multiple concurrent scan requests in parallel.\n");
351 return -EBUSY;
352 }
353
354 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
355 IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
356 return -EBUSY;
357 }
358
359 IWL_DEBUG_SCAN(priv, "Starting scan...\n");
360
361 set_bit(STATUS_SCANNING, &priv->status);
362 priv->scan_start = jiffies;
363
364 ret = priv->cfg->ops->utils->request_scan(priv, vif);
365 if (ret) {
366 clear_bit(STATUS_SCANNING, &priv->status);
367 return ret;
368 }
369
370 queue_delayed_work(priv->workqueue, &priv->scan_check,
371 IWL_SCAN_CHECK_WATCHDOG);
372
373 return 0;
374}
375
376int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
377 struct ieee80211_vif *vif,
378 struct cfg80211_scan_request *req)
379{
380 struct iwl_priv *priv = hw->priv;
381 int ret;
382
383 IWL_DEBUG_MAC80211(priv, "enter\n");
384
385 if (req->n_channels == 0)
386 return -EINVAL;
387
388 mutex_lock(&priv->mutex);
389
390 if (test_bit(STATUS_SCANNING, &priv->status)) {
391 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
392 ret = -EAGAIN;
393 goto out_unlock;
394 }
395
396 /* mac80211 will only ask for one band at a time */
397 priv->scan_request = req;
398 priv->scan_vif = vif;
399 priv->scan_band = req->channels[0]->band;
400
401 ret = iwl_legacy_scan_initiate(priv, vif);
402
403 IWL_DEBUG_MAC80211(priv, "leave\n");
404
405out_unlock:
406 mutex_unlock(&priv->mutex);
407
408 return ret;
409}
410EXPORT_SYMBOL(iwl_legacy_mac_hw_scan);
411
412static void iwl_legacy_bg_scan_check(struct work_struct *data)
413{
414 struct iwl_priv *priv =
415 container_of(data, struct iwl_priv, scan_check.work);
416
417 IWL_DEBUG_SCAN(priv, "Scan check work\n");
418
419 /* Since we are here firmware does not finish scan and
420 * most likely is in bad shape, so we don't bother to
421 * send abort command, just force scan complete to mac80211 */
422 mutex_lock(&priv->mutex);
423 iwl_legacy_force_scan_end(priv);
424 mutex_unlock(&priv->mutex);
425}
426
427/**
428 * iwl_legacy_fill_probe_req - fill in all required fields and IE for probe request
429 */
430
431u16
432iwl_legacy_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
433 const u8 *ta, const u8 *ies, int ie_len, int left)
434{
435 int len = 0;
436 u8 *pos = NULL;
437
438 /* Make sure there is enough space for the probe request,
439 * two mandatory IEs and the data */
440 left -= 24;
441 if (left < 0)
442 return 0;
443
444 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
445 memcpy(frame->da, iwlegacy_bcast_addr, ETH_ALEN);
446 memcpy(frame->sa, ta, ETH_ALEN);
447 memcpy(frame->bssid, iwlegacy_bcast_addr, ETH_ALEN);
448 frame->seq_ctrl = 0;
449
450 len += 24;
451
452 /* ...next IE... */
453 pos = &frame->u.probe_req.variable[0];
454
455 /* fill in our indirect SSID IE */
456 left -= 2;
457 if (left < 0)
458 return 0;
459 *pos++ = WLAN_EID_SSID;
460 *pos++ = 0;
461
462 len += 2;
463
464 if (WARN_ON(left < ie_len))
465 return len;
466
467 if (ies && ie_len) {
468 memcpy(pos, ies, ie_len);
469 len += ie_len;
470 }
471
472 return (u16)len;
473}
474EXPORT_SYMBOL(iwl_legacy_fill_probe_req);
475
476static void iwl_legacy_bg_abort_scan(struct work_struct *work)
477{
478 struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
479
480 IWL_DEBUG_SCAN(priv, "Abort scan work\n");
481
482 /* We keep scan_check work queued in case when firmware will not
483 * report back scan completed notification */
484 mutex_lock(&priv->mutex);
485 iwl_legacy_scan_cancel_timeout(priv, 200);
486 mutex_unlock(&priv->mutex);
487}
488
/*
 * Worker: finish scan handling in process context after the uCode reported
 * (or the driver forced) scan completion: notify mac80211, then apply power
 * and TX-power settings that were deferred while the scan was pending.
 */
static void iwl_legacy_bg_scan_completed(struct work_struct *work)
{
	struct iwl_priv *priv =
	    container_of(work, struct iwl_priv, scan_completed);
	bool aborted;

	IWL_DEBUG_SCAN(priv, "Completed scan.\n");

	/* The watchdog is no longer needed once completion is handled. */
	cancel_delayed_work(&priv->scan_check);

	mutex_lock(&priv->mutex);

	aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
	if (aborted)
		IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");

	/* The scan may already have been completed towards mac80211,
	 * e.g. by iwl_legacy_force_scan_end(); only the settings part
	 * below still needs to run in that case. */
	if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
		IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
		goto out_settings;
	}

	iwl_legacy_complete_scan(priv, aborted);

out_settings:
	/* Can we still talk to firmware ? */
	if (!iwl_legacy_is_ready_rf(priv))
		goto out;

	/*
	 * We do not commit power settings while scan is pending,
	 * do it now if the settings changed.
	 */
	iwl_legacy_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);
	iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);

	priv->cfg->ops->utils->post_scan(priv);

out:
	mutex_unlock(&priv->mutex);
}
529
530void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv)
531{
532 INIT_WORK(&priv->scan_completed, iwl_legacy_bg_scan_completed);
533 INIT_WORK(&priv->abort_scan, iwl_legacy_bg_abort_scan);
534 INIT_DELAYED_WORK(&priv->scan_check, iwl_legacy_bg_scan_check);
535}
536EXPORT_SYMBOL(iwl_legacy_setup_scan_deferred_work);
537
538void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv)
539{
540 cancel_work_sync(&priv->abort_scan);
541 cancel_work_sync(&priv->scan_completed);
542
543 if (cancel_delayed_work_sync(&priv->scan_check)) {
544 mutex_lock(&priv->mutex);
545 iwl_legacy_force_scan_end(priv);
546 mutex_unlock(&priv->mutex);
547 }
548}
549EXPORT_SYMBOL(iwl_legacy_cancel_scan_deferred_work);
diff --git a/drivers/net/wireless/iwlegacy/iwl-spectrum.h b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
index 9f70a4723103..85fe48e520f9 100644
--- a/drivers/net/wireless/iwlegacy/iwl-spectrum.h
+++ b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
@@ -26,8 +26,8 @@
26 * 26 *
27 *****************************************************************************/ 27 *****************************************************************************/
28 28
29#ifndef __iwl_legacy_spectrum_h__ 29#ifndef __il_spectrum_h__
30#define __iwl_legacy_spectrum_h__ 30#define __il_spectrum_h__
31enum { /* ieee80211_basic_report.map */ 31enum { /* ieee80211_basic_report.map */
32 IEEE80211_BASIC_MAP_BSS = (1 << 0), 32 IEEE80211_BASIC_MAP_BSS = (1 << 0),
33 IEEE80211_BASIC_MAP_OFDM = (1 << 1), 33 IEEE80211_BASIC_MAP_OFDM = (1 << 1),
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.c b/drivers/net/wireless/iwlegacy/iwl-sta.c
index 66f0fb2bbe00..58762e795f36 100644
--- a/drivers/net/wireless/iwlegacy/iwl-sta.c
+++ b/drivers/net/wireless/iwlegacy/iwl-sta.c
@@ -36,76 +36,76 @@
36#include "iwl-core.h" 36#include "iwl-core.h"
37#include "iwl-sta.h" 37#include "iwl-sta.h"
38 38
39/* priv->sta_lock must be held */ 39/* il->sta_lock must be held */
40static void iwl_legacy_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id) 40static void il_sta_ucode_activate(struct il_priv *il, u8 sta_id)
41{ 41{
42 42
43 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) 43 if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE))
44 IWL_ERR(priv, 44 IL_ERR(
45 "ACTIVATE a non DRIVER active station id %u addr %pM\n", 45 "ACTIVATE a non DRIVER active station id %u addr %pM\n",
46 sta_id, priv->stations[sta_id].sta.sta.addr); 46 sta_id, il->stations[sta_id].sta.sta.addr);
47 47
48 if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) { 48 if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) {
49 IWL_DEBUG_ASSOC(priv, 49 D_ASSOC(
50 "STA id %u addr %pM already present" 50 "STA id %u addr %pM already present"
51 " in uCode (according to driver)\n", 51 " in uCode (according to driver)\n",
52 sta_id, priv->stations[sta_id].sta.sta.addr); 52 sta_id, il->stations[sta_id].sta.sta.addr);
53 } else { 53 } else {
54 priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE; 54 il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE;
55 IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n", 55 D_ASSOC("Added STA id %u addr %pM to uCode\n",
56 sta_id, priv->stations[sta_id].sta.sta.addr); 56 sta_id, il->stations[sta_id].sta.sta.addr);
57 } 57 }
58} 58}
59 59
60static int iwl_legacy_process_add_sta_resp(struct iwl_priv *priv, 60static int il_process_add_sta_resp(struct il_priv *il,
61 struct iwl_legacy_addsta_cmd *addsta, 61 struct il_addsta_cmd *addsta,
62 struct iwl_rx_packet *pkt, 62 struct il_rx_pkt *pkt,
63 bool sync) 63 bool sync)
64{ 64{
65 u8 sta_id = addsta->sta.sta_id; 65 u8 sta_id = addsta->sta.sta_id;
66 unsigned long flags; 66 unsigned long flags;
67 int ret = -EIO; 67 int ret = -EIO;
68 68
69 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { 69 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
70 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n", 70 IL_ERR("Bad return from C_ADD_STA (0x%08X)\n",
71 pkt->hdr.flags); 71 pkt->hdr.flags);
72 return ret; 72 return ret;
73 } 73 }
74 74
75 IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n", 75 D_INFO("Processing response for adding station %u\n",
76 sta_id); 76 sta_id);
77 77
78 spin_lock_irqsave(&priv->sta_lock, flags); 78 spin_lock_irqsave(&il->sta_lock, flags);
79 79
80 switch (pkt->u.add_sta.status) { 80 switch (pkt->u.add_sta.status) {
81 case ADD_STA_SUCCESS_MSK: 81 case ADD_STA_SUCCESS_MSK:
82 IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n"); 82 D_INFO("C_ADD_STA PASSED\n");
83 iwl_legacy_sta_ucode_activate(priv, sta_id); 83 il_sta_ucode_activate(il, sta_id);
84 ret = 0; 84 ret = 0;
85 break; 85 break;
86 case ADD_STA_NO_ROOM_IN_TABLE: 86 case ADD_STA_NO_ROOM_IN_TBL:
87 IWL_ERR(priv, "Adding station %d failed, no room in table.\n", 87 IL_ERR("Adding station %d failed, no room in table.\n",
88 sta_id); 88 sta_id);
89 break; 89 break;
90 case ADD_STA_NO_BLOCK_ACK_RESOURCE: 90 case ADD_STA_NO_BLOCK_ACK_RESOURCE:
91 IWL_ERR(priv, 91 IL_ERR(
92 "Adding station %d failed, no block ack resource.\n", 92 "Adding station %d failed, no block ack resource.\n",
93 sta_id); 93 sta_id);
94 break; 94 break;
95 case ADD_STA_MODIFY_NON_EXIST_STA: 95 case ADD_STA_MODIFY_NON_EXIST_STA:
96 IWL_ERR(priv, "Attempting to modify non-existing station %d\n", 96 IL_ERR("Attempting to modify non-existing station %d\n",
97 sta_id); 97 sta_id);
98 break; 98 break;
99 default: 99 default:
100 IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n", 100 D_ASSOC("Received C_ADD_STA:(0x%08X)\n",
101 pkt->u.add_sta.status); 101 pkt->u.add_sta.status);
102 break; 102 break;
103 } 103 }
104 104
105 IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n", 105 D_INFO("%s station id %u addr %pM\n",
106 priv->stations[sta_id].sta.mode == 106 il->stations[sta_id].sta.mode ==
107 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", 107 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
108 sta_id, priv->stations[sta_id].sta.sta.addr); 108 sta_id, il->stations[sta_id].sta.sta.addr);
109 109
110 /* 110 /*
111 * XXX: The MAC address in the command buffer is often changed from 111 * XXX: The MAC address in the command buffer is often changed from
@@ -115,68 +115,68 @@ static int iwl_legacy_process_add_sta_resp(struct iwl_priv *priv,
115 * issue has not yet been resolved and this debugging is left to 115 * issue has not yet been resolved and this debugging is left to
116 * observe the problem. 116 * observe the problem.
117 */ 117 */
118 IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n", 118 D_INFO("%s station according to cmd buffer %pM\n",
119 priv->stations[sta_id].sta.mode == 119 il->stations[sta_id].sta.mode ==
120 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", 120 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
121 addsta->sta.addr); 121 addsta->sta.addr);
122 spin_unlock_irqrestore(&priv->sta_lock, flags); 122 spin_unlock_irqrestore(&il->sta_lock, flags);
123 123
124 return ret; 124 return ret;
125} 125}
126 126
127static void iwl_legacy_add_sta_callback(struct iwl_priv *priv, 127static void il_add_sta_callback(struct il_priv *il,
128 struct iwl_device_cmd *cmd, 128 struct il_device_cmd *cmd,
129 struct iwl_rx_packet *pkt) 129 struct il_rx_pkt *pkt)
130{ 130{
131 struct iwl_legacy_addsta_cmd *addsta = 131 struct il_addsta_cmd *addsta =
132 (struct iwl_legacy_addsta_cmd *)cmd->cmd.payload; 132 (struct il_addsta_cmd *)cmd->cmd.payload;
133 133
134 iwl_legacy_process_add_sta_resp(priv, addsta, pkt, false); 134 il_process_add_sta_resp(il, addsta, pkt, false);
135 135
136} 136}
137 137
138int iwl_legacy_send_add_sta(struct iwl_priv *priv, 138int il_send_add_sta(struct il_priv *il,
139 struct iwl_legacy_addsta_cmd *sta, u8 flags) 139 struct il_addsta_cmd *sta, u8 flags)
140{ 140{
141 struct iwl_rx_packet *pkt = NULL; 141 struct il_rx_pkt *pkt = NULL;
142 int ret = 0; 142 int ret = 0;
143 u8 data[sizeof(*sta)]; 143 u8 data[sizeof(*sta)];
144 struct iwl_host_cmd cmd = { 144 struct il_host_cmd cmd = {
145 .id = REPLY_ADD_STA, 145 .id = C_ADD_STA,
146 .flags = flags, 146 .flags = flags,
147 .data = data, 147 .data = data,
148 }; 148 };
149 u8 sta_id __maybe_unused = sta->sta.sta_id; 149 u8 sta_id __maybe_unused = sta->sta.sta_id;
150 150
151 IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n", 151 D_INFO("Adding sta %u (%pM) %ssynchronously\n",
152 sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : ""); 152 sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
153 153
154 if (flags & CMD_ASYNC) 154 if (flags & CMD_ASYNC)
155 cmd.callback = iwl_legacy_add_sta_callback; 155 cmd.callback = il_add_sta_callback;
156 else { 156 else {
157 cmd.flags |= CMD_WANT_SKB; 157 cmd.flags |= CMD_WANT_SKB;
158 might_sleep(); 158 might_sleep();
159 } 159 }
160 160
161 cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data); 161 cmd.len = il->cfg->ops->utils->build_addsta_hcmd(sta, data);
162 ret = iwl_legacy_send_cmd(priv, &cmd); 162 ret = il_send_cmd(il, &cmd);
163 163
164 if (ret || (flags & CMD_ASYNC)) 164 if (ret || (flags & CMD_ASYNC))
165 return ret; 165 return ret;
166 166
167 if (ret == 0) { 167 if (ret == 0) {
168 pkt = (struct iwl_rx_packet *)cmd.reply_page; 168 pkt = (struct il_rx_pkt *)cmd.reply_page;
169 ret = iwl_legacy_process_add_sta_resp(priv, sta, pkt, true); 169 ret = il_process_add_sta_resp(il, sta, pkt, true);
170 } 170 }
171 iwl_legacy_free_pages(priv, cmd.reply_page); 171 il_free_pages(il, cmd.reply_page);
172 172
173 return ret; 173 return ret;
174} 174}
175EXPORT_SYMBOL(iwl_legacy_send_add_sta); 175EXPORT_SYMBOL(il_send_add_sta);
176 176
177static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index, 177static void il_set_ht_add_station(struct il_priv *il, u8 idx,
178 struct ieee80211_sta *sta, 178 struct ieee80211_sta *sta,
179 struct iwl_rxon_context *ctx) 179 struct il_rxon_context *ctx)
180{ 180{
181 struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap; 181 struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
182 __le32 sta_flags; 182 __le32 sta_flags;
@@ -186,13 +186,13 @@ static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
186 goto done; 186 goto done;
187 187
188 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2; 188 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
189 IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n", 189 D_ASSOC("spatial multiplexing power save mode: %s\n",
190 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? 190 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
191 "static" : 191 "static" :
192 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? 192 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
193 "dynamic" : "disabled"); 193 "dynamic" : "disabled");
194 194
195 sta_flags = priv->stations[index].sta.station_flags; 195 sta_flags = il->stations[idx].sta.station_flags;
196 196
197 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK); 197 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
198 198
@@ -206,7 +206,7 @@ static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
206 case WLAN_HT_CAP_SM_PS_DISABLED: 206 case WLAN_HT_CAP_SM_PS_DISABLED:
207 break; 207 break;
208 default: 208 default:
209 IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode); 209 IL_WARN("Invalid MIMO PS mode %d\n", mimo_ps_mode);
210 break; 210 break;
211 } 211 }
212 212
@@ -216,27 +216,27 @@ static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
216 sta_flags |= cpu_to_le32( 216 sta_flags |= cpu_to_le32(
217 (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS); 217 (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
218 218
219 if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap)) 219 if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
220 sta_flags |= STA_FLG_HT40_EN_MSK; 220 sta_flags |= STA_FLG_HT40_EN_MSK;
221 else 221 else
222 sta_flags &= ~STA_FLG_HT40_EN_MSK; 222 sta_flags &= ~STA_FLG_HT40_EN_MSK;
223 223
224 priv->stations[index].sta.station_flags = sta_flags; 224 il->stations[idx].sta.station_flags = sta_flags;
225 done: 225 done:
226 return; 226 return;
227} 227}
228 228
229/** 229/**
230 * iwl_legacy_prep_station - Prepare station information for addition 230 * il_prep_station - Prepare station information for addition
231 * 231 *
232 * should be called with sta_lock held 232 * should be called with sta_lock held
233 */ 233 */
234u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, 234u8 il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
235 const u8 *addr, bool is_ap, struct ieee80211_sta *sta) 235 const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
236{ 236{
237 struct iwl_station_entry *station; 237 struct il_station_entry *station;
238 int i; 238 int i;
239 u8 sta_id = IWL_INVALID_STATION; 239 u8 sta_id = IL_INVALID_STATION;
240 u16 rate; 240 u16 rate;
241 241
242 if (is_ap) 242 if (is_ap)
@@ -244,15 +244,15 @@ u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
244 else if (is_broadcast_ether_addr(addr)) 244 else if (is_broadcast_ether_addr(addr))
245 sta_id = ctx->bcast_sta_id; 245 sta_id = ctx->bcast_sta_id;
246 else 246 else
247 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) { 247 for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
248 if (!compare_ether_addr(priv->stations[i].sta.sta.addr, 248 if (!compare_ether_addr(il->stations[i].sta.sta.addr,
249 addr)) { 249 addr)) {
250 sta_id = i; 250 sta_id = i;
251 break; 251 break;
252 } 252 }
253 253
254 if (!priv->stations[i].used && 254 if (!il->stations[i].used &&
255 sta_id == IWL_INVALID_STATION) 255 sta_id == IL_INVALID_STATION)
256 sta_id = i; 256 sta_id = i;
257 } 257 }
258 258
@@ -260,7 +260,7 @@ u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
260 * These two conditions have the same outcome, but keep them 260 * These two conditions have the same outcome, but keep them
261 * separate 261 * separate
262 */ 262 */
263 if (unlikely(sta_id == IWL_INVALID_STATION)) 263 if (unlikely(sta_id == IL_INVALID_STATION))
264 return sta_id; 264 return sta_id;
265 265
266 /* 266 /*
@@ -268,30 +268,30 @@ u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
268 * station. Keep track if one is in progress so that we do not send 268 * station. Keep track if one is in progress so that we do not send
269 * another. 269 * another.
270 */ 270 */
271 if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) { 271 if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
272 IWL_DEBUG_INFO(priv, 272 D_INFO(
273 "STA %d already in process of being added.\n", 273 "STA %d already in process of being added.\n",
274 sta_id); 274 sta_id);
275 return sta_id; 275 return sta_id;
276 } 276 }
277 277
278 if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) && 278 if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
279 (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) && 279 (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) &&
280 !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) { 280 !compare_ether_addr(il->stations[sta_id].sta.sta.addr, addr)) {
281 IWL_DEBUG_ASSOC(priv, 281 D_ASSOC(
282 "STA %d (%pM) already added, not adding again.\n", 282 "STA %d (%pM) already added, not adding again.\n",
283 sta_id, addr); 283 sta_id, addr);
284 return sta_id; 284 return sta_id;
285 } 285 }
286 286
287 station = &priv->stations[sta_id]; 287 station = &il->stations[sta_id];
288 station->used = IWL_STA_DRIVER_ACTIVE; 288 station->used = IL_STA_DRIVER_ACTIVE;
289 IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n", 289 D_ASSOC("Add STA to driver ID %d: %pM\n",
290 sta_id, addr); 290 sta_id, addr);
291 priv->num_stations++; 291 il->num_stations++;
292 292
293 /* Set up the REPLY_ADD_STA command to send to device */ 293 /* Set up the C_ADD_STA command to send to device */
294 memset(&station->sta, 0, sizeof(struct iwl_legacy_addsta_cmd)); 294 memset(&station->sta, 0, sizeof(struct il_addsta_cmd));
295 memcpy(station->sta.sta.addr, addr, ETH_ALEN); 295 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
296 station->sta.mode = 0; 296 station->sta.mode = 0;
297 station->sta.sta.sta_id = sta_id; 297 station->sta.sta.sta_id = sta_id;
@@ -299,7 +299,7 @@ u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
299 station->ctxid = ctx->ctxid; 299 station->ctxid = ctx->ctxid;
300 300
301 if (sta) { 301 if (sta) {
302 struct iwl_station_priv_common *sta_priv; 302 struct il_station_priv_common *sta_priv;
303 303
304 sta_priv = (void *)sta->drv_priv; 304 sta_priv = (void *)sta->drv_priv;
305 sta_priv->ctx = ctx; 305 sta_priv->ctx = ctx;
@@ -310,42 +310,42 @@ u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
310 * STA and broadcast STA) pass in a NULL sta, and mac80211 310 * STA and broadcast STA) pass in a NULL sta, and mac80211
311 * doesn't allow HT IBSS. 311 * doesn't allow HT IBSS.
312 */ 312 */
313 iwl_legacy_set_ht_add_station(priv, sta_id, sta, ctx); 313 il_set_ht_add_station(il, sta_id, sta, ctx);
314 314
315 /* 3945 only */ 315 /* 3945 only */
316 rate = (priv->band == IEEE80211_BAND_5GHZ) ? 316 rate = (il->band == IEEE80211_BAND_5GHZ) ?
317 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP; 317 RATE_6M_PLCP : RATE_1M_PLCP;
318 /* Turn on both antennas for the station... */ 318 /* Turn on both antennas for the station... */
319 station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK); 319 station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
320 320
321 return sta_id; 321 return sta_id;
322 322
323} 323}
324EXPORT_SYMBOL_GPL(iwl_legacy_prep_station); 324EXPORT_SYMBOL_GPL(il_prep_station);
325 325
326#define STA_WAIT_TIMEOUT (HZ/2) 326#define STA_WAIT_TIMEOUT (HZ/2)
327 327
328/** 328/**
329 * iwl_legacy_add_station_common - 329 * il_add_station_common -
330 */ 330 */
331int 331int
332iwl_legacy_add_station_common(struct iwl_priv *priv, 332il_add_station_common(struct il_priv *il,
333 struct iwl_rxon_context *ctx, 333 struct il_rxon_context *ctx,
334 const u8 *addr, bool is_ap, 334 const u8 *addr, bool is_ap,
335 struct ieee80211_sta *sta, u8 *sta_id_r) 335 struct ieee80211_sta *sta, u8 *sta_id_r)
336{ 336{
337 unsigned long flags_spin; 337 unsigned long flags_spin;
338 int ret = 0; 338 int ret = 0;
339 u8 sta_id; 339 u8 sta_id;
340 struct iwl_legacy_addsta_cmd sta_cmd; 340 struct il_addsta_cmd sta_cmd;
341 341
342 *sta_id_r = 0; 342 *sta_id_r = 0;
343 spin_lock_irqsave(&priv->sta_lock, flags_spin); 343 spin_lock_irqsave(&il->sta_lock, flags_spin);
344 sta_id = iwl_legacy_prep_station(priv, ctx, addr, is_ap, sta); 344 sta_id = il_prep_station(il, ctx, addr, is_ap, sta);
345 if (sta_id == IWL_INVALID_STATION) { 345 if (sta_id == IL_INVALID_STATION) {
346 IWL_ERR(priv, "Unable to prepare station %pM for addition\n", 346 IL_ERR("Unable to prepare station %pM for addition\n",
347 addr); 347 addr);
348 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 348 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
349 return -EINVAL; 349 return -EINVAL;
350 } 350 }
351 351
@@ -354,75 +354,75 @@ iwl_legacy_add_station_common(struct iwl_priv *priv,
354 * station. Keep track if one is in progress so that we do not send 354 * station. Keep track if one is in progress so that we do not send
355 * another. 355 * another.
356 */ 356 */
357 if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) { 357 if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
358 IWL_DEBUG_INFO(priv, 358 D_INFO(
359 "STA %d already in process of being added.\n", 359 "STA %d already in process of being added.\n",
360 sta_id); 360 sta_id);
361 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 361 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
362 return -EEXIST; 362 return -EEXIST;
363 } 363 }
364 364
365 if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) && 365 if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
366 (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) { 366 (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
367 IWL_DEBUG_ASSOC(priv, 367 D_ASSOC(
368 "STA %d (%pM) already added, not adding again.\n", 368 "STA %d (%pM) already added, not adding again.\n",
369 sta_id, addr); 369 sta_id, addr);
370 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 370 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
371 return -EEXIST; 371 return -EEXIST;
372 } 372 }
373 373
374 priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS; 374 il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS;
375 memcpy(&sta_cmd, &priv->stations[sta_id].sta, 375 memcpy(&sta_cmd, &il->stations[sta_id].sta,
376 sizeof(struct iwl_legacy_addsta_cmd)); 376 sizeof(struct il_addsta_cmd));
377 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 377 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
378 378
379 /* Add station to device's station table */ 379 /* Add station to device's station table */
380 ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); 380 ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
381 if (ret) { 381 if (ret) {
382 spin_lock_irqsave(&priv->sta_lock, flags_spin); 382 spin_lock_irqsave(&il->sta_lock, flags_spin);
383 IWL_ERR(priv, "Adding station %pM failed.\n", 383 IL_ERR("Adding station %pM failed.\n",
384 priv->stations[sta_id].sta.sta.addr); 384 il->stations[sta_id].sta.sta.addr);
385 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; 385 il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
386 priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; 386 il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
387 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 387 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
388 } 388 }
389 *sta_id_r = sta_id; 389 *sta_id_r = sta_id;
390 return ret; 390 return ret;
391} 391}
392EXPORT_SYMBOL(iwl_legacy_add_station_common); 392EXPORT_SYMBOL(il_add_station_common);
393 393
394/** 394/**
395 * iwl_legacy_sta_ucode_deactivate - deactivate ucode status for a station 395 * il_sta_ucode_deactivate - deactivate ucode status for a station
396 * 396 *
397 * priv->sta_lock must be held 397 * il->sta_lock must be held
398 */ 398 */
399static void iwl_legacy_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id) 399static void il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id)
400{ 400{
401 /* Ucode must be active and driver must be non active */ 401 /* Ucode must be active and driver must be non active */
402 if ((priv->stations[sta_id].used & 402 if ((il->stations[sta_id].used &
403 (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) != 403 (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) !=
404 IWL_STA_UCODE_ACTIVE) 404 IL_STA_UCODE_ACTIVE)
405 IWL_ERR(priv, "removed non active STA %u\n", sta_id); 405 IL_ERR("removed non active STA %u\n", sta_id);
406 406
407 priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE; 407 il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE;
408 408
409 memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry)); 409 memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry));
410 IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id); 410 D_ASSOC("Removed STA %u\n", sta_id);
411} 411}
412 412
413static int iwl_legacy_send_remove_station(struct iwl_priv *priv, 413static int il_send_remove_station(struct il_priv *il,
414 const u8 *addr, int sta_id, 414 const u8 *addr, int sta_id,
415 bool temporary) 415 bool temporary)
416{ 416{
417 struct iwl_rx_packet *pkt; 417 struct il_rx_pkt *pkt;
418 int ret; 418 int ret;
419 419
420 unsigned long flags_spin; 420 unsigned long flags_spin;
421 struct iwl_rem_sta_cmd rm_sta_cmd; 421 struct il_rem_sta_cmd rm_sta_cmd;
422 422
423 struct iwl_host_cmd cmd = { 423 struct il_host_cmd cmd = {
424 .id = REPLY_REMOVE_STA, 424 .id = C_REM_STA,
425 .len = sizeof(struct iwl_rem_sta_cmd), 425 .len = sizeof(struct il_rem_sta_cmd),
426 .flags = CMD_SYNC, 426 .flags = CMD_SYNC,
427 .data = &rm_sta_cmd, 427 .data = &rm_sta_cmd,
428 }; 428 };
@@ -433,14 +433,14 @@ static int iwl_legacy_send_remove_station(struct iwl_priv *priv,
433 433
434 cmd.flags |= CMD_WANT_SKB; 434 cmd.flags |= CMD_WANT_SKB;
435 435
436 ret = iwl_legacy_send_cmd(priv, &cmd); 436 ret = il_send_cmd(il, &cmd);
437 437
438 if (ret) 438 if (ret)
439 return ret; 439 return ret;
440 440
441 pkt = (struct iwl_rx_packet *)cmd.reply_page; 441 pkt = (struct il_rx_pkt *)cmd.reply_page;
442 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { 442 if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
443 IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n", 443 IL_ERR("Bad return from C_REM_STA (0x%08X)\n",
444 pkt->hdr.flags); 444 pkt->hdr.flags);
445 ret = -EIO; 445 ret = -EIO;
446 } 446 }
@@ -449,34 +449,34 @@ static int iwl_legacy_send_remove_station(struct iwl_priv *priv,
449 switch (pkt->u.rem_sta.status) { 449 switch (pkt->u.rem_sta.status) {
450 case REM_STA_SUCCESS_MSK: 450 case REM_STA_SUCCESS_MSK:
451 if (!temporary) { 451 if (!temporary) {
452 spin_lock_irqsave(&priv->sta_lock, flags_spin); 452 spin_lock_irqsave(&il->sta_lock, flags_spin);
453 iwl_legacy_sta_ucode_deactivate(priv, sta_id); 453 il_sta_ucode_deactivate(il, sta_id);
454 spin_unlock_irqrestore(&priv->sta_lock, 454 spin_unlock_irqrestore(&il->sta_lock,
455 flags_spin); 455 flags_spin);
456 } 456 }
457 IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n"); 457 D_ASSOC("C_REM_STA PASSED\n");
458 break; 458 break;
459 default: 459 default:
460 ret = -EIO; 460 ret = -EIO;
461 IWL_ERR(priv, "REPLY_REMOVE_STA failed\n"); 461 IL_ERR("C_REM_STA failed\n");
462 break; 462 break;
463 } 463 }
464 } 464 }
465 iwl_legacy_free_pages(priv, cmd.reply_page); 465 il_free_pages(il, cmd.reply_page);
466 466
467 return ret; 467 return ret;
468} 468}
469 469
470/** 470/**
471 * iwl_legacy_remove_station - Remove driver's knowledge of station. 471 * il_remove_station - Remove driver's knowledge of station.
472 */ 472 */
473int iwl_legacy_remove_station(struct iwl_priv *priv, const u8 sta_id, 473int il_remove_station(struct il_priv *il, const u8 sta_id,
474 const u8 *addr) 474 const u8 *addr)
475{ 475{
476 unsigned long flags; 476 unsigned long flags;
477 477
478 if (!iwl_legacy_is_ready(priv)) { 478 if (!il_is_ready(il)) {
479 IWL_DEBUG_INFO(priv, 479 D_INFO(
480 "Unable to remove station %pM, device not ready.\n", 480 "Unable to remove station %pM, device not ready.\n",
481 addr); 481 addr);
482 /* 482 /*
@@ -487,85 +487,85 @@ int iwl_legacy_remove_station(struct iwl_priv *priv, const u8 sta_id,
487 return 0; 487 return 0;
488 } 488 }
489 489
490 IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n", 490 D_ASSOC("Removing STA from driver:%d %pM\n",
491 sta_id, addr); 491 sta_id, addr);
492 492
493 if (WARN_ON(sta_id == IWL_INVALID_STATION)) 493 if (WARN_ON(sta_id == IL_INVALID_STATION))
494 return -EINVAL; 494 return -EINVAL;
495 495
496 spin_lock_irqsave(&priv->sta_lock, flags); 496 spin_lock_irqsave(&il->sta_lock, flags);
497 497
498 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) { 498 if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) {
499 IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n", 499 D_INFO("Removing %pM but non DRIVER active\n",
500 addr); 500 addr);
501 goto out_err; 501 goto out_err;
502 } 502 }
503 503
504 if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) { 504 if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
505 IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n", 505 D_INFO("Removing %pM but non UCODE active\n",
506 addr); 506 addr);
507 goto out_err; 507 goto out_err;
508 } 508 }
509 509
510 if (priv->stations[sta_id].used & IWL_STA_LOCAL) { 510 if (il->stations[sta_id].used & IL_STA_LOCAL) {
511 kfree(priv->stations[sta_id].lq); 511 kfree(il->stations[sta_id].lq);
512 priv->stations[sta_id].lq = NULL; 512 il->stations[sta_id].lq = NULL;
513 } 513 }
514 514
515 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; 515 il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
516 516
517 priv->num_stations--; 517 il->num_stations--;
518 518
519 BUG_ON(priv->num_stations < 0); 519 BUG_ON(il->num_stations < 0);
520 520
521 spin_unlock_irqrestore(&priv->sta_lock, flags); 521 spin_unlock_irqrestore(&il->sta_lock, flags);
522 522
523 return iwl_legacy_send_remove_station(priv, addr, sta_id, false); 523 return il_send_remove_station(il, addr, sta_id, false);
524out_err: 524out_err:
525 spin_unlock_irqrestore(&priv->sta_lock, flags); 525 spin_unlock_irqrestore(&il->sta_lock, flags);
526 return -EINVAL; 526 return -EINVAL;
527} 527}
528EXPORT_SYMBOL_GPL(iwl_legacy_remove_station); 528EXPORT_SYMBOL_GPL(il_remove_station);
529 529
530/** 530/**
531 * iwl_legacy_clear_ucode_stations - clear ucode station table bits 531 * il_clear_ucode_stations - clear ucode station table bits
532 * 532 *
533 * This function clears all the bits in the driver indicating 533 * This function clears all the bits in the driver indicating
534 * which stations are active in the ucode. Call when something 534 * which stations are active in the ucode. Call when something
535 * other than explicit station management would cause this in 535 * other than explicit station management would cause this in
536 * the ucode, e.g. unassociated RXON. 536 * the ucode, e.g. unassociated RXON.
537 */ 537 */
538void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv, 538void il_clear_ucode_stations(struct il_priv *il,
539 struct iwl_rxon_context *ctx) 539 struct il_rxon_context *ctx)
540{ 540{
541 int i; 541 int i;
542 unsigned long flags_spin; 542 unsigned long flags_spin;
543 bool cleared = false; 543 bool cleared = false;
544 544
545 IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n"); 545 D_INFO("Clearing ucode stations in driver\n");
546 546
547 spin_lock_irqsave(&priv->sta_lock, flags_spin); 547 spin_lock_irqsave(&il->sta_lock, flags_spin);
548 for (i = 0; i < priv->hw_params.max_stations; i++) { 548 for (i = 0; i < il->hw_params.max_stations; i++) {
549 if (ctx && ctx->ctxid != priv->stations[i].ctxid) 549 if (ctx && ctx->ctxid != il->stations[i].ctxid)
550 continue; 550 continue;
551 551
552 if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) { 552 if (il->stations[i].used & IL_STA_UCODE_ACTIVE) {
553 IWL_DEBUG_INFO(priv, 553 D_INFO(
554 "Clearing ucode active for station %d\n", i); 554 "Clearing ucode active for station %d\n", i);
555 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE; 555 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
556 cleared = true; 556 cleared = true;
557 } 557 }
558 } 558 }
559 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 559 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
560 560
561 if (!cleared) 561 if (!cleared)
562 IWL_DEBUG_INFO(priv, 562 D_INFO(
563 "No active stations found to be cleared\n"); 563 "No active stations found to be cleared\n");
564} 564}
565EXPORT_SYMBOL(iwl_legacy_clear_ucode_stations); 565EXPORT_SYMBOL(il_clear_ucode_stations);
566 566
567/** 567/**
568 * iwl_legacy_restore_stations() - Restore driver known stations to device 568 * il_restore_stations() - Restore driver known stations to device
569 * 569 *
570 * All stations considered active by driver, but not present in ucode, is 570 * All stations considered active by driver, but not present in ucode, is
571 * restored. 571 * restored.
@@ -573,58 +573,58 @@ EXPORT_SYMBOL(iwl_legacy_clear_ucode_stations);
573 * Function sleeps. 573 * Function sleeps.
574 */ 574 */
575void 575void
576iwl_legacy_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx) 576il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx)
577{ 577{
578 struct iwl_legacy_addsta_cmd sta_cmd; 578 struct il_addsta_cmd sta_cmd;
579 struct iwl_link_quality_cmd lq; 579 struct il_link_quality_cmd lq;
580 unsigned long flags_spin; 580 unsigned long flags_spin;
581 int i; 581 int i;
582 bool found = false; 582 bool found = false;
583 int ret; 583 int ret;
584 bool send_lq; 584 bool send_lq;
585 585
586 if (!iwl_legacy_is_ready(priv)) { 586 if (!il_is_ready(il)) {
587 IWL_DEBUG_INFO(priv, 587 D_INFO(
588 "Not ready yet, not restoring any stations.\n"); 588 "Not ready yet, not restoring any stations.\n");
589 return; 589 return;
590 } 590 }
591 591
592 IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n"); 592 D_ASSOC("Restoring all known stations ... start.\n");
593 spin_lock_irqsave(&priv->sta_lock, flags_spin); 593 spin_lock_irqsave(&il->sta_lock, flags_spin);
594 for (i = 0; i < priv->hw_params.max_stations; i++) { 594 for (i = 0; i < il->hw_params.max_stations; i++) {
595 if (ctx->ctxid != priv->stations[i].ctxid) 595 if (ctx->ctxid != il->stations[i].ctxid)
596 continue; 596 continue;
597 if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) && 597 if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) &&
598 !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) { 598 !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) {
599 IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n", 599 D_ASSOC("Restoring sta %pM\n",
600 priv->stations[i].sta.sta.addr); 600 il->stations[i].sta.sta.addr);
601 priv->stations[i].sta.mode = 0; 601 il->stations[i].sta.mode = 0;
602 priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS; 602 il->stations[i].used |= IL_STA_UCODE_INPROGRESS;
603 found = true; 603 found = true;
604 } 604 }
605 } 605 }
606 606
607 for (i = 0; i < priv->hw_params.max_stations; i++) { 607 for (i = 0; i < il->hw_params.max_stations; i++) {
608 if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) { 608 if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) {
609 memcpy(&sta_cmd, &priv->stations[i].sta, 609 memcpy(&sta_cmd, &il->stations[i].sta,
610 sizeof(struct iwl_legacy_addsta_cmd)); 610 sizeof(struct il_addsta_cmd));
611 send_lq = false; 611 send_lq = false;
612 if (priv->stations[i].lq) { 612 if (il->stations[i].lq) {
613 memcpy(&lq, priv->stations[i].lq, 613 memcpy(&lq, il->stations[i].lq,
614 sizeof(struct iwl_link_quality_cmd)); 614 sizeof(struct il_link_quality_cmd));
615 send_lq = true; 615 send_lq = true;
616 } 616 }
617 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 617 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
618 ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); 618 ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
619 if (ret) { 619 if (ret) {
620 spin_lock_irqsave(&priv->sta_lock, flags_spin); 620 spin_lock_irqsave(&il->sta_lock, flags_spin);
621 IWL_ERR(priv, "Adding station %pM failed.\n", 621 IL_ERR("Adding station %pM failed.\n",
622 priv->stations[i].sta.sta.addr); 622 il->stations[i].sta.sta.addr);
623 priv->stations[i].used &= 623 il->stations[i].used &=
624 ~IWL_STA_DRIVER_ACTIVE; 624 ~IL_STA_DRIVER_ACTIVE;
625 priv->stations[i].used &= 625 il->stations[i].used &=
626 ~IWL_STA_UCODE_INPROGRESS; 626 ~IL_STA_UCODE_INPROGRESS;
627 spin_unlock_irqrestore(&priv->sta_lock, 627 spin_unlock_irqrestore(&il->sta_lock,
628 flags_spin); 628 flags_spin);
629 } 629 }
630 /* 630 /*
@@ -632,78 +632,78 @@ iwl_legacy_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
632 * current LQ command 632 * current LQ command
633 */ 633 */
634 if (send_lq) 634 if (send_lq)
635 iwl_legacy_send_lq_cmd(priv, ctx, &lq, 635 il_send_lq_cmd(il, ctx, &lq,
636 CMD_SYNC, true); 636 CMD_SYNC, true);
637 spin_lock_irqsave(&priv->sta_lock, flags_spin); 637 spin_lock_irqsave(&il->sta_lock, flags_spin);
638 priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS; 638 il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS;
639 } 639 }
640 } 640 }
641 641
642 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 642 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
643 if (!found) 643 if (!found)
644 IWL_DEBUG_INFO(priv, "Restoring all known stations" 644 D_INFO("Restoring all known stations"
645 " .... no stations to be restored.\n"); 645 " .... no stations to be restored.\n");
646 else 646 else
647 IWL_DEBUG_INFO(priv, "Restoring all known stations" 647 D_INFO("Restoring all known stations"
648 " .... complete.\n"); 648 " .... complete.\n");
649} 649}
650EXPORT_SYMBOL(iwl_legacy_restore_stations); 650EXPORT_SYMBOL(il_restore_stations);
651 651
652int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv) 652int il_get_free_ucode_key_idx(struct il_priv *il)
653{ 653{
654 int i; 654 int i;
655 655
656 for (i = 0; i < priv->sta_key_max_num; i++) 656 for (i = 0; i < il->sta_key_max_num; i++)
657 if (!test_and_set_bit(i, &priv->ucode_key_table)) 657 if (!test_and_set_bit(i, &il->ucode_key_table))
658 return i; 658 return i;
659 659
660 return WEP_INVALID_OFFSET; 660 return WEP_INVALID_OFFSET;
661} 661}
662EXPORT_SYMBOL(iwl_legacy_get_free_ucode_key_index); 662EXPORT_SYMBOL(il_get_free_ucode_key_idx);
663 663
664void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv) 664void il_dealloc_bcast_stations(struct il_priv *il)
665{ 665{
666 unsigned long flags; 666 unsigned long flags;
667 int i; 667 int i;
668 668
669 spin_lock_irqsave(&priv->sta_lock, flags); 669 spin_lock_irqsave(&il->sta_lock, flags);
670 for (i = 0; i < priv->hw_params.max_stations; i++) { 670 for (i = 0; i < il->hw_params.max_stations; i++) {
671 if (!(priv->stations[i].used & IWL_STA_BCAST)) 671 if (!(il->stations[i].used & IL_STA_BCAST))
672 continue; 672 continue;
673 673
674 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE; 674 il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
675 priv->num_stations--; 675 il->num_stations--;
676 BUG_ON(priv->num_stations < 0); 676 BUG_ON(il->num_stations < 0);
677 kfree(priv->stations[i].lq); 677 kfree(il->stations[i].lq);
678 priv->stations[i].lq = NULL; 678 il->stations[i].lq = NULL;
679 } 679 }
680 spin_unlock_irqrestore(&priv->sta_lock, flags); 680 spin_unlock_irqrestore(&il->sta_lock, flags);
681} 681}
682EXPORT_SYMBOL_GPL(iwl_legacy_dealloc_bcast_stations); 682EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations);
683 683
684#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG 684#ifdef CONFIG_IWLEGACY_DEBUG
685static void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv, 685static void il_dump_lq_cmd(struct il_priv *il,
686 struct iwl_link_quality_cmd *lq) 686 struct il_link_quality_cmd *lq)
687{ 687{
688 int i; 688 int i;
689 IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id); 689 D_RATE("lq station id 0x%x\n", lq->sta_id);
690 IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n", 690 D_RATE("lq ant 0x%X 0x%X\n",
691 lq->general_params.single_stream_ant_msk, 691 lq->general_params.single_stream_ant_msk,
692 lq->general_params.dual_stream_ant_msk); 692 lq->general_params.dual_stream_ant_msk);
693 693
694 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) 694 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
695 IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n", 695 D_RATE("lq idx %d 0x%X\n",
696 i, lq->rs_table[i].rate_n_flags); 696 i, lq->rs_table[i].rate_n_flags);
697} 697}
698#else 698#else
699static inline void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv, 699static inline void il_dump_lq_cmd(struct il_priv *il,
700 struct iwl_link_quality_cmd *lq) 700 struct il_link_quality_cmd *lq)
701{ 701{
702} 702}
703#endif 703#endif
704 704
705/** 705/**
706 * iwl_legacy_is_lq_table_valid() - Test one aspect of LQ cmd for validity 706 * il_is_lq_table_valid() - Test one aspect of LQ cmd for validity
707 * 707 *
708 * It sometimes happens when a HT rate has been in use and we 708 * It sometimes happens when a HT rate has been in use and we
709 * loose connectivity with AP then mac80211 will first tell us that the 709 * loose connectivity with AP then mac80211 will first tell us that the
@@ -713,22 +713,22 @@ static inline void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
713 * Test for this to prevent driver from sending LQ command between the time 713 * Test for this to prevent driver from sending LQ command between the time
714 * RXON flags are updated and when LQ command is updated. 714 * RXON flags are updated and when LQ command is updated.
715 */ 715 */
716static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv, 716static bool il_is_lq_table_valid(struct il_priv *il,
717 struct iwl_rxon_context *ctx, 717 struct il_rxon_context *ctx,
718 struct iwl_link_quality_cmd *lq) 718 struct il_link_quality_cmd *lq)
719{ 719{
720 int i; 720 int i;
721 721
722 if (ctx->ht.enabled) 722 if (ctx->ht.enabled)
723 return true; 723 return true;
724 724
725 IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n", 725 D_INFO("Channel %u is not an HT channel\n",
726 ctx->active.channel); 726 ctx->active.channel);
727 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { 727 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
728 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & 728 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) &
729 RATE_MCS_HT_MSK) { 729 RATE_MCS_HT_MSK) {
730 IWL_DEBUG_INFO(priv, 730 D_INFO(
731 "index %d of LQ expects HT channel\n", 731 "idx %d of LQ expects HT channel\n",
732 i); 732 i);
733 return false; 733 return false;
734 } 734 }
@@ -737,7 +737,7 @@ static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv,
737} 737}
738 738
739/** 739/**
740 * iwl_legacy_send_lq_cmd() - Send link quality command 740 * il_send_lq_cmd() - Send link quality command
741 * @init: This command is sent as part of station initialization right 741 * @init: This command is sent as part of station initialization right
742 * after station has been added. 742 * after station has been added.
743 * 743 *
@@ -746,35 +746,35 @@ static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv,
746 * this case to clear the state indicating that station creation is in 746 * this case to clear the state indicating that station creation is in
747 * progress. 747 * progress.
748 */ 748 */
749int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx, 749int il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx,
750 struct iwl_link_quality_cmd *lq, u8 flags, bool init) 750 struct il_link_quality_cmd *lq, u8 flags, bool init)
751{ 751{
752 int ret = 0; 752 int ret = 0;
753 unsigned long flags_spin; 753 unsigned long flags_spin;
754 754
755 struct iwl_host_cmd cmd = { 755 struct il_host_cmd cmd = {
756 .id = REPLY_TX_LINK_QUALITY_CMD, 756 .id = C_TX_LINK_QUALITY_CMD,
757 .len = sizeof(struct iwl_link_quality_cmd), 757 .len = sizeof(struct il_link_quality_cmd),
758 .flags = flags, 758 .flags = flags,
759 .data = lq, 759 .data = lq,
760 }; 760 };
761 761
762 if (WARN_ON(lq->sta_id == IWL_INVALID_STATION)) 762 if (WARN_ON(lq->sta_id == IL_INVALID_STATION))
763 return -EINVAL; 763 return -EINVAL;
764 764
765 765
766 spin_lock_irqsave(&priv->sta_lock, flags_spin); 766 spin_lock_irqsave(&il->sta_lock, flags_spin);
767 if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) { 767 if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) {
768 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 768 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
769 return -EINVAL; 769 return -EINVAL;
770 } 770 }
771 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 771 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
772 772
773 iwl_legacy_dump_lq_cmd(priv, lq); 773 il_dump_lq_cmd(il, lq);
774 BUG_ON(init && (cmd.flags & CMD_ASYNC)); 774 BUG_ON(init && (cmd.flags & CMD_ASYNC));
775 775
776 if (iwl_legacy_is_lq_table_valid(priv, ctx, lq)) 776 if (il_is_lq_table_valid(il, ctx, lq))
777 ret = iwl_legacy_send_cmd(priv, &cmd); 777 ret = il_send_cmd(il, &cmd);
778 else 778 else
779 ret = -EINVAL; 779 ret = -EINVAL;
780 780
@@ -782,35 +782,35 @@ int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
782 return ret; 782 return ret;
783 783
784 if (init) { 784 if (init) {
785 IWL_DEBUG_INFO(priv, "init LQ command complete," 785 D_INFO("init LQ command complete,"
786 " clearing sta addition status for sta %d\n", 786 " clearing sta addition status for sta %d\n",
787 lq->sta_id); 787 lq->sta_id);
788 spin_lock_irqsave(&priv->sta_lock, flags_spin); 788 spin_lock_irqsave(&il->sta_lock, flags_spin);
789 priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; 789 il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
790 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 790 spin_unlock_irqrestore(&il->sta_lock, flags_spin);
791 } 791 }
792 return ret; 792 return ret;
793} 793}
794EXPORT_SYMBOL(iwl_legacy_send_lq_cmd); 794EXPORT_SYMBOL(il_send_lq_cmd);
795 795
796int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw, 796int il_mac_sta_remove(struct ieee80211_hw *hw,
797 struct ieee80211_vif *vif, 797 struct ieee80211_vif *vif,
798 struct ieee80211_sta *sta) 798 struct ieee80211_sta *sta)
799{ 799{
800 struct iwl_priv *priv = hw->priv; 800 struct il_priv *il = hw->priv;
801 struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv; 801 struct il_station_priv_common *sta_common = (void *)sta->drv_priv;
802 int ret; 802 int ret;
803 803
804 IWL_DEBUG_INFO(priv, "received request to remove station %pM\n", 804 D_INFO("received request to remove station %pM\n",
805 sta->addr); 805 sta->addr);
806 mutex_lock(&priv->mutex); 806 mutex_lock(&il->mutex);
807 IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n", 807 D_INFO("proceeding to remove station %pM\n",
808 sta->addr); 808 sta->addr);
809 ret = iwl_legacy_remove_station(priv, sta_common->sta_id, sta->addr); 809 ret = il_remove_station(il, sta_common->sta_id, sta->addr);
810 if (ret) 810 if (ret)
811 IWL_ERR(priv, "Error removing station %pM\n", 811 IL_ERR("Error removing station %pM\n",
812 sta->addr); 812 sta->addr);
813 mutex_unlock(&priv->mutex); 813 mutex_unlock(&il->mutex);
814 return ret; 814 return ret;
815} 815}
816EXPORT_SYMBOL(iwl_legacy_mac_sta_remove); 816EXPORT_SYMBOL(il_mac_sta_remove);
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.h b/drivers/net/wireless/iwlegacy/iwl-sta.h
deleted file mode 100644
index 67bd75fe01a1..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-sta.h
+++ /dev/null
@@ -1,148 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#ifndef __iwl_legacy_sta_h__
30#define __iwl_legacy_sta_h__
31
32#include "iwl-dev.h"
33
34#define HW_KEY_DYNAMIC 0
35#define HW_KEY_DEFAULT 1
36
37#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
38#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
39#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
40 being activated */
41#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
42 (this is for the IBSS BSSID stations) */
43#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
44
45
46void iwl_legacy_restore_stations(struct iwl_priv *priv,
47 struct iwl_rxon_context *ctx);
48void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
49 struct iwl_rxon_context *ctx);
50void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv);
51int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv);
52int iwl_legacy_send_add_sta(struct iwl_priv *priv,
53 struct iwl_legacy_addsta_cmd *sta, u8 flags);
54int iwl_legacy_add_station_common(struct iwl_priv *priv,
55 struct iwl_rxon_context *ctx,
56 const u8 *addr, bool is_ap,
57 struct ieee80211_sta *sta, u8 *sta_id_r);
58int iwl_legacy_remove_station(struct iwl_priv *priv,
59 const u8 sta_id,
60 const u8 *addr);
61int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
62 struct ieee80211_vif *vif,
63 struct ieee80211_sta *sta);
64
65u8 iwl_legacy_prep_station(struct iwl_priv *priv,
66 struct iwl_rxon_context *ctx,
67 const u8 *addr, bool is_ap,
68 struct ieee80211_sta *sta);
69
70int iwl_legacy_send_lq_cmd(struct iwl_priv *priv,
71 struct iwl_rxon_context *ctx,
72 struct iwl_link_quality_cmd *lq,
73 u8 flags, bool init);
74
75/**
76 * iwl_legacy_clear_driver_stations - clear knowledge of all stations from driver
77 * @priv: iwl priv struct
78 *
79 * This is called during iwl_down() to make sure that in the case
80 * we're coming there from a hardware restart mac80211 will be
81 * able to reconfigure stations -- if we're getting there in the
82 * normal down flow then the stations will already be cleared.
83 */
84static inline void iwl_legacy_clear_driver_stations(struct iwl_priv *priv)
85{
86 unsigned long flags;
87 struct iwl_rxon_context *ctx;
88
89 spin_lock_irqsave(&priv->sta_lock, flags);
90 memset(priv->stations, 0, sizeof(priv->stations));
91 priv->num_stations = 0;
92
93 priv->ucode_key_table = 0;
94
95 for_each_context(priv, ctx) {
96 /*
97 * Remove all key information that is not stored as part
98 * of station information since mac80211 may not have had
99 * a chance to remove all the keys. When device is
100 * reconfigured by mac80211 after an error all keys will
101 * be reconfigured.
102 */
103 memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
104 ctx->key_mapping_keys = 0;
105 }
106
107 spin_unlock_irqrestore(&priv->sta_lock, flags);
108}
109
110static inline int iwl_legacy_sta_id(struct ieee80211_sta *sta)
111{
112 if (WARN_ON(!sta))
113 return IWL_INVALID_STATION;
114
115 return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id;
116}
117
118/**
119 * iwl_legacy_sta_id_or_broadcast - return sta_id or broadcast sta
120 * @priv: iwl priv
121 * @context: the current context
122 * @sta: mac80211 station
123 *
124 * In certain circumstances mac80211 passes a station pointer
125 * that may be %NULL, for example during TX or key setup. In
126 * that case, we need to use the broadcast station, so this
127 * inline wraps that pattern.
128 */
129static inline int iwl_legacy_sta_id_or_broadcast(struct iwl_priv *priv,
130 struct iwl_rxon_context *context,
131 struct ieee80211_sta *sta)
132{
133 int sta_id;
134
135 if (!sta)
136 return context->bcast_sta_id;
137
138 sta_id = iwl_legacy_sta_id(sta);
139
140 /*
141 * mac80211 should not be passing a partially
142 * initialised station!
143 */
144 WARN_ON(sta_id == IWL_INVALID_STATION);
145
146 return sta_id;
147}
148#endif /* __iwl_legacy_sta_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c
deleted file mode 100644
index ef9e268bf8a0..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl-tx.c
+++ /dev/null
@@ -1,658 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/etherdevice.h>
31#include <linux/sched.h>
32#include <linux/slab.h>
33#include <net/mac80211.h>
34#include "iwl-eeprom.h"
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38#include "iwl-io.h"
39#include "iwl-helpers.h"
40
41/**
42 * iwl_legacy_txq_update_write_ptr - Send new write index to hardware
43 */
44void
45iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
46{
47 u32 reg = 0;
48 int txq_id = txq->q.id;
49
50 if (txq->need_update == 0)
51 return;
52
53 /* if we're trying to save power */
54 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
55 /* wake up nic if it's powered down ...
56 * uCode will wake up, and interrupt us again, so next
57 * time we'll skip this part. */
58 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
59
60 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
61 IWL_DEBUG_INFO(priv,
62 "Tx queue %d requesting wakeup,"
63 " GP1 = 0x%x\n", txq_id, reg);
64 iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
65 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
66 return;
67 }
68
69 iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
70 txq->q.write_ptr | (txq_id << 8));
71
72 /*
73 * else not in power-save mode,
74 * uCode will never sleep when we're
75 * trying to tx (during RFKILL, we're not trying to tx).
76 */
77 } else
78 iwl_write32(priv, HBUS_TARG_WRPTR,
79 txq->q.write_ptr | (txq_id << 8));
80 txq->need_update = 0;
81}
82EXPORT_SYMBOL(iwl_legacy_txq_update_write_ptr);
83
84/**
85 * iwl_legacy_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
86 */
87void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
88{
89 struct iwl_tx_queue *txq = &priv->txq[txq_id];
90 struct iwl_queue *q = &txq->q;
91
92 if (q->n_bd == 0)
93 return;
94
95 while (q->write_ptr != q->read_ptr) {
96 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
97 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
98 }
99}
100EXPORT_SYMBOL(iwl_legacy_tx_queue_unmap);
101
102/**
103 * iwl_legacy_tx_queue_free - Deallocate DMA queue.
104 * @txq: Transmit queue to deallocate.
105 *
106 * Empty queue by removing and destroying all BD's.
107 * Free all buffers.
108 * 0-fill, but do not free "txq" descriptor structure.
109 */
110void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id)
111{
112 struct iwl_tx_queue *txq = &priv->txq[txq_id];
113 struct device *dev = &priv->pci_dev->dev;
114 int i;
115
116 iwl_legacy_tx_queue_unmap(priv, txq_id);
117
118 /* De-alloc array of command/tx buffers */
119 for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
120 kfree(txq->cmd[i]);
121
122 /* De-alloc circular buffer of TFDs */
123 if (txq->q.n_bd)
124 dma_free_coherent(dev, priv->hw_params.tfd_size *
125 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
126
127 /* De-alloc array of per-TFD driver data */
128 kfree(txq->txb);
129 txq->txb = NULL;
130
131 /* deallocate arrays */
132 kfree(txq->cmd);
133 kfree(txq->meta);
134 txq->cmd = NULL;
135 txq->meta = NULL;
136
137 /* 0-fill queue descriptor structure */
138 memset(txq, 0, sizeof(*txq));
139}
140EXPORT_SYMBOL(iwl_legacy_tx_queue_free);
141
142/**
143 * iwl_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
144 */
145void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv)
146{
147 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
148 struct iwl_queue *q = &txq->q;
149 int i;
150
151 if (q->n_bd == 0)
152 return;
153
154 while (q->read_ptr != q->write_ptr) {
155 i = iwl_legacy_get_cmd_index(q, q->read_ptr, 0);
156
157 if (txq->meta[i].flags & CMD_MAPPED) {
158 pci_unmap_single(priv->pci_dev,
159 dma_unmap_addr(&txq->meta[i], mapping),
160 dma_unmap_len(&txq->meta[i], len),
161 PCI_DMA_BIDIRECTIONAL);
162 txq->meta[i].flags = 0;
163 }
164
165 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
166 }
167
168 i = q->n_window;
169 if (txq->meta[i].flags & CMD_MAPPED) {
170 pci_unmap_single(priv->pci_dev,
171 dma_unmap_addr(&txq->meta[i], mapping),
172 dma_unmap_len(&txq->meta[i], len),
173 PCI_DMA_BIDIRECTIONAL);
174 txq->meta[i].flags = 0;
175 }
176}
177EXPORT_SYMBOL(iwl_legacy_cmd_queue_unmap);
178
179/**
180 * iwl_legacy_cmd_queue_free - Deallocate DMA queue.
181 * @txq: Transmit queue to deallocate.
182 *
183 * Empty queue by removing and destroying all BD's.
184 * Free all buffers.
185 * 0-fill, but do not free "txq" descriptor structure.
186 */
187void iwl_legacy_cmd_queue_free(struct iwl_priv *priv)
188{
189 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
190 struct device *dev = &priv->pci_dev->dev;
191 int i;
192
193 iwl_legacy_cmd_queue_unmap(priv);
194
195 /* De-alloc array of command/tx buffers */
196 for (i = 0; i <= TFD_CMD_SLOTS; i++)
197 kfree(txq->cmd[i]);
198
199 /* De-alloc circular buffer of TFDs */
200 if (txq->q.n_bd)
201 dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
202 txq->tfds, txq->q.dma_addr);
203
204 /* deallocate arrays */
205 kfree(txq->cmd);
206 kfree(txq->meta);
207 txq->cmd = NULL;
208 txq->meta = NULL;
209
210 /* 0-fill queue descriptor structure */
211 memset(txq, 0, sizeof(*txq));
212}
213EXPORT_SYMBOL(iwl_legacy_cmd_queue_free);
214
215/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
216 * DMA services
217 *
218 * Theory of operation
219 *
220 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
221 * of buffer descriptors, each of which points to one or more data buffers for
222 * the device to read from or fill. Driver and device exchange status of each
223 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
224 * entries in each circular buffer, to protect against confusing empty and full
225 * queue states.
226 *
227 * The device reads or writes the data in the queues via the device's several
228 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
229 *
230 * For Tx queue, there are low mark and high mark limits. If, after queuing
231 * the packet for Tx, free space become < low mark, Tx queue stopped. When
232 * reclaiming packets (on 'tx done IRQ), if free space become > high mark,
233 * Tx queue resumed.
234 *
235 * See more detailed info in iwl-4965-hw.h.
236 ***************************************************/
237
238int iwl_legacy_queue_space(const struct iwl_queue *q)
239{
240 int s = q->read_ptr - q->write_ptr;
241
242 if (q->read_ptr > q->write_ptr)
243 s -= q->n_bd;
244
245 if (s <= 0)
246 s += q->n_window;
247 /* keep some reserve to not confuse empty and full situations */
248 s -= 2;
249 if (s < 0)
250 s = 0;
251 return s;
252}
253EXPORT_SYMBOL(iwl_legacy_queue_space);
254
255
256/**
257 * iwl_legacy_queue_init - Initialize queue's high/low-water and read/write indexes
258 */
259static int iwl_legacy_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
260 int count, int slots_num, u32 id)
261{
262 q->n_bd = count;
263 q->n_window = slots_num;
264 q->id = id;
265
266 /* count must be power-of-two size, otherwise iwl_legacy_queue_inc_wrap
267 * and iwl_legacy_queue_dec_wrap are broken. */
268 BUG_ON(!is_power_of_2(count));
269
270 /* slots_num must be power-of-two size, otherwise
271 * iwl_legacy_get_cmd_index is broken. */
272 BUG_ON(!is_power_of_2(slots_num));
273
274 q->low_mark = q->n_window / 4;
275 if (q->low_mark < 4)
276 q->low_mark = 4;
277
278 q->high_mark = q->n_window / 8;
279 if (q->high_mark < 2)
280 q->high_mark = 2;
281
282 q->write_ptr = q->read_ptr = 0;
283
284 return 0;
285}
286
287/**
288 * iwl_legacy_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
289 */
290static int iwl_legacy_tx_queue_alloc(struct iwl_priv *priv,
291 struct iwl_tx_queue *txq, u32 id)
292{
293 struct device *dev = &priv->pci_dev->dev;
294 size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
295
296 /* Driver private data, only for Tx (not command) queues,
297 * not shared with device. */
298 if (id != priv->cmd_queue) {
299 txq->txb = kzalloc(sizeof(txq->txb[0]) *
300 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
301 if (!txq->txb) {
302 IWL_ERR(priv, "kmalloc for auxiliary BD "
303 "structures failed\n");
304 goto error;
305 }
306 } else {
307 txq->txb = NULL;
308 }
309
310 /* Circular buffer of transmit frame descriptors (TFDs),
311 * shared with device */
312 txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
313 GFP_KERNEL);
314 if (!txq->tfds) {
315 IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
316 goto error;
317 }
318 txq->q.id = id;
319
320 return 0;
321
322 error:
323 kfree(txq->txb);
324 txq->txb = NULL;
325
326 return -ENOMEM;
327}
328
329/**
330 * iwl_legacy_tx_queue_init - Allocate and initialize one tx/cmd queue
331 */
332int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
333 int slots_num, u32 txq_id)
334{
335 int i, len;
336 int ret;
337 int actual_slots = slots_num;
338
339 /*
340 * Alloc buffer array for commands (Tx or other types of commands).
341 * For the command queue (#4/#9), allocate command space + one big
342 * command for scan, since scan command is very huge; the system will
343 * not have two scans at the same time, so only one is needed.
344 * For normal Tx queues (all other queues), no super-size command
345 * space is needed.
346 */
347 if (txq_id == priv->cmd_queue)
348 actual_slots++;
349
350 txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
351 GFP_KERNEL);
352 txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
353 GFP_KERNEL);
354
355 if (!txq->meta || !txq->cmd)
356 goto out_free_arrays;
357
358 len = sizeof(struct iwl_device_cmd);
359 for (i = 0; i < actual_slots; i++) {
360 /* only happens for cmd queue */
361 if (i == slots_num)
362 len = IWL_MAX_CMD_SIZE;
363
364 txq->cmd[i] = kmalloc(len, GFP_KERNEL);
365 if (!txq->cmd[i])
366 goto err;
367 }
368
369 /* Alloc driver data array and TFD circular buffer */
370 ret = iwl_legacy_tx_queue_alloc(priv, txq, txq_id);
371 if (ret)
372 goto err;
373
374 txq->need_update = 0;
375
376 /*
377 * For the default queues 0-3, set up the swq_id
378 * already -- all others need to get one later
379 * (if they need one at all).
380 */
381 if (txq_id < 4)
382 iwl_legacy_set_swq_id(txq, txq_id, txq_id);
383
384 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
385 * iwl_legacy_queue_inc_wrap and iwl_legacy_queue_dec_wrap are broken. */
386 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
387
388 /* Initialize queue's high/low-water marks, and head/tail indexes */
389 iwl_legacy_queue_init(priv, &txq->q,
390 TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
391
392 /* Tell device where to find queue */
393 priv->cfg->ops->lib->txq_init(priv, txq);
394
395 return 0;
396err:
397 for (i = 0; i < actual_slots; i++)
398 kfree(txq->cmd[i]);
399out_free_arrays:
400 kfree(txq->meta);
401 kfree(txq->cmd);
402
403 return -ENOMEM;
404}
405EXPORT_SYMBOL(iwl_legacy_tx_queue_init);
406
407void iwl_legacy_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
408 int slots_num, u32 txq_id)
409{
410 int actual_slots = slots_num;
411
412 if (txq_id == priv->cmd_queue)
413 actual_slots++;
414
415 memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
416
417 txq->need_update = 0;
418
419 /* Initialize queue's high/low-water marks, and head/tail indexes */
420 iwl_legacy_queue_init(priv, &txq->q,
421 TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
422
423 /* Tell device where to find queue */
424 priv->cfg->ops->lib->txq_init(priv, txq);
425}
426EXPORT_SYMBOL(iwl_legacy_tx_queue_reset);
427
428/*************** HOST COMMAND QUEUE FUNCTIONS *****/
429
430/**
431 * iwl_legacy_enqueue_hcmd - enqueue a uCode command
432 * @priv: device private data point
433 * @cmd: a point to the ucode command structure
434 *
435 * The function returns < 0 values to indicate the operation is
436 * failed. On success, it turns the index (> 0) of command in the
437 * command queue.
438 */
439int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
440{
441 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
442 struct iwl_queue *q = &txq->q;
443 struct iwl_device_cmd *out_cmd;
444 struct iwl_cmd_meta *out_meta;
445 dma_addr_t phys_addr;
446 unsigned long flags;
447 int len;
448 u32 idx;
449 u16 fix_size;
450
451 cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
452 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
453
454 /* If any of the command structures end up being larger than
455 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
456 * we will need to increase the size of the TFD entries
457 * Also, check to see if command buffer should not exceed the size
458 * of device_cmd and max_cmd_size. */
459 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
460 !(cmd->flags & CMD_SIZE_HUGE));
461 BUG_ON(fix_size > IWL_MAX_CMD_SIZE);
462
463 if (iwl_legacy_is_rfkill(priv) || iwl_legacy_is_ctkill(priv)) {
464 IWL_WARN(priv, "Not sending command - %s KILL\n",
465 iwl_legacy_is_rfkill(priv) ? "RF" : "CT");
466 return -EIO;
467 }
468
469 spin_lock_irqsave(&priv->hcmd_lock, flags);
470
471 if (iwl_legacy_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
472 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
473
474 IWL_ERR(priv, "Restarting adapter due to command queue full\n");
475 queue_work(priv->workqueue, &priv->restart);
476 return -ENOSPC;
477 }
478
479 idx = iwl_legacy_get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
480 out_cmd = txq->cmd[idx];
481 out_meta = &txq->meta[idx];
482
483 if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
484 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
485 return -ENOSPC;
486 }
487
488 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
489 out_meta->flags = cmd->flags | CMD_MAPPED;
490 if (cmd->flags & CMD_WANT_SKB)
491 out_meta->source = cmd;
492 if (cmd->flags & CMD_ASYNC)
493 out_meta->callback = cmd->callback;
494
495 out_cmd->hdr.cmd = cmd->id;
496 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
497
498 /* At this point, the out_cmd now has all of the incoming cmd
499 * information */
500
501 out_cmd->hdr.flags = 0;
502 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
503 INDEX_TO_SEQ(q->write_ptr));
504 if (cmd->flags & CMD_SIZE_HUGE)
505 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
506 len = sizeof(struct iwl_device_cmd);
507 if (idx == TFD_CMD_SLOTS)
508 len = IWL_MAX_CMD_SIZE;
509
510#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
511 switch (out_cmd->hdr.cmd) {
512 case REPLY_TX_LINK_QUALITY_CMD:
513 case SENSITIVITY_CMD:
514 IWL_DEBUG_HC_DUMP(priv,
515 "Sending command %s (#%x), seq: 0x%04X, "
516 "%d bytes at %d[%d]:%d\n",
517 iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
518 out_cmd->hdr.cmd,
519 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
520 q->write_ptr, idx, priv->cmd_queue);
521 break;
522 default:
523 IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
524 "%d bytes at %d[%d]:%d\n",
525 iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
526 out_cmd->hdr.cmd,
527 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
528 q->write_ptr, idx, priv->cmd_queue);
529 }
530#endif
531 txq->need_update = 1;
532
533 if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
534 /* Set up entry in queue's byte count circular buffer */
535 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
536
537 phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
538 fix_size, PCI_DMA_BIDIRECTIONAL);
539 dma_unmap_addr_set(out_meta, mapping, phys_addr);
540 dma_unmap_len_set(out_meta, len, fix_size);
541
542 trace_iwlwifi_legacy_dev_hcmd(priv, &out_cmd->hdr,
543 fix_size, cmd->flags);
544
545 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
546 phys_addr, fix_size, 1,
547 U32_PAD(cmd->len));
548
549 /* Increment and update queue's write index */
550 q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
551 iwl_legacy_txq_update_write_ptr(priv, txq);
552
553 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
554 return idx;
555}
556
557/**
558 * iwl_legacy_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
559 *
560 * When FW advances 'R' index, all entries between old and new 'R' index
561 * need to be reclaimed. As result, some free space forms. If there is
562 * enough free space (> low mark), wake the stack that feeds us.
563 */
564static void iwl_legacy_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
565 int idx, int cmd_idx)
566{
567 struct iwl_tx_queue *txq = &priv->txq[txq_id];
568 struct iwl_queue *q = &txq->q;
569 int nfreed = 0;
570
571 if ((idx >= q->n_bd) || (iwl_legacy_queue_used(q, idx) == 0)) {
572 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
573 "is out of range [0-%d] %d %d.\n", txq_id,
574 idx, q->n_bd, q->write_ptr, q->read_ptr);
575 return;
576 }
577
578 for (idx = iwl_legacy_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
579 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
580
581 if (nfreed++ > 0) {
582 IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
583 q->write_ptr, q->read_ptr);
584 queue_work(priv->workqueue, &priv->restart);
585 }
586
587 }
588}
589
590/**
591 * iwl_legacy_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
592 * @rxb: Rx buffer to reclaim
593 *
594 * If an Rx buffer has an async callback associated with it the callback
595 * will be executed. The attached skb (if present) will only be freed
596 * if the callback returns 1
597 */
598void
599iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
600{
601 struct iwl_rx_packet *pkt = rxb_addr(rxb);
602 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
603 int txq_id = SEQ_TO_QUEUE(sequence);
604 int index = SEQ_TO_INDEX(sequence);
605 int cmd_index;
606 bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
607 struct iwl_device_cmd *cmd;
608 struct iwl_cmd_meta *meta;
609 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
610 unsigned long flags;
611
612 /* If a Tx command is being handled and it isn't in the actual
613 * command queue then there a command routing bug has been introduced
614 * in the queue management code. */
615 if (WARN(txq_id != priv->cmd_queue,
616 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
617 txq_id, priv->cmd_queue, sequence,
618 priv->txq[priv->cmd_queue].q.read_ptr,
619 priv->txq[priv->cmd_queue].q.write_ptr)) {
620 iwl_print_hex_error(priv, pkt, 32);
621 return;
622 }
623
624 cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, huge);
625 cmd = txq->cmd[cmd_index];
626 meta = &txq->meta[cmd_index];
627
628 txq->time_stamp = jiffies;
629
630 pci_unmap_single(priv->pci_dev,
631 dma_unmap_addr(meta, mapping),
632 dma_unmap_len(meta, len),
633 PCI_DMA_BIDIRECTIONAL);
634
635 /* Input error checking is done when commands are added to queue. */
636 if (meta->flags & CMD_WANT_SKB) {
637 meta->source->reply_page = (unsigned long)rxb_addr(rxb);
638 rxb->page = NULL;
639 } else if (meta->callback)
640 meta->callback(priv, cmd, pkt);
641
642 spin_lock_irqsave(&priv->hcmd_lock, flags);
643
644 iwl_legacy_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
645
646 if (!(meta->flags & CMD_ASYNC)) {
647 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
648 IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
649 iwl_legacy_get_cmd_string(cmd->hdr.cmd));
650 wake_up(&priv->wait_command_queue);
651 }
652
653 /* Mark as unmapped */
654 meta->flags = 0;
655
656 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
657}
658EXPORT_SYMBOL(iwl_legacy_tx_cmd_complete);
diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
deleted file mode 100644
index b282d869a546..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
+++ /dev/null
@@ -1,4016 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/pci-aspm.h>
37#include <linux/slab.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/sched.h>
41#include <linux/skbuff.h>
42#include <linux/netdevice.h>
43#include <linux/firmware.h>
44#include <linux/etherdevice.h>
45#include <linux/if_arp.h>
46
47#include <net/ieee80211_radiotap.h>
48#include <net/mac80211.h>
49
50#include <asm/div64.h>
51
52#define DRV_NAME "iwl3945"
53
54#include "iwl-fh.h"
55#include "iwl-3945-fh.h"
56#include "iwl-commands.h"
57#include "iwl-sta.h"
58#include "iwl-3945.h"
59#include "iwl-core.h"
60#include "iwl-helpers.h"
61#include "iwl-dev.h"
62#include "iwl-spectrum.h"
63
64/*
65 * module name, copyright, version, etc.
66 */
67
68#define DRV_DESCRIPTION \
69"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
70
71#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
72#define VD "d"
73#else
74#define VD
75#endif
76
77/*
78 * add "s" to indicate spectrum measurement included.
79 * we add it here to be consistent with previous releases in which
80 * this was configurable.
81 */
82#define DRV_VERSION IWLWIFI_VERSION VD "s"
83#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
84#define DRV_AUTHOR "<ilw@linux.intel.com>"
85
86MODULE_DESCRIPTION(DRV_DESCRIPTION);
87MODULE_VERSION(DRV_VERSION);
88MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
89MODULE_LICENSE("GPL");
90
 /* module parameters — load-time defaults for the 3945 driver;
  * fields not listed here default to 0 */
struct iwl_mod_params iwl3945_mod_params = {
	.sw_crypto = 1,
	.restart_fw = 1,
	.disable_hw_scan = 1,
	/* the rest are 0 by default */
};
98
99/**
100 * iwl3945_get_antenna_flags - Get antenna flags for RXON command
101 * @priv: eeprom and antenna fields are used to determine antenna flags
102 *
103 * priv->eeprom39 is used to determine if antenna AUX/MAIN are reversed
104 * iwl3945_mod_params.antenna specifies the antenna diversity mode:
105 *
106 * IWL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
107 * IWL_ANTENNA_MAIN - Force MAIN antenna
108 * IWL_ANTENNA_AUX - Force AUX antenna
109 */
110__le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv)
111{
112 struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
113
114 switch (iwl3945_mod_params.antenna) {
115 case IWL_ANTENNA_DIVERSITY:
116 return 0;
117
118 case IWL_ANTENNA_MAIN:
119 if (eeprom->antenna_switch_type)
120 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
121 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
122
123 case IWL_ANTENNA_AUX:
124 if (eeprom->antenna_switch_type)
125 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
126 return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
127 }
128
129 /* bad antenna selector value */
130 IWL_ERR(priv, "Bad antenna selector value (0x%x)\n",
131 iwl3945_mod_params.antenna);
132
133 return 0; /* "diversity" is default if error */
134}
135
/*
 * iwl3945_set_ccmp_dynamic_key_info - install a CCMP (AES) pairwise key.
 *
 * Builds the STA_KEY flags (key index, multicast bit for the broadcast
 * station), mirrors the key material into priv->stations[sta_id], picks a
 * free uCode key slot when no key is currently installed, and pushes the
 * change with an asynchronous ADD_STA command.
 *
 * Returns the result of iwl_legacy_send_add_sta().
 */
static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
				struct ieee80211_key_conf *keyconf,
				u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	int ret;

	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);

	/* Keys installed on the broadcast station are group (multicast) keys. */
	if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	/* Have mac80211 generate the IV for us (IEEE80211_KEY_FLAG_GENERATE_IV). */
	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->hw_key_idx = keyconf->keyidx;
	key_flags &= ~STA_KEY_FLG_INVALID;

	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
	memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
							keyconf->keylen);

	memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
							keyconf->keylen);

	/* Allocate a uCode key slot only when no key is installed yet. */
	if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
			== STA_KEY_FLG_NO_ENC)
		priv->stations[sta_id].sta.key.key_offset =
				 iwl_legacy_get_free_ucode_key_index(priv);
	/* else, we are overriding an existing key => no need to allocate room
	 * in uCode. */

	WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
		"no space for a new key");

	priv->stations[sta_id].sta.key.key_flags = key_flags;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n");

	/* CMD_ASYNC: we hold sta_lock (spinlock), so we must not sleep
	 * waiting for the command reply. */
	ret = iwl_legacy_send_add_sta(priv,
				&priv->stations[sta_id].sta, CMD_ASYNC);

	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return ret;
}
186
187static int iwl3945_set_tkip_dynamic_key_info(struct iwl_priv *priv,
188 struct ieee80211_key_conf *keyconf,
189 u8 sta_id)
190{
191 return -EOPNOTSUPP;
192}
193
194static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv,
195 struct ieee80211_key_conf *keyconf,
196 u8 sta_id)
197{
198 return -EOPNOTSUPP;
199}
200
/*
 * iwl3945_clear_sta_key_info - wipe a station's key state and notify uCode.
 *
 * Clears both the driver-side key cache and the ADD_STA key fields under
 * sta_lock, then sends a synchronous ADD_STA built from a snapshot taken
 * under the lock (the sync send itself may sleep, so it runs unlocked).
 *
 * Returns the result of iwl_legacy_send_add_sta().
 */
static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
{
	unsigned long flags;
	struct iwl_legacy_addsta_cmd sta_cmd;

	spin_lock_irqsave(&priv->sta_lock, flags);
	memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
	/* NOTE(review): clears sizeof(struct iwl4965_keyinfo) bytes even on
	 * 3945 — presumably the key layouts match; confirm in commands.h. */
	memset(&priv->stations[sta_id].sta.key, 0,
		sizeof(struct iwl4965_keyinfo));
	priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	/* Snapshot so the possibly-sleeping sync send runs without the lock. */
	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd));
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n");
	return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
219
220static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
221 struct ieee80211_key_conf *keyconf, u8 sta_id)
222{
223 int ret = 0;
224
225 keyconf->hw_key_idx = HW_KEY_DYNAMIC;
226
227 switch (keyconf->cipher) {
228 case WLAN_CIPHER_SUITE_CCMP:
229 ret = iwl3945_set_ccmp_dynamic_key_info(priv, keyconf, sta_id);
230 break;
231 case WLAN_CIPHER_SUITE_TKIP:
232 ret = iwl3945_set_tkip_dynamic_key_info(priv, keyconf, sta_id);
233 break;
234 case WLAN_CIPHER_SUITE_WEP40:
235 case WLAN_CIPHER_SUITE_WEP104:
236 ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id);
237 break;
238 default:
239 IWL_ERR(priv, "Unknown alg: %s alg=%x\n", __func__,
240 keyconf->cipher);
241 ret = -EINVAL;
242 }
243
244 IWL_DEBUG_WEP(priv, "Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
245 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
246 sta_id, ret);
247
248 return ret;
249}
250
/*
 * iwl3945_remove_static_key - remove a static (default/WEP) key.
 *
 * Static key removal is not supported by this driver; return the error
 * directly instead of staging it in a pointless local variable.
 */
static int iwl3945_remove_static_key(struct iwl_priv *priv)
{
	return -EOPNOTSUPP;
}
257
258static int iwl3945_set_static_key(struct iwl_priv *priv,
259 struct ieee80211_key_conf *key)
260{
261 if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
262 key->cipher == WLAN_CIPHER_SUITE_WEP104)
263 return -EOPNOTSUPP;
264
265 IWL_ERR(priv, "Static key invalid: cipher %x\n", key->cipher);
266 return -EINVAL;
267}
268
269static void iwl3945_clear_free_frames(struct iwl_priv *priv)
270{
271 struct list_head *element;
272
273 IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
274 priv->frames_count);
275
276 while (!list_empty(&priv->free_frames)) {
277 element = priv->free_frames.next;
278 list_del(element);
279 kfree(list_entry(element, struct iwl3945_frame, list));
280 priv->frames_count--;
281 }
282
283 if (priv->frames_count) {
284 IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
285 priv->frames_count);
286 priv->frames_count = 0;
287 }
288}
289
290static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl_priv *priv)
291{
292 struct iwl3945_frame *frame;
293 struct list_head *element;
294 if (list_empty(&priv->free_frames)) {
295 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
296 if (!frame) {
297 IWL_ERR(priv, "Could not allocate frame!\n");
298 return NULL;
299 }
300
301 priv->frames_count++;
302 return frame;
303 }
304
305 element = priv->free_frames.next;
306 list_del(element);
307 return list_entry(element, struct iwl3945_frame, list);
308}
309
310static void iwl3945_free_frame(struct iwl_priv *priv, struct iwl3945_frame *frame)
311{
312 memset(frame, 0, sizeof(*frame));
313 list_add(&frame->list, &priv->free_frames);
314}
315
316unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
317 struct ieee80211_hdr *hdr,
318 int left)
319{
320
321 if (!iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb)
322 return 0;
323
324 if (priv->beacon_skb->len > left)
325 return 0;
326
327 memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
328
329 return priv->beacon_skb->len;
330}
331
332static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
333{
334 struct iwl3945_frame *frame;
335 unsigned int frame_size;
336 int rc;
337 u8 rate;
338
339 frame = iwl3945_get_free_frame(priv);
340
341 if (!frame) {
342 IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
343 "command.\n");
344 return -ENOMEM;
345 }
346
347 rate = iwl_legacy_get_lowest_plcp(priv,
348 &priv->contexts[IWL_RXON_CTX_BSS]);
349
350 frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
351
352 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
353 &frame->u.cmd[0]);
354
355 iwl3945_free_frame(priv, frame);
356
357 return rc;
358}
359
360static void iwl3945_unset_hw_params(struct iwl_priv *priv)
361{
362 if (priv->_3945.shared_virt)
363 dma_free_coherent(&priv->pci_dev->dev,
364 sizeof(struct iwl3945_shared),
365 priv->_3945.shared_virt,
366 priv->_3945.shared_phys);
367}
368
/*
 * iwl3945_build_tx_cmd_hwcrypto - fill TX command security fields.
 *
 * Copies the station's cached key into the TX command and sets sec_ctl
 * according to the cipher: CCMP keys occupy key[0..]; WEP keys start at
 * key[3] with the hw key index encoded into sec_ctl. The TKIP case sets
 * nothing here.
 */
static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
				      struct ieee80211_tx_info *info,
				      struct iwl_device_cmd *cmd,
				      struct sk_buff *skb_frag,
				      int sta_id)
{
	struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
	struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;

	tx_cmd->sec_ctl = 0;

	switch (keyinfo->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		/* WEP: key index is carried inside sec_ctl. */
		tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
		    (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;

		memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", info->control.hw_key->hw_key_idx);
		break;

	default:
		IWL_ERR(priv, "Unknown encode cipher %x\n", keyinfo->cipher);
		break;
	}
}
408
/*
 * handle build REPLY_TX command notification.
 *
 * Fills the rate-independent part of the TX command: ACK / sequence
 * control flags, station id, TID, protection flags (via
 * iwl_legacy_tx_cmd_protection) and the PM frame timeout.
 */
static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
				  struct iwl_device_cmd *cmd,
				  struct ieee80211_tx_info *info,
				  struct ieee80211_hdr *hdr, u8 std_id)
{
	struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
	__le32 tx_flags = tx_cmd->tx_flags;
	__le16 fc = hdr->frame_control;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		/* First fragment of a probe response gets the TSF flag. */
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		/* (re)assoc requests get a slightly longer PM frame timeout. */
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
462
/*
 * start REPLY_TX command process
 *
 * mac80211 TX path for 3945: builds a REPLY_TX host command for one skb in
 * the queue chosen by skb_get_queue_mapping(), DMA-maps the command + MAC
 * header and the payload, attaches both to a TFD, and advances the queue
 * write pointer.
 *
 * Returns 0 when the frame was queued, -1 when it must be dropped.
 */
static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl3945_tx_cmd *tx_cmd;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_queue *q = NULL;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	int txq_id = skb_get_queue_mapping(skb);
	u16 len, idx, hdr_len;
	u8 id;
	u8 unicast;
	u8 sta_id;
	u8 tid = 0;
	__le16 fc;
	u8 wait_write_ptr = 0;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_legacy_is_rfkill(priv)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock;
	}

	if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) {
		IWL_ERR(priv, "ERROR: No TX rate available.\n");
		goto drop_unlock;
	}

	unicast = !is_multicast_ether_addr(hdr->addr1);
	id = 0;

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

	spin_unlock_irqrestore(&priv->lock, flags);

	hdr_len = ieee80211_hdrlen(fc);

	/* Find index into station table for destination station */
	sta_id = iwl_legacy_sta_id_or_broadcast(
			priv, &priv->contexts[IWL_RXON_CTX_BSS],
			info->control.sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
			       hdr->addr1);
		goto drop;
	}

	IWL_DEBUG_RATE(priv, "station Id %d\n", sta_id);

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (unlikely(tid >= MAX_TID_COUNT))
			goto drop;
	}

	/* Descriptor for chosen Tx queue */
	txq = &priv->txq[txq_id];
	q = &txq->q;

	/* Drop when the queue is already at its high-water mark. */
	if ((iwl_legacy_queue_space(q) < q->high_mark))
		goto drop;

	spin_lock_irqsave(&priv->lock, flags);

	idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb = skb;
	txq->txb[q->write_ptr].ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	/* Init first empty entry in queue's array of Tx/cmd buffers */
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];
	tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
	memset(tx_cmd, 0, sizeof(*tx_cmd));

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	out_cmd->hdr.cmd = REPLY_TX;
	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);


	if (info->control.hw_key)
		iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id);

	/* TODO need this for burst mode later on */
	iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);

	/* set is_hcca to 0; it probably will never be implemented */
	iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
	iwl_legacy_update_stats(priv, true, fc, len);
	tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
	tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;

	/* For fragmented frames, defer the write-pointer update until the
	 * last fragment is queued (wait_write_ptr). */
	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
		     le16_to_cpu(out_cmd->hdr.sequence));
	IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr,
			   ieee80211_hdrlen(fc));

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl3945_tx_cmd) +
			sizeof(struct iwl_cmd_header) + hdr_len;
	len = (len + 3) & ~3;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr,
				    len, PCI_DMA_TODEVICE);
	/* we do not map meta data ... so we can safely access address to
	 * provide to unmap command*/
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, len);

	/* Add buffer containing Tx command and MAC(!) header to TFD's
	 * first entry */
	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   txcmd_phys, len, 1, 0);


	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	len = skb->len - hdr_len;
	if (len) {
		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
					   len, PCI_DMA_TODEVICE);
		priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
							   phys_addr, len,
							   0, U32_PAD(len));
	}


	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_legacy_txq_update_write_ptr(priv, txq);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Queue is nearly full after this frame: flush the write pointer if
	 * it was deferred above, then stop the mac80211 queue. */
	if ((iwl_legacy_queue_space(q) < q->high_mark)
	    && priv->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&priv->lock, flags);
			txq->need_update = 1;
			iwl_legacy_txq_update_write_ptr(priv, txq);
			spin_unlock_irqrestore(&priv->lock, flags);
		}

		iwl_legacy_stop_queue(priv, txq);
	}

	return 0;

drop_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
drop:
	return -1;
}
668
669static int iwl3945_get_measurement(struct iwl_priv *priv,
670 struct ieee80211_measurement_params *params,
671 u8 type)
672{
673 struct iwl_spectrum_cmd spectrum;
674 struct iwl_rx_packet *pkt;
675 struct iwl_host_cmd cmd = {
676 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
677 .data = (void *)&spectrum,
678 .flags = CMD_WANT_SKB,
679 };
680 u32 add_time = le64_to_cpu(params->start_time);
681 int rc;
682 int spectrum_resp_status;
683 int duration = le16_to_cpu(params->duration);
684 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
685
686 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
687 add_time = iwl_legacy_usecs_to_beacons(priv,
688 le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
689 le16_to_cpu(ctx->timing.beacon_interval));
690
691 memset(&spectrum, 0, sizeof(spectrum));
692
693 spectrum.channel_count = cpu_to_le16(1);
694 spectrum.flags =
695 RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
696 spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
697 cmd.len = sizeof(spectrum);
698 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
699
700 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
701 spectrum.start_time =
702 iwl_legacy_add_beacon_time(priv,
703 priv->_3945.last_beacon_time, add_time,
704 le16_to_cpu(ctx->timing.beacon_interval));
705 else
706 spectrum.start_time = 0;
707
708 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
709 spectrum.channels[0].channel = params->channel;
710 spectrum.channels[0].type = type;
711 if (ctx->active.flags & RXON_FLG_BAND_24G_MSK)
712 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
713 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
714
715 rc = iwl_legacy_send_cmd_sync(priv, &cmd);
716 if (rc)
717 return rc;
718
719 pkt = (struct iwl_rx_packet *)cmd.reply_page;
720 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
721 IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
722 rc = -EIO;
723 }
724
725 spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
726 switch (spectrum_resp_status) {
727 case 0: /* Command will be handled */
728 if (pkt->u.spectrum.id != 0xff) {
729 IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n",
730 pkt->u.spectrum.id);
731 priv->measurement_status &= ~MEASUREMENT_READY;
732 }
733 priv->measurement_status |= MEASUREMENT_ACTIVE;
734 rc = 0;
735 break;
736
737 case 1: /* Command will not be handled */
738 rc = -EAGAIN;
739 break;
740 }
741
742 iwl_legacy_free_pages(priv, cmd.reply_page);
743
744 return rc;
745}
746
747static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
748 struct iwl_rx_mem_buffer *rxb)
749{
750 struct iwl_rx_packet *pkt = rxb_addr(rxb);
751 struct iwl_alive_resp *palive;
752 struct delayed_work *pwork;
753
754 palive = &pkt->u.alive_frame;
755
756 IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
757 "0x%01X 0x%01X\n",
758 palive->is_valid, palive->ver_type,
759 palive->ver_subtype);
760
761 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
762 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
763 memcpy(&priv->card_alive_init, &pkt->u.alive_frame,
764 sizeof(struct iwl_alive_resp));
765 pwork = &priv->init_alive_start;
766 } else {
767 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
768 memcpy(&priv->card_alive, &pkt->u.alive_frame,
769 sizeof(struct iwl_alive_resp));
770 pwork = &priv->alive_start;
771 iwl3945_disable_events(priv);
772 }
773
774 /* We delay the ALIVE response by 5ms to
775 * give the HW RF Kill time to activate... */
776 if (palive->is_valid == UCODE_VALID_OK)
777 queue_delayed_work(priv->workqueue, pwork,
778 msecs_to_jiffies(5));
779 else
780 IWL_WARN(priv, "uCode did not respond OK.\n");
781}
782
/*
 * Log the REPLY_ADD_STA notification status. No driver state is updated
 * here; the handler exists only for the debug trace.
 */
static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
				 struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
#endif

	/* pkt is only declared under the debug config because IWL_DEBUG_RX
	 * compiles to a no-op (not evaluating its args) when debug is off. */
	IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
}
792
/*
 * Handle a beacon-status notification from uCode: trace the TX result in
 * debug builds and cache the reported IBSS manager status.
 */
static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	u8 rate = beacon->beacon_notify_hdr.rate;

	IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
		"tsf %d %d rate %d\n",
		le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
		beacon->beacon_notify_hdr.failure_frame,
		le32_to_cpu(beacon->ibss_mgr_status),
		le32_to_cpu(beacon->high_tsf),
		le32_to_cpu(beacon->low_tsf), rate);
#endif

	/* Cache the uCode-reported IBSS manager status. */
	priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);

}
813
/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
				    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	/* Snapshot so we can tell below whether the HW rfkill bit flipped. */
	unsigned long status = priv->status;

	IWL_WARN(priv, "Card state received: HW:%s SW:%s\n",
		(flags & HW_CARD_DISABLED) ? "Kill" : "On",
		(flags & SW_CARD_DISABLED) ? "Kill" : "On");

	/* Block further host commands while the power state is changing. */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
		CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	if (flags & HW_CARD_DISABLED)
		set_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &priv->status);


	iwl_legacy_scan_cancel(priv);

	/* Report to wiphy only when the HW rfkill state actually changed;
	 * otherwise just wake anyone waiting on a host command. */
	if ((test_bit(STATUS_RF_KILL_HW, &status) !=
	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
			test_bit(STATUS_RF_KILL_HW, &priv->status));
	else
		wake_up(&priv->wait_command_queue);
}
845
/**
 * iwl3945_setup_rx_handlers - Initialize Rx handler callbacks
 *
 * Setup the RX handlers for each of the reply types sent from the uCode
 * to the host.
 *
 * This function chains into the hardware specific files for them to setup
 * any hardware specific handlers as well.
 */
static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
{
	priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive;
	priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
	priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
	priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
	priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
			iwl_legacy_rx_spectrum_measure_notif;
	priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
	priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
	    iwl_legacy_rx_pm_debug_statistics_notif;
	priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif;

	/*
	 * The same handler is used for both the REPLY to a discrete
	 * statistics request from the host as well as for the periodic
	 * statistics notifications (after received beacons) from the uCode.
	 */
	priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics;
	priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;

	/* Scan-related notifications share common legacy handlers. */
	iwl_legacy_setup_rx_scan_handlers(priv);
	priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;

	/* Set up hardware specific Rx handlers */
	iwl3945_hw_rx_handler_setup(priv);
}
882
883/************************** RX-FUNCTIONS ****************************/
884/*
885 * Rx theory of operation
886 *
887 * The host allocates 32 DMA target addresses and passes the host address
888 * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
889 * 0 to 31
890 *
891 * Rx Queue Indexes
892 * The host/firmware share two index registers for managing the Rx buffers.
893 *
894 * The READ index maps to the first position that the firmware may be writing
895 * to -- the driver can read up to (but not including) this position and get
896 * good data.
897 * The READ index is managed by the firmware once the card is enabled.
898 *
899 * The WRITE index maps to the last position the driver has read from -- the
900 * position preceding WRITE is the last slot the firmware can place a packet.
901 *
902 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
903 * WRITE = READ.
904 *
905 * During initialization, the host sets up the READ queue position to the first
906 * INDEX position, and WRITE to the last (READ - 1 wrapped)
907 *
908 * When the firmware places a packet in a buffer, it will advance the READ index
909 * and fire the RX interrupt. The driver can then query the READ index and
910 * process as many packets as possible, moving the WRITE index forward as it
911 * resets the Rx queue buffers with new memory.
912 *
913 * The management in the driver is as follows:
914 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
915 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
916 * to replenish the iwl->rxq->rx_free.
917 * + In iwl3945_rx_replenish (scheduled) if 'processed' != 'read' then the
918 * iwl->rxq is replenished and the READ INDEX is updated (updating the
919 * 'processed' and 'read' driver indexes as well)
920 * + A received packet is processed and handed to the kernel network stack,
921 * detached from the iwl->rxq. The driver 'processed' index is updated.
922 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
923 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
924 * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
925 * were enough free buffers and RX_STALLED is set it is cleared.
926 *
927 *
928 * Driver sequence:
929 *
930 * iwl3945_rx_replenish() Replenishes rx_free list from rx_used, and calls
931 * iwl3945_rx_queue_restock
932 * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx
933 * queue, updates firmware pointers, and updates
934 * the WRITE index. If insufficient rx_free buffers
935 * are available, schedules iwl3945_rx_replenish
936 *
937 * -- enable interrupts --
938 * ISR - iwl3945_rx() Detach iwl_rx_mem_buffers from pool up to the
939 * READ INDEX, detaching the SKB from the pool.
940 * Moves the packet buffer from queue to rx_used.
941 * Calls iwl3945_rx_queue_restock to refill any empty
942 * slots.
943 * ...
944 *
945 */
946
947/**
948 * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
949 */
950static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv,
951 dma_addr_t dma_addr)
952{
953 return cpu_to_le32((u32)dma_addr);
954}
955
/**
 * iwl3945_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;
	int write;

	spin_lock_irqsave(&rxq->lock, flags);
	/* Device write pointer moves in multiples of 8. */
	write = rxq->write & ~0x7;
	while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(priv->workqueue, &priv->rx_replenish);


	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if ((rxq->write_actual != (rxq->write & ~0x7))
	    || (abs(rxq->write - rxq->read) > 7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
	}
}
1006
/**
 * iwl3945_rx_allocate - allocate pages for all buffers on the rx_used list
 *
 * (The kernel-doc header previously named iwl3945_rx_replenish; this is
 * the allocator it wraps.)  For each iwl_rx_mem_buffer on rx_used, a new
 * page is allocated, DMA-mapped, attached, and the buffer is moved to
 * rx_free.  The device queue itself is NOT touched here; callers follow
 * up with iwl3945_rx_queue_restock().
 *
 * @priority: GFP_KERNEL from the work handler, GFP_ATOMIC from the
 *	RX tasklet path (iwl3945_rx_replenish_now).
 */
static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		/* Peek at rx_used under the lock, but drop it across the
		 * allocation (which may sleep for GFP_KERNEL) */
		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		/* Pool is comfortable; an allocation failure is not worth
		 * a warning splat yet */
		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (priv->hw_params.rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			break;
		}

		/* Re-check rx_used: it may have drained while the lock was
		 * dropped; if so, give the page back */
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, priv->hw_params.rx_page_order);
			return;
		}
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);
		spin_unlock_irqrestore(&rxq->lock, flags);

		rxb->page = page;
		/* Get physical address of RB/SKB */
		rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
		priv->alloc_rxb_page++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}
1081
/**
 * iwl3945_rx_queue_reset - return every RX buffer to rx_used, zero indexes
 *
 * Any pool entry that still owns a page is unmapped and freed first, so
 * this is safe to call on a previously-populated queue.  On return the
 * queue looks fully "processed and used": rx_free is empty and a
 * subsequent allocate/restock cycle repopulates it.
 */
void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	unsigned long flags;
	int i;
	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
			__iwl_legacy_free_pages(priv, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}
1110
1111void iwl3945_rx_replenish(void *data)
1112{
1113 struct iwl_priv *priv = data;
1114 unsigned long flags;
1115
1116 iwl3945_rx_allocate(priv, GFP_KERNEL);
1117
1118 spin_lock_irqsave(&priv->lock, flags);
1119 iwl3945_rx_queue_restock(priv);
1120 spin_unlock_irqrestore(&priv->lock, flags);
1121}
1122
/* Atomic-context replenish: GFP_ATOMIC allocation, and -- unlike
 * iwl3945_rx_replenish() -- the restock is not wrapped in priv->lock.
 * NOTE(review): presumably safe because the only caller is the RX
 * tasklet path (iwl3945_rx_handle) -- confirm locking expectations. */
static void iwl3945_rx_replenish_now(struct iwl_priv *priv)
{
	iwl3945_rx_allocate(priv, GFP_ATOMIC);

	iwl3945_rx_queue_restock(priv);
}
1129
1130
/* Assumes the page field of the buffers in 'pool' is kept accurate:
 * if a page has been detached, pool[i].page must be NULL.  This free
 * routine walks the pool and unmaps/frees every entry that still owns
 * a page, then releases the RBD array (one __le32 per slot, hence
 * 4 * RX_QUEUE_SIZE bytes) and the shared read-status area (rb_stts).
 */
static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	int i;
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
			__iwl_legacy_free_pages(priv, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
	}

	dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->bd_dma);
	dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
			  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->bd = NULL;
	rxq->rb_stts = NULL;
}
1156
1157
/* Convert linear signal-to-noise ratio into dB.
 * Index is the integer ratio (0-99); entries approximate 20*log10(ratio)
 * for voltage-style ratios (note ratio2dB[2] == 6, ratio2dB[10] == 20).
 * Consumed only by iwl3945_calc_db_from_ratio() below. */
static u8 ratio2dB[100] = {
/*	 0   1   2   3   4   5   6   7   8   9 */
	 0,  0,  6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
	20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
	26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
	29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
	32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
	34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
	36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
	37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
	38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
	39, 39, 39, 39, 39, 40, 40, 40, 40, 40  /* 90 - 99 */
};
1172
1173/* Calculates a relative dB value from a ratio of linear
1174 * (i.e. not dB) signal levels.
1175 * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
1176int iwl3945_calc_db_from_ratio(int sig_ratio)
1177{
1178 /* 1000:1 or higher just report as 60 dB */
1179 if (sig_ratio >= 1000)
1180 return 60;
1181
1182 /* 100:1 or higher, divide by 10 and use table,
1183 * add 20 dB to make up for divide by 10 */
1184 if (sig_ratio >= 100)
1185 return 20 + (int)ratio2dB[sig_ratio/10];
1186
1187 /* We shouldn't see this */
1188 if (sig_ratio < 1)
1189 return 0;
1190
1191 /* Use table for ratios 1:1 - 99:1 */
1192 return (int)ratio2dB[sig_ratio];
1193}
1194
/**
 * iwl3945_rx_handle - Main entry function for receiving responses from uCode
 *
 * Uses the priv->rx_handlers callback function array to invoke
 * the appropriate handlers, including command responses,
 * frame-received notifications, and other notifications.
 *
 * Walks the RX queue from the driver's read index up to the device's
 * closed_rb_num, dispatching each packet, then either replenishes
 * (alloc + restock) or merely restocks, depending on how far behind
 * the queue has fallen.
 */
static void iwl3945_rx_handle(struct iwl_priv *priv)
{
	struct iwl_rx_mem_buffer *rxb;
	struct iwl_rx_packet *pkt;
	struct iwl_rx_queue *rxq = &priv->rxq;
	u32 r, i;
	int reclaim;
	unsigned long flags;
	u8 fill_rx = 0;
	/* Start at the batch threshold so, when fill_rx is set, the very
	 * first processed buffer triggers an immediate replenish */
	u32 count = 8;
	int total_empty = 0;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
	i = rxq->read;

	/* calculate total frames need to be restock after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	/* More than half the queue consumed: replenish in-line while we
	 * process, rather than only at the end */
	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;
	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);

	while (i != r) {
		int len;

		rxb = rxq->queue[i];

		/* If an RXB doesn't have a Rx queue slot associated with it,
		 * then a bug has been introduced in the queue refilling
		 * routines -- catch it here */
		BUG_ON(rxb == NULL);

		rxq->queue[i] = NULL;

		pci_unmap_page(priv->pci_dev, rxb->page_dma,
			       PAGE_SIZE << priv->hw_params.rx_page_order,
			       PCI_DMA_FROMDEVICE);
		pkt = rxb_addr(rxb);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_legacy_dev_rx(priv, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 * to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 * there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 * but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
			(pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
			(pkt->hdr.cmd != REPLY_TX);

		/* Based on type of command response or notification,
		 * handle those that need handling via function in
		 * rx_handlers table.  See iwl3945_setup_rx_handlers() */
		if (priv->rx_handlers[pkt->hdr.cmd]) {
			IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
			iwl_legacy_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
			priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
			priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
		} else {
			/* No handling needed */
			IWL_DEBUG_RX(priv,
				"r %d i %d No handler needed for %s, 0x%02x\n",
				r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
				pkt->hdr.cmd);
		}

		/*
		 * XXX: After here, we should always check rxb->page
		 * against NULL before touching it or its virtual
		 * memory (pkt). Because some rx_handler might have
		 * already taken or freed the pages.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking iwl_legacy_send_cmd()
			 * as we reclaim the driver command queue */
			if (rxb->page)
				iwl_legacy_tx_cmd_complete(priv, rxb);
			else
				IWL_WARN(priv, "Claim null rxb?\n");
		}

		/* Reuse the page if possible. For notification packets and
		 * SKBs that fail to Rx correctly, add them back into the
		 * rx_free list for reuse later. */
		spin_lock_irqsave(&rxq->lock, flags);
		if (rxb->page != NULL) {
			/* Handler left the page with us: re-map it and put it
			 * straight back on rx_free -- no fresh allocation */
			rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
				0, PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		} else
			list_add_tail(&rxb->list, &rxq->rx_used);

		spin_unlock_irqrestore(&rxq->lock, flags);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode won't assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				iwl3945_rx_replenish_now(priv);
				count = 0;
			}
		}
	}

	/* Record where we stopped; replenish if the queue ran low during
	 * processing, otherwise just restock what we freed up */
	rxq->read = i;
	if (fill_rx)
		iwl3945_rx_replenish_now(priv);
	else
		iwl3945_rx_queue_restock(priv);
}
1329
/* call this function to flush any scheduled tasklet.
 * synchronize_irq() waits out any in-flight ISR (so no new tasklet can
 * be scheduled by it afterwards), then tasklet_kill() reaps whatever
 * was already queued. */
static inline void iwl3945_synchronize_irq(struct iwl_priv *priv)
{
	/* wait to make sure we flush pending tasklet*/
	synchronize_irq(priv->pci_dev->irq);
	tasklet_kill(&priv->irq_tasklet);
}
1337
/* Map a uCode error-log descriptor code (1-6) to its symbolic name;
 * anything outside that range reads back as "UNKNOWN". */
static const char *iwl3945_desc_lookup(int i)
{
	static const char * const names[] = {
		"FAIL",			/* 1 */
		"BAD_PARAM",		/* 2 */
		"BAD_CHECKSUM",		/* 3 */
		"NMI_INTERRUPT",	/* 4 */
		"SYSASSERT",		/* 5 */
		"FATAL_ERROR",		/* 6 */
	};

	if (i < 1 || i > 6)
		return "UNKNOWN";

	return names[i - 1];
}
1357
/* On-device error-log layout: a leading u32 record count, then
 * ERROR_ELEM_SIZE-byte records of seven u32 fields each. */
#define ERROR_START_OFFSET (1 * sizeof(u32))
#define ERROR_ELEM_SIZE (7 * sizeof(u32))

/**
 * iwl3945_dump_nic_error_log - read the uCode error log out of device
 * SRAM (via the table pointer from the ALIVE response) and print each
 * record through IWL_ERR plus a tracepoint.
 *
 * NOTE(review): the printed column header names (asrtPC/nmiPC/Line) do
 * not line up one-for-one with the fields actually printed
 * (blink1/blink2/ilink1/ilink2/data1) -- cosmetic only, but confirm
 * against the firmware's error-record definition if adjusting.
 */
void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
{
	u32 i;
	u32 desc, time, count, base, data1;
	u32 blink1, blink2, ilink1, ilink2;

	base = le32_to_cpu(priv->card_alive.error_event_table_ptr);

	if (!iwl3945_hw_valid_rtc_data_addr(base)) {
		IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
		return;
	}


	count = iwl_legacy_read_targ_mem(priv, base);

	/* Only announce the dump when there is at least one record */
	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
		IWL_ERR(priv, "Start IWL Error Log Dump:\n");
		IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
			priv->status, count);
	}

	IWL_ERR(priv, "Desc Time asrtPC blink2 "
		"ilink1 nmiPC Line\n");
	for (i = ERROR_START_OFFSET;
	     i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
	     i += ERROR_ELEM_SIZE) {
		desc = iwl_legacy_read_targ_mem(priv, base + i);
		time =
		iwl_legacy_read_targ_mem(priv, base + i + 1 * sizeof(u32));
		blink1 =
		iwl_legacy_read_targ_mem(priv, base + i + 2 * sizeof(u32));
		blink2 =
		iwl_legacy_read_targ_mem(priv, base + i + 3 * sizeof(u32));
		ilink1 =
		iwl_legacy_read_targ_mem(priv, base + i + 4 * sizeof(u32));
		ilink2 =
		iwl_legacy_read_targ_mem(priv, base + i + 5 * sizeof(u32));
		data1 =
		iwl_legacy_read_targ_mem(priv, base + i + 6 * sizeof(u32));

		IWL_ERR(priv,
			"%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
			iwl3945_desc_lookup(desc), desc, time, blink1, blink2,
			ilink1, ilink2, data1);
		trace_iwlwifi_legacy_dev_ucode_error(priv, desc, time, data1, 0,
					0, blink1, blink2, ilink1, ilink2);
	}
}
1410
/**
 * iwl3945_irq_tasklet - bottom half: ack all pending interrupt sources,
 * then service them (HW error, SW error, wakeup, RX, TX), and finally
 * re-enable interrupts unless the HW-error path disabled them for good.
 */
static void iwl3945_irq_tasklet(struct iwl_priv *priv)
{
	u32 inta, handled = 0;
	u32 inta_fh;
	unsigned long flags;
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&priv->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
	inta = iwl_read32(priv, CSR_INT);
	iwl_write32(priv, CSR_INT, inta);

	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
	 * Any new interrupts that happen after this, either while we're
	 * in this tasklet, or later, will show up in next ISR/tasklet. */
	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
	iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
		/* just for debug */
		inta_mask = iwl_read32(priv, CSR_INT_MASK);
		IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
			      inta, inta_mask, inta_fh);
	}
#endif

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
	 * atomic, make sure that inta covers all the interrupts that
	 * we've discovered, even if FH interrupt came in just after
	 * reading CSR_INT. */
	if (inta_fh & CSR39_FH_INT_RX_MASK)
		inta |= CSR_INT_BIT_FH_RX;
	if (inta_fh & CSR39_FH_INT_TX_MASK)
		inta |= CSR_INT_BIT_FH_TX;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(priv, "Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_legacy_disable_interrupts(priv);

		priv->isr_stats.hw++;
		iwl_legacy_irq_handle_error(priv);

		handled |= CSR_INT_BIT_HW_ERR;

		/* Interrupts stay disabled; the error-handling/restart
		 * path is expected to bring the device back */
		return;
	}

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
				      "the frame/frames.\n");
			priv->isr_stats.sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(priv, "Alive interrupt\n");
			priv->isr_stats.alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(priv, "Microcode SW error detected. "
			"Restarting 0x%X.\n", inta);
		priv->isr_stats.sw++;
		iwl_legacy_irq_handle_error(priv);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
		/* Push any write pointers the device missed while asleep
		 * (RX queue plus all six 3945 TX queues) */
		iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[0]);
		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[1]);
		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[2]);
		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[3]);
		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[4]);
		iwl_legacy_txq_update_write_ptr(priv, &priv->txq[5]);

		priv->isr_stats.wakeup++;
		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
		iwl3945_rx_handle(priv);
		priv->isr_stats.rx++;
		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
	}

	if (inta & CSR_INT_BIT_FH_TX) {
		IWL_DEBUG_ISR(priv, "Tx interrupt\n");
		priv->isr_stats.tx++;

		/* NOTE(review): bit 6 presumably acks the FH39 service
		 * channel TX status -- confirm against FH39 register defs */
		iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
		iwl_legacy_write_direct32(priv, FH39_TCSR_CREDIT
					(FH39_SRVC_CHNL), 0x0);
		handled |= CSR_INT_BIT_FH_TX;
	}

	if (inta & ~handled) {
		IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		priv->isr_stats.unhandled++;
	}

	if (inta & ~priv->inta_mask) {
		IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~priv->inta_mask);
		IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &priv->status))
		iwl_legacy_enable_interrupts(priv);

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
		inta = iwl_read32(priv, CSR_INT);
		inta_mask = iwl_read32(priv, CSR_INT_MASK);
		inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
		IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
			"flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
	}
#endif
}
1557
/**
 * iwl3945_get_channels_for_scan - fill scan-command channel entries
 *
 * Builds one iwl3945_scan_channel per requested channel on @band,
 * choosing passive/active type, dwell times, direct-probe bits and
 * default TX power.  Returns the number of entries written to @scan_ch.
 */
static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
					 enum ieee80211_band band,
					 u8 is_active, u8 n_probes,
					 struct iwl3945_scan_channel *scan_ch,
					 struct ieee80211_vif *vif)
{
	struct ieee80211_channel *chan;
	const struct ieee80211_supported_band *sband;
	const struct iwl_channel_info *ch_info;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added, i;

	sband = iwl_get_hw_mode(priv, band);
	if (!sband)
		return 0;

	active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
	passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);

	/* Passive dwell must always exceed active dwell */
	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
		chan = priv->scan_request->channels[i];

		if (chan->band != band)
			continue;

		scan_ch->channel = chan->hw_value;

		ch_info = iwl_legacy_get_channel_info(priv, band,
							scan_ch->channel);
		if (!iwl_legacy_is_channel_valid(ch_info)) {
			IWL_DEBUG_SCAN(priv,
				"Channel %d is INVALID for this band.\n",
				scan_ch->channel);
			continue;
		}

		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
		/* If passive, set up for auto-switch
		 * and use long active_dwell time.
		 */
		if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
		    (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
			scan_ch->type = 0;	/* passive */
			if (IWL_UCODE_API(priv->ucode_ver) == 1)
				scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1);
		} else {
			scan_ch->type = 1;	/* active */
		}

		/* Set direct probe bits. These may be used both for active
		 * scan channels (probes get sent right away),
		 * or for passive channels (probes get sent only after
		 * hearing clear Rx packet).*/
		if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
			if (n_probes)
				scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
		} else {
			/* uCode v1 does not allow setting direct probe bits on
			 * passive channel. */
			if ((scan_ch->type & 1) && n_probes)
				scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
		}

		/* Set txpower levels to defaults */
		scan_ch->tpc.dsp_atten = 110;
		/* scan_pwr_info->tpc.dsp_atten; */

		/*scan_pwr_info->tpc.tx_gain; */
		if (band == IEEE80211_BAND_5GHZ)
			scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else {
			scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
			/* NOTE: if we were doing 6Mb OFDM for scans we'd use
			 * power level:
			 * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
			 */
		}

		IWL_DEBUG_SCAN(priv, "Scanning %d [%s %d]\n",
			       scan_ch->channel,
			       (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
			       (scan_ch->type & 1) ?
			       active_dwell : passive_dwell);

		scan_ch++;
		added++;
	}

	IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
	return added;
}
1654
/**
 * iwl3945_init_hw_rates - populate mac80211 rate table from iwl3945_rates
 *
 * bitrate is in units of 100 kbps (ieee value * 5).  hw_value and
 * hw_value_short are simply the table index, so rate scaling can work
 * on indexes.  CCK rates other than 1 Mbps (plcp == 10, per the
 * comment below) get the short-preamble flag.
 */
static void iwl3945_init_hw_rates(struct iwl_priv *priv,
			      struct ieee80211_rate *rates)
{
	int i;

	for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
		rates[i].bitrate = iwl3945_rates[i].ieee * 5;
		rates[i].hw_value = i; /* Rate scaling will work on indexes */
		rates[i].hw_value_short = i;
		rates[i].flags = 0;
		if ((i > IWL39_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
			/*
			 * If CCK != 1M then set short preamble rate flag.
			 */
			rates[i].flags |= (iwl3945_rates[i].plcp == 10) ?
				0 : IEEE80211_RATE_SHORT_PREAMBLE;
		}
	}
}
1674
1675/******************************************************************************
1676 *
1677 * uCode download functions
1678 *
1679 ******************************************************************************/
1680
/* Release the DMA-coherent host buffers holding all six uCode images
 * (runtime code/data + backup, init code/data, bootstrap). */
static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv)
{
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
}
1690
/**
 * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host,
 * looking at all data.
 *
 * Reads the whole instruction SRAM back through the auto-incrementing
 * HBUS target-memory port and compares word by word against @image.
 * Logs each mismatch (up to 20, then gives up) and returns -EIO if any
 * word differed, 0 if the image is good.
 */
static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len)
{
	u32 val;
	u32 save_len = len;
	int rc = 0;
	u32 errcnt;

	IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);

	iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
				IWL39_RTC_INST_LOWER_BOUND);

	errcnt = 0;
	for (; len > 0; len -= sizeof(u32), image++) {
		/* read data comes through single port, auto-incr addr */
		/* NOTE: Use the debugless read so we don't flood kernel log
		 * if IWL_DL_IO is set */
		val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image)) {
			IWL_ERR(priv, "uCode INST section is invalid at "
				  "offset 0x%x, is 0x%x, s/b 0x%x\n",
				  save_len - len, val, le32_to_cpu(*image));
			rc = -EIO;
			errcnt++;
			if (errcnt >= 20)
				break;
		}
	}


	if (!errcnt)
		IWL_DEBUG_INFO(priv,
			"ucode image in INSTRUCTION memory is good\n");

	return rc;
}
1731
1732
/**
 * iwl3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
 * using sample data 100 bytes apart.  If these sample points are good,
 * it's a pretty good bet that everything between them is good, too.
 *
 * Unlike the full check, the read address is re-written for every sample
 * (100-byte stride = 25 words).  Bails out after 3 mismatches.  Returns
 * 0 when all sampled words match, -EIO otherwise.
 */
static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
{
	u32 val;
	int rc = 0;
	u32 errcnt = 0;
	u32 i;

	IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);

	for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
		/* read data comes through single port, auto-incr addr */
		/* NOTE: Use the debugless read so we don't flood kernel log
		 * if IWL_DL_IO is set */
		iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
				i + IWL39_RTC_INST_LOWER_BOUND);
		val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image)) {
#if 0 /* Enable this if you want to see details */
			IWL_ERR(priv, "uCode INST section is invalid at "
				  "offset 0x%x, is 0x%x, s/b 0x%x\n",
				  i, val, *image);
#endif
			rc = -EIO;
			errcnt++;
			if (errcnt >= 3)
				break;
		}
	}

	return rc;
}
1769
1770
1771/**
1772 * iwl3945_verify_ucode - determine which instruction image is in SRAM,
1773 * and verify its contents
1774 */
1775static int iwl3945_verify_ucode(struct iwl_priv *priv)
1776{
1777 __le32 *image;
1778 u32 len;
1779 int rc = 0;
1780
1781 /* Try bootstrap */
1782 image = (__le32 *)priv->ucode_boot.v_addr;
1783 len = priv->ucode_boot.len;
1784 rc = iwl3945_verify_inst_sparse(priv, image, len);
1785 if (rc == 0) {
1786 IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
1787 return 0;
1788 }
1789
1790 /* Try initialize */
1791 image = (__le32 *)priv->ucode_init.v_addr;
1792 len = priv->ucode_init.len;
1793 rc = iwl3945_verify_inst_sparse(priv, image, len);
1794 if (rc == 0) {
1795 IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
1796 return 0;
1797 }
1798
1799 /* Try runtime/protocol */
1800 image = (__le32 *)priv->ucode_code.v_addr;
1801 len = priv->ucode_code.len;
1802 rc = iwl3945_verify_inst_sparse(priv, image, len);
1803 if (rc == 0) {
1804 IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
1805 return 0;
1806 }
1807
1808 IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
1809
1810 /* Since nothing seems to match, show first several data entries in
1811 * instruction SRAM, so maybe visual inspection will give a clue.
1812 * Selection of bootstrap image (vs. other images) is arbitrary. */
1813 image = (__le32 *)priv->ucode_boot.v_addr;
1814 len = priv->ucode_boot.len;
1815 rc = iwl3945_verify_inst_full(priv, image, len);
1816
1817 return rc;
1818}
1819
/* Clear CSR_RESET entirely, releasing the NIC from reset so it can run */
static void iwl3945_nic_start(struct iwl_priv *priv)
{
	/* Remove all resets to allow NIC to operate */
	iwl_write32(priv, CSR_RESET, 0);
}
1825
/* Generate a trivial accessor iwl3945_ucode_get_<item>() returning the
 * CPU-order value of a v1 ucode-header field (3945 firmware only uses
 * the v1 header layout). */
#define IWL3945_UCODE_GET(item) \
static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\
{ \
	return le32_to_cpu(ucode->v1.item); \
}
1831
/* The v1 ucode header is a fixed 24 bytes regardless of api_ver */
static u32 iwl3945_ucode_get_header_size(u32 api_ver)
{
	return 24;
}
1836
/* Image payload begins immediately after the v1 header fields */
static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode)
{
	return (u8 *) ucode->v1.data;
}
1841
/* Instantiate the v1 header field accessors used by iwl3945_read_ucode() */
IWL3945_UCODE_GET(inst_size);
IWL3945_UCODE_GET(data_size);
IWL3945_UCODE_GET(init_size);
IWL3945_UCODE_GET(init_data_size);
IWL3945_UCODE_GET(boot_size);
1847
1848/**
1849 * iwl3945_read_ucode - Read uCode images from disk file.
1850 *
1851 * Copy into buffers for card to fetch via bus-mastering
1852 */
1853static int iwl3945_read_ucode(struct iwl_priv *priv)
1854{
1855 const struct iwl_ucode_header *ucode;
1856 int ret = -EINVAL, index;
1857 const struct firmware *ucode_raw;
1858 /* firmware file name contains uCode/driver compatibility version */
1859 const char *name_pre = priv->cfg->fw_name_pre;
1860 const unsigned int api_max = priv->cfg->ucode_api_max;
1861 const unsigned int api_min = priv->cfg->ucode_api_min;
1862 char buf[25];
1863 u8 *src;
1864 size_t len;
1865 u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
1866
1867 /* Ask kernel firmware_class module to get the boot firmware off disk.
1868 * request_firmware() is synchronous, file is in memory on return. */
1869 for (index = api_max; index >= api_min; index--) {
1870 sprintf(buf, "%s%u%s", name_pre, index, ".ucode");
1871 ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev);
1872 if (ret < 0) {
1873 IWL_ERR(priv, "%s firmware file req failed: %d\n",
1874 buf, ret);
1875 if (ret == -ENOENT)
1876 continue;
1877 else
1878 goto error;
1879 } else {
1880 if (index < api_max)
1881 IWL_ERR(priv, "Loaded firmware %s, "
1882 "which is deprecated. "
1883 " Please use API v%u instead.\n",
1884 buf, api_max);
1885 IWL_DEBUG_INFO(priv, "Got firmware '%s' file "
1886 "(%zd bytes) from disk\n",
1887 buf, ucode_raw->size);
1888 break;
1889 }
1890 }
1891
1892 if (ret < 0)
1893 goto error;
1894
1895 /* Make sure that we got at least our header! */
1896 if (ucode_raw->size < iwl3945_ucode_get_header_size(1)) {
1897 IWL_ERR(priv, "File size way too small!\n");
1898 ret = -EINVAL;
1899 goto err_release;
1900 }
1901
1902 /* Data from ucode file: header followed by uCode images */
1903 ucode = (struct iwl_ucode_header *)ucode_raw->data;
1904
1905 priv->ucode_ver = le32_to_cpu(ucode->ver);
1906 api_ver = IWL_UCODE_API(priv->ucode_ver);
1907 inst_size = iwl3945_ucode_get_inst_size(ucode);
1908 data_size = iwl3945_ucode_get_data_size(ucode);
1909 init_size = iwl3945_ucode_get_init_size(ucode);
1910 init_data_size = iwl3945_ucode_get_init_data_size(ucode);
1911 boot_size = iwl3945_ucode_get_boot_size(ucode);
1912 src = iwl3945_ucode_get_data(ucode);
1913
1914 /* api_ver should match the api version forming part of the
1915 * firmware filename ... but we don't check for that and only rely
1916 * on the API version read from firmware header from here on forward */
1917
1918 if (api_ver < api_min || api_ver > api_max) {
1919 IWL_ERR(priv, "Driver unable to support your firmware API. "
1920 "Driver supports v%u, firmware is v%u.\n",
1921 api_max, api_ver);
1922 priv->ucode_ver = 0;
1923 ret = -EINVAL;
1924 goto err_release;
1925 }
1926 if (api_ver != api_max)
1927 IWL_ERR(priv, "Firmware has old API version. Expected %u, "
1928 "got %u. New firmware can be obtained "
1929 "from http://www.intellinuxwireless.org.\n",
1930 api_max, api_ver);
1931
1932 IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
1933 IWL_UCODE_MAJOR(priv->ucode_ver),
1934 IWL_UCODE_MINOR(priv->ucode_ver),
1935 IWL_UCODE_API(priv->ucode_ver),
1936 IWL_UCODE_SERIAL(priv->ucode_ver));
1937
1938 snprintf(priv->hw->wiphy->fw_version,
1939 sizeof(priv->hw->wiphy->fw_version),
1940 "%u.%u.%u.%u",
1941 IWL_UCODE_MAJOR(priv->ucode_ver),
1942 IWL_UCODE_MINOR(priv->ucode_ver),
1943 IWL_UCODE_API(priv->ucode_ver),
1944 IWL_UCODE_SERIAL(priv->ucode_ver));
1945
1946 IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
1947 priv->ucode_ver);
1948 IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n",
1949 inst_size);
1950 IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n",
1951 data_size);
1952 IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n",
1953 init_size);
1954 IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n",
1955 init_data_size);
1956 IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n",
1957 boot_size);
1958
1959
1960 /* Verify size of file vs. image size info in file's header */
1961 if (ucode_raw->size != iwl3945_ucode_get_header_size(api_ver) +
1962 inst_size + data_size + init_size +
1963 init_data_size + boot_size) {
1964
1965 IWL_DEBUG_INFO(priv,
1966 "uCode file size %zd does not match expected size\n",
1967 ucode_raw->size);
1968 ret = -EINVAL;
1969 goto err_release;
1970 }
1971
1972 /* Verify that uCode images will fit in card's SRAM */
1973 if (inst_size > IWL39_MAX_INST_SIZE) {
1974 IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in\n",
1975 inst_size);
1976 ret = -EINVAL;
1977 goto err_release;
1978 }
1979
1980 if (data_size > IWL39_MAX_DATA_SIZE) {
1981 IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in\n",
1982 data_size);
1983 ret = -EINVAL;
1984 goto err_release;
1985 }
1986 if (init_size > IWL39_MAX_INST_SIZE) {
1987 IWL_DEBUG_INFO(priv,
1988 "uCode init instr len %d too large to fit in\n",
1989 init_size);
1990 ret = -EINVAL;
1991 goto err_release;
1992 }
1993 if (init_data_size > IWL39_MAX_DATA_SIZE) {
1994 IWL_DEBUG_INFO(priv,
1995 "uCode init data len %d too large to fit in\n",
1996 init_data_size);
1997 ret = -EINVAL;
1998 goto err_release;
1999 }
2000 if (boot_size > IWL39_MAX_BSM_SIZE) {
2001 IWL_DEBUG_INFO(priv,
2002 "uCode boot instr len %d too large to fit in\n",
2003 boot_size);
2004 ret = -EINVAL;
2005 goto err_release;
2006 }
2007
2008 /* Allocate ucode buffers for card's bus-master loading ... */
2009
2010 /* Runtime instructions and 2 copies of data:
2011 * 1) unmodified from disk
2012 * 2) backup cache for save/restore during power-downs */
2013 priv->ucode_code.len = inst_size;
2014 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
2015
2016 priv->ucode_data.len = data_size;
2017 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
2018
2019 priv->ucode_data_backup.len = data_size;
2020 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
2021
2022 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
2023 !priv->ucode_data_backup.v_addr)
2024 goto err_pci_alloc;
2025
2026 /* Initialization instructions and data */
2027 if (init_size && init_data_size) {
2028 priv->ucode_init.len = init_size;
2029 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
2030
2031 priv->ucode_init_data.len = init_data_size;
2032 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
2033
2034 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
2035 goto err_pci_alloc;
2036 }
2037
2038 /* Bootstrap (instructions only, no data) */
2039 if (boot_size) {
2040 priv->ucode_boot.len = boot_size;
2041 iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
2042
2043 if (!priv->ucode_boot.v_addr)
2044 goto err_pci_alloc;
2045 }
2046
2047 /* Copy images into buffers for card's bus-master reads ... */
2048
2049 /* Runtime instructions (first block of data in file) */
2050 len = inst_size;
2051 IWL_DEBUG_INFO(priv,
2052 "Copying (but not loading) uCode instr len %zd\n", len);
2053 memcpy(priv->ucode_code.v_addr, src, len);
2054 src += len;
2055
2056 IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
2057 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
2058
2059 /* Runtime data (2nd block)
2060 * NOTE: Copy into backup buffer will be done in iwl3945_up() */
2061 len = data_size;
2062 IWL_DEBUG_INFO(priv,
2063 "Copying (but not loading) uCode data len %zd\n", len);
2064 memcpy(priv->ucode_data.v_addr, src, len);
2065 memcpy(priv->ucode_data_backup.v_addr, src, len);
2066 src += len;
2067
2068 /* Initialization instructions (3rd block) */
2069 if (init_size) {
2070 len = init_size;
2071 IWL_DEBUG_INFO(priv,
2072 "Copying (but not loading) init instr len %zd\n", len);
2073 memcpy(priv->ucode_init.v_addr, src, len);
2074 src += len;
2075 }
2076
2077 /* Initialization data (4th block) */
2078 if (init_data_size) {
2079 len = init_data_size;
2080 IWL_DEBUG_INFO(priv,
2081 "Copying (but not loading) init data len %zd\n", len);
2082 memcpy(priv->ucode_init_data.v_addr, src, len);
2083 src += len;
2084 }
2085
2086 /* Bootstrap instructions (5th block) */
2087 len = boot_size;
2088 IWL_DEBUG_INFO(priv,
2089 "Copying (but not loading) boot instr len %zd\n", len);
2090 memcpy(priv->ucode_boot.v_addr, src, len);
2091
2092 /* We have our copies now, allow OS release its copies */
2093 release_firmware(ucode_raw);
2094 return 0;
2095
2096 err_pci_alloc:
2097 IWL_ERR(priv, "failed to allocate pci memory\n");
2098 ret = -ENOMEM;
2099 iwl3945_dealloc_ucode_pci(priv);
2100
2101 err_release:
2102 release_firmware(ucode_raw);
2103
2104 error:
2105 return ret;
2106}
2107
2108
2109/**
2110 * iwl3945_set_ucode_ptrs - Set uCode address location
2111 *
2112 * Tell initialization uCode where to find runtime uCode.
2113 *
2114 * BSM registers initially contain pointers to initialization uCode.
2115 * We need to replace them to load runtime uCode inst and data,
2116 * and to save runtime data when powering down.
2117 */
2118static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv)
2119{
2120 dma_addr_t pinst;
2121 dma_addr_t pdata;
2122
2123 /* bits 31:0 for 3945 */
2124 pinst = priv->ucode_code.p_addr;
2125 pdata = priv->ucode_data_backup.p_addr;
2126
2127 /* Tell bootstrap uCode where to find image to load */
2128 iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
2129 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
2130 iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
2131 priv->ucode_data.len);
2132
2133 /* Inst byte count must be last to set up, bit 31 signals uCode
2134 * that all new ptr/size info is in place */
2135 iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
2136 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
2137
2138 IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
2139
2140 return 0;
2141}
2142
2143/**
2144 * iwl3945_init_alive_start - Called after REPLY_ALIVE notification received
2145 *
2146 * Called after REPLY_ALIVE notification received from "initialize" uCode.
2147 *
2148 * Tell "initialize" uCode to go ahead and load the runtime uCode.
2149 */
2150static void iwl3945_init_alive_start(struct iwl_priv *priv)
2151{
2152 /* Check alive response for "valid" sign from uCode */
2153 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
2154 /* We had an error bringing up the hardware, so take it
2155 * all the way back down so we can try again */
2156 IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
2157 goto restart;
2158 }
2159
2160 /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
2161 * This is a paranoid check, because we would not have gotten the
2162 * "initialize" alive if code weren't properly loaded. */
2163 if (iwl3945_verify_ucode(priv)) {
2164 /* Runtime instruction load was bad;
2165 * take it all the way back down so we can try again */
2166 IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
2167 goto restart;
2168 }
2169
2170 /* Send pointers to protocol/runtime uCode image ... init code will
2171 * load and launch runtime uCode, which will send us another "Alive"
2172 * notification. */
2173 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
2174 if (iwl3945_set_ucode_ptrs(priv)) {
2175 /* Runtime instruction load won't happen;
2176 * take it all the way back down so we can try again */
2177 IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
2178 goto restart;
2179 }
2180 return;
2181
2182 restart:
2183 queue_work(priv->workqueue, &priv->restart);
2184}
2185
2186/**
2187 * iwl3945_alive_start - called after REPLY_ALIVE notification received
2188 * from protocol/runtime uCode (initialization uCode's
2189 * Alive gets handled by iwl3945_init_alive_start()).
2190 */
2191static void iwl3945_alive_start(struct iwl_priv *priv)
2192{
2193 int thermal_spin = 0;
2194 u32 rfkill;
2195 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2196
2197 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
2198
2199 if (priv->card_alive.is_valid != UCODE_VALID_OK) {
2200 /* We had an error bringing up the hardware, so take it
2201 * all the way back down so we can try again */
2202 IWL_DEBUG_INFO(priv, "Alive failed.\n");
2203 goto restart;
2204 }
2205
2206 /* Initialize uCode has loaded Runtime uCode ... verify inst image.
2207 * This is a paranoid check, because we would not have gotten the
2208 * "runtime" alive if code weren't properly loaded. */
2209 if (iwl3945_verify_ucode(priv)) {
2210 /* Runtime instruction load was bad;
2211 * take it all the way back down so we can try again */
2212 IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
2213 goto restart;
2214 }
2215
2216 rfkill = iwl_legacy_read_prph(priv, APMG_RFKILL_REG);
2217 IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);
2218
2219 if (rfkill & 0x1) {
2220 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2221 /* if RFKILL is not on, then wait for thermal
2222 * sensor in adapter to kick in */
2223 while (iwl3945_hw_get_temperature(priv) == 0) {
2224 thermal_spin++;
2225 udelay(10);
2226 }
2227
2228 if (thermal_spin)
2229 IWL_DEBUG_INFO(priv, "Thermal calibration took %dus\n",
2230 thermal_spin * 10);
2231 } else
2232 set_bit(STATUS_RF_KILL_HW, &priv->status);
2233
2234 /* After the ALIVE response, we can send commands to 3945 uCode */
2235 set_bit(STATUS_ALIVE, &priv->status);
2236
2237 /* Enable watchdog to monitor the driver tx queues */
2238 iwl_legacy_setup_watchdog(priv);
2239
2240 if (iwl_legacy_is_rfkill(priv))
2241 return;
2242
2243 ieee80211_wake_queues(priv->hw);
2244
2245 priv->active_rate = IWL_RATES_MASK_3945;
2246
2247 iwl_legacy_power_update_mode(priv, true);
2248
2249 if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
2250 struct iwl3945_rxon_cmd *active_rxon =
2251 (struct iwl3945_rxon_cmd *)(&ctx->active);
2252
2253 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2254 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2255 } else {
2256 /* Initialize our rx_config data */
2257 iwl_legacy_connection_init_rx_config(priv, ctx);
2258 }
2259
2260 /* Configure Bluetooth device coexistence support */
2261 iwl_legacy_send_bt_config(priv);
2262
2263 set_bit(STATUS_READY, &priv->status);
2264
2265 /* Configure the adapter for unassociated operation */
2266 iwl3945_commit_rxon(priv, ctx);
2267
2268 iwl3945_reg_txpower_periodic(priv);
2269
2270 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
2271 wake_up(&priv->wait_command_queue);
2272
2273 return;
2274
2275 restart:
2276 queue_work(priv->workqueue, &priv->restart);
2277}
2278
2279static void iwl3945_cancel_deferred_work(struct iwl_priv *priv);
2280
/*
 * __iwl3945_down - take the NIC all the way down.
 *
 * Cancels scanning, stops the watchdog, clears station state, disables
 * interrupts, stops TX/RX queues and powers the device down.  Preserves
 * only the RF-kill / geo-configured / exit-pending status bits (plus
 * FW_ERROR on the full teardown path) across the teardown.
 *
 * Called with priv->mutex held (see iwl3945_down()).
 */
static void __iwl3945_down(struct iwl_priv *priv)
{
	unsigned long flags;
	int exit_pending;

	IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");

	iwl_legacy_scan_cancel_timeout(priv, 200);

	/* Remember whether EXIT_PENDING was already set by a caller
	 * (module unload); we must not clear it in that case. */
	exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);

	/* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
	 * to prevent rearm timer */
	del_timer_sync(&priv->watchdog);

	/* Station information will now be cleared in device */
	iwl_legacy_clear_ucode_stations(priv, NULL);
	iwl_legacy_dealloc_bcast_stations(priv);
	iwl_legacy_clear_driver_stations(priv);

	/* Unblock any waiting calls */
	wake_up_all(&priv->wait_command_queue);

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		clear_bit(STATUS_EXIT_PENDING, &priv->status);

	/* stop and reset the on-board processor */
	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_legacy_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
	/* wait for any in-flight interrupt handler to finish */
	iwl3945_synchronize_irq(priv);

	if (priv->mac80211_registered)
		ieee80211_stop_queues(priv->hw);

	/* If we have not previously called iwl3945_init() then
	 * clear all bits but the RF Kill bits and return */
	if (!iwl_legacy_is_init(priv)) {
		/* Rebuild priv->status keeping only the listed bits;
		 * everything else is wiped. */
		priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
					STATUS_RF_KILL_HW |
			       test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
					STATUS_GEO_CONFIGURED |
			       test_bit(STATUS_EXIT_PENDING, &priv->status) <<
					STATUS_EXIT_PENDING;
		goto exit;
	}

	/* ...otherwise clear out all the status bits but the RF Kill
	 * bit and continue taking the NIC down. */
	priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
				STATUS_RF_KILL_HW |
			test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
				STATUS_GEO_CONFIGURED |
			test_bit(STATUS_FW_ERROR, &priv->status) <<
				STATUS_FW_ERROR |
			test_bit(STATUS_EXIT_PENDING, &priv->status) <<
				STATUS_EXIT_PENDING;

	iwl3945_hw_txq_ctx_stop(priv);
	iwl3945_hw_rxq_stop(priv);

	/* Power-down device's busmaster DMA clocks */
	iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(5);

	/* Stop the device, and put it in low power state */
	iwl_legacy_apm_stop(priv);

 exit:
	memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));

	/* Drop any pending beacon frame */
	if (priv->beacon_skb)
		dev_kfree_skb(priv->beacon_skb);
	priv->beacon_skb = NULL;

	/* clear out any free frames */
	iwl3945_clear_free_frames(priv);
}
2364
/*
 * iwl3945_down - locked wrapper around __iwl3945_down().
 *
 * Takes priv->mutex for the teardown itself, then cancels deferred
 * work outside the mutex (the work items take the mutex themselves).
 */
static void iwl3945_down(struct iwl_priv *priv)
{
	mutex_lock(&priv->mutex);
	__iwl3945_down(priv);
	mutex_unlock(&priv->mutex);

	iwl3945_cancel_deferred_work(priv);
}
2373
/* Number of bootstrap-load attempts __iwl3945_up() makes before giving up */
#define MAX_HW_RESTARTS 5
2375
2376static int iwl3945_alloc_bcast_station(struct iwl_priv *priv)
2377{
2378 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2379 unsigned long flags;
2380 u8 sta_id;
2381
2382 spin_lock_irqsave(&priv->sta_lock, flags);
2383 sta_id = iwl_legacy_prep_station(priv, ctx,
2384 iwlegacy_bcast_addr, false, NULL);
2385 if (sta_id == IWL_INVALID_STATION) {
2386 IWL_ERR(priv, "Unable to prepare broadcast station\n");
2387 spin_unlock_irqrestore(&priv->sta_lock, flags);
2388
2389 return -EINVAL;
2390 }
2391
2392 priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
2393 priv->stations[sta_id].used |= IWL_STA_BCAST;
2394 spin_unlock_irqrestore(&priv->sta_lock, flags);
2395
2396 return 0;
2397}
2398
2399static int __iwl3945_up(struct iwl_priv *priv)
2400{
2401 int rc, i;
2402
2403 rc = iwl3945_alloc_bcast_station(priv);
2404 if (rc)
2405 return rc;
2406
2407 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
2408 IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
2409 return -EIO;
2410 }
2411
2412 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
2413 IWL_ERR(priv, "ucode not available for device bring up\n");
2414 return -EIO;
2415 }
2416
2417 /* If platform's RF_KILL switch is NOT set to KILL */
2418 if (iwl_read32(priv, CSR_GP_CNTRL) &
2419 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
2420 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2421 else {
2422 set_bit(STATUS_RF_KILL_HW, &priv->status);
2423 IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
2424 return -ENODEV;
2425 }
2426
2427 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2428
2429 rc = iwl3945_hw_nic_init(priv);
2430 if (rc) {
2431 IWL_ERR(priv, "Unable to int nic\n");
2432 return rc;
2433 }
2434
2435 /* make sure rfkill handshake bits are cleared */
2436 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2437 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
2438 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2439
2440 /* clear (again), then enable host interrupts */
2441 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2442 iwl_legacy_enable_interrupts(priv);
2443
2444 /* really make sure rfkill handshake bits are cleared */
2445 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2446 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2447
2448 /* Copy original ucode data image from disk into backup cache.
2449 * This will be used to initialize the on-board processor's
2450 * data SRAM for a clean start when the runtime program first loads. */
2451 memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
2452 priv->ucode_data.len);
2453
2454 /* We return success when we resume from suspend and rf_kill is on. */
2455 if (test_bit(STATUS_RF_KILL_HW, &priv->status))
2456 return 0;
2457
2458 for (i = 0; i < MAX_HW_RESTARTS; i++) {
2459
2460 /* load bootstrap state machine,
2461 * load bootstrap program into processor's memory,
2462 * prepare to load the "initialize" uCode */
2463 rc = priv->cfg->ops->lib->load_ucode(priv);
2464
2465 if (rc) {
2466 IWL_ERR(priv,
2467 "Unable to set up bootstrap uCode: %d\n", rc);
2468 continue;
2469 }
2470
2471 /* start card; "initialize" will load runtime ucode */
2472 iwl3945_nic_start(priv);
2473
2474 IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
2475
2476 return 0;
2477 }
2478
2479 set_bit(STATUS_EXIT_PENDING, &priv->status);
2480 __iwl3945_down(priv);
2481 clear_bit(STATUS_EXIT_PENDING, &priv->status);
2482
2483 /* tried to restart and config the device for as long as our
2484 * patience could withstand */
2485 IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
2486 return -EIO;
2487}
2488
2489
2490/*****************************************************************************
2491 *
2492 * Workqueue callbacks
2493 *
2494 *****************************************************************************/
2495
2496static void iwl3945_bg_init_alive_start(struct work_struct *data)
2497{
2498 struct iwl_priv *priv =
2499 container_of(data, struct iwl_priv, init_alive_start.work);
2500
2501 mutex_lock(&priv->mutex);
2502 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2503 goto out;
2504
2505 iwl3945_init_alive_start(priv);
2506out:
2507 mutex_unlock(&priv->mutex);
2508}
2509
2510static void iwl3945_bg_alive_start(struct work_struct *data)
2511{
2512 struct iwl_priv *priv =
2513 container_of(data, struct iwl_priv, alive_start.work);
2514
2515 mutex_lock(&priv->mutex);
2516 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2517 goto out;
2518
2519 iwl3945_alive_start(priv);
2520out:
2521 mutex_unlock(&priv->mutex);
2522}
2523
2524/*
2525 * 3945 cannot interrupt driver when hardware rf kill switch toggles;
2526 * driver must poll CSR_GP_CNTRL_REG register for change. This register
2527 * *is* readable even when device has been SW_RESET into low power mode
2528 * (e.g. during RF KILL).
2529 */
2530static void iwl3945_rfkill_poll(struct work_struct *data)
2531{
2532 struct iwl_priv *priv =
2533 container_of(data, struct iwl_priv, _3945.rfkill_poll.work);
2534 bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status);
2535 bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL)
2536 & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
2537
2538 if (new_rfkill != old_rfkill) {
2539 if (new_rfkill)
2540 set_bit(STATUS_RF_KILL_HW, &priv->status);
2541 else
2542 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2543
2544 wiphy_rfkill_set_hw_state(priv->hw->wiphy, new_rfkill);
2545
2546 IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n",
2547 new_rfkill ? "disable radio" : "enable radio");
2548 }
2549
2550 /* Keep this running, even if radio now enabled. This will be
2551 * cancelled in mac_start() if system decides to start again */
2552 queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
2553 round_jiffies_relative(2 * HZ));
2554
2555}
2556
/*
 * iwl3945_request_scan - build and send a REPLY_SCAN_CMD to the uCode.
 *
 * Fills a struct iwl3945_scan_cmd (allocated lazily in priv->scan_cmd,
 * reused across scans) from priv->scan_request and the current band,
 * appends the probe-request template and the per-channel list, and
 * sends the whole command synchronously.  Sets STATUS_SCAN_HW while
 * the command is in flight and clears it again on send failure.
 *
 * Returns 0 on success, -ENOMEM / -EIO on failure.
 * Called with priv->mutex held (lockdep-asserted below).
 */
int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
	struct iwl_host_cmd cmd = {
		.id = REPLY_SCAN_CMD,
		.len = sizeof(struct iwl3945_scan_cmd),
		.flags = CMD_SIZE_HUGE,
	};
	struct iwl3945_scan_cmd *scan;
	u8 n_probes = 0;
	enum ieee80211_band band;
	bool is_active = false;
	int ret;
	u16 len;

	lockdep_assert_held(&priv->mutex);

	/* Lazily allocate the scan command buffer; it is kept for reuse */
	if (!priv->scan_cmd) {
		priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) +
					 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
		if (!priv->scan_cmd) {
			IWL_DEBUG_SCAN(priv, "Fail to allocate scan memory\n");
			return -ENOMEM;
		}
	}
	scan = priv->scan_cmd;
	memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE);

	scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
	scan->quiet_time = IWL_ACTIVE_QUIET_TIME;

	if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
		u16 interval;
		u32 extra;
		u32 suspend_time = 100;
		u32 scan_suspend_time = 100;

		IWL_DEBUG_INFO(priv, "Scanning while associated...\n");

		interval = vif->bss_conf.beacon_int;

		scan->suspend_time = 0;
		/* Limit off-channel time so the association is not lost */
		scan->max_out_time = cpu_to_le32(200 * 1024);
		if (!interval)
			interval = suspend_time;
		/*
		 * suspend time format:
		 *  0-19: beacon interval in usec (time before exec.)
		 * 20-23: 0
		 * 24-31: number of beacons (suspend between channels)
		 */

		extra = (suspend_time / interval) << 24;
		scan_suspend_time = 0xFF0FFFFF &
			(extra | ((suspend_time % interval) * 1024));

		scan->suspend_time = cpu_to_le32(scan_suspend_time);
		IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
			       scan_suspend_time, interval);
	}

	if (priv->scan_request->n_ssids) {
		int i, p = 0;
		IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
		for (i = 0; i < priv->scan_request->n_ssids; i++) {
			/* always does wildcard anyway */
			if (!priv->scan_request->ssids[i].ssid_len)
				continue;
			scan->direct_scan[p].id = WLAN_EID_SSID;
			scan->direct_scan[p].len =
				priv->scan_request->ssids[i].ssid_len;
			memcpy(scan->direct_scan[p].ssid,
			       priv->scan_request->ssids[i].ssid,
			       priv->scan_request->ssids[i].ssid_len);
			n_probes++;
			p++;
		}
		is_active = true;
	} else
		IWL_DEBUG_SCAN(priv, "Kicking off passive scan.\n");

	/* We don't build a direct scan probe request; the uCode will do
	 * that based on the direct_mask added to each channel entry */
	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	scan->tx_cmd.sta_id = priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	/* flags + rate selection */

	switch (priv->scan_band) {
	case IEEE80211_BAND_2GHZ:
		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
		scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
		band = IEEE80211_BAND_2GHZ;
		break;
	case IEEE80211_BAND_5GHZ:
		scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
		band = IEEE80211_BAND_5GHZ;
		break;
	default:
		IWL_WARN(priv, "Invalid scan band\n");
		return -EIO;
	}

	/*
	 * If active scanning is requested but a certain channel
	 * is marked passive, we can do active scanning if we
	 * detect transmissions.
	 */
	scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
					IWL_GOOD_CRC_TH_DISABLED;

	/* Probe-request template goes at the start of the variable data */
	len = iwl_legacy_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
					vif->addr, priv->scan_request->ie,
					priv->scan_request->ie_len,
					IWL_MAX_SCAN_SIZE - sizeof(*scan));
	scan->tx_cmd.len = cpu_to_le16(len);

	/* select Rx antennas */
	scan->flags |= iwl3945_get_antenna_flags(priv);

	/* Channel entries follow the probe request in scan->data */
	scan->channel_count = iwl3945_get_channels_for_scan(priv, band, is_active, n_probes,
							    (void *)&scan->data[len], vif);
	if (scan->channel_count == 0) {
		IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
		return -EIO;
	}

	/* Total command length = fixed part + probe req + channel list */
	cmd.len += le16_to_cpu(scan->tx_cmd.len) +
		scan->channel_count * sizeof(struct iwl3945_scan_channel);
	cmd.data = scan;
	scan->len = cpu_to_le16(cmd.len);

	set_bit(STATUS_SCAN_HW, &priv->status);
	ret = iwl_legacy_send_cmd_sync(priv, &cmd);
	if (ret)
		clear_bit(STATUS_SCAN_HW, &priv->status);
	return ret;
}
2695
2696void iwl3945_post_scan(struct iwl_priv *priv)
2697{
2698 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2699
2700 /*
2701 * Since setting the RXON may have been deferred while
2702 * performing the scan, fire one off if needed
2703 */
2704 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
2705 iwl3945_commit_rxon(priv, ctx);
2706}
2707
/*
 * iwl3945_bg_restart - restart worker queued on priv->restart.
 *
 * Two recovery paths:
 *  - firmware error (STATUS_FW_ERROR set): detach vifs, take the device
 *    down and ask mac80211 to restart the whole hardware;
 *  - plain restart: cycle the device down and back up in place.
 */
static void iwl3945_bg_restart(struct work_struct *data)
{
	struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
		struct iwl_rxon_context *ctx;
		/* Drop vif pointers under the mutex before teardown;
		 * mac80211 will re-add interfaces on restart_hw. */
		mutex_lock(&priv->mutex);
		for_each_context(priv, ctx)
			ctx->vif = NULL;
		priv->is_open = 0;
		mutex_unlock(&priv->mutex);
		iwl3945_down(priv);
		ieee80211_restart_hw(priv->hw);
	} else {
		iwl3945_down(priv);

		/* Re-check for teardown started while we were down */
		mutex_lock(&priv->mutex);
		if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
			mutex_unlock(&priv->mutex);
			return;
		}

		__iwl3945_up(priv);
		mutex_unlock(&priv->mutex);
	}
}
2737
2738static void iwl3945_bg_rx_replenish(struct work_struct *data)
2739{
2740 struct iwl_priv *priv =
2741 container_of(data, struct iwl_priv, rx_replenish);
2742
2743 mutex_lock(&priv->mutex);
2744 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2745 goto out;
2746
2747 iwl3945_rx_replenish(priv);
2748out:
2749 mutex_unlock(&priv->mutex);
2750}
2751
/*
 * iwl3945_post_associate - finish association with the AP/IBSS.
 *
 * Commits an unassociated RXON first (ASSOC cleared) so the timing
 * command can be sent, then re-commits with ASSOC set, the AID and the
 * preamble/slot flags taken from the vif's BSS configuration.  Finally
 * kicks rate scaling (station mode) or beaconing (IBSS).
 */
void iwl3945_post_associate(struct iwl_priv *priv)
{
	int rc = 0;
	struct ieee80211_conf *conf = NULL;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	if (!ctx->vif || !priv->is_open)
		return;

	IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
			ctx->vif->bss_conf.aid, ctx->active.bssid_addr);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	iwl_legacy_scan_cancel_timeout(priv, 200);

	conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);

	/* Step 1: commit without ASSOC so the timing command is accepted */
	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	iwl3945_commit_rxon(priv, ctx);

	rc = iwl_legacy_send_rxon_timing(priv, ctx);
	if (rc)
		IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
			 "Attempting to continue.\n");

	/* Step 2: stage the associated configuration */
	ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;

	ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid);

	IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
			ctx->vif->bss_conf.aid, ctx->vif->bss_conf.beacon_int);

	if (ctx->vif->bss_conf.use_short_preamble)
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;

	/* Short slot only applies on the 2.4 GHz band */
	if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
		if (ctx->vif->bss_conf.use_short_slot)
			ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
	}

	iwl3945_commit_rxon(priv, ctx);

	switch (ctx->vif->type) {
	case NL80211_IFTYPE_STATION:
		iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
		break;
	case NL80211_IFTYPE_ADHOC:
		iwl3945_send_beacon_cmd(priv);
		break;
	default:
		IWL_ERR(priv, "%s Should not be called in %d mode\n",
			__func__, ctx->vif->type);
		break;
	}
}
2813
2814/*****************************************************************************
2815 *
2816 * mac80211 entry point functions
2817 *
2818 *****************************************************************************/
2819
/* How long mac_start waits for the uCode to report STATUS_READY */
#define UCODE_READY_TIMEOUT	(2 * HZ)

/*
 * iwl3945_mac_start - mac80211 ->start callback.
 *
 * Reads the uCode from disk on first open, brings the NIC up and waits
 * up to UCODE_READY_TIMEOUT for the ALIVE handling to set STATUS_READY
 * (signalled via priv->wait_command_queue in iwl3945_alive_start()).
 * On success, stops the rfkill poll worker since the running uCode
 * reports rfkill itself.  Returns 0 or a negative errno.
 */
static int iwl3945_mac_start(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;
	int ret;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	/* we should be verifying the device is ready to be opened */
	mutex_lock(&priv->mutex);

	/* fetch ucode file from disk, alloc and copy to bus-master buffers ...
	 * ucode filename and max sizes are card-specific. */

	if (!priv->ucode_code.len) {
		ret = iwl3945_read_ucode(priv);
		if (ret) {
			IWL_ERR(priv, "Could not read microcode: %d\n", ret);
			mutex_unlock(&priv->mutex);
			goto out_release_irq;
		}
	}

	ret = __iwl3945_up(priv);

	mutex_unlock(&priv->mutex);

	if (ret)
		goto out_release_irq;

	IWL_DEBUG_INFO(priv, "Start UP work.\n");

	/* Wait for START_ALIVE from ucode. Otherwise callbacks from
	 * mac80211 will not be run successfully. */
	ret = wait_event_timeout(priv->wait_command_queue,
			test_bit(STATUS_READY, &priv->status),
			UCODE_READY_TIMEOUT);
	if (!ret) {
		/* Timed out: fail only if READY really never arrived */
		if (!test_bit(STATUS_READY, &priv->status)) {
			IWL_ERR(priv,
				"Wait for START_ALIVE timeout after %dms.\n",
				jiffies_to_msecs(UCODE_READY_TIMEOUT));
			ret = -ETIMEDOUT;
			goto out_release_irq;
		}
	}

	/* ucode is running and will send rfkill notifications,
	 * no need to poll the killswitch state anymore */
	cancel_delayed_work(&priv->_3945.rfkill_poll);

	priv->is_open = 1;
	IWL_DEBUG_MAC80211(priv, "leave\n");
	return 0;

out_release_irq:
	priv->is_open = 0;
	IWL_DEBUG_MAC80211(priv, "leave - failed\n");
	return ret;
}
2881
/*
 * iwl3945_mac_stop - mac80211 ->stop callback.
 *
 * Takes the device down, flushes pending work, and re-arms the rfkill
 * poll worker (the uCode is no longer running to report rfkill changes).
 * No-op if the device was never opened.
 */
static void iwl3945_mac_stop(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (!priv->is_open) {
		IWL_DEBUG_MAC80211(priv, "leave - skip\n");
		return;
	}

	priv->is_open = 0;

	iwl3945_down(priv);

	/* Wait for any still-queued work items to finish */
	flush_workqueue(priv->workqueue);

	/* start polling the killswitch state again */
	queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
			   round_jiffies_relative(2 * HZ));

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
2905
/*
 * iwl3945_mac_tx - mac80211 ->tx callback.
 *
 * Hands the frame to iwl3945_tx_skb(); on a non-zero return the frame
 * was not queued and the skb is freed here (mac80211 transferred
 * ownership to the driver).
 */
static void iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct iwl_priv *priv = hw->priv;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
		     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);

	if (iwl3945_tx_skb(priv, skb))
		dev_kfree_skb_any(skb);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
2920
/*
 * iwl3945_config_ap - configure the device for AP/IBSS beaconing.
 *
 * On first bring-up (not yet associated): commits an unassociated RXON,
 * sends the timing command, stages preamble/slot flags from the vif,
 * then re-commits with ASSOC set.  Always finishes by sending the
 * beacon command.
 */
void iwl3945_config_ap(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct ieee80211_vif *vif = ctx->vif;
	int rc = 0;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* The following should be done only at AP bring up */
	if (!(iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))) {

		/* RXON - unassoc (to set timing command) */
		ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
		iwl3945_commit_rxon(priv, ctx);

		/* RXON Timing */
		rc = iwl_legacy_send_rxon_timing(priv, ctx);
		if (rc)
			IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
					"Attempting to continue.\n");

		ctx->staging.assoc_id = 0;

		if (vif->bss_conf.use_short_preamble)
			ctx->staging.flags |=
				RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			ctx->staging.flags &=
				~RXON_FLG_SHORT_PREAMBLE_MSK;

		/* Short slot only applies on the 2.4 GHz band */
		if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
			if (vif->bss_conf.use_short_slot)
				ctx->staging.flags |=
					RXON_FLG_SHORT_SLOT_MSK;
			else
				ctx->staging.flags &=
					~RXON_FLG_SHORT_SLOT_MSK;
		}
		/* restore RXON assoc */
		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		iwl3945_commit_rxon(priv, ctx);
	}
	iwl3945_send_beacon_cmd(priv);
}
2966
/*
 * mac80211 ->set_key callback: program or remove a hardware crypto key.
 *
 * Keys set before association are "static" (global WEP-style) keys;
 * keys set while associated are per-station "dynamic" keys, which need
 * a valid station id.  Returns -EOPNOTSUPP when software crypto is
 * forced via module parameter, or for IBSS group keys (left to
 * software so the hardware does not try to decrypt them — IBSS RSN
 * support).
 */
static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       struct ieee80211_key_conf *key)
{
	struct iwl_priv *priv = hw->priv;
	int ret = 0;
	u8 sta_id = IWL_INVALID_STATION;
	u8 static_key;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (iwl3945_mod_params.sw_crypto) {
		IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
		return -EOPNOTSUPP;
	}

	/*
	 * To support IBSS RSN, don't program group keys in IBSS, the
	 * hardware will then not attempt to decrypt the frames.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return -EOPNOTSUPP;

	/* before association only static (global) keys are possible */
	static_key = !iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);

	if (!static_key) {
		sta_id = iwl_legacy_sta_id_or_broadcast(
				priv, &priv->contexts[IWL_RXON_CTX_BSS], sta);
		if (sta_id == IWL_INVALID_STATION)
			return -EINVAL;
	}

	/* serialize against scan and other key operations */
	mutex_lock(&priv->mutex);
	iwl_legacy_scan_cancel_timeout(priv, 100);

	switch (cmd) {
	case SET_KEY:
		if (static_key)
			ret = iwl3945_set_static_key(priv, key);
		else
			ret = iwl3945_set_dynamic_key(priv, key, sta_id);
		IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
		break;
	case DISABLE_KEY:
		if (static_key)
			ret = iwl3945_remove_static_key(priv);
		else
			ret = iwl3945_clear_sta_key_info(priv, sta_id);
		IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");

	return ret;
}
3028
/*
 * mac80211 ->sta_add callback: add a peer station to the uCode station
 * table and initialize rate scaling for it.
 *
 * NOTE(review): is_ap is true when the *vif* operates as a station —
 * presumably because the only peer a managed-mode vif adds is its AP;
 * confirm against iwl_legacy_add_station_common()'s is_ap semantics.
 */
static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl3945_sta_priv *sta_priv = (void *)sta->drv_priv;
	int ret;
	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
	u8 sta_id;

	IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
			sta->addr);
	mutex_lock(&priv->mutex);
	IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
			sta->addr);
	/* mark invalid until the add actually succeeds */
	sta_priv->common.sta_id = IWL_INVALID_STATION;


	ret = iwl_legacy_add_station_common(priv,
				&priv->contexts[IWL_RXON_CTX_BSS],
				     sta->addr, is_ap, sta, &sta_id);
	if (ret) {
		IWL_ERR(priv, "Unable to add station %pM (%d)\n",
			sta->addr, ret);
		/* Should we return success if return code is EEXIST ? */
		mutex_unlock(&priv->mutex);
		return ret;
	}

	sta_priv->common.sta_id = sta_id;

	/* Initialize rate scaling */
	IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
		       sta->addr);
	iwl3945_rs_rate_init(priv, sta, sta_id);
	mutex_unlock(&priv->mutex);

	return 0;
}
3068
/*
 * mac80211 ->configure_filter callback: translate mac80211 FIF_* filter
 * flags into RXON filter bits on the staging RXON.
 *
 * The staging RXON is only updated here, not committed — see the
 * comment below.  On return *total_flags is masked down to the subset
 * of filters this driver actually honours.
 */
static void iwl3945_configure_filter(struct ieee80211_hw *hw,
				     unsigned int changed_flags,
				     unsigned int *total_flags,
				     u64 multicast)
{
	struct iwl_priv *priv = hw->priv;
	__le32 filter_or = 0, filter_nand = 0;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

/* For each FIF_* test, collect the RXON bit to either set or clear */
#define CHK(test, flag)	do { \
	if (*total_flags & (test)) \
		filter_or |= (flag); \
	else \
		filter_nand |= (flag); \
	} while (0)

	IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
			changed_flags, *total_flags);

	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);

#undef CHK

	mutex_lock(&priv->mutex);

	ctx->staging.filter_flags &= ~filter_nand;
	ctx->staging.filter_flags |= filter_or;

	/*
	 * Not committing directly because hardware can perform a scan,
	 * but even if hw is ready, committing here breaks for some reason,
	 * we'll eventually commit the filter flags change anyway.
	 */

	mutex_unlock(&priv->mutex);

	/*
	 * Receiving all multicast frames is always enabled by the
	 * default flags setup in iwl_legacy_connection_init_rx_config()
	 * since we currently do not support programming multicast
	 * filters into the device.
	 */
	*total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
			FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
3116
3117
3118/*****************************************************************************
3119 *
3120 * sysfs attributes
3121 *
3122 *****************************************************************************/
3123
3124#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
3125
3126/*
3127 * The following adds a new attribute to the sysfs representation
3128 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
3129 * used for controlling the debug level.
3130 *
3131 * See the level definitions in iwl for details.
3132 *
3133 * The debug_level being managed using sysfs below is a per device debug
3134 * level that is used instead of the global debug level if it (the per
3135 * device debug level) is set.
3136 */
3137static ssize_t iwl3945_show_debug_level(struct device *d,
3138 struct device_attribute *attr, char *buf)
3139{
3140 struct iwl_priv *priv = dev_get_drvdata(d);
3141 return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
3142}
3143static ssize_t iwl3945_store_debug_level(struct device *d,
3144 struct device_attribute *attr,
3145 const char *buf, size_t count)
3146{
3147 struct iwl_priv *priv = dev_get_drvdata(d);
3148 unsigned long val;
3149 int ret;
3150
3151 ret = strict_strtoul(buf, 0, &val);
3152 if (ret)
3153 IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf);
3154 else {
3155 priv->debug_level = val;
3156 if (iwl_legacy_alloc_traffic_mem(priv))
3157 IWL_ERR(priv,
3158 "Not enough memory to generate traffic log\n");
3159 }
3160 return strnlen(buf, count);
3161}
3162
3163static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
3164 iwl3945_show_debug_level, iwl3945_store_debug_level);
3165
3166#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
3167
3168static ssize_t iwl3945_show_temperature(struct device *d,
3169 struct device_attribute *attr, char *buf)
3170{
3171 struct iwl_priv *priv = dev_get_drvdata(d);
3172
3173 if (!iwl_legacy_is_alive(priv))
3174 return -EAGAIN;
3175
3176 return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv));
3177}
3178
3179static DEVICE_ATTR(temperature, S_IRUGO, iwl3945_show_temperature, NULL);
3180
3181static ssize_t iwl3945_show_tx_power(struct device *d,
3182 struct device_attribute *attr, char *buf)
3183{
3184 struct iwl_priv *priv = dev_get_drvdata(d);
3185 return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
3186}
3187
3188static ssize_t iwl3945_store_tx_power(struct device *d,
3189 struct device_attribute *attr,
3190 const char *buf, size_t count)
3191{
3192 struct iwl_priv *priv = dev_get_drvdata(d);
3193 char *p = (char *)buf;
3194 u32 val;
3195
3196 val = simple_strtoul(p, &p, 10);
3197 if (p == buf)
3198 IWL_INFO(priv, ": %s is not in decimal form.\n", buf);
3199 else
3200 iwl3945_hw_reg_set_txpower(priv, val);
3201
3202 return count;
3203}
3204
3205static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, iwl3945_show_tx_power, iwl3945_store_tx_power);
3206
3207static ssize_t iwl3945_show_flags(struct device *d,
3208 struct device_attribute *attr, char *buf)
3209{
3210 struct iwl_priv *priv = dev_get_drvdata(d);
3211 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3212
3213 return sprintf(buf, "0x%04X\n", ctx->active.flags);
3214}
3215
3216static ssize_t iwl3945_store_flags(struct device *d,
3217 struct device_attribute *attr,
3218 const char *buf, size_t count)
3219{
3220 struct iwl_priv *priv = dev_get_drvdata(d);
3221 u32 flags = simple_strtoul(buf, NULL, 0);
3222 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3223
3224 mutex_lock(&priv->mutex);
3225 if (le32_to_cpu(ctx->staging.flags) != flags) {
3226 /* Cancel any currently running scans... */
3227 if (iwl_legacy_scan_cancel_timeout(priv, 100))
3228 IWL_WARN(priv, "Could not cancel scan.\n");
3229 else {
3230 IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n",
3231 flags);
3232 ctx->staging.flags = cpu_to_le32(flags);
3233 iwl3945_commit_rxon(priv, ctx);
3234 }
3235 }
3236 mutex_unlock(&priv->mutex);
3237
3238 return count;
3239}
3240
3241static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, iwl3945_show_flags, iwl3945_store_flags);
3242
3243static ssize_t iwl3945_show_filter_flags(struct device *d,
3244 struct device_attribute *attr, char *buf)
3245{
3246 struct iwl_priv *priv = dev_get_drvdata(d);
3247 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3248
3249 return sprintf(buf, "0x%04X\n",
3250 le32_to_cpu(ctx->active.filter_flags));
3251}
3252
3253static ssize_t iwl3945_store_filter_flags(struct device *d,
3254 struct device_attribute *attr,
3255 const char *buf, size_t count)
3256{
3257 struct iwl_priv *priv = dev_get_drvdata(d);
3258 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3259 u32 filter_flags = simple_strtoul(buf, NULL, 0);
3260
3261 mutex_lock(&priv->mutex);
3262 if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
3263 /* Cancel any currently running scans... */
3264 if (iwl_legacy_scan_cancel_timeout(priv, 100))
3265 IWL_WARN(priv, "Could not cancel scan.\n");
3266 else {
3267 IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
3268 "0x%04X\n", filter_flags);
3269 ctx->staging.filter_flags =
3270 cpu_to_le32(filter_flags);
3271 iwl3945_commit_rxon(priv, ctx);
3272 }
3273 }
3274 mutex_unlock(&priv->mutex);
3275
3276 return count;
3277}
3278
3279static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, iwl3945_show_filter_flags,
3280 iwl3945_store_filter_flags);
3281
3282static ssize_t iwl3945_show_measurement(struct device *d,
3283 struct device_attribute *attr, char *buf)
3284{
3285 struct iwl_priv *priv = dev_get_drvdata(d);
3286 struct iwl_spectrum_notification measure_report;
3287 u32 size = sizeof(measure_report), len = 0, ofs = 0;
3288 u8 *data = (u8 *)&measure_report;
3289 unsigned long flags;
3290
3291 spin_lock_irqsave(&priv->lock, flags);
3292 if (!(priv->measurement_status & MEASUREMENT_READY)) {
3293 spin_unlock_irqrestore(&priv->lock, flags);
3294 return 0;
3295 }
3296 memcpy(&measure_report, &priv->measure_report, size);
3297 priv->measurement_status = 0;
3298 spin_unlock_irqrestore(&priv->lock, flags);
3299
3300 while (size && (PAGE_SIZE - len)) {
3301 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
3302 PAGE_SIZE - len, 1);
3303 len = strlen(buf);
3304 if (PAGE_SIZE - len)
3305 buf[len++] = '\n';
3306
3307 ofs += 16;
3308 size -= min(size, 16U);
3309 }
3310
3311 return len;
3312}
3313
3314static ssize_t iwl3945_store_measurement(struct device *d,
3315 struct device_attribute *attr,
3316 const char *buf, size_t count)
3317{
3318 struct iwl_priv *priv = dev_get_drvdata(d);
3319 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3320 struct ieee80211_measurement_params params = {
3321 .channel = le16_to_cpu(ctx->active.channel),
3322 .start_time = cpu_to_le64(priv->_3945.last_tsf),
3323 .duration = cpu_to_le16(1),
3324 };
3325 u8 type = IWL_MEASURE_BASIC;
3326 u8 buffer[32];
3327 u8 channel;
3328
3329 if (count) {
3330 char *p = buffer;
3331 strncpy(buffer, buf, min(sizeof(buffer), count));
3332 channel = simple_strtoul(p, NULL, 0);
3333 if (channel)
3334 params.channel = channel;
3335
3336 p = buffer;
3337 while (*p && *p != ' ')
3338 p++;
3339 if (*p)
3340 type = simple_strtoul(p + 1, NULL, 0);
3341 }
3342
3343 IWL_DEBUG_INFO(priv, "Invoking measurement of type %d on "
3344 "channel %d (for '%s')\n", type, params.channel, buf);
3345 iwl3945_get_measurement(priv, &params, type);
3346
3347 return count;
3348}
3349
3350static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
3351 iwl3945_show_measurement, iwl3945_store_measurement);
3352
3353static ssize_t iwl3945_store_retry_rate(struct device *d,
3354 struct device_attribute *attr,
3355 const char *buf, size_t count)
3356{
3357 struct iwl_priv *priv = dev_get_drvdata(d);
3358
3359 priv->retry_rate = simple_strtoul(buf, NULL, 0);
3360 if (priv->retry_rate <= 0)
3361 priv->retry_rate = 1;
3362
3363 return count;
3364}
3365
3366static ssize_t iwl3945_show_retry_rate(struct device *d,
3367 struct device_attribute *attr, char *buf)
3368{
3369 struct iwl_priv *priv = dev_get_drvdata(d);
3370 return sprintf(buf, "%d", priv->retry_rate);
3371}
3372
3373static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, iwl3945_show_retry_rate,
3374 iwl3945_store_retry_rate);
3375
3376
3377static ssize_t iwl3945_show_channels(struct device *d,
3378 struct device_attribute *attr, char *buf)
3379{
3380 /* all this shit doesn't belong into sysfs anyway */
3381 return 0;
3382}
3383
3384static DEVICE_ATTR(channels, S_IRUSR, iwl3945_show_channels, NULL);
3385
3386static ssize_t iwl3945_show_antenna(struct device *d,
3387 struct device_attribute *attr, char *buf)
3388{
3389 struct iwl_priv *priv = dev_get_drvdata(d);
3390
3391 if (!iwl_legacy_is_alive(priv))
3392 return -EAGAIN;
3393
3394 return sprintf(buf, "%d\n", iwl3945_mod_params.antenna);
3395}
3396
3397static ssize_t iwl3945_store_antenna(struct device *d,
3398 struct device_attribute *attr,
3399 const char *buf, size_t count)
3400{
3401 struct iwl_priv *priv __maybe_unused = dev_get_drvdata(d);
3402 int ant;
3403
3404 if (count == 0)
3405 return 0;
3406
3407 if (sscanf(buf, "%1i", &ant) != 1) {
3408 IWL_DEBUG_INFO(priv, "not in hex or decimal form.\n");
3409 return count;
3410 }
3411
3412 if ((ant >= 0) && (ant <= 2)) {
3413 IWL_DEBUG_INFO(priv, "Setting antenna select to %d.\n", ant);
3414 iwl3945_mod_params.antenna = (enum iwl3945_antenna)ant;
3415 } else
3416 IWL_DEBUG_INFO(priv, "Bad antenna select value %d.\n", ant);
3417
3418
3419 return count;
3420}
3421
3422static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, iwl3945_show_antenna, iwl3945_store_antenna);
3423
3424static ssize_t iwl3945_show_status(struct device *d,
3425 struct device_attribute *attr, char *buf)
3426{
3427 struct iwl_priv *priv = dev_get_drvdata(d);
3428 if (!iwl_legacy_is_alive(priv))
3429 return -EAGAIN;
3430 return sprintf(buf, "0x%08x\n", (int)priv->status);
3431}
3432
3433static DEVICE_ATTR(status, S_IRUGO, iwl3945_show_status, NULL);
3434
3435static ssize_t iwl3945_dump_error_log(struct device *d,
3436 struct device_attribute *attr,
3437 const char *buf, size_t count)
3438{
3439 struct iwl_priv *priv = dev_get_drvdata(d);
3440 char *p = (char *)buf;
3441
3442 if (p[0] == '1')
3443 iwl3945_dump_nic_error_log(priv);
3444
3445 return strnlen(buf, count);
3446}
3447
3448static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, iwl3945_dump_error_log);
3449
3450/*****************************************************************************
3451 *
3452 * driver setup and tear down
3453 *
3454 *****************************************************************************/
3455
/*
 * iwl3945_setup_deferred_work - create the driver workqueue and wire up
 * all work items, the watchdog timer and the interrupt tasklet.
 * Counterpart of iwl3945_cancel_deferred_work().
 */
static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
{
	priv->workqueue = create_singlethread_workqueue(DRV_NAME);

	init_waitqueue_head(&priv->wait_command_queue);

	INIT_WORK(&priv->restart, iwl3945_bg_restart);
	INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
	INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
	INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
	INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);

	/* scan work items come from the shared legacy core */
	iwl_legacy_setup_scan_deferred_work(priv);

	iwl3945_hw_setup_deferred_work(priv);

	init_timer(&priv->watchdog);
	priv->watchdog.data = (unsigned long)priv;
	priv->watchdog.function = iwl_legacy_bg_watchdog;

	/* interrupts are serviced from a tasklet, not in hard-irq context */
	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
		iwl3945_irq_tasklet, (unsigned long)priv);
}
3479
/*
 * iwl3945_cancel_deferred_work - cancel pending deferred work items.
 *
 * NOTE(review): init_alive_start is cancelled synchronously while
 * alive_start is not — confirm whether the asymmetry is intentional
 * (e.g. alive_start may be safe to let run) before changing it.
 */
static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
{
	iwl3945_hw_cancel_deferred_work(priv);

	cancel_delayed_work_sync(&priv->init_alive_start);
	cancel_delayed_work(&priv->alive_start);

	iwl_legacy_cancel_scan_deferred_work(priv);
}
3489
/* sysfs attributes exported for this device; registered in pci_probe(). */
static struct attribute *iwl3945_sysfs_entries[] = {
	&dev_attr_antenna.attr,
	&dev_attr_channels.attr,
	&dev_attr_dump_errors.attr,
	&dev_attr_flags.attr,
	&dev_attr_filter_flags.attr,
	&dev_attr_measurement.attr,
	&dev_attr_retry_rate.attr,
	&dev_attr_status.attr,
	&dev_attr_temperature.attr,
	&dev_attr_tx_power.attr,
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	&dev_attr_debug_level.attr,
#endif
	NULL
};

/* NULL group name places the files directly in the device directory */
static struct attribute_group iwl3945_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = iwl3945_sysfs_entries,
};
3511
/*
 * mac80211 callback table for the 3945.  Operations shared with other
 * legacy devices are serviced by the iwl_legacy_* core.  Deliberately
 * not const: iwl3945_pci_probe() clears .hw_scan at runtime when the
 * disable_hw_scan module parameter is set.
 */
struct ieee80211_ops iwl3945_hw_ops = {
	.tx = iwl3945_mac_tx,
	.start = iwl3945_mac_start,
	.stop = iwl3945_mac_stop,
	.add_interface = iwl_legacy_mac_add_interface,
	.remove_interface = iwl_legacy_mac_remove_interface,
	.change_interface = iwl_legacy_mac_change_interface,
	.config = iwl_legacy_mac_config,
	.configure_filter = iwl3945_configure_filter,
	.set_key = iwl3945_mac_set_key,
	.conf_tx = iwl_legacy_mac_conf_tx,
	.reset_tsf = iwl_legacy_mac_reset_tsf,
	.bss_info_changed = iwl_legacy_mac_bss_info_changed,
	.hw_scan = iwl_legacy_mac_hw_scan,
	.sta_add = iwl3945_mac_sta_add,
	.sta_remove = iwl_legacy_mac_sta_remove,
	.tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
};
3530
/*
 * iwl3945_init_drv - initialize driver-private state that does not need
 * the hardware to be up: locks, defaults, channel map, per-channel TX
 * power from EEPROM, and geo/rate tables.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated here is unwound via the goto chain.
 */
static int iwl3945_init_drv(struct iwl_priv *priv)
{
	int ret;
	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;

	priv->retry_rate = 1;
	priv->beacon_skb = NULL;

	spin_lock_init(&priv->sta_lock);
	spin_lock_init(&priv->hcmd_lock);

	INIT_LIST_HEAD(&priv->free_frames);

	mutex_init(&priv->mutex);

	priv->ieee_channels = NULL;
	priv->ieee_rates = NULL;
	priv->band = IEEE80211_BAND_2GHZ;

	priv->iw_mode = NL80211_IFTYPE_STATION;
	priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;

	/* initialize force reset */
	priv->force_reset.reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD;

	/* EEPROM content was read earlier in probe; only sanity-check here */
	if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
		IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
			 eeprom->version);
		ret = -EINVAL;
		goto err;
	}
	ret = iwl_legacy_init_channel_map(priv);
	if (ret) {
		IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
		goto err;
	}

	/* Set up txpower settings in driver for all channels */
	if (iwl3945_txpower_set_from_eeprom(priv)) {
		ret = -EIO;
		goto err_free_channel_map;
	}

	ret = iwl_legacy_init_geos(priv);
	if (ret) {
		IWL_ERR(priv, "initializing geos failed: %d\n", ret);
		goto err_free_channel_map;
	}
	iwl3945_init_hw_rates(priv, priv->ieee_rates);

	return 0;

err_free_channel_map:
	iwl_legacy_free_channel_map(priv);
err:
	return ret;
}
3588
/* Maximum size in bytes of a probe request the 3945 uCode can send */
#define IWL3945_MAX_PROBE_REQUEST	200

/*
 * iwl3945_setup_mac - describe the device's capabilities to mac80211
 * and register the hw.  Sets priv->mac80211_registered on success so
 * remove knows whether to unregister.  Returns 0 or a negative errno.
 */
static int iwl3945_setup_mac(struct iwl_priv *priv)
{
	int ret;
	struct ieee80211_hw *hw = priv->hw;

	hw->rate_control_algorithm = "iwl-3945-rs";
	hw->sta_data_size = sizeof(struct iwl3945_sta_priv);
	hw->vif_data_size = sizeof(struct iwl_vif_priv);

	/* Tell mac80211 our characteristics */
	hw->flags = IEEE80211_HW_SIGNAL_DBM |
		    IEEE80211_HW_SPECTRUM_MGMT;

	hw->wiphy->interface_modes =
		priv->contexts[IWL_RXON_CTX_BSS].interface_modes;

	hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
			    WIPHY_FLAG_DISABLE_BEACON_HINTS |
			    WIPHY_FLAG_IBSS_RSN;

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
	/* we create the 802.11 header and a zero-length SSID element */
	hw->wiphy->max_scan_ie_len = IWL3945_MAX_PROBE_REQUEST - 24 - 2;

	/* Default value; 4 EDCA QOS priorities */
	hw->queues = 4;

	/* advertise only the bands that actually have channels */
	if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
		priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&priv->bands[IEEE80211_BAND_2GHZ];

	if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
		priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&priv->bands[IEEE80211_BAND_5GHZ];

	iwl_legacy_leds_init(priv);

	ret = ieee80211_register_hw(priv->hw);
	if (ret) {
		IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
		return ret;
	}
	priv->mac80211_registered = 1;

	return 0;
}
3637
/*
 * iwl3945_pci_probe - PCI probe: bring the device from "bus visible" to
 * "registered with mac80211".
 *
 * The numbered sections below must run in order (PCI setup before
 * register access, EEPROM before hw params, hw params before driver
 * init, ...).  Errors unwind through the goto chain at the bottom,
 * which releases resources in strict reverse order of acquisition.
 */
static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = 0, i;
	struct iwl_priv *priv;
	struct ieee80211_hw *hw;
	struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
	struct iwl3945_eeprom *eeprom;
	unsigned long flags;

	/***********************
	 * 1. Allocating HW data
	 * ********************/

	/* mac80211 allocates memory for this device instance, including
	 * space for this driver's private structure */
	hw = iwl_legacy_alloc_all(cfg);
	if (hw == NULL) {
		pr_err("Can not allocate network device\n");
		err = -ENOMEM;
		goto out;
	}
	priv = hw->priv;
	SET_IEEE80211_DEV(hw, &pdev->dev);

	priv->cmd_queue = IWL39_CMD_QUEUE_NUM;

	/* 3945 has only one valid context */
	priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);

	for (i = 0; i < NUM_IWL_RXON_CTX; i++)
		priv->contexts[i].ctxid = i;

	/* command IDs and interface modes for the single BSS context */
	priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
	priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
	priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
	priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
	priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
	priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
	priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC);
	priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
	priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
	priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;

	/*
	 * Disabling hardware scan means that mac80211 will perform scans
	 * "the hard way", rather than using device's scan.
	 */
	if (iwl3945_mod_params.disable_hw_scan) {
		IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
		iwl3945_hw_ops.hw_scan = NULL;
	}

	IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
	priv->cfg = cfg;
	priv->pci_dev = pdev;
	priv->inta_mask = CSR_INI_SET_MASK;

	if (iwl_legacy_alloc_traffic_mem(priv))
		IWL_ERR(priv, "Not enough memory to generate traffic log\n");

	/***************************
	 * 2. Initializing PCI bus
	 * *************************/
	/* ASPM power states interfere with device operation; disable them */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
				PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_ieee80211_free_hw;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		IWL_WARN(priv, "No suitable DMA available.\n");
		goto out_pci_disable_device;
	}

	pci_set_drvdata(pdev, priv);
	err = pci_request_regions(pdev, DRV_NAME);
	if (err)
		goto out_pci_release_regions;

	/***********************
	 * 3. Read REV Register
	 * ********************/
	priv->hw_base = pci_iomap(pdev, 0, 0);
	if (!priv->hw_base) {
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
			(unsigned long long) pci_resource_len(pdev, 0));
	IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, 0x41, 0x00);

	/* these spin locks will be used in apm_ops.init and EEPROM access
	 * we should init now
	 */
	spin_lock_init(&priv->reg_lock);
	spin_lock_init(&priv->lock);

	/*
	 * stop and reset the on-board processor just in case it is in a
	 * strange state ... like being left stranded by a primary kernel
	 * and this is now the kdump kernel trying to start up
	 */
	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/***********************
	 * 4. Read EEPROM
	 * ********************/

	/* Read the EEPROM */
	err = iwl_legacy_eeprom_init(priv);
	if (err) {
		IWL_ERR(priv, "Unable to init EEPROM\n");
		goto out_iounmap;
	}
	/* MAC Address location in EEPROM same for 3945/4965 */
	eeprom = (struct iwl3945_eeprom *)priv->eeprom;
	IWL_DEBUG_INFO(priv, "MAC address: %pM\n", eeprom->mac_address);
	SET_IEEE80211_PERM_ADDR(priv->hw, eeprom->mac_address);

	/***********************
	 * 5. Setup HW Constants
	 * ********************/
	/* Device-specific setup */
	if (iwl3945_hw_set_hw_params(priv)) {
		IWL_ERR(priv, "failed to set hw settings\n");
		goto out_eeprom_free;
	}

	/***********************
	 * 6. Setup priv
	 * ********************/

	err = iwl3945_init_drv(priv);
	if (err) {
		IWL_ERR(priv, "initializing driver failed\n");
		goto out_unset_hw_params;
	}

	IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s\n",
		priv->cfg->name);

	/***********************
	 * 7. Setup Services
	 * ********************/

	/* keep interrupts off until the IRQ handler is installed */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_legacy_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	pci_enable_msi(priv->pci_dev);

	err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
			  IRQF_SHARED, DRV_NAME, priv);
	if (err) {
		IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
		goto out_disable_msi;
	}

	err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group);
	if (err) {
		IWL_ERR(priv, "failed to create sysfs device attributes\n");
		goto out_release_irq;
	}

	iwl_legacy_set_rxon_channel(priv,
			     &priv->bands[IEEE80211_BAND_2GHZ].channels[5],
			     &priv->contexts[IWL_RXON_CTX_BSS]);
	iwl3945_setup_deferred_work(priv);
	iwl3945_setup_rx_handlers(priv);
	iwl_legacy_power_initialize(priv);

	/*********************************
	 * 8. Setup and Register mac80211
	 * *******************************/

	iwl_legacy_enable_interrupts(priv);

	err = iwl3945_setup_mac(priv);
	if (err)
		goto out_remove_sysfs;

	/* debugfs failure is non-fatal */
	err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
	if (err)
		IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);

	/* Start monitoring the killswitch */
	queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
			   2 * HZ);

	return 0;

	/* error unwind: reverse order of the acquisitions above */
 out_remove_sysfs:
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
	sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
 out_release_irq:
	free_irq(priv->pci_dev->irq, priv);
 out_disable_msi:
	pci_disable_msi(priv->pci_dev);
	iwl_legacy_free_geos(priv);
	iwl_legacy_free_channel_map(priv);
 out_unset_hw_params:
	iwl3945_unset_hw_params(priv);
 out_eeprom_free:
	iwl_legacy_eeprom_free(priv);
 out_iounmap:
	pci_iounmap(pdev, priv->hw_base);
 out_pci_release_regions:
	pci_release_regions(pdev);
 out_pci_disable_device:
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
 out_ieee80211_free_hw:
	iwl_legacy_free_traffic_mem(priv);
	ieee80211_free_hw(priv->hw);
 out:
	return err;
}
3870
/*
 * iwl3945_pci_remove - PCI remove: tear down everything set up in probe,
 * in (roughly) reverse order.  Ordering constraints are noted inline;
 * in particular the workqueue can only be destroyed after
 * ieee80211_unregister_hw() because unregister flushes it.
 */
static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
{
	struct iwl_priv *priv = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!priv)
		return;

	IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");

	iwl_legacy_dbgfs_unregister(priv);

	set_bit(STATUS_EXIT_PENDING, &priv->status);

	iwl_legacy_leds_exit(priv);

	if (priv->mac80211_registered) {
		/* unregister implicitly calls iwl3945_mac_stop() */
		ieee80211_unregister_hw(priv->hw);
		priv->mac80211_registered = 0;
	} else {
		iwl3945_down(priv);
	}

	/*
	 * Make sure device is reset to low power before unloading driver.
	 * This may be redundant with iwl_down(), but there are paths to
	 * run iwl_down() without calling apm_ops.stop(), and there are
	 * paths to avoid running iwl_down() at all before leaving driver.
	 * This (inexpensive) call *makes sure* device is reset.
	 */
	iwl_legacy_apm_stop(priv);

	/* make sure we flush any pending irq or
	 * tasklet for the driver
	 */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_legacy_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	iwl3945_synchronize_irq(priv);

	sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);

	cancel_delayed_work_sync(&priv->_3945.rfkill_poll);

	iwl3945_dealloc_ucode_pci(priv);

	if (priv->rxq.bd)
		iwl3945_rx_queue_free(priv, &priv->rxq);
	iwl3945_hw_txq_ctx_free(priv);

	iwl3945_unset_hw_params(priv);

	/*netif_stop_queue(dev); */
	flush_workqueue(priv->workqueue);

	/* ieee80211_unregister_hw calls iwl3945_mac_stop, which flushes
	 * priv->workqueue... so we can't take down the workqueue
	 * until now... */
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
	iwl_legacy_free_traffic_mem(priv);

	free_irq(pdev->irq, priv);
	pci_disable_msi(pdev);

	pci_iounmap(pdev, priv->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	iwl_legacy_free_channel_map(priv);
	iwl_legacy_free_geos(priv);
	kfree(priv->scan_cmd);
	if (priv->beacon_skb)
		dev_kfree_skb(priv->beacon_skb);

	ieee80211_free_hw(priv->hw);
}
3950
3951
3952/*****************************************************************************
3953 *
3954 * driver and module entry point
3955 *
3956 *****************************************************************************/
3957
/* PCI driver glue; the id table and PM ops come from the legacy core. */
static struct pci_driver iwl3945_driver = {
	.name = DRV_NAME,
	.id_table = iwl3945_hw_card_ids,
	.probe = iwl3945_pci_probe,
	.remove = __devexit_p(iwl3945_pci_remove),
	.driver.pm = IWL_LEGACY_PM_OPS,
};
3965
3966static int __init iwl3945_init(void)
3967{
3968
3969 int ret;
3970 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
3971 pr_info(DRV_COPYRIGHT "\n");
3972
3973 ret = iwl3945_rate_control_register();
3974 if (ret) {
3975 pr_err("Unable to register rate control algorithm: %d\n", ret);
3976 return ret;
3977 }
3978
3979 ret = pci_register_driver(&iwl3945_driver);
3980 if (ret) {
3981 pr_err("Unable to initialize PCI module\n");
3982 goto error_register;
3983 }
3984
3985 return ret;
3986
3987error_register:
3988 iwl3945_rate_control_unregister();
3989 return ret;
3990}
3991
/* Module exit: unregister in reverse order of iwl3945_init(). */
static void __exit iwl3945_exit(void)
{
	pci_unregister_driver(&iwl3945_driver);
	iwl3945_rate_control_unregister();
}
3997
/* firmware image requested at runtime via request_firmware() */
MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX));

/* module parameters; several are consumed at probe time only */
module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO);
MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto,
		"using software crypto (default 1 [software])");
module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan,
		int, S_IRUGO);
MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)");
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "debug output mask");
#endif
module_param_named(fw_restart, iwl3945_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");

module_exit(iwl3945_exit);
module_init(iwl3945_init);
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
deleted file mode 100644
index d2fba9eae153..000000000000
--- a/drivers/net/wireless/iwlegacy/iwl4965-base.c
+++ /dev/null
@@ -1,3281 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/init.h>
35#include <linux/pci.h>
36#include <linux/pci-aspm.h>
37#include <linux/slab.h>
38#include <linux/dma-mapping.h>
39#include <linux/delay.h>
40#include <linux/sched.h>
41#include <linux/skbuff.h>
42#include <linux/netdevice.h>
43#include <linux/firmware.h>
44#include <linux/etherdevice.h>
45#include <linux/if_arp.h>
46
47#include <net/mac80211.h>
48
49#include <asm/div64.h>
50
51#define DRV_NAME "iwl4965"
52
53#include "iwl-eeprom.h"
54#include "iwl-dev.h"
55#include "iwl-core.h"
56#include "iwl-io.h"
57#include "iwl-helpers.h"
58#include "iwl-sta.h"
59#include "iwl-4965-calib.h"
60#include "iwl-4965.h"
61#include "iwl-4965-led.h"
62
63
64/******************************************************************************
65 *
66 * module boiler plate
67 *
68 ******************************************************************************/
69
/*
 * module name, copyright, version, etc.
 */
#define DRV_DESCRIPTION	"Intel(R) Wireless WiFi 4965 driver for Linux"

/* "d" suffix in the version string marks a debug-enabled build. */
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
#define VD "d"
#else
#define VD
#endif

#define DRV_VERSION     IWLWIFI_VERSION VD


MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
MODULE_ALIAS("iwl4965");
89
90void iwl4965_update_chain_flags(struct iwl_priv *priv)
91{
92 struct iwl_rxon_context *ctx;
93
94 if (priv->cfg->ops->hcmd->set_rxon_chain) {
95 for_each_context(priv, ctx) {
96 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
97 if (ctx->active.rx_chain != ctx->staging.rx_chain)
98 iwl_legacy_commit_rxon(priv, ctx);
99 }
100 }
101}
102
103static void iwl4965_clear_free_frames(struct iwl_priv *priv)
104{
105 struct list_head *element;
106
107 IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
108 priv->frames_count);
109
110 while (!list_empty(&priv->free_frames)) {
111 element = priv->free_frames.next;
112 list_del(element);
113 kfree(list_entry(element, struct iwl_frame, list));
114 priv->frames_count--;
115 }
116
117 if (priv->frames_count) {
118 IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
119 priv->frames_count);
120 priv->frames_count = 0;
121 }
122}
123
124static struct iwl_frame *iwl4965_get_free_frame(struct iwl_priv *priv)
125{
126 struct iwl_frame *frame;
127 struct list_head *element;
128 if (list_empty(&priv->free_frames)) {
129 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
130 if (!frame) {
131 IWL_ERR(priv, "Could not allocate frame!\n");
132 return NULL;
133 }
134
135 priv->frames_count++;
136 return frame;
137 }
138
139 element = priv->free_frames.next;
140 list_del(element);
141 return list_entry(element, struct iwl_frame, list);
142}
143
/*
 * Return a frame buffer to the free list for reuse, wiping its
 * contents first so stale data cannot leak into the next beacon.
 */
static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
{
	memset(frame, 0, sizeof(*frame));
	list_add(&frame->list, &priv->free_frames);
}
149
150static u32 iwl4965_fill_beacon_frame(struct iwl_priv *priv,
151 struct ieee80211_hdr *hdr,
152 int left)
153{
154 lockdep_assert_held(&priv->mutex);
155
156 if (!priv->beacon_skb)
157 return 0;
158
159 if (priv->beacon_skb->len > left)
160 return 0;
161
162 memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
163
164 return priv->beacon_skb->len;
165}
166
167/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
168static void iwl4965_set_beacon_tim(struct iwl_priv *priv,
169 struct iwl_tx_beacon_cmd *tx_beacon_cmd,
170 u8 *beacon, u32 frame_size)
171{
172 u16 tim_idx;
173 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
174
175 /*
176 * The index is relative to frame start but we start looking at the
177 * variable-length part of the beacon.
178 */
179 tim_idx = mgmt->u.beacon.variable - beacon;
180
181 /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
182 while ((tim_idx < (frame_size - 2)) &&
183 (beacon[tim_idx] != WLAN_EID_TIM))
184 tim_idx += beacon[tim_idx+1] + 2;
185
186 /* If TIM field was found, set variables */
187 if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
188 tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
189 tx_beacon_cmd->tim_size = beacon[tim_idx+1];
190 } else
191 IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
192}
193
/*
 * iwl4965_hw_get_beacon_cmd - build a complete TX-beacon command in @frame
 *
 * Fills in the TX command, the TX beacon command, and the beacon frame
 * contents from the cached beacon template.  Caller must hold
 * priv->mutex (required by iwl4965_fill_beacon_frame()).
 *
 * Returns the total command size in bytes, or 0 on any failure (no
 * beacon context, oversized frame, or empty/missing beacon template).
 */
static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
				       struct iwl_frame *frame)
{
	struct iwl_tx_beacon_cmd *tx_beacon_cmd;
	u32 frame_size;
	u32 rate_flags;
	u32 rate;
	/*
	 * We have to set up the TX command, the TX Beacon command, and the
	 * beacon contents.
	 */

	lockdep_assert_held(&priv->mutex);

	if (!priv->beacon_ctx) {
		IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
		return 0;
	}

	/* Initialize memory */
	tx_beacon_cmd = &frame->u.beacon;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

	/* Set up TX beacon contents */
	frame_size = iwl4965_fill_beacon_frame(priv, tx_beacon_cmd->frame,
				sizeof(frame->u) - sizeof(*tx_beacon_cmd));
	if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
		return 0;
	if (!frame_size)
		return 0;

	/* Set up TX command fields */
	tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
	tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
		TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;

	/* Set up TX beacon command fields (locate the TIM element) */
	iwl4965_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
			frame_size);

	/* Set up packet rate and flags; alternate the TX antenna used for
	 * management frames each time a beacon is built */
	rate = iwl_legacy_get_lowest_plcp(priv, priv->beacon_ctx);
	priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
					      priv->hw_params.valid_tx_ant);
	rate_flags = iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
	if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;
	tx_beacon_cmd->tx.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate,
				rate_flags);

	return sizeof(*tx_beacon_cmd) + frame_size;
}
248
249int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
250{
251 struct iwl_frame *frame;
252 unsigned int frame_size;
253 int rc;
254
255 frame = iwl4965_get_free_frame(priv);
256 if (!frame) {
257 IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
258 "command.\n");
259 return -ENOMEM;
260 }
261
262 frame_size = iwl4965_hw_get_beacon_cmd(priv, frame);
263 if (!frame_size) {
264 IWL_ERR(priv, "Error configuring the beacon command\n");
265 iwl4965_free_frame(priv, frame);
266 return -EINVAL;
267 }
268
269 rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
270 &frame->u.cmd[0]);
271
272 iwl4965_free_frame(priv, frame);
273
274 return rc;
275}
276
/*
 * Extract the DMA address of transfer buffer @idx from a TFD.
 * The low 32 bits live in tb->lo; the top 4 address bits are packed
 * into the low nibble of tb->hi_n_len and only merged in when
 * dma_addr_t is wider than 32 bits (double shift avoids UB on
 * 32-bit builds).
 */
static inline dma_addr_t iwl4965_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}
288
/*
 * Extract the byte length of transfer buffer @idx from a TFD; the
 * length occupies the upper 12 bits of the hi_n_len field.
 */
static inline u16 iwl4965_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}
295
/*
 * Fill transfer-buffer slot @idx of a TFD with a DMA address and
 * length, and advance the TFD's buffer count to include it.
 * hi_n_len packs length (upper 12 bits) and address bits 32-35
 * (low nibble); the extra bits are written only when dma_addr_t is
 * wider than 32 bits.
 */
static inline void iwl4965_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	/* slot idx is now in use, so the TFD holds idx + 1 buffers */
	tfd->num_tbs = idx + 1;
}
310
/* Number of transfer buffers referenced by a TFD (low 5 bits of num_tbs). */
static inline u8 iwl4965_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}
315
/**
 * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @priv: driver private data
 * @txq: tx queue
 *
 * Unmaps the DMA buffers referenced by the TFD at the queue's read
 * pointer and frees the associated SKB, if any.
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
	struct iwl_tfd *tfd;
	struct pci_dev *dev = priv->pci_dev;
	int index = txq->q.read_ptr;
	int i;
	int num_tbs;

	tfd = &tfd_tmp[index];

	/* Sanity check on number of chunks */
	num_tbs = iwl4965_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd (buffer 0 holds the command; its mapping info is
	 * kept in txq->meta rather than decoded from the TFD itself) */
	if (num_tbs)
		pci_unmap_single(dev,
			dma_unmap_addr(&txq->meta[index], mapping),
			dma_unmap_len(&txq->meta[index], len),
			PCI_DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		pci_unmap_single(dev, iwl4965_tfd_tb_get_addr(tfd, i),
				iwl4965_tfd_tb_get_len(tfd, i),
				PCI_DMA_TODEVICE);

	/* free SKB */
	if (txq->txb) {
		struct sk_buff *skb;

		skb = txq->txb[txq->q.read_ptr].skb;

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->txb[txq->q.read_ptr].skb = NULL;
		}
	}
}
370
/*
 * iwl4965_hw_txq_attach_buf_to_tfd - append one DMA buffer to the TFD
 * at the queue's write pointer.
 *
 * @reset: if non-zero, zero the TFD first (start a fresh descriptor).
 * @pad:   unused here; kept for interface compatibility with callers.
 *
 * Returns 0 on success, -EINVAL if the TFD already holds the maximum
 * number of transfer buffers.
 */
int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset, u8 pad)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = (struct iwl_tfd *)txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl4965_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Error can not send more than %d chunks\n",
			  IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	/* 4965 DMA addresses are 36 bits wide; unaligned addresses are
	 * reported but still programmed into the TFD */
	BUG_ON(addr & ~DMA_BIT_MASK(36));
	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(priv, "Unaligned address = %llx\n",
			  (unsigned long long)addr);

	iwl4965_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}
405
406/*
407 * Tell nic where to find circular buffer of Tx Frame Descriptors for
408 * given Tx queue, and enable the DMA channel used for that queue.
409 *
410 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
411 * channels supported in hardware.
412 */
413int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
414 struct iwl_tx_queue *txq)
415{
416 int txq_id = txq->q.id;
417
418 /* Circular buffer (TFD queue in DRAM) physical base address */
419 iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
420 txq->q.dma_addr >> 8);
421
422 return 0;
423}
424
425/******************************************************************************
426 *
427 * Generic RX handler implementations
428 *
429 ******************************************************************************/
430static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
431 struct iwl_rx_mem_buffer *rxb)
432{
433 struct iwl_rx_packet *pkt = rxb_addr(rxb);
434 struct iwl_alive_resp *palive;
435 struct delayed_work *pwork;
436
437 palive = &pkt->u.alive_frame;
438
439 IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
440 "0x%01X 0x%01X\n",
441 palive->is_valid, palive->ver_type,
442 palive->ver_subtype);
443
444 if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
445 IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
446 memcpy(&priv->card_alive_init,
447 &pkt->u.alive_frame,
448 sizeof(struct iwl_init_alive_resp));
449 pwork = &priv->init_alive_start;
450 } else {
451 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
452 memcpy(&priv->card_alive, &pkt->u.alive_frame,
453 sizeof(struct iwl_alive_resp));
454 pwork = &priv->alive_start;
455 }
456
457 /* We delay the ALIVE response by 5ms to
458 * give the HW RF Kill time to activate... */
459 if (palive->is_valid == UCODE_VALID_OK)
460 queue_delayed_work(priv->workqueue, pwork,
461 msecs_to_jiffies(5));
462 else
463 IWL_WARN(priv, "uCode did not respond OK.\n");
464}
465
466/**
467 * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
468 *
469 * This callback is provided in order to send a statistics request.
470 *
471 * This timer function is continually reset to execute within
472 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
473 * was received. We need to ensure we receive the statistics in order
474 * to update the temperature used for calibrating the TXPOWER.
475 */
476static void iwl4965_bg_statistics_periodic(unsigned long data)
477{
478 struct iwl_priv *priv = (struct iwl_priv *)data;
479
480 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
481 return;
482
483 /* dont send host command if rf-kill is on */
484 if (!iwl_legacy_is_ready_rf(priv))
485 return;
486
487 iwl_legacy_send_statistics_request(priv, CMD_ASYNC, false);
488}
489
/*
 * Handle a BEACON_NOTIFICATION from the uCode: record whether this
 * station is currently acting as the IBSS beacon manager.  In debug
 * builds, also dump the beacon TX status and rate.
 */
static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl4965_beacon_notif *beacon =
		(struct iwl4965_beacon_notif *)pkt->u.raw;
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
		"tsf %d %d rate %d\n",
		le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
		beacon->beacon_notify_hdr.failure_frame,
		le32_to_cpu(beacon->ibss_mgr_status),
		le32_to_cpu(beacon->high_tsf),
		le32_to_cpu(beacon->low_tsf), rate);
#endif

	priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
}
510
/*
 * React to a critical-temperature (CT) kill notification: stop all
 * mac80211 queues and tell the device it may power down until the
 * temperature recovers.
 */
static void iwl4965_perform_ct_kill_task(struct iwl_priv *priv)
{
	unsigned long flags;

	IWL_DEBUG_POWER(priv, "Stop all queues\n");

	if (priv->mac80211_registered)
		ieee80211_stop_queues(priv->hw);

	/* Latch the CT-kill-exit bit; read back to flush the write. */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
			CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	iwl_read32(priv, CSR_UCODE_DRV_GP1);

	/* Grab-then-release NIC access; NOTE(review): presumably this
	 * lets the device drop into a low-power state — confirm against
	 * iwl_grab_nic_access() semantics. */
	spin_lock_irqsave(&priv->reg_lock, flags);
	if (!iwl_grab_nic_access(priv))
		iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, flags);
}
529
/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
				    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	/* snapshot of priv->status, used below to detect an HW rfkill
	 * state transition */
	unsigned long status = priv->status;

	IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_CARD_DISABLED) ?
			  "Reached" : "Not reached");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
		     CT_CARD_DISABLED)) {

		/* Block host commands while the card is disabled ... */
		iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
			    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		/* ... but unblock again if RXON itself is not disabled. */
		if (!(flags & RXON_CARD_DISABLED)) {
			iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
				    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
		}
	}

	if (flags & CT_CARD_DISABLED)
		iwl4965_perform_ct_kill_task(priv);

	if (flags & HW_CARD_DISABLED)
		set_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &priv->status);

	if (!(flags & RXON_CARD_DISABLED))
		iwl_legacy_scan_cancel(priv);

	/* Report an HW rfkill transition to the wiphy; otherwise wake any
	 * waiter blocked on a host command. */
	if ((test_bit(STATUS_RF_KILL_HW, &status) !=
	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
			test_bit(STATUS_RF_KILL_HW, &priv->status));
	else
		wake_up(&priv->wait_command_queue);
}
580
/**
 * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
 *
 * Setup the RX handlers for each of the reply types sent from the uCode
 * to the host, indexed by command id in priv->rx_handlers.
 *
 * This function chains into the hardware specific files for them to setup
 * any hardware specific handlers as well.
 */
static void iwl4965_setup_rx_handlers(struct iwl_priv *priv)
{
	priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive;
	priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
	priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
	priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
			iwl_legacy_rx_spectrum_measure_notif;
	priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
	priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
			iwl_legacy_rx_pm_debug_statistics_notif;
	priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;

	/*
	 * The same handler is used for both the REPLY to a discrete
	 * statistics request from the host as well as for the periodic
	 * statistics notifications (after received beacons) from the uCode.
	 */
	priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_reply_statistics;
	priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_rx_statistics;

	iwl_legacy_setup_rx_scan_handlers(priv);

	/* status change handler */
	priv->rx_handlers[CARD_STATE_NOTIFICATION] =
					iwl4965_rx_card_state_notif;

	priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
			iwl4965_rx_missed_beacon_notif;
	/* Rx handlers */
	priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
	priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
	/* block ack */
	priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
	/* Set up hardware specific Rx handlers */
	priv->cfg->ops->lib->rx_handler_setup(priv);
}
626
/**
 * iwl4965_rx_handle - Main entry function for receiving responses from uCode
 *
 * Uses the priv->rx_handlers callback function array to invoke
 * the appropriate handlers, including command responses,
 * frame-received notifications, and other notifications.
 *
 * Walks the RX ring from the driver's read index up to the index the
 * uCode last wrote, unmapping each buffer, dispatching its packet,
 * and recycling or releasing the page afterwards.  Restocks the ring
 * eagerly when it is more than half empty.
 */
void iwl4965_rx_handle(struct iwl_priv *priv)
{
	struct iwl_rx_mem_buffer *rxb;
	struct iwl_rx_packet *pkt;
	struct iwl_rx_queue *rxq = &priv->rxq;
	u32 r, i;
	int reclaim;
	unsigned long flags;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);

	/* calculate total frames need to be restock after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	/* restock mid-loop when the ring is draining faster than we refill */
	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		int len;

		rxb = rxq->queue[i];

		/* If an RXB doesn't have a Rx queue slot associated with it,
		 * then a bug has been introduced in the queue refilling
		 * routines -- catch it here */
		BUG_ON(rxb == NULL);

		rxq->queue[i] = NULL;

		pci_unmap_page(priv->pci_dev, rxb->page_dma,
			       PAGE_SIZE << priv->hw_params.rx_page_order,
			       PCI_DMA_FROMDEVICE);
		pkt = rxb_addr(rxb);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_legacy_dev_rx(priv, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
			(pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
			(pkt->hdr.cmd != REPLY_RX) &&
			(pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
			(pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
			(pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
			(pkt->hdr.cmd != REPLY_TX);

		/* Based on type of command response or notification,
		 *   handle those that need handling via function in
		 *   rx_handlers table.  See iwl4965_setup_rx_handlers() */
		if (priv->rx_handlers[pkt->hdr.cmd]) {
			IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
				i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
				pkt->hdr.cmd);
			priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
			priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
		} else {
			/* No handling needed */
			IWL_DEBUG_RX(priv,
				"r %d i %d No handler needed for %s, 0x%02x\n",
				r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
				pkt->hdr.cmd);
		}

		/*
		 * XXX: After here, we should always check rxb->page
		 * against NULL before touching it or its virtual
		 * memory (pkt). Because some rx_handler might have
		 * already taken or freed the pages.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking iwl_legacy_send_cmd()
			 * as we reclaim the driver command queue */
			if (rxb->page)
				iwl_legacy_tx_cmd_complete(priv, rxb);
			else
				IWL_WARN(priv, "Claim null rxb?\n");
		}

		/* Reuse the page if possible. For notification packets and
		 * SKBs that fail to Rx correctly, add them back into the
		 * rx_free list for reuse later. */
		spin_lock_irqsave(&rxq->lock, flags);
		if (rxb->page != NULL) {
			rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
				0, PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		} else
			list_add_tail(&rxb->list, &rxq->rx_used);

		spin_unlock_irqrestore(&rxq->lock, flags);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode wont assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				iwl4965_rx_replenish_now(priv);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		iwl4965_rx_replenish_now(priv);
	else
		iwl4965_rx_queue_restock(priv);
}
767
/* call this function to flush any scheduled tasklet */
static inline void iwl4965_synchronize_irq(struct iwl_priv *priv)
{
	/* wait for any in-flight hard IRQ handler, then kill the tasklet
	 * it may have scheduled */
	synchronize_irq(priv->pci_dev->irq);
	tasklet_kill(&priv->irq_tasklet);
}
775
/*
 * iwl4965_irq_tasklet - bottom half of the interrupt handler.
 *
 * Reads and acknowledges the pending interrupt causes (CSR_INT and the
 * flow-handler status register), then services each cause in turn:
 * hardware error, rfkill toggle, CT kill, uCode SW error, wakeup, RX,
 * and uCode-load-complete.  Interrupts are re-enabled at the end unless
 * they were deliberately left disabled.
 */
static void iwl4965_irq_tasklet(struct iwl_priv *priv)
{
	u32 inta, handled = 0;
	u32 inta_fh;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&priv->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 *  and will clear only when CSR_FH_INT_STATUS gets cleared. */
	inta = iwl_read32(priv, CSR_INT);
	iwl_write32(priv, CSR_INT, inta);

	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
	 * Any new interrupts that happen after this, either while we're
	 * in this tasklet, or later, will show up in next ISR/tasklet. */
	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
	iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
		/* just for debug */
		inta_mask = iwl_read32(priv, CSR_INT_MASK);
		IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
			      inta, inta_mask, inta_fh);
	}
#endif

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
	 * atomic, make sure that inta covers all the interrupts that
	 * we've discovered, even if FH interrupt came in just after
	 * reading CSR_INT. */
	if (inta_fh & CSR49_FH_INT_RX_MASK)
		inta |= CSR_INT_BIT_FH_RX;
	if (inta_fh & CSR49_FH_INT_TX_MASK)
		inta |= CSR_INT_BIT_FH_TX;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(priv, "Hardware error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_legacy_disable_interrupts(priv);

		priv->isr_stats.hw++;
		iwl_legacy_irq_handle_error(priv);

		handled |= CSR_INT_BIT_HW_ERR;

		/* nothing else is serviced after a hardware error */
		return;
	}

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
				      "the frame/frames.\n");
			priv->isr_stats.sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(priv, "Alive interrupt\n");
			priv->isr_stats.alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		int hw_rf_kill = 0;
		if (!(iwl_read32(priv, CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
			hw_rf_kill = 1;

		IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
				hw_rf_kill ? "disable radio" : "enable radio");

		priv->isr_stats.rfkill++;

		/* driver only loads ucode once setting the interface up.
		 * the driver allows loading the ucode even if the radio
		 * is killed. Hence update the killswitch state here. The
		 * rfkill handler will care about restarting if needed.
		 */
		if (!test_bit(STATUS_ALIVE, &priv->status)) {
			if (hw_rf_kill)
				set_bit(STATUS_RF_KILL_HW, &priv->status);
			else
				clear_bit(STATUS_RF_KILL_HW, &priv->status);
			wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(priv, "Microcode CT kill error detected.\n");
		priv->isr_stats.ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(priv, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		priv->isr_stats.sw++;
		iwl_legacy_irq_handle_error(priv);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/*
	 * uCode wakes up after power-down sleep.
	 * Tell device about any new tx or host commands enqueued,
	 * and about any Rx buffers made available while asleep.
	 */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
		iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
		for (i = 0; i < priv->hw_params.max_txq_num; i++)
			iwl_legacy_txq_update_write_ptr(priv, &priv->txq[i]);
		priv->isr_stats.wakeup++;
		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
		iwl4965_rx_handle(priv);
		priv->isr_stats.rx++;
		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
		priv->isr_stats.tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		priv->ucode_write_complete = 1;
		wake_up(&priv->wait_command_queue);
	}

	if (inta & ~handled) {
		IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		priv->isr_stats.unhandled++;
	}

	if (inta & ~(priv->inta_mask)) {
		IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~priv->inta_mask);
		IWL_WARN(priv, "   with FH_INT = 0x%08x\n", inta_fh);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &priv->status))
		iwl_legacy_enable_interrupts(priv);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_legacy_enable_rfkill_int(priv);

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
		inta = iwl_read32(priv, CSR_INT);
		inta_mask = iwl_read32(priv, CSR_INT_MASK);
		inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
		IWL_DEBUG_ISR(priv,
			"End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
			"flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
	}
#endif
}
961
962/*****************************************************************************
963 *
964 * sysfs attributes
965 *
966 *****************************************************************************/
967
968#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
969
970/*
971 * The following adds a new attribute to the sysfs representation
972 * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
973 * used for controlling the debug level.
974 *
975 * See the level definitions in iwl for details.
976 *
977 * The debug_level being managed using sysfs below is a per device debug
978 * level that is used instead of the global debug level if it (the per
979 * device debug level) is set.
980 */
981static ssize_t iwl4965_show_debug_level(struct device *d,
982 struct device_attribute *attr, char *buf)
983{
984 struct iwl_priv *priv = dev_get_drvdata(d);
985 return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
986}
987static ssize_t iwl4965_store_debug_level(struct device *d,
988 struct device_attribute *attr,
989 const char *buf, size_t count)
990{
991 struct iwl_priv *priv = dev_get_drvdata(d);
992 unsigned long val;
993 int ret;
994
995 ret = strict_strtoul(buf, 0, &val);
996 if (ret)
997 IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
998 else {
999 priv->debug_level = val;
1000 if (iwl_legacy_alloc_traffic_mem(priv))
1001 IWL_ERR(priv,
1002 "Not enough memory to generate traffic log\n");
1003 }
1004 return strnlen(buf, count);
1005}
1006
1007static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
1008 iwl4965_show_debug_level, iwl4965_store_debug_level);
1009
1010
1011#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
1012
1013
1014static ssize_t iwl4965_show_temperature(struct device *d,
1015 struct device_attribute *attr, char *buf)
1016{
1017 struct iwl_priv *priv = dev_get_drvdata(d);
1018
1019 if (!iwl_legacy_is_alive(priv))
1020 return -EAGAIN;
1021
1022 return sprintf(buf, "%d\n", priv->temperature);
1023}
1024
1025static DEVICE_ATTR(temperature, S_IRUGO, iwl4965_show_temperature, NULL);
1026
1027static ssize_t iwl4965_show_tx_power(struct device *d,
1028 struct device_attribute *attr, char *buf)
1029{
1030 struct iwl_priv *priv = dev_get_drvdata(d);
1031
1032 if (!iwl_legacy_is_ready_rf(priv))
1033 return sprintf(buf, "off\n");
1034 else
1035 return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
1036}
1037
1038static ssize_t iwl4965_store_tx_power(struct device *d,
1039 struct device_attribute *attr,
1040 const char *buf, size_t count)
1041{
1042 struct iwl_priv *priv = dev_get_drvdata(d);
1043 unsigned long val;
1044 int ret;
1045
1046 ret = strict_strtoul(buf, 10, &val);
1047 if (ret)
1048 IWL_INFO(priv, "%s is not in decimal form.\n", buf);
1049 else {
1050 ret = iwl_legacy_set_tx_power(priv, val, false);
1051 if (ret)
1052 IWL_ERR(priv, "failed setting tx power (0x%d).\n",
1053 ret);
1054 else
1055 ret = count;
1056 }
1057 return ret;
1058}
1059
1060static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO,
1061 iwl4965_show_tx_power, iwl4965_store_tx_power);
1062
/* Attributes exported under the device's sysfs directory. */
static struct attribute *iwl_sysfs_entries[] = {
	&dev_attr_temperature.attr,
	&dev_attr_tx_power.attr,
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	&dev_attr_debug_level.attr,	/* only present in debug builds */
#endif
	NULL	/* sentinel required by sysfs_create_group() */
};
1071
/* Group passed to sysfs_create_group() during firmware load. */
static struct attribute_group iwl_attribute_group = {
	.name = NULL,	/* put in device directory */
	.attrs = iwl_sysfs_entries,
};
1076
1077/******************************************************************************
1078 *
1079 * uCode download functions
1080 *
1081 ******************************************************************************/
1082
/*
 * Free every DMA-coherent uCode image buffer owned by this device.
 * Safe on partially-allocated state: each descriptor is freed
 * independently (used from the err_pci_alloc unwind path).
 */
static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv)
{
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
}
1092
/* Release the NIC from reset so the on-board processor starts running. */
static void iwl4965_nic_start(struct iwl_priv *priv)
{
	/* Remove all resets to allow NIC to operate */
	iwl_write32(priv, CSR_RESET, 0);
}
1098
/* Forward declarations: both are defined later in this file but are
 * referenced by the firmware-request path below. */
static void iwl4965_ucode_callback(const struct firmware *ucode_raw,
						void *context);
static int iwl4965_mac_setup_register(struct iwl_priv *priv,
						u32 max_probe_length);
1103
1104static int __must_check iwl4965_request_firmware(struct iwl_priv *priv, bool first)
1105{
1106 const char *name_pre = priv->cfg->fw_name_pre;
1107 char tag[8];
1108
1109 if (first) {
1110 priv->fw_index = priv->cfg->ucode_api_max;
1111 sprintf(tag, "%d", priv->fw_index);
1112 } else {
1113 priv->fw_index--;
1114 sprintf(tag, "%d", priv->fw_index);
1115 }
1116
1117 if (priv->fw_index < priv->cfg->ucode_api_min) {
1118 IWL_ERR(priv, "no suitable firmware found!\n");
1119 return -ENOENT;
1120 }
1121
1122 sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
1123
1124 IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n",
1125 priv->firmware_name);
1126
1127 return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
1128 &priv->pci_dev->dev, GFP_KERNEL, priv,
1129 iwl4965_ucode_callback);
1130}
1131
/* Pointers into (and sizes of) the five sections of a raw uCode image;
 * filled by iwl4965_load_firmware(), consumed by the ucode callback. */
struct iwl4965_firmware_pieces {
	const void *inst, *data, *init, *init_data, *boot;
	size_t inst_size, data_size, init_size, init_data_size, boot_size;
};
1136
/*
 * Parse a legacy (API v0/v1/v2) uCode image into its five sections.
 *
 * Reads the per-section sizes from the file header, validates the total
 * file length against their sum, and points the pieces at the raw
 * firmware data (no copying).  Also records the uCode version in
 * priv->ucode_ver as a side effect.  Returns 0 on success, -EINVAL when
 * the file is truncated or the sizes do not add up.
 *
 * NOTE(review): the u32 section sizes come straight from the firmware
 * file and are summed without an explicit overflow check before the
 * comparison below — fine for trusted firmware; confirm the image can
 * never come from an untrusted source.
 */
static int iwl4965_load_firmware(struct iwl_priv *priv,
				const struct firmware *ucode_raw,
				struct iwl4965_firmware_pieces *pieces)
{
	struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
	u32 api_ver, hdr_size;
	const u8 *src;

	priv->ucode_ver = le32_to_cpu(ucode->ver);
	api_ver = IWL_UCODE_API(priv->ucode_ver);

	switch (api_ver) {
	default:
	case 0:
	case 1:
	case 2:
		/* all legacy API versions share the 24-byte v1 header */
		hdr_size = 24;
		if (ucode_raw->size < hdr_size) {
			IWL_ERR(priv, "File size too small!\n");
			return -EINVAL;
		}
		pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
		pieces->data_size = le32_to_cpu(ucode->v1.data_size);
		pieces->init_size = le32_to_cpu(ucode->v1.init_size);
		pieces->init_data_size =
				le32_to_cpu(ucode->v1.init_data_size);
		pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
		src = ucode->v1.data;
		break;
	}

	/* Verify size of file vs. image size info in file's header */
	if (ucode_raw->size != hdr_size + pieces->inst_size +
				pieces->data_size + pieces->init_size +
				pieces->init_data_size + pieces->boot_size) {

		IWL_ERR(priv,
			"uCode file size %d does not match expected size\n",
			(int)ucode_raw->size);
		return -EINVAL;
	}

	/* sections are laid out back-to-back in header order */
	pieces->inst = src;
	src += pieces->inst_size;
	pieces->data = src;
	src += pieces->data_size;
	pieces->init = src;
	src += pieces->init_size;
	pieces->init_data = src;
	src += pieces->init_data_size;
	pieces->boot = src;
	src += pieces->boot_size;

	return 0;
}
1192
/**
 * iwl4965_ucode_callback - callback when firmware was loaded
 *
 * If loaded successfully, copies the firmware into buffers
 * for the card to fetch (via DMA).
 *
 * Runs asynchronously (from request_firmware_nowait).  Error handling:
 * failures before PCI buffers are committed retry the next-lower API
 * version via try_again; once that also fails, or after a registration
 * failure, out_unbind completes the load and releases the driver.
 */
static void
iwl4965_ucode_callback(const struct firmware *ucode_raw, void *context)
{
	struct iwl_priv *priv = context;
	struct iwl_ucode_header *ucode;
	int err;
	struct iwl4965_firmware_pieces pieces;
	const unsigned int api_max = priv->cfg->ucode_api_max;
	const unsigned int api_min = priv->cfg->ucode_api_min;
	u32 api_ver;

	u32 max_probe_length = 200;
	u32 standard_phy_calibration_size =
			IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;

	memset(&pieces, 0, sizeof(pieces));

	if (!ucode_raw) {
		/* request failed; only complain once per downgrade chain */
		if (priv->fw_index <= priv->cfg->ucode_api_max)
			IWL_ERR(priv,
				"request for firmware file '%s' failed.\n",
				priv->firmware_name);
		goto try_again;
	}

	IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
		       priv->firmware_name, ucode_raw->size);

	/* Make sure that we got at least the API version number */
	if (ucode_raw->size < 4) {
		IWL_ERR(priv, "File size way too small!\n");
		goto try_again;
	}

	/* Data from ucode file:  header followed by uCode images */
	/* NOTE(review): 'ucode' is never used after this assignment —
	 * iwl4965_load_firmware() re-derives the header itself; this
	 * local is a candidate for removal. */
	ucode = (struct iwl_ucode_header *)ucode_raw->data;

	err = iwl4965_load_firmware(priv, ucode_raw, &pieces);

	if (err)
		goto try_again;

	api_ver = IWL_UCODE_API(priv->ucode_ver);

	/*
	 * api_ver should match the api version forming part of the
	 * firmware filename ... but we don't check for that and only rely
	 * on the API version read from firmware header from here on forward
	 */
	if (api_ver < api_min || api_ver > api_max) {
		IWL_ERR(priv,
			"Driver unable to support your firmware API. "
			"Driver supports v%u, firmware is v%u.\n",
			api_max, api_ver);
		goto try_again;
	}

	if (api_ver != api_max)
		IWL_ERR(priv,
			"Firmware has old API version. Expected v%u, "
			"got v%u. New firmware can be obtained "
			"from http://www.intellinuxwireless.org.\n",
			api_max, api_ver);

	IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
		 IWL_UCODE_MAJOR(priv->ucode_ver),
		 IWL_UCODE_MINOR(priv->ucode_ver),
		 IWL_UCODE_API(priv->ucode_ver),
		 IWL_UCODE_SERIAL(priv->ucode_ver));

	snprintf(priv->hw->wiphy->fw_version,
		 sizeof(priv->hw->wiphy->fw_version),
		 "%u.%u.%u.%u",
		 IWL_UCODE_MAJOR(priv->ucode_ver),
		 IWL_UCODE_MINOR(priv->ucode_ver),
		 IWL_UCODE_API(priv->ucode_ver),
		 IWL_UCODE_SERIAL(priv->ucode_ver));

	/*
	 * For any of the failures below (before allocating pci memory)
	 * we will try to load a version with a smaller API -- maybe the
	 * user just got a corrupted version of the latest API.
	 */

	IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
		       priv->ucode_ver);
	IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
		       pieces.inst_size);
	IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
		       pieces.data_size);
	IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
		       pieces.init_size);
	IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
		       pieces.init_data_size);
	IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n",
		       pieces.boot_size);

	/* Verify that uCode images will fit in card's SRAM */
	if (pieces.inst_size > priv->hw_params.max_inst_size) {
		IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
			pieces.inst_size);
		goto try_again;
	}

	if (pieces.data_size > priv->hw_params.max_data_size) {
		IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
			pieces.data_size);
		goto try_again;
	}

	if (pieces.init_size > priv->hw_params.max_inst_size) {
		IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
			pieces.init_size);
		goto try_again;
	}

	if (pieces.init_data_size > priv->hw_params.max_data_size) {
		IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
			pieces.init_data_size);
		goto try_again;
	}

	if (pieces.boot_size > priv->hw_params.max_bsm_size) {
		IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n",
			pieces.boot_size);
		goto try_again;
	}

	/* Allocate ucode buffers for card's bus-master loading ... */

	/* Runtime instructions and 2 copies of data:
	 * 1) unmodified from disk
	 * 2) backup cache for save/restore during power-downs */
	priv->ucode_code.len = pieces.inst_size;
	iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);

	priv->ucode_data.len = pieces.data_size;
	iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);

	priv->ucode_data_backup.len = pieces.data_size;
	iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);

	if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
	    !priv->ucode_data_backup.v_addr)
		goto err_pci_alloc;

	/* Initialization instructions and data */
	if (pieces.init_size && pieces.init_data_size) {
		priv->ucode_init.len = pieces.init_size;
		iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);

		priv->ucode_init_data.len = pieces.init_data_size;
		iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);

		if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
			goto err_pci_alloc;
	}

	/* Bootstrap (instructions only, no data) */
	if (pieces.boot_size) {
		priv->ucode_boot.len = pieces.boot_size;
		iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);

		if (!priv->ucode_boot.v_addr)
			goto err_pci_alloc;
	}

	/* Now that we can no longer fail, copy information */

	priv->sta_key_max_num = STA_KEY_MAX_NUM;

	/* Copy images into buffers for card's bus-master reads ... */

	/* Runtime instructions (first block of data in file) */
	IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n",
		       pieces.inst_size);
	memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size);

	IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
		       priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);

	/*
	 * Runtime data
	 * NOTE:  Copy into backup buffer will be done in iwl_up()
	 */
	IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n",
		       pieces.data_size);
	memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size);
	memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size);

	/* Initialization instructions */
	if (pieces.init_size) {
		IWL_DEBUG_INFO(priv,
				"Copying (but not loading) init instr len %Zd\n",
				pieces.init_size);
		memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size);
	}

	/* Initialization data */
	if (pieces.init_data_size) {
		IWL_DEBUG_INFO(priv,
				"Copying (but not loading) init data len %Zd\n",
			       pieces.init_data_size);
		memcpy(priv->ucode_init_data.v_addr, pieces.init_data,
		       pieces.init_data_size);
	}

	/* Bootstrap instructions */
	IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n",
		       pieces.boot_size);
	memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size);

	/*
	 * figure out the offset of chain noise reset and gain commands
	 * base on the size of standard phy calibration commands table size
	 */
	priv->_4965.phy_calib_chain_noise_reset_cmd =
		standard_phy_calibration_size;
	priv->_4965.phy_calib_chain_noise_gain_cmd =
		standard_phy_calibration_size + 1;

	/**************************************************
	 * This is still part of probe() in a sense...
	 *
	 * 9. Setup and register with mac80211 and debugfs
	 **************************************************/
	err = iwl4965_mac_setup_register(priv, max_probe_length);
	if (err)
		goto out_unbind;

	/* debugfs failure is non-fatal: log and carry on */
	err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
	if (err)
		IWL_ERR(priv,
		"failed to create debugfs files. Ignoring error: %d\n", err);

	err = sysfs_create_group(&priv->pci_dev->dev.kobj,
					&iwl_attribute_group);
	if (err) {
		IWL_ERR(priv, "failed to create sysfs device attributes\n");
		goto out_unbind;
	}

	/* We have our copies now, allow OS release its copies */
	release_firmware(ucode_raw);
	complete(&priv->_4965.firmware_loading_complete);
	return;

 try_again:
	/* try next, if any */
	if (iwl4965_request_firmware(priv, false))
		goto out_unbind;
	release_firmware(ucode_raw);
	return;

 err_pci_alloc:
	IWL_ERR(priv, "failed to allocate pci memory\n");
	iwl4965_dealloc_ucode_pci(priv);
 out_unbind:
	complete(&priv->_4965.firmware_loading_complete);
	device_release_driver(&priv->pci_dev->dev);
	release_firmware(ucode_raw);
}
1461
/* Human-readable names for uCode error codes, indexed directly by the
 * error number (used by iwl4965_desc_lookup() for small codes). */
static const char * const desc_lookup_text[] = {
	"OK",
	"FAIL",
	"BAD_PARAM",
	"BAD_CHECKSUM",
	"NMI_INTERRUPT_WDG",
	"SYSASSERT",
	"FATAL_ERROR",
	"BAD_COMMAND",
	"HW_ERROR_TUNE_LOCK",
	"HW_ERROR_TEMPERATURE",
	"ILLEGAL_CHAN_FREQ",
	"VCC_NOT_STABLE",
	"FH_ERROR",
	"NMI_INTERRUPT_HOST",
	"NMI_INTERRUPT_ACTION_PT",
	"NMI_INTERRUPT_UNKNOWN",
	"UCODE_VERSION_MISMATCH",
	"HW_ERROR_ABS_LOCK",
	"HW_ERROR_CAL_LOCK_FAIL",
	"NMI_INTERRUPT_INST_ACTION_PT",
	"NMI_INTERRUPT_DATA_ACTION_PT",
	"NMI_TRM_HW_ER",
	"NMI_INTERRUPT_TRM",
	"NMI_INTERRUPT_BREAK_POINT",
	"DEBUG_0",
	"DEBUG_1",
	"DEBUG_2",
	"DEBUG_3",
};
1492
1493static struct { char *name; u8 num; } advanced_lookup[] = {
1494 { "NMI_INTERRUPT_WDG", 0x34 },
1495 { "SYSASSERT", 0x35 },
1496 { "UCODE_VERSION_MISMATCH", 0x37 },
1497 { "BAD_COMMAND", 0x38 },
1498 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
1499 { "FATAL_ERROR", 0x3D },
1500 { "NMI_TRM_HW_ERR", 0x46 },
1501 { "NMI_INTERRUPT_TRM", 0x4C },
1502 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
1503 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
1504 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
1505 { "NMI_INTERRUPT_HOST", 0x66 },
1506 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
1507 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
1508 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
1509 { "ADVANCED_SYSASSERT", 0 },
1510};
1511
1512static const char *iwl4965_desc_lookup(u32 num)
1513{
1514 int i;
1515 int max = ARRAY_SIZE(desc_lookup_text);
1516
1517 if (num < max)
1518 return desc_lookup_text[num];
1519
1520 max = ARRAY_SIZE(advanced_lookup) - 1;
1521 for (i = 0; i < max; i++) {
1522 if (advanced_lookup[i].num == num)
1523 break;
1524 }
1525 return advanced_lookup[i].name;
1526}
1527
/* Layout of the uCode error-event table in device SRAM: one u32 count,
 * then 7-u32 records. */
#define ERROR_START_OFFSET  (1 * sizeof(u32))
#define ERROR_ELEM_SIZE     (7 * sizeof(u32))

/*
 * Dump the uCode's error log from device target memory to the kernel
 * log.  Picks the init or runtime error-table pointer depending on
 * which uCode is loaded, validates it, then reads the fixed-offset
 * fields of the first record.
 */
void iwl4965_dump_nic_error_log(struct iwl_priv *priv)
{
	u32 data2, line;
	u32 desc, time, count, base, data1;
	u32 blink1, blink2, ilink1, ilink2;
	u32 pc, hcmd;

	/* error table location differs between init and runtime images */
	if (priv->ucode_type == UCODE_INIT) {
		base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
	} else {
		base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
	}

	if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
		IWL_ERR(priv,
			"Not valid error log pointer 0x%08X for %s uCode\n",
			base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
		return;
	}

	/* first word of the table is the number of recorded entries */
	count = iwl_legacy_read_targ_mem(priv, base);

	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
		IWL_ERR(priv, "Start IWL Error Log Dump:\n");
		IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
			priv->status, count);
	}

	/* fixed word offsets within the error-event table */
	desc = iwl_legacy_read_targ_mem(priv, base + 1 * sizeof(u32));
	priv->isr_stats.err_code = desc;
	pc = iwl_legacy_read_targ_mem(priv, base + 2 * sizeof(u32));
	blink1 = iwl_legacy_read_targ_mem(priv, base + 3 * sizeof(u32));
	blink2 = iwl_legacy_read_targ_mem(priv, base + 4 * sizeof(u32));
	ilink1 = iwl_legacy_read_targ_mem(priv, base + 5 * sizeof(u32));
	ilink2 = iwl_legacy_read_targ_mem(priv, base + 6 * sizeof(u32));
	data1 = iwl_legacy_read_targ_mem(priv, base + 7 * sizeof(u32));
	data2 = iwl_legacy_read_targ_mem(priv, base + 8 * sizeof(u32));
	line = iwl_legacy_read_targ_mem(priv, base + 9 * sizeof(u32));
	time = iwl_legacy_read_targ_mem(priv, base + 11 * sizeof(u32));
	hcmd = iwl_legacy_read_targ_mem(priv, base + 22 * sizeof(u32));

	trace_iwlwifi_legacy_dev_ucode_error(priv, desc,
					time, data1, data2, line,
					blink1, blink2, ilink1, ilink2);

	IWL_ERR(priv, "Desc Time "
		"data1 data2 line\n");
	IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
		iwl4965_desc_lookup(desc), desc, time, data1, data2, line);
	IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n");
	IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
		pc, blink1, blink2, ilink1, ilink2, hcmd);
}
1584
1585static void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
1586{
1587 struct iwl_ct_kill_config cmd;
1588 unsigned long flags;
1589 int ret = 0;
1590
1591 spin_lock_irqsave(&priv->lock, flags);
1592 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
1593 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
1594 spin_unlock_irqrestore(&priv->lock, flags);
1595
1596 cmd.critical_temperature_R =
1597 cpu_to_le32(priv->hw_params.ct_kill_threshold);
1598
1599 ret = iwl_legacy_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
1600 sizeof(cmd), &cmd);
1601 if (ret)
1602 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
1603 else
1604 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
1605 "succeeded, "
1606 "critical temperature is %d\n",
1607 priv->hw_params.ct_kill_threshold);
1608}
1609
/* Default Tx-queue -> FIFO mapping used by iwl4965_alive_notify();
 * index is the queue number, value is the FIFO (or unused/command). */
static const s8 default_queue_to_tx_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
	IWL49_CMD_FIFO_NUM,	/* host-command queue */
	IWL_TX_FIFO_UNUSED,
	IWL_TX_FIFO_UNUSED,
};
1619
/*
 * Post-ALIVE hardware setup: initialize the 4965's Tx scheduler.
 *
 * Clears the scheduler's SRAM context, programs the byte-count table
 * address, enables the Tx DMA channels, and configures every Tx queue
 * (window size, frame limit, queue->FIFO mapping).  Runs entirely under
 * priv->lock.  Always returns 0 in the current implementation.
 */
static int iwl4965_alive_notify(struct iwl_priv *priv)
{
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&priv->lock, flags);

	/* Clear 4965's internal Tx Scheduler data base */
	priv->scd_base_addr = iwl_legacy_read_prph(priv,
					IWL49_SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
	for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
		iwl_legacy_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
		iwl_legacy_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr +
	       IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
		iwl_legacy_write_targ_mem(priv, a, 0);

	/* Tell 4965 where to find Tx byte count tables */
	iwl_legacy_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
			priv->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
		iwl_legacy_write_direct32(priv,
				FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_legacy_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
	iwl_legacy_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Disable chain mode for all queues */
	iwl_legacy_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);

	/* Initialize each Tx queue (including the command queue) */
	for (i = 0; i < priv->hw_params.max_txq_num; i++) {

		/* TFD circular buffer read/write indexes */
		iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
		iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));

		/* Max Tx Window size for Scheduler-ACK mode */
		iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
				IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
				(SCD_WIN_SIZE <<
				IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
				IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

		/* Frame limit */
		iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
				IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				(SCD_FRAME_LIMIT <<
				IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	}
	iwl_legacy_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
				 (1 << priv->hw_params.max_txq_num) - 1);

	/* Activate all Tx DMA/FIFO channels */
	iwl4965_txq_set_sched(priv, IWL_MASK(0, 6));

	iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);

	/* make sure all queue are not stopped */
	memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&priv->queue_stop_count[i], 0);

	/* reset to 0 to enable all the queue first */
	priv->txq_ctx_active_msk = 0;
	/* Map each Tx/cmd queue to its corresponding fifo */
	BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);

	for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
		int ac = default_queue_to_tx_fifo[i];

		iwl_txq_ctx_activate(priv, i);

		if (ac == IWL_TX_FIFO_UNUSED)
			continue;

		iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
1716
/**
 * iwl4965_alive_start - called after REPLY_ALIVE notification received
 *                       from protocol/runtime uCode (initialization uCode's
 *                       Alive gets handled by iwl_init_alive_start()).
 *
 * Validates the alive response and the loaded runtime image, performs
 * the post-ALIVE scheduler setup, then brings the driver to READY
 * state (RXON, BT config, CT-kill, power mode).  Any failure queues
 * the restart work.
 */
static void iwl4965_alive_start(struct iwl_priv *priv)
{
	int ret = 0;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");

	if (priv->card_alive.is_valid != UCODE_VALID_OK) {
		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		IWL_DEBUG_INFO(priv, "Alive failed.\n");
		goto restart;
	}

	/* Initialize uCode has loaded Runtime uCode ... verify inst image.
	 * This is a paranoid check, because we would not have gotten the
	 * "runtime" alive if code weren't properly loaded.  */
	if (iwl4965_verify_ucode(priv)) {
		/* Runtime instruction load was bad;
		 * take it all the way back down so we can try again */
		IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
		goto restart;
	}

	ret = iwl4965_alive_notify(priv);
	if (ret) {
		IWL_WARN(priv,
			"Could not complete ALIVE transition [ntf]: %d\n", ret);
		goto restart;
	}


	/* After the ALIVE response, we can send host commands to the uCode */
	set_bit(STATUS_ALIVE, &priv->status);

	/* Enable watchdog to monitor the driver tx queues */
	iwl_legacy_setup_watchdog(priv);

	/* stop here while RF-killed; the rfkill handler resumes bring-up */
	if (iwl_legacy_is_rfkill(priv))
		return;

	ieee80211_wake_queues(priv->hw);

	priv->active_rate = IWL_RATES_MASK;

	if (iwl_legacy_is_associated_ctx(ctx)) {
		struct iwl_legacy_rxon_cmd *active_rxon =
				(struct iwl_legacy_rxon_cmd *)&ctx->active;
		/* apply any changes in staging */
		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	} else {
		struct iwl_rxon_context *tmp;
		/* Initialize our rx_config data */
		for_each_context(priv, tmp)
			iwl_legacy_connection_init_rx_config(priv, tmp);

		if (priv->cfg->ops->hcmd->set_rxon_chain)
			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
	}

	/* Configure bluetooth coexistence if enabled */
	iwl_legacy_send_bt_config(priv);

	iwl4965_reset_run_time_calib(priv);

	set_bit(STATUS_READY, &priv->status);

	/* Configure the adapter for unassociated operation */
	iwl_legacy_commit_rxon(priv, ctx);

	/* At this point, the NIC is initialized and operational */
	iwl4965_rf_kill_ct_config(priv);

	IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
	wake_up(&priv->wait_command_queue);

	iwl_legacy_power_update_mode(priv, true);
	IWL_DEBUG_INFO(priv, "Updated power mode\n");

	return;

 restart:
	queue_work(priv->workqueue, &priv->restart);
}
1807
/* Defined later in the file; needed by iwl4965_down(). */
static void iwl4965_cancel_deferred_work(struct iwl_priv *priv);

/*
 * Tear the device down: cancel scans, stop the watchdog, flush station
 * state, quiesce interrupts, stop Tx/Rx DMA and put the NIC in low
 * power.  Preserves a small set of status bits (RF-kill, geo config,
 * exit-pending, and FW-error once initialized) across the teardown.
 * Caller must hold priv->mutex (see iwl4965_down()).
 */
static void __iwl4965_down(struct iwl_priv *priv)
{
	unsigned long flags;
	int exit_pending;

	IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");

	iwl_legacy_scan_cancel_timeout(priv, 200);

	exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);

	/* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
	 * to prevent rearm timer */
	del_timer_sync(&priv->watchdog);

	iwl_legacy_clear_ucode_stations(priv, NULL);
	iwl_legacy_dealloc_bcast_stations(priv);
	iwl_legacy_clear_driver_stations(priv);

	/* Unblock any waiting calls */
	wake_up_all(&priv->wait_command_queue);

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		clear_bit(STATUS_EXIT_PENDING, &priv->status);

	/* stop and reset the on-board processor */
	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_legacy_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
	iwl4965_synchronize_irq(priv);

	if (priv->mac80211_registered)
		ieee80211_stop_queues(priv->hw);

	/* If we have not previously called iwl_init() then
	 * clear all bits but the RF Kill bit and return */
	if (!iwl_legacy_is_init(priv)) {
		priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
					STATUS_RF_KILL_HW |
			       test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
					STATUS_GEO_CONFIGURED |
			       test_bit(STATUS_EXIT_PENDING, &priv->status) <<
					STATUS_EXIT_PENDING;
		goto exit;
	}

	/* ...otherwise clear out all the status bits but the RF Kill
	 * bit and continue taking the NIC down. */
	priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
				STATUS_RF_KILL_HW |
			test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
				STATUS_GEO_CONFIGURED |
			test_bit(STATUS_FW_ERROR, &priv->status) <<
				STATUS_FW_ERROR |
		       test_bit(STATUS_EXIT_PENDING, &priv->status) <<
				STATUS_EXIT_PENDING;

	iwl4965_txq_ctx_stop(priv);
	iwl4965_rxq_stop(priv);

	/* Power-down device's busmaster DMA clocks */
	iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(5);

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
				CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_legacy_apm_stop(priv);

 exit:
	memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));

	dev_kfree_skb(priv->beacon_skb);
	priv->beacon_skb = NULL;

	/* clear out any free frames */
	iwl4965_clear_free_frames(priv);
}
1895
/*
 * Locked wrapper for __iwl4965_down(); deferred work is cancelled
 * outside the mutex so pending work items can take it while draining.
 */
static void iwl4965_down(struct iwl_priv *priv)
{
	mutex_lock(&priv->mutex);
	__iwl4965_down(priv);
	mutex_unlock(&priv->mutex);

	iwl4965_cancel_deferred_work(priv);
}
1904
1905#define HW_READY_TIMEOUT (50)
1906
1907static int iwl4965_set_hw_ready(struct iwl_priv *priv)
1908{
1909 int ret = 0;
1910
1911 iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1912 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
1913
1914 /* See if we got it */
1915 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
1916 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1917 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1918 HW_READY_TIMEOUT);
1919 if (ret != -ETIMEDOUT)
1920 priv->hw_ready = true;
1921 else
1922 priv->hw_ready = false;
1923
1924 IWL_DEBUG_INFO(priv, "hardware %s\n",
1925 (priv->hw_ready == 1) ? "ready" : "not ready");
1926 return ret;
1927}
1928
/*
 * Bring the NIC to the ready state, issuing a "prepare" request and a
 * longer poll if the first readiness check fails.  The final state is
 * reflected in priv->hw_ready; the return value is the last poll
 * result.
 */
static int iwl4965_prepare_card_hw(struct iwl_priv *priv)
{
	int ret = 0;

	IWL_DEBUG_INFO(priv, "iwl4965_prepare_card_hw enter\n");

	ret = iwl4965_set_hw_ready(priv);
	if (priv->hw_ready)
		return ret;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			CSR_HW_IF_CONFIG_REG_PREPARE);

	/* wait (up to 150ms) for the prepare handshake to complete */
	ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
			~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

	/* HW should be ready by now, check again. */
	if (ret != -ETIMEDOUT)
		iwl4965_set_hw_ready(priv);

	return ret;
}
1953
/* Number of bootstrap-load attempts before giving up on the device. */
#define MAX_HW_RESTARTS 5

/*
 * Bring the NIC up: allocate broadcast stations, prepare the hardware,
 * honor the RF-kill switch, init the NIC, enable interrupts, seed the
 * data-SRAM backup, and try (up to MAX_HW_RESTARTS times) to load the
 * bootstrap uCode and start the card.  Returns 0 when the card is
 * coming up (or RF-killed), negative errno otherwise.  Caller holds
 * priv->mutex.
 */
static int __iwl4965_up(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx;
	int i;
	int ret;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
		return -EIO;
	}

	if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
		IWL_ERR(priv, "ucode not available for device bringup\n");
		return -EIO;
	}

	for_each_context(priv, ctx) {
		ret = iwl4965_alloc_bcast_station(priv, ctx);
		if (ret) {
			iwl_legacy_dealloc_bcast_stations(priv);
			return ret;
		}
	}

	iwl4965_prepare_card_hw(priv);

	if (!priv->hw_ready) {
		IWL_WARN(priv, "Exit HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (iwl_read32(priv,
		CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		set_bit(STATUS_RF_KILL_HW, &priv->status);

	if (iwl_legacy_is_rfkill(priv)) {
		wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);

		/* keep interrupts on so we notice when rfkill is lifted */
		iwl_legacy_enable_interrupts(priv);
		IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
		return 0;
	}

	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);

	/* must be initialised before iwl_hw_nic_init */
	priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;

	ret = iwl4965_hw_nic_init(priv);
	if (ret) {
		IWL_ERR(priv, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
	iwl_legacy_enable_interrupts(priv);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Copy original ucode data image from disk into backup cache.
	 * This will be used to initialize the on-board processor's
	 * data SRAM for a clean start when the runtime program first loads. */
	memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
	       priv->ucode_data.len);

	for (i = 0; i < MAX_HW_RESTARTS; i++) {

		/* load bootstrap state machine,
		 * load bootstrap program into processor's memory,
		 * prepare to load the "initialize" uCode */
		ret = priv->cfg->ops->lib->load_ucode(priv);

		if (ret) {
			IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n",
				ret);
			continue;
		}

		/* start card; "initialize" will load runtime ucode */
		iwl4965_nic_start(priv);

		IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");

		return 0;
	}

	set_bit(STATUS_EXIT_PENDING, &priv->status);
	__iwl4965_down(priv);
	clear_bit(STATUS_EXIT_PENDING, &priv->status);

	/* tried to restart and config the device for as long as our
	 * patience could withstand */
	IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
	return -EIO;
}
2062
2063
2064/*****************************************************************************
2065 *
2066 * Workqueue callbacks
2067 *
2068 *****************************************************************************/
2069
2070static void iwl4965_bg_init_alive_start(struct work_struct *data)
2071{
2072 struct iwl_priv *priv =
2073 container_of(data, struct iwl_priv, init_alive_start.work);
2074
2075 mutex_lock(&priv->mutex);
2076 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2077 goto out;
2078
2079 priv->cfg->ops->lib->init_alive_start(priv);
2080out:
2081 mutex_unlock(&priv->mutex);
2082}
2083
2084static void iwl4965_bg_alive_start(struct work_struct *data)
2085{
2086 struct iwl_priv *priv =
2087 container_of(data, struct iwl_priv, alive_start.work);
2088
2089 mutex_lock(&priv->mutex);
2090 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2091 goto out;
2092
2093 iwl4965_alive_start(priv);
2094out:
2095 mutex_unlock(&priv->mutex);
2096}
2097
2098static void iwl4965_bg_run_time_calib_work(struct work_struct *work)
2099{
2100 struct iwl_priv *priv = container_of(work, struct iwl_priv,
2101 run_time_calib_work);
2102
2103 mutex_lock(&priv->mutex);
2104
2105 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
2106 test_bit(STATUS_SCANNING, &priv->status)) {
2107 mutex_unlock(&priv->mutex);
2108 return;
2109 }
2110
2111 if (priv->start_calib) {
2112 iwl4965_chain_noise_calibration(priv,
2113 (void *)&priv->_4965.statistics);
2114 iwl4965_sensitivity_calibration(priv,
2115 (void *)&priv->_4965.statistics);
2116 }
2117
2118 mutex_unlock(&priv->mutex);
2119}
2120
/*
 * Deferred work: restart the device.
 *
 * Two paths:
 *  - Firmware error (STATUS_FW_ERROR set): detach all vifs, bring the
 *    device down, then ask mac80211 to restart the hw — mac80211 will
 *    call our start/add_interface callbacks again.
 *  - Otherwise: a plain down/up cycle driven entirely by this driver.
 */
static void iwl4965_bg_restart(struct work_struct *data)
{
	struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
		struct iwl_rxon_context *ctx;

		mutex_lock(&priv->mutex);
		/* Forget all vifs; mac80211 re-adds them after restart. */
		for_each_context(priv, ctx)
			ctx->vif = NULL;
		priv->is_open = 0;

		__iwl4965_down(priv);

		/* Cancel pending work outside the mutex — some of the
		 * cancelled handlers take priv->mutex themselves. */
		mutex_unlock(&priv->mutex);
		iwl4965_cancel_deferred_work(priv);
		ieee80211_restart_hw(priv->hw);
	} else {
		iwl4965_down(priv);

		mutex_lock(&priv->mutex);
		/* Teardown may have started while we were down. */
		if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
			mutex_unlock(&priv->mutex);
			return;
		}

		__iwl4965_up(priv);
		mutex_unlock(&priv->mutex);
	}
}
2154
2155static void iwl4965_bg_rx_replenish(struct work_struct *data)
2156{
2157 struct iwl_priv *priv =
2158 container_of(data, struct iwl_priv, rx_replenish);
2159
2160 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2161 return;
2162
2163 mutex_lock(&priv->mutex);
2164 iwl4965_rx_replenish(priv);
2165 mutex_unlock(&priv->mutex);
2166}
2167
2168/*****************************************************************************
2169 *
2170 * mac80211 entry point functions
2171 *
2172 *****************************************************************************/
2173
2174#define UCODE_READY_TIMEOUT (4 * HZ)
2175
2176/*
2177 * Not a mac80211 entry point function, but it fits in with all the
2178 * other mac80211 functions grouped here.
2179 */
/*
 * iwl4965_mac_setup_register - describe hw capabilities and register
 * with mac80211.
 *
 * @max_probe_length: longest probe request the uCode accepts; used to
 *	derive wiphy->max_scan_ie_len (minus the 24-byte 802.11 header
 *	and 2-byte zero-length SSID element we build ourselves).
 *
 * Returns 0 on success or the ieee80211_register_hw() error.  On success
 * priv->mac80211_registered is set so teardown knows to unregister.
 */
static int iwl4965_mac_setup_register(struct iwl_priv *priv,
				  u32 max_probe_length)
{
	int ret;
	struct ieee80211_hw *hw = priv->hw;
	struct iwl_rxon_context *ctx;

	hw->rate_control_algorithm = "iwl-4965-rs";

	/* Tell mac80211 our characteristics */
	hw->flags = IEEE80211_HW_SIGNAL_DBM |
		    IEEE80211_HW_AMPDU_AGGREGATION |
		    IEEE80211_HW_NEED_DTIM_PERIOD |
		    IEEE80211_HW_SPECTRUM_MGMT |
		    IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	/* SMPS only advertised on 11n-capable SKUs. */
	if (priv->cfg->sku & IWL_SKU_N)
		hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
			     IEEE80211_HW_SUPPORTS_STATIC_SMPS;

	hw->sta_data_size = sizeof(struct iwl_station_priv);
	hw->vif_data_size = sizeof(struct iwl_vif_priv);

	/* Advertise the union of interface modes from all contexts. */
	for_each_context(priv, ctx) {
		hw->wiphy->interface_modes |= ctx->interface_modes;
		hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
	}

	hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
			    WIPHY_FLAG_DISABLE_BEACON_HINTS;

	/*
	 * For now, disable PS by default because it affects
	 * RX performance significantly.
	 */
	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
	/* we create the 802.11 header and a zero-length SSID element */
	hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;

	/* Default value; 4 EDCA QOS priorities */
	hw->queues = 4;

	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;

	/* Only expose bands that actually have channels. */
	if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
		priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&priv->bands[IEEE80211_BAND_2GHZ];
	if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
		priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&priv->bands[IEEE80211_BAND_5GHZ];

	iwl_legacy_leds_init(priv);

	ret = ieee80211_register_hw(priv->hw);
	if (ret) {
		IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
		return ret;
	}
	priv->mac80211_registered = 1;

	return 0;
}
2244
2245
/*
 * iwl4965_mac_start - mac80211 start callback
 *
 * Brings the NIC up and waits (up to UCODE_READY_TIMEOUT) for the runtime
 * uCode to signal START_ALIVE via STATUS_READY; mac80211 callbacks cannot
 * succeed before that point.
 *
 * If the radio is rfkilled, __iwl4965_up() returns 0 without loading
 * firmware; we skip the wait and still mark the interface open so user
 * space can observe later rfkill changes.  Returns -ETIMEDOUT if the
 * uCode never comes alive (is_open is NOT set in that case).
 */
int iwl4965_mac_start(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;
	int ret;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	/* we should be verifying the device is ready to be opened */
	mutex_lock(&priv->mutex);
	ret = __iwl4965_up(priv);
	mutex_unlock(&priv->mutex);

	if (ret)
		return ret;

	if (iwl_legacy_is_rfkill(priv))
		goto out;

	IWL_DEBUG_INFO(priv, "Start UP work done.\n");

	/* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
	 * mac80211 will not be run successfully. */
	ret = wait_event_timeout(priv->wait_command_queue,
			test_bit(STATUS_READY, &priv->status),
			UCODE_READY_TIMEOUT);
	if (!ret) {
		if (!test_bit(STATUS_READY, &priv->status)) {
			IWL_ERR(priv, "START_ALIVE timeout after %dms.\n",
				jiffies_to_msecs(UCODE_READY_TIMEOUT));
			return -ETIMEDOUT;
		}
	}

	iwl4965_led_enable(priv);

out:
	priv->is_open = 1;
	IWL_DEBUG_MAC80211(priv, "leave\n");
	return 0;
}
2286
/*
 * iwl4965_mac_stop - mac80211 stop callback
 *
 * Brings the device down, flushes all deferred work, then re-arms the
 * rfkill interrupt so user space still sees rfkill state changes while
 * the interface is down.
 */
void iwl4965_mac_stop(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (!priv->is_open)
		return;

	priv->is_open = 0;

	iwl4965_down(priv);

	flush_workqueue(priv->workqueue);

	/* User space software may expect getting rfkill changes
	 * even if interface is down */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
	iwl_legacy_enable_rfkill_int(priv);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
2309
/*
 * iwl4965_mac_tx - mac80211 transmit callback
 *
 * Hands the frame to iwl4965_tx_skb(); on failure the skb is freed here
 * since mac80211 has already transferred ownership to us.
 */
void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct iwl_priv *priv = hw->priv;

	IWL_DEBUG_MACDUMP(priv, "enter\n");

	IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
		     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);

	/* non-zero means the frame was not queued — drop it */
	if (iwl4965_tx_skb(priv, skb))
		dev_kfree_skb_any(skb);

	IWL_DEBUG_MACDUMP(priv, "leave\n");
}
2324
2325void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
2326 struct ieee80211_vif *vif,
2327 struct ieee80211_key_conf *keyconf,
2328 struct ieee80211_sta *sta,
2329 u32 iv32, u16 *phase1key)
2330{
2331 struct iwl_priv *priv = hw->priv;
2332 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
2333
2334 IWL_DEBUG_MAC80211(priv, "enter\n");
2335
2336 iwl4965_update_tkip_key(priv, vif_priv->ctx, keyconf, sta,
2337 iv32, phase1key);
2338
2339 IWL_DEBUG_MAC80211(priv, "leave\n");
2340}
2341
2342int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2343 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
2344 struct ieee80211_key_conf *key)
2345{
2346 struct iwl_priv *priv = hw->priv;
2347 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
2348 struct iwl_rxon_context *ctx = vif_priv->ctx;
2349 int ret;
2350 u8 sta_id;
2351 bool is_default_wep_key = false;
2352
2353 IWL_DEBUG_MAC80211(priv, "enter\n");
2354
2355 if (priv->cfg->mod_params->sw_crypto) {
2356 IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
2357 return -EOPNOTSUPP;
2358 }
2359
2360 sta_id = iwl_legacy_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
2361 if (sta_id == IWL_INVALID_STATION)
2362 return -EINVAL;
2363
2364 mutex_lock(&priv->mutex);
2365 iwl_legacy_scan_cancel_timeout(priv, 100);
2366
2367 /*
2368 * If we are getting WEP group key and we didn't receive any key mapping
2369 * so far, we are in legacy wep mode (group key only), otherwise we are
2370 * in 1X mode.
2371 * In legacy wep mode, we use another host command to the uCode.
2372 */
2373 if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
2374 key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
2375 !sta) {
2376 if (cmd == SET_KEY)
2377 is_default_wep_key = !ctx->key_mapping_keys;
2378 else
2379 is_default_wep_key =
2380 (key->hw_key_idx == HW_KEY_DEFAULT);
2381 }
2382
2383 switch (cmd) {
2384 case SET_KEY:
2385 if (is_default_wep_key)
2386 ret = iwl4965_set_default_wep_key(priv,
2387 vif_priv->ctx, key);
2388 else
2389 ret = iwl4965_set_dynamic_key(priv, vif_priv->ctx,
2390 key, sta_id);
2391
2392 IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
2393 break;
2394 case DISABLE_KEY:
2395 if (is_default_wep_key)
2396 ret = iwl4965_remove_default_wep_key(priv, ctx, key);
2397 else
2398 ret = iwl4965_remove_dynamic_key(priv, ctx,
2399 key, sta_id);
2400
2401 IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
2402 break;
2403 default:
2404 ret = -EINVAL;
2405 }
2406
2407 mutex_unlock(&priv->mutex);
2408 IWL_DEBUG_MAC80211(priv, "leave\n");
2409
2410 return ret;
2411}
2412
/*
 * iwl4965_mac_ampdu_action - mac80211 A-MPDU (aggregation) callback
 *
 * Starts/stops RX and TX aggregation sessions for (sta, tid).  Refused
 * with -EACCES on non-11n SKUs.  Stop failures are masked to 0 while the
 * driver is tearing down, since the session is going away anyway.
 * Actions not listed fall through with the initial ret of -EINVAL.
 */
int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
			 struct ieee80211_vif *vif,
			 enum ieee80211_ampdu_mlme_action action,
			 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
			 u8 buf_size)
{
	struct iwl_priv *priv = hw->priv;
	int ret = -EINVAL;

	IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
		     sta->addr, tid);

	if (!(priv->cfg->sku & IWL_SKU_N))
		return -EACCES;

	mutex_lock(&priv->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		IWL_DEBUG_HT(priv, "start Rx\n");
		ret = iwl4965_sta_rx_agg_start(priv, sta, tid, *ssn);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		IWL_DEBUG_HT(priv, "stop Rx\n");
		ret = iwl4965_sta_rx_agg_stop(priv, sta, tid);
		if (test_bit(STATUS_EXIT_PENDING, &priv->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_START:
		IWL_DEBUG_HT(priv, "start Tx\n");
		ret = iwl4965_tx_agg_start(priv, vif, sta, tid, ssn);
		break;
	case IEEE80211_AMPDU_TX_STOP:
		IWL_DEBUG_HT(priv, "stop Tx\n");
		ret = iwl4965_tx_agg_stop(priv, vif, sta, tid);
		if (test_bit(STATUS_EXIT_PENDING, &priv->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		/* nothing to program; accept silently */
		ret = 0;
		break;
	}
	mutex_unlock(&priv->mutex);

	return ret;
}
2459
/*
 * iwl4965_mac_sta_add - mac80211 sta_add callback
 *
 * Adds a peer station to the uCode station table and initializes rate
 * scaling for it.  Note: is_ap is true when OUR vif is a station —
 * in that mode the only peer mac80211 adds is the AP we associate with.
 */
int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
	/* peer is our AP when we operate as a station */
	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
	int ret;
	u8 sta_id;

	IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
			sta->addr);
	mutex_lock(&priv->mutex);
	IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
			sta->addr);
	/* invalid until the uCode assigns an id below */
	sta_priv->common.sta_id = IWL_INVALID_STATION;

	atomic_set(&sta_priv->pending_frames, 0);

	ret = iwl_legacy_add_station_common(priv, vif_priv->ctx, sta->addr,
				     is_ap, sta, &sta_id);
	if (ret) {
		IWL_ERR(priv, "Unable to add station %pM (%d)\n",
			sta->addr, ret);
		/* Should we return success if return code is EEXIST ? */
		mutex_unlock(&priv->mutex);
		return ret;
	}

	sta_priv->common.sta_id = sta_id;

	/* Initialize rate scaling */
	IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
		       sta->addr);
	iwl4965_rs_rate_init(priv, sta, sta_id);
	mutex_unlock(&priv->mutex);

	return 0;
}
2500
/*
 * iwl4965_mac_channel_switch - mac80211 channel_switch callback
 *
 * Handles an AP-announced channel switch for the BSS context: validates
 * the target channel, stages the new RXON/HT configuration under
 * priv->lock, then issues the device-specific channel-switch command.
 * On command failure the pending flag is cleared and mac80211 is told
 * the switch did not complete (ieee80211_chswitch_done(..., false)).
 *
 * Silently ignored when rfkilled, exiting, scanning, a switch is already
 * pending, we are not associated, or the target equals the active channel.
 */
void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
			 struct ieee80211_channel_switch *ch_switch)
{
	struct iwl_priv *priv = hw->priv;
	const struct iwl_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = ch_switch->channel;
	struct iwl_ht_config *ht_conf = &priv->current_ht_config;

	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	u16 ch;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	mutex_lock(&priv->mutex);

	if (iwl_legacy_is_rfkill(priv))
		goto out;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
	    test_bit(STATUS_SCANNING, &priv->status) ||
	    test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
		goto out;

	if (!iwl_legacy_is_associated_ctx(ctx))
		goto out;

	if (!priv->cfg->ops->lib->set_channel_switch)
		goto out;

	ch = channel->hw_value;
	/* already on the requested channel — nothing to do */
	if (le16_to_cpu(ctx->active.channel) == ch)
		goto out;

	ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
	if (!iwl_legacy_is_channel_valid(ch_info)) {
		IWL_DEBUG_MAC80211(priv, "invalid channel\n");
		goto out;
	}

	/* Stage the new configuration atomically w.r.t. the irq path. */
	spin_lock_irq(&priv->lock);

	priv->current_ht_config.smps = conf->smps_mode;

	/* Configure HT40 channels */
	ctx->ht.enabled = conf_is_ht(conf);
	if (ctx->ht.enabled) {
		if (conf_is_ht40_minus(conf)) {
			ctx->ht.extension_chan_offset =
				IEEE80211_HT_PARAM_CHA_SEC_BELOW;
			ctx->ht.is_40mhz = true;
		} else if (conf_is_ht40_plus(conf)) {
			ctx->ht.extension_chan_offset =
				IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
			ctx->ht.is_40mhz = true;
		} else {
			ctx->ht.extension_chan_offset =
				IEEE80211_HT_PARAM_CHA_SEC_NONE;
			ctx->ht.is_40mhz = false;
		}
	} else
		ctx->ht.is_40mhz = false;

	/* band/flags need a rebuild when the channel actually changes */
	if ((le16_to_cpu(ctx->staging.channel) != ch))
		ctx->staging.flags = 0;

	iwl_legacy_set_rxon_channel(priv, channel, ctx);
	iwl_legacy_set_rxon_ht(priv, ht_conf);
	iwl_legacy_set_flags_for_band(priv, ctx, channel->band, ctx->vif);

	spin_unlock_irq(&priv->lock);

	iwl_legacy_set_rate(priv);
	/*
	 * at this point, staging_rxon has the
	 * configuration for channel switch
	 */
	set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
	priv->switch_channel = cpu_to_le16(ch);
	if (priv->cfg->ops->lib->set_channel_switch(priv, ch_switch)) {
		/* command failed — undo pending state and notify mac80211 */
		clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
		priv->switch_channel = 0;
		ieee80211_chswitch_done(ctx->vif, false);
	}

out:
	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");
}
2590
/*
 * iwl4965_configure_filter - mac80211 configure_filter callback
 *
 * Translates mac80211 FIF_* filter flags into RXON filter bits: for each
 * mapping, CHK() collects the RXON bit into filter_or when the FIF flag
 * is requested, otherwise into filter_nand (bits to clear).  The staging
 * RXON of every context is updated; the change is committed later with
 * the next RXON commit rather than immediately here.
 *
 * *total_flags is rewritten to the subset of flags we can honor; ALLMULTI
 * is always effectively on since we don't program multicast filters.
 */
void iwl4965_configure_filter(struct ieee80211_hw *hw,
			 unsigned int changed_flags,
			 unsigned int *total_flags,
			 u64 multicast)
{
	struct iwl_priv *priv = hw->priv;
	__le32 filter_or = 0, filter_nand = 0;
	struct iwl_rxon_context *ctx;

#define CHK(test, flag)	do { \
	if (*total_flags & (test))		\
		filter_or |= (flag);		\
	else					\
		filter_nand |= (flag);		\
	} while (0)

	IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
			changed_flags, *total_flags);

	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
	/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);

#undef CHK

	mutex_lock(&priv->mutex);

	for_each_context(priv, ctx) {
		ctx->staging.filter_flags &= ~filter_nand;
		ctx->staging.filter_flags |= filter_or;

		/*
		 * Not committing directly because hardware can perform a scan,
		 * but we'll eventually commit the filter flags change anyway.
		 */
	}

	mutex_unlock(&priv->mutex);

	/*
	 * Receiving all multicast frames is always enabled by the
	 * default flags setup in iwl_legacy_connection_init_rx_config()
	 * since we currently do not support programming multicast
	 * filters into the device.
	 */
	*total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
			FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
2640
2641/*****************************************************************************
2642 *
2643 * driver setup and teardown
2644 *
2645 *****************************************************************************/
2646
2647static void iwl4965_bg_txpower_work(struct work_struct *work)
2648{
2649 struct iwl_priv *priv = container_of(work, struct iwl_priv,
2650 txpower_work);
2651
2652 mutex_lock(&priv->mutex);
2653
2654 /* If a scan happened to start before we got here
2655 * then just return; the statistics notification will
2656 * kick off another scheduled work to compensate for
2657 * any temperature delta we missed here. */
2658 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
2659 test_bit(STATUS_SCANNING, &priv->status))
2660 goto out;
2661
2662 /* Regardless of if we are associated, we must reconfigure the
2663 * TX power since frames can be sent on non-radar channels while
2664 * not associated */
2665 priv->cfg->ops->lib->send_tx_power(priv);
2666
2667 /* Update last_temperature to keep is_calib_needed from running
2668 * when it isn't needed... */
2669 priv->last_temperature = priv->temperature;
2670out:
2671 mutex_unlock(&priv->mutex);
2672}
2673
/*
 * iwl4965_setup_deferred_work - create the driver workqueue and register
 * all deferred work items, timers and the irq tasklet.
 *
 * NOTE(review): the create_singlethread_workqueue() result is not checked
 * here — allocation failure is presumably caught by the caller testing
 * priv->workqueue; TODO confirm.
 */
static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
{
	priv->workqueue = create_singlethread_workqueue(DRV_NAME);

	init_waitqueue_head(&priv->wait_command_queue);

	INIT_WORK(&priv->restart, iwl4965_bg_restart);
	INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish);
	INIT_WORK(&priv->run_time_calib_work, iwl4965_bg_run_time_calib_work);
	INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start);
	INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start);

	iwl_legacy_setup_scan_deferred_work(priv);

	INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);

	/* legacy timer API: data/function filled in manually */
	init_timer(&priv->statistics_periodic);
	priv->statistics_periodic.data = (unsigned long)priv;
	priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;

	init_timer(&priv->watchdog);
	priv->watchdog.data = (unsigned long)priv;
	priv->watchdog.function = iwl_legacy_bg_watchdog;

	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
		iwl4965_irq_tasklet, (unsigned long)priv);
}
2701
/*
 * iwl4965_cancel_deferred_work - cancel pending deferred work and stop
 * the statistics timer.
 *
 * NOTE(review): alive_start is cancelled without _sync, and priv->watchdog
 * is not deleted here — presumably both are handled safely elsewhere in
 * the teardown path; TODO confirm.
 */
static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
{
	cancel_work_sync(&priv->txpower_work);
	cancel_delayed_work_sync(&priv->init_alive_start);
	cancel_delayed_work(&priv->alive_start);
	cancel_work_sync(&priv->run_time_calib_work);

	iwl_legacy_cancel_scan_deferred_work(priv);

	del_timer_sync(&priv->statistics_periodic);
}
2713
2714static void iwl4965_init_hw_rates(struct iwl_priv *priv,
2715 struct ieee80211_rate *rates)
2716{
2717 int i;
2718
2719 for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
2720 rates[i].bitrate = iwlegacy_rates[i].ieee * 5;
2721 rates[i].hw_value = i; /* Rate scaling will work on indexes */
2722 rates[i].hw_value_short = i;
2723 rates[i].flags = 0;
2724 if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
2725 /*
2726 * If CCK != 1M then set short preamble rate flag.
2727 */
2728 rates[i].flags |=
2729 (iwlegacy_rates[i].plcp == IWL_RATE_1M_PLCP) ?
2730 0 : IEEE80211_RATE_SHORT_PREAMBLE;
2731 }
2732 }
2733}
2734/*
2735 * Acquire priv->lock before calling this function !
2736 */
/*
 * iwl4965_set_wr_ptrs - set TX queue write and read pointers to @index.
 *
 * Acquire priv->lock before calling this function !
 */
void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
{
	/* write pointer: low byte is the index, high byte the queue id */
	iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
}
2743
/*
 * iwl4965_tx_queue_set_status - program the scheduler status register
 * for one TX queue.
 *
 * @tx_fifo_id: hardware FIFO the queue drains into
 * @scd_retry:  non-zero for aggregation (BA) queues with scheduler
 *              retries, zero for plain AC queues
 *
 * Active state is taken from priv->txq_ctx_active_msk; the queue's
 * sched_retry field is updated to match.
 */
void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
					struct iwl_tx_queue *txq,
					int tx_fifo_id, int scd_retry)
{
	int txq_id = txq->q.id;

	/* Find out whether to activate Tx queue */
	int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;

	/* Set up and activate */
	iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
			 (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			 (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
			 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
			 (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
			 IWL49_SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
		       active ? "Activate" : "Deactivate",
		       scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
}
2767
2768
/*
 * iwl4965_init_drv - initialize driver-private software state
 *
 * Sets up locks, lists, defaults, the RX chain selection, scan parameters,
 * the regulatory channel map and the mac80211 geography/rate tables.
 * Returns 0 on success; on failure everything allocated here is unwound
 * via the goto-cleanup labels.
 */
static int iwl4965_init_drv(struct iwl_priv *priv)
{
	int ret;

	spin_lock_init(&priv->sta_lock);
	spin_lock_init(&priv->hcmd_lock);

	INIT_LIST_HEAD(&priv->free_frames);

	mutex_init(&priv->mutex);

	priv->ieee_channels = NULL;
	priv->ieee_rates = NULL;
	priv->band = IEEE80211_BAND_2GHZ;

	priv->iw_mode = NL80211_IFTYPE_STATION;
	priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
	priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;

	/* initialize force reset */
	priv->force_reset.reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD;

	/* Choose which receivers/antennas to use */
	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv,
					&priv->contexts[IWL_RXON_CTX_BSS]);

	iwl_legacy_init_scan_params(priv);

	ret = iwl_legacy_init_channel_map(priv);
	if (ret) {
		IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
		goto err;
	}

	ret = iwl_legacy_init_geos(priv);
	if (ret) {
		IWL_ERR(priv, "initializing geos failed: %d\n", ret);
		goto err_free_channel_map;
	}
	iwl4965_init_hw_rates(priv, priv->ieee_rates);

	return 0;

err_free_channel_map:
	iwl_legacy_free_channel_map(priv);
err:
	return ret;
}
2818
/*
 * iwl4965_uninit_drv - release everything iwl4965_init_drv() (and the
 * scan code) allocated: calibration results, geos, channel map and the
 * scan command buffer.
 */
static void iwl4965_uninit_drv(struct iwl_priv *priv)
{
	iwl4965_calib_free_results(priv);
	iwl_legacy_free_geos(priv);
	iwl_legacy_free_channel_map(priv);
	kfree(priv->scan_cmd);
}
2826
/*
 * iwl4965_hw_detect - latch hardware revision identifiers: the CSR HW
 * revision registers and the PCI revision id.
 */
static void iwl4965_hw_detect(struct iwl_priv *priv)
{
	priv->hw_rev = _iwl_legacy_read32(priv, CSR_HW_REV);
	priv->hw_wa_rev = _iwl_legacy_read32(priv, CSR_HW_REV_WA_REG);
	priv->rev_id = priv->pci_dev->revision;
	IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
}
2834
2835static int iwl4965_set_hw_params(struct iwl_priv *priv)
2836{
2837 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
2838 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
2839 if (priv->cfg->mod_params->amsdu_size_8K)
2840 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
2841 else
2842 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
2843
2844 priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
2845
2846 if (priv->cfg->mod_params->disable_11n)
2847 priv->cfg->sku &= ~IWL_SKU_N;
2848
2849 /* Device-specific setup */
2850 return priv->cfg->ops->lib->set_hw_params(priv);
2851}
2852
/* Map mac80211 AC index (VO, VI, BE, BK) to the hardware TX FIFO. */
static const u8 iwl4965_bss_ac_to_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
};
2859
/* Map mac80211 AC index to the driver TX queue number (identity map). */
static const u8 iwl4965_bss_ac_to_queue[] = {
	0, 1, 2, 3,
};
2863
2864static int
2865iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2866{
2867 int err = 0, i;
2868 struct iwl_priv *priv;
2869 struct ieee80211_hw *hw;
2870 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
2871 unsigned long flags;
2872 u16 pci_cmd;
2873
2874 /************************
2875 * 1. Allocating HW data
2876 ************************/
2877
2878 hw = iwl_legacy_alloc_all(cfg);
2879 if (!hw) {
2880 err = -ENOMEM;
2881 goto out;
2882 }
2883 priv = hw->priv;
2884 /* At this point both hw and priv are allocated. */
2885
2886 /*
2887 * The default context is always valid,
2888 * more may be discovered when firmware
2889 * is loaded.
2890 */
2891 priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
2892
2893 for (i = 0; i < NUM_IWL_RXON_CTX; i++)
2894 priv->contexts[i].ctxid = i;
2895
2896 priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
2897 priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
2898 priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
2899 priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
2900 priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
2901 priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
2902 priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
2903 priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
2904 priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwl4965_bss_ac_to_fifo;
2905 priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwl4965_bss_ac_to_queue;
2906 priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
2907 BIT(NL80211_IFTYPE_ADHOC);
2908 priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
2909 BIT(NL80211_IFTYPE_STATION);
2910 priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
2911 priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
2912 priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
2913 priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
2914
2915 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 1);
2916
2917 SET_IEEE80211_DEV(hw, &pdev->dev);
2918
2919 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
2920 priv->cfg = cfg;
2921 priv->pci_dev = pdev;
2922 priv->inta_mask = CSR_INI_SET_MASK;
2923
2924 if (iwl_legacy_alloc_traffic_mem(priv))
2925 IWL_ERR(priv, "Not enough memory to generate traffic log\n");
2926
2927 /**************************
2928 * 2. Initializing PCI bus
2929 **************************/
2930 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
2931 PCIE_LINK_STATE_CLKPM);
2932
2933 if (pci_enable_device(pdev)) {
2934 err = -ENODEV;
2935 goto out_ieee80211_free_hw;
2936 }
2937
2938 pci_set_master(pdev);
2939
2940 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
2941 if (!err)
2942 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
2943 if (err) {
2944 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2945 if (!err)
2946 err = pci_set_consistent_dma_mask(pdev,
2947 DMA_BIT_MASK(32));
2948 /* both attempts failed: */
2949 if (err) {
2950 IWL_WARN(priv, "No suitable DMA available.\n");
2951 goto out_pci_disable_device;
2952 }
2953 }
2954
2955 err = pci_request_regions(pdev, DRV_NAME);
2956 if (err)
2957 goto out_pci_disable_device;
2958
2959 pci_set_drvdata(pdev, priv);
2960
2961
2962 /***********************
2963 * 3. Read REV register
2964 ***********************/
2965 priv->hw_base = pci_iomap(pdev, 0, 0);
2966 if (!priv->hw_base) {
2967 err = -ENODEV;
2968 goto out_pci_release_regions;
2969 }
2970
2971 IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
2972 (unsigned long long) pci_resource_len(pdev, 0));
2973 IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
2974
2975 /* these spin locks will be used in apm_ops.init and EEPROM access
2976 * we should init now
2977 */
2978 spin_lock_init(&priv->reg_lock);
2979 spin_lock_init(&priv->lock);
2980
2981 /*
2982 * stop and reset the on-board processor just in case it is in a
2983 * strange state ... like being left stranded by a primary kernel
2984 * and this is now the kdump kernel trying to start up
2985 */
2986 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
2987
2988 iwl4965_hw_detect(priv);
2989 IWL_INFO(priv, "Detected %s, REV=0x%X\n",
2990 priv->cfg->name, priv->hw_rev);
2991
2992 /* We disable the RETRY_TIMEOUT register (0x41) to keep
2993 * PCI Tx retries from interfering with C3 CPU state */
2994 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
2995
2996 iwl4965_prepare_card_hw(priv);
2997 if (!priv->hw_ready) {
2998 IWL_WARN(priv, "Failed, HW not ready\n");
2999 goto out_iounmap;
3000 }
3001
3002 /*****************
3003 * 4. Read EEPROM
3004 *****************/
3005 /* Read the EEPROM */
3006 err = iwl_legacy_eeprom_init(priv);
3007 if (err) {
3008 IWL_ERR(priv, "Unable to init EEPROM\n");
3009 goto out_iounmap;
3010 }
3011 err = iwl4965_eeprom_check_version(priv);
3012 if (err)
3013 goto out_free_eeprom;
3014
3015 if (err)
3016 goto out_free_eeprom;
3017
3018 /* extract MAC Address */
3019 iwl4965_eeprom_get_mac(priv, priv->addresses[0].addr);
3020 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
3021 priv->hw->wiphy->addresses = priv->addresses;
3022 priv->hw->wiphy->n_addresses = 1;
3023
3024 /************************
3025 * 5. Setup HW constants
3026 ************************/
3027 if (iwl4965_set_hw_params(priv)) {
3028 IWL_ERR(priv, "failed to set hw parameters\n");
3029 goto out_free_eeprom;
3030 }
3031
3032 /*******************
3033 * 6. Setup priv
3034 *******************/
3035
3036 err = iwl4965_init_drv(priv);
3037 if (err)
3038 goto out_free_eeprom;
3039 /* At this point both hw and priv are initialized. */
3040
3041 /********************
3042 * 7. Setup services
3043 ********************/
3044 spin_lock_irqsave(&priv->lock, flags);
3045 iwl_legacy_disable_interrupts(priv);
3046 spin_unlock_irqrestore(&priv->lock, flags);
3047
3048 pci_enable_msi(priv->pci_dev);
3049
3050 err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
3051 IRQF_SHARED, DRV_NAME, priv);
3052 if (err) {
3053 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
3054 goto out_disable_msi;
3055 }
3056
3057 iwl4965_setup_deferred_work(priv);
3058 iwl4965_setup_rx_handlers(priv);
3059
3060 /*********************************************
3061 * 8. Enable interrupts and read RFKILL state
3062 *********************************************/
3063
3064 /* enable rfkill interrupt: hw bug w/a */
3065 pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
3066 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
3067 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
3068 pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
3069 }
3070
3071 iwl_legacy_enable_rfkill_int(priv);
3072
3073 /* If platform's RF_KILL switch is NOT set to KILL */
3074 if (iwl_read32(priv, CSR_GP_CNTRL) &
3075 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
3076 clear_bit(STATUS_RF_KILL_HW, &priv->status);
3077 else
3078 set_bit(STATUS_RF_KILL_HW, &priv->status);
3079
3080 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
3081 test_bit(STATUS_RF_KILL_HW, &priv->status));
3082
3083 iwl_legacy_power_initialize(priv);
3084
3085 init_completion(&priv->_4965.firmware_loading_complete);
3086
3087 err = iwl4965_request_firmware(priv, true);
3088 if (err)
3089 goto out_destroy_workqueue;
3090
3091 return 0;
3092
3093 out_destroy_workqueue:
3094 destroy_workqueue(priv->workqueue);
3095 priv->workqueue = NULL;
3096 free_irq(priv->pci_dev->irq, priv);
3097 out_disable_msi:
3098 pci_disable_msi(priv->pci_dev);
3099 iwl4965_uninit_drv(priv);
3100 out_free_eeprom:
3101 iwl_legacy_eeprom_free(priv);
3102 out_iounmap:
3103 pci_iounmap(pdev, priv->hw_base);
3104 out_pci_release_regions:
3105 pci_set_drvdata(pdev, NULL);
3106 pci_release_regions(pdev);
3107 out_pci_disable_device:
3108 pci_disable_device(pdev);
3109 out_ieee80211_free_hw:
3110 iwl_legacy_free_traffic_mem(priv);
3111 ieee80211_free_hw(priv->hw);
3112 out:
3113 return err;
3114}
3115
/*
 * PCI remove callback: tear down everything iwl4965_pci_probe() set up,
 * in the reverse order required by mac80211 and the PCI core.
 * NOTE(review): the statement order below is load-bearing (workqueue must
 * outlive ieee80211_unregister_hw(), IRQ must be freed before the BARs are
 * unmapped) -- do not reorder without tracing each dependency.
 */
static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
{
	struct iwl_priv *priv = pci_get_drvdata(pdev);
	unsigned long flags;

	/* Probe may have failed before drvdata was set */
	if (!priv)
		return;

	/* Firmware loads asynchronously; don't tear down under its feet */
	wait_for_completion(&priv->_4965.firmware_loading_complete);

	IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");

	iwl_legacy_dbgfs_unregister(priv);
	sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);

	/* ieee80211_unregister_hw() will cause iwl_mac_stop and
	 * iwl4965_down to be called; since we are removing the device
	 * we need to set the STATUS_EXIT_PENDING bit first.
	 */
	set_bit(STATUS_EXIT_PENDING, &priv->status);

	iwl_legacy_leds_exit(priv);

	if (priv->mac80211_registered) {
		ieee80211_unregister_hw(priv->hw);
		priv->mac80211_registered = 0;
	} else {
		/* mac80211 never saw the device; bring it down directly */
		iwl4965_down(priv);
	}

	/*
	 * Make sure device is reset to low power before unloading driver.
	 * This may be redundant with iwl4965_down(), but there are paths to
	 * run iwl4965_down() without calling apm_ops.stop(), and there are
	 * paths to avoid running iwl4965_down() at all before leaving driver.
	 * This (inexpensive) call *makes sure* device is reset.
	 */
	iwl_legacy_apm_stop(priv);

	/* make sure we flush any pending irq or
	 * tasklet for the driver
	 */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_legacy_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	iwl4965_synchronize_irq(priv);

	iwl4965_dealloc_ucode_pci(priv);

	/* Rx queue may never have been allocated on early probe failure */
	if (priv->rxq.bd)
		iwl4965_rx_queue_free(priv, &priv->rxq);
	iwl4965_hw_txq_ctx_free(priv);

	iwl_legacy_eeprom_free(priv);


	/*netif_stop_queue(dev); */
	flush_workqueue(priv->workqueue);

	/* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
	 * priv->workqueue... so we can't take down the workqueue
	 * until now... */
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
	iwl_legacy_free_traffic_mem(priv);

	free_irq(priv->pci_dev->irq, priv);
	pci_disable_msi(priv->pci_dev);
	pci_iounmap(pdev, priv->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	iwl4965_uninit_drv(priv);

	/* dev_kfree_skb() is NULL-safe */
	dev_kfree_skb(priv->beacon_skb);

	ieee80211_free_hw(priv->hw);
}
3196
/*
 * Activate/deactivate Tx DMA/FIFO channels according to the tx fifos mask.
 * Must be called with priv->lock held and NIC (mac) access enabled,
 * since it writes a peripheral (PRPH) scheduler register.
 */
void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
	/* Bits 7-0 of IWL49_SCD_TXFACT enable/disable DMA channels 0-7 */
	iwl_legacy_write_prph(priv, IWL49_SCD_TXFACT, mask);
}
3205
3206/*****************************************************************************
3207 *
3208 * driver and module entry point
3209 *
3210 *****************************************************************************/
3211
/* Hardware specific file defines the PCI IDs table for that hardware module */
static DEFINE_PCI_DEVICE_TABLE(iwl4965_hw_card_ids) = {
#if defined(CONFIG_IWL4965_MODULE) || defined(CONFIG_IWL4965)
	/* Intel Wireless WiFi Link 4965AGN, any subsystem ID */
	{IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_cfg)},
	{IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_cfg)},
#endif /* CONFIG_IWL4965_MODULE || CONFIG_IWL4965 */

	{0}	/* sentinel: terminates the table for the PCI core */
};
MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids);
3222
/* PCI driver glue: binds the IDs above to the probe/remove callbacks */
static struct pci_driver iwl4965_driver = {
	.name = DRV_NAME,
	.id_table = iwl4965_hw_card_ids,
	.probe = iwl4965_pci_probe,
	.remove = __devexit_p(iwl4965_pci_remove),
	/* suspend/resume handled via dev_pm_ops (no-op when PM disabled) */
	.driver.pm = IWL_LEGACY_PM_OPS,
};
3230
3231static int __init iwl4965_init(void)
3232{
3233
3234 int ret;
3235 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
3236 pr_info(DRV_COPYRIGHT "\n");
3237
3238 ret = iwl4965_rate_control_register();
3239 if (ret) {
3240 pr_err("Unable to register rate control algorithm: %d\n", ret);
3241 return ret;
3242 }
3243
3244 ret = pci_register_driver(&iwl4965_driver);
3245 if (ret) {
3246 pr_err("Unable to initialize PCI module\n");
3247 goto error_register;
3248 }
3249
3250 return ret;
3251
3252error_register:
3253 iwl4965_rate_control_unregister();
3254 return ret;
3255}
3256
/*
 * Module exit: mirror of iwl4965_init() in reverse order -- the PCI
 * driver must be gone (no devices using the rate-control algorithm)
 * before the algorithm is unregistered from mac80211.
 */
static void __exit iwl4965_exit(void)
{
	pci_unregister_driver(&iwl4965_driver);
	iwl4965_rate_control_unregister();
}
3262
module_exit(iwl4965_exit);
module_init(iwl4965_init);

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/* Runtime-writable debug mask (debug builds only) */
module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "debug output mask");
#endif

/* Load-time tunables; read-only via sysfs after load (S_IRUGO) */
module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
MODULE_PARM_DESC(queues_num, "number of hw queues.");
/* "11n_disable" is a valid parameter name: only the generated C
 * identifiers get the __param_ prefix, so the leading digit is fine */
module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K,
		   int, S_IRUGO);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
module_param_named(fw_restart, iwl4965_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlegacy/iwl-prph.h b/drivers/net/wireless/iwlegacy/prph.h
index 30a493003ab0..ffec4b4a248a 100644
--- a/drivers/net/wireless/iwlegacy/iwl-prph.h
+++ b/drivers/net/wireless/iwlegacy/prph.h
@@ -60,8 +60,8 @@
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/ 61 *****************************************************************************/
62 62
63#ifndef __iwl_legacy_prph_h__ 63#ifndef __il_prph_h__
64#define __iwl_legacy_prph_h__ 64#define __il_prph_h__
65 65
66/* 66/*
67 * Registers in this file are internal, not PCI bus memory mapped. 67 * Registers in this file are internal, not PCI bus memory mapped.
@@ -91,9 +91,9 @@
91#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000) 91#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000)
92#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000) 92#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000)
93#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000) 93#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000)
94#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */ 94#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */
95#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000) 95#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000)
96#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */ 96#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */
97#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060) 97#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060)
98 98
99#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) 99#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
@@ -120,13 +120,13 @@
120 * 120 *
121 * 1) Initialization -- performs hardware calibration and sets up some 121 * 1) Initialization -- performs hardware calibration and sets up some
122 * internal data, then notifies host via "initialize alive" notification 122 * internal data, then notifies host via "initialize alive" notification
123 * (struct iwl_init_alive_resp) that it has completed all of its work. 123 * (struct il_init_alive_resp) that it has completed all of its work.
124 * After signal from host, it then loads and starts the runtime program. 124 * After signal from host, it then loads and starts the runtime program.
125 * The initialization program must be used when initially setting up the 125 * The initialization program must be used when initially setting up the
126 * NIC after loading the driver. 126 * NIC after loading the driver.
127 * 127 *
128 * 2) Runtime/Protocol -- performs all normal runtime operations. This 128 * 2) Runtime/Protocol -- performs all normal runtime operations. This
129 * notifies host via "alive" notification (struct iwl_alive_resp) that it 129 * notifies host via "alive" notification (struct il_alive_resp) that it
130 * is ready to be used. 130 * is ready to be used.
131 * 131 *
132 * When initializing the NIC, the host driver does the following procedure: 132 * When initializing the NIC, the host driver does the following procedure:
@@ -189,7 +189,7 @@
189 * procedure. 189 * procedure.
190 * 190 *
191 * This save/restore method is mostly for autonomous power management during 191 * This save/restore method is mostly for autonomous power management during
192 * normal operation (result of POWER_TABLE_CMD). Platform suspend/resume and 192 * normal operation (result of C_POWER_TBL). Platform suspend/resume and
193 * RFKILL should use complete restarts (with total re-initialization) of uCode, 193 * RFKILL should use complete restarts (with total re-initialization) of uCode,
194 * allowing total shutdown (including BSM memory). 194 * allowing total shutdown (including BSM memory).
195 * 195 *
@@ -202,19 +202,19 @@
202 */ 202 */
203 203
204/* BSM bit fields */ 204/* BSM bit fields */
205#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */ 205#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */
206#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup*/ 206#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup */
207#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */ 207#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */
208 208
209/* BSM addresses */ 209/* BSM addresses */
210#define BSM_BASE (PRPH_BASE + 0x3400) 210#define BSM_BASE (PRPH_BASE + 0x3400)
211#define BSM_END (PRPH_BASE + 0x3800) 211#define BSM_END (PRPH_BASE + 0x3800)
212 212
213#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */ 213#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */
214#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */ 214#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */
215#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */ 215#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */
216#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */ 216#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */
217#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */ 217#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */
218 218
219/* 219/*
220 * Pointers and size regs for bootstrap load and data SRAM save/restore. 220 * Pointers and size regs for bootstrap load and data SRAM save/restore.
@@ -231,8 +231,7 @@
231 * Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1) 231 * Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1)
232 */ 232 */
233#define BSM_SRAM_LOWER_BOUND (PRPH_BASE + 0x3800) 233#define BSM_SRAM_LOWER_BOUND (PRPH_BASE + 0x3800)
234#define BSM_SRAM_SIZE (1024) /* bytes */ 234#define BSM_SRAM_SIZE (1024) /* bytes */
235
236 235
237/* 3945 Tx scheduler registers */ 236/* 3945 Tx scheduler registers */
238#define ALM_SCD_BASE (PRPH_BASE + 0x2E00) 237#define ALM_SCD_BASE (PRPH_BASE + 0x2E00)
@@ -255,7 +254,7 @@
255 * but one DMA channel may take input from several queues. 254 * but one DMA channel may take input from several queues.
256 * 255 *
257 * Tx DMA FIFOs have dedicated purposes. For 4965, they are used as follows 256 * Tx DMA FIFOs have dedicated purposes. For 4965, they are used as follows
258 * (cf. default_queue_to_tx_fifo in iwl-4965.c): 257 * (cf. default_queue_to_tx_fifo in 4965.c):
259 * 258 *
260 * 0 -- EDCA BK (background) frames, lowest priority 259 * 0 -- EDCA BK (background) frames, lowest priority
261 * 1 -- EDCA BE (best effort) frames, normal priority 260 * 1 -- EDCA BE (best effort) frames, normal priority
@@ -274,20 +273,20 @@
274 * The driver sets up each queue to work in one of two modes: 273 * The driver sets up each queue to work in one of two modes:
275 * 274 *
276 * 1) Scheduler-Ack, in which the scheduler automatically supports a 275 * 1) Scheduler-Ack, in which the scheduler automatically supports a
277 * block-ack (BA) window of up to 64 TFDs. In this mode, each queue 276 * block-ack (BA) win of up to 64 TFDs. In this mode, each queue
278 * contains TFDs for a unique combination of Recipient Address (RA) 277 * contains TFDs for a unique combination of Recipient Address (RA)
279 * and Traffic Identifier (TID), that is, traffic of a given 278 * and Traffic Identifier (TID), that is, traffic of a given
280 * Quality-Of-Service (QOS) priority, destined for a single station. 279 * Quality-Of-Service (QOS) priority, destined for a single station.
281 * 280 *
282 * In scheduler-ack mode, the scheduler keeps track of the Tx status of 281 * In scheduler-ack mode, the scheduler keeps track of the Tx status of
283 * each frame within the BA window, including whether it's been transmitted, 282 * each frame within the BA win, including whether it's been transmitted,
284 * and whether it's been acknowledged by the receiving station. The device 283 * and whether it's been acknowledged by the receiving station. The device
285 * automatically processes block-acks received from the receiving STA, 284 * automatically processes block-acks received from the receiving STA,
286 * and reschedules un-acked frames to be retransmitted (successful 285 * and reschedules un-acked frames to be retransmitted (successful
287 * Tx completion may end up being out-of-order). 286 * Tx completion may end up being out-of-order).
288 * 287 *
289 * The driver must maintain the queue's Byte Count table in host DRAM 288 * The driver must maintain the queue's Byte Count table in host DRAM
290 * (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode. 289 * (struct il4965_sched_queue_byte_cnt_tbl) for this mode.
291 * This mode does not support fragmentation. 290 * This mode does not support fragmentation.
292 * 291 *
293 * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order. 292 * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
@@ -316,34 +315,34 @@
316 */ 315 */
317 316
318/** 317/**
319 * Max Tx window size is the max number of contiguous TFDs that the scheduler 318 * Max Tx win size is the max number of contiguous TFDs that the scheduler
320 * can keep track of at one time when creating block-ack chains of frames. 319 * can keep track of at one time when creating block-ack chains of frames.
321 * Note that "64" matches the number of ack bits in a block-ack packet. 320 * Note that "64" matches the number of ack bits in a block-ack packet.
322 * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize 321 * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize
323 * IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) values. 322 * IL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
324 */ 323 */
325#define SCD_WIN_SIZE 64 324#define SCD_WIN_SIZE 64
326#define SCD_FRAME_LIMIT 64 325#define SCD_FRAME_LIMIT 64
327 326
328/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */ 327/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */
329#define IWL49_SCD_START_OFFSET 0xa02c00 328#define IL49_SCD_START_OFFSET 0xa02c00
330 329
331/* 330/*
332 * 4965 tells driver SRAM address for internal scheduler structs via this reg. 331 * 4965 tells driver SRAM address for internal scheduler structs via this reg.
333 * Value is valid only after "Alive" response from uCode. 332 * Value is valid only after "Alive" response from uCode.
334 */ 333 */
335#define IWL49_SCD_SRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x0) 334#define IL49_SCD_SRAM_BASE_ADDR (IL49_SCD_START_OFFSET + 0x0)
336 335
337/* 336/*
338 * Driver may need to update queue-empty bits after changing queue's 337 * Driver may need to update queue-empty bits after changing queue's
339 * write and read pointers (indexes) during (re-)initialization (i.e. when 338 * write and read pointers (idxes) during (re-)initialization (i.e. when
340 * scheduler is not tracking what's happening). 339 * scheduler is not tracking what's happening).
341 * Bit fields: 340 * Bit fields:
342 * 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit 341 * 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit
343 * 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty 342 * 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty
344 * NOTE: This register is not used by Linux driver. 343 * NOTE: This register is not used by Linux driver.
345 */ 344 */
346#define IWL49_SCD_EMPTY_BITS (IWL49_SCD_START_OFFSET + 0x4) 345#define IL49_SCD_EMPTY_BITS (IL49_SCD_START_OFFSET + 0x4)
347 346
348/* 347/*
349 * Physical base address of array of byte count (BC) circular buffers (CBs). 348 * Physical base address of array of byte count (BC) circular buffers (CBs).
@@ -351,11 +350,11 @@
351 * This register points to BC CB for queue 0, must be on 1024-byte boundary. 350 * This register points to BC CB for queue 0, must be on 1024-byte boundary.
352 * Others are spaced by 1024 bytes. 351 * Others are spaced by 1024 bytes.
353 * Each BC CB is 2 bytes * (256 + 64) = 740 bytes, followed by 384 bytes pad. 352 * Each BC CB is 2 bytes * (256 + 64) = 740 bytes, followed by 384 bytes pad.
354 * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff). 353 * (Index into a queue's BC CB) = (idx into queue's TFD CB) = (SSN & 0xff).
355 * Bit fields: 354 * Bit fields:
356 * 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned. 355 * 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned.
357 */ 356 */
358#define IWL49_SCD_DRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x10) 357#define IL49_SCD_DRAM_BASE_ADDR (IL49_SCD_START_OFFSET + 0x10)
359 358
360/* 359/*
361 * Enables any/all Tx DMA/FIFO channels. 360 * Enables any/all Tx DMA/FIFO channels.
@@ -364,23 +363,23 @@
364 * Bit fields: 363 * Bit fields:
365 * 7- 0: Enable (1), disable (0), one bit for each channel 0-7 364 * 7- 0: Enable (1), disable (0), one bit for each channel 0-7
366 */ 365 */
367#define IWL49_SCD_TXFACT (IWL49_SCD_START_OFFSET + 0x1c) 366#define IL49_SCD_TXFACT (IL49_SCD_START_OFFSET + 0x1c)
368/* 367/*
369 * Queue (x) Write Pointers (indexes, really!), one for each Tx queue. 368 * Queue (x) Write Pointers (idxes, really!), one for each Tx queue.
370 * Initialized and updated by driver as new TFDs are added to queue. 369 * Initialized and updated by driver as new TFDs are added to queue.
371 * NOTE: If using Block Ack, index must correspond to frame's 370 * NOTE: If using Block Ack, idx must correspond to frame's
372 * Start Sequence Number; index = (SSN & 0xff) 371 * Start Sequence Number; idx = (SSN & 0xff)
373 * NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses? 372 * NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses?
374 */ 373 */
375#define IWL49_SCD_QUEUE_WRPTR(x) (IWL49_SCD_START_OFFSET + 0x24 + (x) * 4) 374#define IL49_SCD_QUEUE_WRPTR(x) (IL49_SCD_START_OFFSET + 0x24 + (x) * 4)
376 375
377/* 376/*
378 * Queue (x) Read Pointers (indexes, really!), one for each Tx queue. 377 * Queue (x) Read Pointers (idxes, really!), one for each Tx queue.
379 * For FIFO mode, index indicates next frame to transmit. 378 * For FIFO mode, idx indicates next frame to transmit.
380 * For Scheduler-ACK mode, index indicates first frame in Tx window. 379 * For Scheduler-ACK mode, idx indicates first frame in Tx win.
381 * Initialized by driver, updated by scheduler. 380 * Initialized by driver, updated by scheduler.
382 */ 381 */
383#define IWL49_SCD_QUEUE_RDPTR(x) (IWL49_SCD_START_OFFSET + 0x64 + (x) * 4) 382#define IL49_SCD_QUEUE_RDPTR(x) (IL49_SCD_START_OFFSET + 0x64 + (x) * 4)
384 383
385/* 384/*
386 * Select which queues work in chain mode (1) vs. not (0). 385 * Select which queues work in chain mode (1) vs. not (0).
@@ -391,18 +390,18 @@
391 * NOTE: If driver sets up queue for chain mode, it should be also set up 390 * NOTE: If driver sets up queue for chain mode, it should be also set up
392 * Scheduler-ACK mode as well, via SCD_QUEUE_STATUS_BITS(x). 391 * Scheduler-ACK mode as well, via SCD_QUEUE_STATUS_BITS(x).
393 */ 392 */
394#define IWL49_SCD_QUEUECHAIN_SEL (IWL49_SCD_START_OFFSET + 0xd0) 393#define IL49_SCD_QUEUECHAIN_SEL (IL49_SCD_START_OFFSET + 0xd0)
395 394
396/* 395/*
397 * Select which queues interrupt driver when scheduler increments 396 * Select which queues interrupt driver when scheduler increments
398 * a queue's read pointer (index). 397 * a queue's read pointer (idx).
399 * Bit fields: 398 * Bit fields:
400 * 31-16: Reserved 399 * 31-16: Reserved
401 * 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled 400 * 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled
402 * NOTE: This functionality is apparently a no-op; driver relies on interrupts 401 * NOTE: This functionality is apparently a no-op; driver relies on interrupts
403 * from Rx queue to read Tx command responses and update Tx queues. 402 * from Rx queue to read Tx command responses and update Tx queues.
404 */ 403 */
405#define IWL49_SCD_INTERRUPT_MASK (IWL49_SCD_START_OFFSET + 0xe4) 404#define IL49_SCD_INTERRUPT_MASK (IL49_SCD_START_OFFSET + 0xe4)
406 405
407/* 406/*
408 * Queue search status registers. One for each queue. 407 * Queue search status registers. One for each queue.
@@ -414,7 +413,7 @@
414 * Driver should init to "1" for aggregation mode, or "0" otherwise. 413 * Driver should init to "1" for aggregation mode, or "0" otherwise.
415 * 7-6: Driver should init to "0" 414 * 7-6: Driver should init to "0"
416 * 5: Window Size Left; indicates whether scheduler can request 415 * 5: Window Size Left; indicates whether scheduler can request
417 * another TFD, based on window size, etc. Driver should init 416 * another TFD, based on win size, etc. Driver should init
418 * this bit to "1" for aggregation mode, or "0" for non-agg. 417 * this bit to "1" for aggregation mode, or "0" for non-agg.
419 * 4-1: Tx FIFO to use (range 0-7). 418 * 4-1: Tx FIFO to use (range 0-7).
420 * 0: Queue is active (1), not active (0). 419 * 0: Queue is active (1), not active (0).
@@ -423,18 +422,18 @@
423 * NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled 422 * NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled
424 * via SCD_QUEUECHAIN_SEL. 423 * via SCD_QUEUECHAIN_SEL.
425 */ 424 */
426#define IWL49_SCD_QUEUE_STATUS_BITS(x)\ 425#define IL49_SCD_QUEUE_STATUS_BITS(x)\
427 (IWL49_SCD_START_OFFSET + 0x104 + (x) * 4) 426 (IL49_SCD_START_OFFSET + 0x104 + (x) * 4)
428 427
429/* Bit field positions */ 428/* Bit field positions */
430#define IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0) 429#define IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0)
431#define IWL49_SCD_QUEUE_STTS_REG_POS_TXF (1) 430#define IL49_SCD_QUEUE_STTS_REG_POS_TXF (1)
432#define IWL49_SCD_QUEUE_STTS_REG_POS_WSL (5) 431#define IL49_SCD_QUEUE_STTS_REG_POS_WSL (5)
433#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8) 432#define IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8)
434 433
435/* Write masks */ 434/* Write masks */
436#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10) 435#define IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10)
437#define IWL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00) 436#define IL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00)
438 437
439/** 438/**
440 * 4965 internal SRAM structures for scheduler, shared with driver ... 439 * 4965 internal SRAM structures for scheduler, shared with driver ...
@@ -460,7 +459,7 @@
460 * each queue's entry as follows: 459 * each queue's entry as follows:
461 * 460 *
462 * LS Dword bit fields: 461 * LS Dword bit fields:
463 * 0-06: Max Tx window size for Scheduler-ACK. Driver should init to 64. 462 * 0-06: Max Tx win size for Scheduler-ACK. Driver should init to 64.
464 * 463 *
465 * MS Dword bit fields: 464 * MS Dword bit fields:
466 * 16-22: Frame limit. Driver should init to 10 (0xa). 465 * 16-22: Frame limit. Driver should init to 10 (0xa).
@@ -470,14 +469,14 @@
470 * Init must be done after driver receives "Alive" response from 4965 uCode, 469 * Init must be done after driver receives "Alive" response from 4965 uCode,
471 * and when setting up queue for aggregation. 470 * and when setting up queue for aggregation.
472 */ 471 */
473#define IWL49_SCD_CONTEXT_DATA_OFFSET 0x380 472#define IL49_SCD_CONTEXT_DATA_OFFSET 0x380
474#define IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) \ 473#define IL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
475 (IWL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8)) 474 (IL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
476 475
477#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0) 476#define IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0)
478#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F) 477#define IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F)
479#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16) 478#define IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
480#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000) 479#define IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
481 480
482/* 481/*
483 * Tx Status Bitmap 482 * Tx Status Bitmap
@@ -486,7 +485,7 @@
486 * "Alive" notification from uCode. Area is used only by device itself; 485 * "Alive" notification from uCode. Area is used only by device itself;
487 * no other support (besides clearing) is required from driver. 486 * no other support (besides clearing) is required from driver.
488 */ 487 */
489#define IWL49_SCD_TX_STTS_BITMAP_OFFSET 0x400 488#define IL49_SCD_TX_STTS_BITMAP_OFFSET 0x400
490 489
491/* 490/*
492 * RAxTID to queue translation mapping. 491 * RAxTID to queue translation mapping.
@@ -494,7 +493,7 @@
494 * When queue is in Scheduler-ACK mode, frames placed in a that queue must be 493 * When queue is in Scheduler-ACK mode, frames placed in a that queue must be
495 * for only one combination of receiver address (RA) and traffic ID (TID), i.e. 494 * for only one combination of receiver address (RA) and traffic ID (TID), i.e.
496 * one QOS priority level destined for one station (for this wireless link, 495 * one QOS priority level destined for one station (for this wireless link,
497 * not final destination). The SCD_TRANSLATE_TABLE area provides 16 16-bit 496 * not final destination). The SCD_TRANSLATE_TBL area provides 16 16-bit
498 * mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK 497 * mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK
499 * mode, the device ignores the mapping value. 498 * mode, the device ignores the mapping value.
500 * 499 *
@@ -508,16 +507,16 @@
508 * must read a dword-aligned value from device SRAM, replace the 16-bit map 507 * must read a dword-aligned value from device SRAM, replace the 16-bit map
509 * value of interest, and write the dword value back into device SRAM. 508 * value of interest, and write the dword value back into device SRAM.
510 */ 509 */
511#define IWL49_SCD_TRANSLATE_TBL_OFFSET 0x500 510#define IL49_SCD_TRANSLATE_TBL_OFFSET 0x500
512 511
513/* Find translation table dword to read/write for given queue */ 512/* Find translation table dword to read/write for given queue */
514#define IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \ 513#define IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
515 ((IWL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc) 514 ((IL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
516 515
517#define IWL_SCD_TXFIFO_POS_TID (0) 516#define IL_SCD_TXFIFO_POS_TID (0)
518#define IWL_SCD_TXFIFO_POS_RA (4) 517#define IL_SCD_TXFIFO_POS_RA (4)
519#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF) 518#define IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
520 519
521/*********************** END TX SCHEDULER *************************************/ 520/*********************** END TX SCHEDULER *************************************/
522 521
523#endif /* __iwl_legacy_prph_h__ */ 522#endif /* __il_prph_h__ */