author	John W. Linville <linville@tuxdriver.com>	2011-08-09 16:02:26 -0400
committer	John W. Linville <linville@tuxdriver.com>	2011-08-09 16:02:26 -0400
commit	392e741939ddb7dd7adf7e4ec414b2140fb3a15b (patch)
tree	53ad4d70d670b78b5e0cd92a3be3367f5312e697 /drivers
parent	53dd4b9329e4100405dc1cf251e6713b60051579 (diff)
parent	197035737e96a517eed26e8f4bb941738249783e (diff)
Merge branch 'ath6kl-next' of master.kernel.org:/pub/scm/linux/kernel/git/kvalo/ath6kl
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/wireless/ath/Kconfig	|    1
-rw-r--r--	drivers/net/wireless/ath/Makefile	|    1
-rw-r--r--	drivers/net/wireless/ath/ath6kl/Kconfig	|   15
-rw-r--r--	drivers/net/wireless/ath/ath6kl/Makefile	|   35
-rw-r--r--	drivers/net/wireless/ath/ath6kl/bmi.c	|  692
-rw-r--r--	drivers/net/wireless/ath/ath6kl/bmi.h	|  250
-rw-r--r--	drivers/net/wireless/ath/ath6kl/cfg80211.c	| 1538
-rw-r--r--	drivers/net/wireless/ath/ath6kl/cfg80211.h	|   39
-rw-r--r--	drivers/net/wireless/ath/ath6kl/common.h	|  180
-rw-r--r--	drivers/net/wireless/ath/ath6kl/core.h	|  544
-rw-r--r--	drivers/net/wireless/ath/ath6kl/debug.c	|  150
-rw-r--r--	drivers/net/wireless/ath/ath6kl/debug.h	|  105
-rw-r--r--	drivers/net/wireless/ath/ath6kl/hif-ops.h	|   72
-rw-r--r--	drivers/net/wireless/ath/ath6kl/hif.h	|  207
-rw-r--r--	drivers/net/wireless/ath/ath6kl/htc.c	| 2456
-rw-r--r--	drivers/net/wireless/ath/ath6kl/htc.h	|  604
-rw-r--r--	drivers/net/wireless/ath/ath6kl/htc_hif.c	|  641
-rw-r--r--	drivers/net/wireless/ath/ath6kl/htc_hif.h	|   92
-rw-r--r--	drivers/net/wireless/ath/ath6kl/init.c	| 1303
-rw-r--r--	drivers/net/wireless/ath/ath6kl/main.c	| 1337
-rw-r--r--	drivers/net/wireless/ath/ath6kl/node.c	|  234
-rw-r--r--	drivers/net/wireless/ath/ath6kl/sdio.c	|  912
-rw-r--r--	drivers/net/wireless/ath/ath6kl/target.h	|  331
-rw-r--r--	drivers/net/wireless/ath/ath6kl/txrx.c	| 1457
-rw-r--r--	drivers/net/wireless/ath/ath6kl/wmi.c	| 2743
-rw-r--r--	drivers/net/wireless/ath/ath6kl/wmi.h	| 2018
26 files changed, 17957 insertions(+), 0 deletions(-)
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index d1b23067619f..073548836413 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -25,5 +25,6 @@ config ATH_DEBUG
 source "drivers/net/wireless/ath/ath5k/Kconfig"
 source "drivers/net/wireless/ath/ath9k/Kconfig"
 source "drivers/net/wireless/ath/carl9170/Kconfig"
+source "drivers/net/wireless/ath/ath6kl/Kconfig"
 
 endif
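
With the source line above in place, the new driver appears as an ordinary Kconfig option under the Atheros menu; in a .config it would look something like this (illustrative values, assuming a modular build):

	CONFIG_ATH6KL=m
	CONFIG_ATH6KL_DEBUG=y
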
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index 0e8f528c81c0..d1214696a35b 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -1,6 +1,7 @@
 obj-$(CONFIG_ATH5K) += ath5k/
 obj-$(CONFIG_ATH9K_HW) += ath9k/
 obj-$(CONFIG_CARL9170) += carl9170/
+obj-$(CONFIG_ATH6KL) += ath6kl/
 
 obj-$(CONFIG_ATH_COMMON) += ath.o
 
diff --git a/drivers/net/wireless/ath/ath6kl/Kconfig b/drivers/net/wireless/ath/ath6kl/Kconfig
new file mode 100644
index 000000000000..3d5f8be20eac
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/Kconfig
@@ -0,0 +1,15 @@
+config ATH6KL
+	tristate "Atheros ath6kl support"
+	depends on MMC
+	depends on CFG80211
+	---help---
+	  This module adds support for wireless adapters based on
+	  Atheros AR6003 chipset running over SDIO. If you choose to
+	  build it as a module, it will be called ath6kl. Please note
+	  that AR6002 and AR6001 are not supported by this driver.
+
+config ATH6KL_DEBUG
+	bool "Atheros ath6kl debugging"
+	depends on ATH6KL
+	---help---
+	  Enables debug support
diff --git a/drivers/net/wireless/ath/ath6kl/Makefile b/drivers/net/wireless/ath/ath6kl/Makefile
new file mode 100644
index 000000000000..e1bb07ea8e80
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/Makefile
@@ -0,0 +1,35 @@
+#------------------------------------------------------------------------------
+# Copyright (c) 2004-2010 Atheros Communications Inc.
+# All rights reserved.
+#
+#
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+#
+#
+#
+# Author(s): ="Atheros"
+#------------------------------------------------------------------------------
+
+obj-$(CONFIG_ATH6KL) := ath6kl.o
+ath6kl-y += debug.o
+ath6kl-y += htc_hif.o
+ath6kl-y += htc.o
+ath6kl-y += bmi.o
+ath6kl-y += cfg80211.o
+ath6kl-y += init.o
+ath6kl-y += main.o
+ath6kl-y += txrx.o
+ath6kl-y += wmi.o
+ath6kl-y += node.o
+ath6kl-y += sdio.o
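
The ath6kl-y list above makes kbuild link every listed object into one composite module object, ath6kl.o, which becomes ath6kl.ko when CONFIG_ATH6KL=m. A quick way to build just this directory from the top of a configured tree is the standard kbuild invocation (not part of the patch):

	make M=drivers/net/wireless/ath/ath6kl modules
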
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.c b/drivers/net/wireless/ath/ath6kl/bmi.c
new file mode 100644
index 000000000000..84676697d7eb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/bmi.c
@@ -0,0 +1,692 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "hif-ops.h"
+#include "target.h"
+#include "debug.h"
+
+static int ath6kl_get_bmi_cmd_credits(struct ath6kl *ar)
+{
+	u32 addr;
+	unsigned long timeout;
+	int ret;
+
+	ar->bmi.cmd_credits = 0;
+
+	/* Read the counter register to get the command credits */
+	addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;
+
+	timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
+	while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
+
+		/*
+		 * Hit the credit counter with a 4-byte access, the first byte
+		 * read will hit the counter and cause a decrement, while the
+		 * remaining 3 bytes have no effect. The rationale behind this
+		 * is to make all HIF accesses 4-byte aligned.
+		 */
+		ret = hif_read_write_sync(ar, addr,
+					  (u8 *)&ar->bmi.cmd_credits, 4,
+					  HIF_RD_SYNC_BYTE_INC);
+		if (ret) {
+			ath6kl_err("Unable to decrement the command credit count register: %d\n",
+				   ret);
+			return ret;
+		}
+
+		/* The counter is only 8 bits.
+		 * Ignore anything in the upper 3 bytes
+		 */
+		ar->bmi.cmd_credits &= 0xFF;
+	}
+
+	if (!ar->bmi.cmd_credits) {
+		ath6kl_err("bmi communication timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar, bool need_timeout)
+{
+	unsigned long timeout;
+	u32 rx_word = 0;
+	int ret = 0;
+
+	timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
+	while ((!need_timeout || time_before(jiffies, timeout)) && !rx_word) {
+		ret = hif_read_write_sync(ar, RX_LOOKAHEAD_VALID_ADDRESS,
+					  (u8 *)&rx_word, sizeof(rx_word),
+					  HIF_RD_SYNC_BYTE_INC);
+		if (ret) {
+			ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n");
+			return ret;
+		}
+
+		/* all we really want is one bit */
+		rx_word &= (1 << ENDPOINT1);
+	}
+
+	if (!rx_word) {
+		ath6kl_err("bmi_recv_buf FIFO empty\n");
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+static int ath6kl_bmi_send_buf(struct ath6kl *ar, u8 *buf, u32 len)
+{
+	int ret;
+	u32 addr;
+
+	ret = ath6kl_get_bmi_cmd_credits(ar);
+	if (ret)
+		return ret;
+
+	addr = ar->mbox_info.htc_addr;
+
+	ret = hif_read_write_sync(ar, addr, buf, len,
+				  HIF_WR_SYNC_BYTE_INC);
+	if (ret)
+		ath6kl_err("unable to send the bmi data to the device\n");
+
+	return ret;
+}
+
+static int ath6kl_bmi_recv_buf(struct ath6kl *ar,
+			       u8 *buf, u32 len, bool want_timeout)
+{
+	int ret;
+	u32 addr;
+
+	/*
+	 * During normal bootup, small reads may be required.
+	 * Rather than issue an HIF Read and then wait as the Target
+	 * adds successive bytes to the FIFO, we wait here until
+	 * we know that response data is available.
+	 *
+	 * This allows us to cleanly timeout on an unexpected
+	 * Target failure rather than risk problems at the HIF level.
+	 * In particular, this avoids SDIO timeouts and possibly garbage
+	 * data on some host controllers. And on an interconnect
+	 * such as Compact Flash (as well as some SDIO masters) which
+	 * does not provide any indication on data timeout, it avoids
+	 * a potential hang or garbage response.
+	 *
+	 * Synchronization is more difficult for reads larger than the
+	 * size of the MBOX FIFO (128B), because the Target is unable
+	 * to push the 129th byte of data until AFTER the Host posts an
+	 * HIF Read and removes some FIFO data. So for large reads the
+	 * Host proceeds to post an HIF Read BEFORE all the data is
+	 * actually available to read. Fortunately, large BMI reads do
+	 * not occur in practice -- they're supported for debug/development.
+	 *
+	 * So Host/Target BMI synchronization is divided into these cases:
+	 * CASE 1: length < 4
+	 *	Should not happen
+	 *
+	 * CASE 2: 4 <= length <= 128
+	 *	Wait for first 4 bytes to be in FIFO
+	 *	If CONSERVATIVE_BMI_READ is enabled, also wait for
+	 *	a BMI command credit, which indicates that the ENTIRE
+	 *	response is available in the FIFO
+	 *
+	 * CASE 3: length > 128
+	 *	Wait for the first 4 bytes to be in FIFO
+	 *
+	 * For most uses, a small timeout should be sufficient and we will
+	 * usually see a response quickly; but there may be some unusual
+	 * (debug) cases of BMI_EXECUTE where we want a larger timeout.
+	 * For now, we use an unbounded busy loop while waiting for
+	 * BMI_EXECUTE.
+	 *
+	 * If BMI_EXECUTE ever needs to support longer-latency execution,
+	 * especially in production, this code needs to be enhanced to sleep
+	 * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
+	 * a function of Host processor speed.
+	 */
+	if (len >= 4) { /* NB: Currently, always true */
+		ret = ath6kl_bmi_get_rx_lkahd(ar, want_timeout);
+		if (ret)
+			return ret;
+	}
+
+	addr = ar->mbox_info.htc_addr;
+	ret = hif_read_write_sync(ar, addr, buf, len,
+				  HIF_RD_SYNC_BYTE_INC);
+	if (ret) {
+		ath6kl_err("Unable to read the bmi data from the device: %d\n",
+			   ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+int ath6kl_bmi_done(struct ath6kl *ar)
+{
+	int ret;
+	u32 cid = BMI_DONE;
+
+	if (ar->bmi.done_sent) {
+		ath6kl_dbg(ATH6KL_DBG_BMI, "bmi done skipped\n");
+		return 0;
+	}
+
+	ar->bmi.done_sent = true;
+
+	ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid));
+	if (ret) {
+		ath6kl_err("Unable to send bmi done: %d\n", ret);
+		return ret;
+	}
+
+	ath6kl_bmi_cleanup(ar);
+
+	return 0;
+}
+
+int ath6kl_bmi_get_target_info(struct ath6kl *ar,
+			       struct ath6kl_bmi_target_info *targ_info)
+{
+	int ret;
+	u32 cid = BMI_GET_TARGET_INFO;
+
+	if (ar->bmi.done_sent) {
+		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+		return -EACCES;
+	}
+
+	ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid));
+	if (ret) {
+		ath6kl_err("Unable to send get target info: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath6kl_bmi_recv_buf(ar, (u8 *)&targ_info->version,
+				  sizeof(targ_info->version), true);
+	if (ret) {
+		ath6kl_err("Unable to recv target info: %d\n", ret);
+		return ret;
+	}
+
+	if (le32_to_cpu(targ_info->version) == TARGET_VERSION_SENTINAL) {
+		/* Determine how many bytes are in the Target's targ_info */
+		ret = ath6kl_bmi_recv_buf(ar,
+					  (u8 *)&targ_info->byte_count,
+					  sizeof(targ_info->byte_count),
+					  true);
+		if (ret) {
+			ath6kl_err("unable to read target info byte count: %d\n",
+				   ret);
+			return ret;
+		}
+
+		/*
+		 * The target's targ_info doesn't match the host's targ_info.
+		 * We need to do some backwards compatibility to make this work.
+		 */
+		if (le32_to_cpu(targ_info->byte_count) != sizeof(*targ_info)) {
+			WARN_ON(1);
+			return -EINVAL;
+		}
+
+		/* Read the remainder of the targ_info */
+		ret = ath6kl_bmi_recv_buf(ar,
+					  ((u8 *)targ_info) +
+					  sizeof(targ_info->byte_count),
+					  sizeof(*targ_info) -
+					  sizeof(targ_info->byte_count),
+					  true);
+
+		if (ret) {
+			ath6kl_err("Unable to read target info (%d bytes): %d\n",
+				   targ_info->byte_count, ret);
+			return ret;
+		}
+	}
+
+	ath6kl_dbg(ATH6KL_DBG_BMI, "target info (ver: 0x%x type: 0x%x)\n",
+		   targ_info->version, targ_info->type);
+
+	return 0;
+}
+
+int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
+{
+	u32 cid = BMI_READ_MEMORY;
+	int ret;
+	u32 offset;
+	u32 len_remain, rx_len;
+	u16 size;
+
+	if (ar->bmi.done_sent) {
+		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+		return -EACCES;
+	}
+
+	size = BMI_DATASZ_MAX + sizeof(cid) + sizeof(addr) + sizeof(len);
+	if (size > MAX_BMI_CMDBUF_SZ) {
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	memset(ar->bmi.cmd_buf, 0, size);
+
+	ath6kl_dbg(ATH6KL_DBG_BMI,
+		   "bmi read memory: device: addr: 0x%x, len: %d\n",
+		   addr, len);
+
+	len_remain = len;
+
+	while (len_remain) {
+		rx_len = (len_remain < BMI_DATASZ_MAX) ?
+					len_remain : BMI_DATASZ_MAX;
+		offset = 0;
+		memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+		offset += sizeof(cid);
+		memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
+		offset += sizeof(addr);
+		memcpy(&(ar->bmi.cmd_buf[offset]), &rx_len, sizeof(rx_len));
+		offset += sizeof(len);
+
+		ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+		if (ret) {
+			ath6kl_err("Unable to write to the device: %d\n",
+				   ret);
+			return ret;
+		}
+		ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, rx_len, true);
+		if (ret) {
+			ath6kl_err("Unable to read from the device: %d\n",
+				   ret);
+			return ret;
+		}
+		memcpy(&buf[len - len_remain], ar->bmi.cmd_buf, rx_len);
+		len_remain -= rx_len; addr += rx_len;
+	}
+
+	return 0;
+}
+
+int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
+{
+	u32 cid = BMI_WRITE_MEMORY;
+	int ret;
+	u32 offset;
+	u32 len_remain, tx_len;
+	const u32 header = sizeof(cid) + sizeof(addr) + sizeof(len);
+	u8 aligned_buf[BMI_DATASZ_MAX];
+	u8 *src;
+
+	if (ar->bmi.done_sent) {
+		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+		return -EACCES;
+	}
+
+	if ((BMI_DATASZ_MAX + header) > MAX_BMI_CMDBUF_SZ) {
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	memset(ar->bmi.cmd_buf, 0, BMI_DATASZ_MAX + header);
+
+	ath6kl_dbg(ATH6KL_DBG_BMI,
+		   "bmi write memory: addr: 0x%x, len: %d\n", addr, len);
+
+	len_remain = len;
+	while (len_remain) {
+		src = &buf[len - len_remain];
+
+		if (len_remain < (BMI_DATASZ_MAX - header)) {
+			if (len_remain & 3) {
+				/* align it with 4 bytes */
+				len_remain = len_remain +
+					     (4 - (len_remain & 3));
+				memcpy(aligned_buf, src, len_remain);
+				src = aligned_buf;
+			}
+			tx_len = len_remain;
+		} else {
+			tx_len = (BMI_DATASZ_MAX - header);
+		}
+
+		offset = 0;
+		memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+		offset += sizeof(cid);
+		memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
+		offset += sizeof(addr);
+		memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len));
+		offset += sizeof(tx_len);
+		memcpy(&(ar->bmi.cmd_buf[offset]), src, tx_len);
+		offset += tx_len;
+
+		ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+		if (ret) {
+			ath6kl_err("Unable to write to the device: %d\n",
+				   ret);
+			return ret;
+		}
+		len_remain -= tx_len; addr += tx_len;
+	}
+
+	return 0;
+}
+
+int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param)
+{
+	u32 cid = BMI_EXECUTE;
+	int ret;
+	u32 offset;
+	u16 size;
+
+	if (ar->bmi.done_sent) {
+		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+		return -EACCES;
+	}
+
+	size = sizeof(cid) + sizeof(addr) + sizeof(*param);
+	if (size > MAX_BMI_CMDBUF_SZ) {
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	memset(ar->bmi.cmd_buf, 0, size);
+
+	ath6kl_dbg(ATH6KL_DBG_BMI, "bmi execute: addr: 0x%x, param: %d\n",
+		   addr, *param);
+
+	offset = 0;
+	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+	offset += sizeof(cid);
+	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
+	offset += sizeof(addr);
+	memcpy(&(ar->bmi.cmd_buf[offset]), param, sizeof(*param));
+	offset += sizeof(*param);
+
+	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+	if (ret) {
+		ath6kl_err("Unable to write to the device: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param), false);
+	if (ret) {
+		ath6kl_err("Unable to read from the device: %d\n", ret);
+		return ret;
+	}
+
+	memcpy(param, ar->bmi.cmd_buf, sizeof(*param));
+
+	return 0;
+}
+
+int ath6kl_bmi_set_app_start(struct ath6kl *ar, u32 addr)
+{
+	u32 cid = BMI_SET_APP_START;
+	int ret;
+	u32 offset;
+	u16 size;
+
+	if (ar->bmi.done_sent) {
+		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+		return -EACCES;
+	}
+
+	size = sizeof(cid) + sizeof(addr);
+	if (size > MAX_BMI_CMDBUF_SZ) {
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	memset(ar->bmi.cmd_buf, 0, size);
+
+	ath6kl_dbg(ATH6KL_DBG_BMI, "bmi set app start: addr: 0x%x\n", addr);
+
+	offset = 0;
+	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+	offset += sizeof(cid);
+	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
+	offset += sizeof(addr);
+
+	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+	if (ret) {
+		ath6kl_err("Unable to write to the device: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param)
+{
+	u32 cid = BMI_READ_SOC_REGISTER;
+	int ret;
+	u32 offset;
+	u16 size;
+
+	if (ar->bmi.done_sent) {
+		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+		return -EACCES;
+	}
+
+	size = sizeof(cid) + sizeof(addr);
+	if (size > MAX_BMI_CMDBUF_SZ) {
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	memset(ar->bmi.cmd_buf, 0, size);
+
+	ath6kl_dbg(ATH6KL_DBG_BMI, "bmi read SOC reg: addr: 0x%x\n", addr);
+
+	offset = 0;
+	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+	offset += sizeof(cid);
+	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
+	offset += sizeof(addr);
+
+	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+	if (ret) {
+		ath6kl_err("Unable to write to the device: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param), true);
+	if (ret) {
+		ath6kl_err("Unable to read from the device: %d\n", ret);
+		return ret;
+	}
+	memcpy(param, ar->bmi.cmd_buf, sizeof(*param));
+
+	return 0;
+}
+
+int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param)
+{
+	u32 cid = BMI_WRITE_SOC_REGISTER;
+	int ret;
+	u32 offset;
+	u16 size;
+
+	if (ar->bmi.done_sent) {
+		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+		return -EACCES;
+	}
+
+	size = sizeof(cid) + sizeof(addr) + sizeof(param);
+	if (size > MAX_BMI_CMDBUF_SZ) {
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	memset(ar->bmi.cmd_buf, 0, size);
+
+	ath6kl_dbg(ATH6KL_DBG_BMI,
+		   "bmi write SOC reg: addr: 0x%x, param: %d\n",
+		   addr, param);
+
+	offset = 0;
+	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+	offset += sizeof(cid);
+	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
+	offset += sizeof(addr);
+	memcpy(&(ar->bmi.cmd_buf[offset]), &param, sizeof(param));
+	offset += sizeof(param);
+
+	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+	if (ret) {
+		ath6kl_err("Unable to write to the device: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len)
+{
+	u32 cid = BMI_LZ_DATA;
+	int ret;
+	u32 offset;
+	u32 len_remain, tx_len;
+	const u32 header = sizeof(cid) + sizeof(len);
+	u16 size;
+
+	if (ar->bmi.done_sent) {
+		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+		return -EACCES;
+	}
+
+	size = BMI_DATASZ_MAX + header;
+	if (size > MAX_BMI_CMDBUF_SZ) {
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	memset(ar->bmi.cmd_buf, 0, size);
+
+	ath6kl_dbg(ATH6KL_DBG_BMI, "bmi send LZ data: len: %d\n",
+		   len);
+
+	len_remain = len;
+	while (len_remain) {
+		tx_len = (len_remain < (BMI_DATASZ_MAX - header)) ?
+			  len_remain : (BMI_DATASZ_MAX - header);
+
+		offset = 0;
+		memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+		offset += sizeof(cid);
+		memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len));
+		offset += sizeof(tx_len);
+		memcpy(&(ar->bmi.cmd_buf[offset]), &buf[len - len_remain],
+		       tx_len);
+		offset += tx_len;
+
+		ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+		if (ret) {
+			ath6kl_err("Unable to write to the device: %d\n",
+				   ret);
+			return ret;
+		}
+
+		len_remain -= tx_len;
+	}
+
+	return 0;
+}
+
+int ath6kl_bmi_lz_stream_start(struct ath6kl *ar, u32 addr)
+{
+	u32 cid = BMI_LZ_STREAM_START;
+	int ret;
+	u32 offset;
+	u16 size;
+
+	if (ar->bmi.done_sent) {
+		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
+		return -EACCES;
+	}
+
+	size = sizeof(cid) + sizeof(addr);
+	if (size > MAX_BMI_CMDBUF_SZ) {
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	memset(ar->bmi.cmd_buf, 0, size);
+
+	ath6kl_dbg(ATH6KL_DBG_BMI,
+		   "bmi LZ stream start: addr: 0x%x\n",
+		   addr);
+
+	offset = 0;
+	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
+	offset += sizeof(cid);
+	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
+	offset += sizeof(addr);
+
+	ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+	if (ret) {
+		ath6kl_err("Unable to start LZ stream to the device: %d\n",
+			   ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+int ath6kl_bmi_fast_download(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
+{
+	int ret;
+	u32 last_word = 0;
+	u32 last_word_offset = len & ~0x3;
+	u32 unaligned_bytes = len & 0x3;
+
+	ret = ath6kl_bmi_lz_stream_start(ar, addr);
+	if (ret)
+		return ret;
+
+	if (unaligned_bytes) {
+		/* copy the last word into a zero padded buffer */
+		memcpy(&last_word, &buf[last_word_offset], unaligned_bytes);
+	}
+
+	ret = ath6kl_bmi_lz_data(ar, buf, last_word_offset);
+	if (ret)
+		return ret;
+
+	if (unaligned_bytes)
+		ret = ath6kl_bmi_lz_data(ar, (u8 *)&last_word, 4);
+
+	if (!ret) {
+		/* Close compressed stream and open a new (fake) one.
+		 * This serves mainly to flush Target caches. */
+		ret = ath6kl_bmi_lz_stream_start(ar, 0x00);
+	}
+	return ret;
+}
+
+int ath6kl_bmi_init(struct ath6kl *ar)
+{
+	ar->bmi.cmd_buf = kzalloc(MAX_BMI_CMDBUF_SZ, GFP_ATOMIC);
+
+	if (!ar->bmi.cmd_buf)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void ath6kl_bmi_cleanup(struct ath6kl *ar)
+{
+	kfree(ar->bmi.cmd_buf);
+	ar->bmi.cmd_buf = NULL;
+}
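
Taken together, these routines form the host side of the BMI bootstrap. A minimal sketch of how a caller might drive them to load and start firmware — illustrative only, not part of this patch; the firmware buffer, its length and the load address are assumed inputs, and ath6kl_bmi_init() is presumed to have been called during probe:

	/* Hypothetical caller of the BMI API above (error paths shortened). */
	static int example_bmi_boot(struct ath6kl *ar, u8 *fw, u32 fw_len,
				    u32 load_addr)
	{
		struct ath6kl_bmi_target_info targ_info;
		int ret;

		/* confirm we are talking to a known target */
		ret = ath6kl_bmi_get_target_info(ar, &targ_info);
		if (ret)
			return ret;

		/* stream the (LZ-compressed) image into target memory */
		ret = ath6kl_bmi_fast_download(ar, load_addr, fw, fw_len);
		if (ret)
			return ret;

		/* set the entry point, then leave the BMI phase for good */
		ret = ath6kl_bmi_set_app_start(ar, load_addr);
		if (ret)
			return ret;

		return ath6kl_bmi_done(ar);
	}
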
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.h b/drivers/net/wireless/ath/ath6kl/bmi.h
new file mode 100644
index 000000000000..83546d76d979
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/bmi.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef BMI_H
+#define BMI_H
+
+/*
+ * Bootloader Messaging Interface (BMI)
+ *
+ * BMI is a very simple messaging interface used during initialization
+ * to read memory, write memory, execute code, and to define an
+ * application entry PC.
+ *
+ * It is used to download an application to ATH6KL, to provide
+ * patches to code that is already resident on ATH6KL, and generally
+ * to examine and modify state. The Host has an opportunity to use
+ * BMI only once during bootup. Once the Host issues a BMI_DONE
+ * command, this opportunity ends.
+ *
+ * The Host writes BMI requests to mailbox0, and reads BMI responses
+ * from mailbox0. BMI requests all begin with a command
+ * (see below for specific commands), and are followed by
+ * command-specific data.
+ *
+ * Flow control:
+ * The Host can only issue a command once the Target gives it a
+ * "BMI Command Credit", using ATH6KL Counter #4. As soon as the
+ * Target has completed a command, it issues another BMI Command
+ * Credit (so the Host can issue the next command).
+ *
+ * BMI handles all required Target-side cache flushing.
+ */
+
+#define MAX_BMI_CMDBUF_SZ (BMI_DATASZ_MAX + \
+			   (sizeof(u32) * 3 /* cmd + addr + len */))
+
+/* Maximum data size used for BMI transfers */
+#define BMI_DATASZ_MAX			256
+
+/* BMI Commands */
+
+#define BMI_NO_COMMAND			0
+
+#define BMI_DONE			1
+/*
+ * Semantics: Host is done using BMI
+ * Request format:
+ *    u32 command (BMI_DONE)
+ * Response format: none
+ */
+
+#define BMI_READ_MEMORY			2
+/*
+ * Semantics: Host reads ATH6KL memory
+ * Request format:
+ *    u32 command (BMI_READ_MEMORY)
+ *    u32 address
+ *    u32 length, at most BMI_DATASZ_MAX
+ * Response format:
+ *    u8 data[length]
+ */
+
+#define BMI_WRITE_MEMORY		3
+/*
+ * Semantics: Host writes ATH6KL memory
+ * Request format:
+ *    u32 command (BMI_WRITE_MEMORY)
+ *    u32 address
+ *    u32 length, at most BMI_DATASZ_MAX
+ *    u8 data[length]
+ * Response format: none
+ */
+
+#define BMI_EXECUTE			4
+/*
+ * Semantics: Causes ATH6KL to execute code
+ * Request format:
+ *    u32 command (BMI_EXECUTE)
+ *    u32 address
+ *    u32 parameter
+ * Response format:
+ *    u32 return value
+ */
+
+#define BMI_SET_APP_START		5
+/*
+ * Semantics: Set Target application starting address
+ * Request format:
+ *    u32 command (BMI_SET_APP_START)
+ *    u32 address
+ * Response format: none
+ */
+
+#define BMI_READ_SOC_REGISTER		6
+/*
+ * Semantics: Read a 32-bit Target SOC register.
+ * Request format:
+ *    u32 command (BMI_READ_REGISTER)
+ *    u32 address
+ * Response format:
+ *    u32 value
+ */
+
+#define BMI_WRITE_SOC_REGISTER		7
+/*
+ * Semantics: Write a 32-bit Target SOC register.
+ * Request format:
+ *    u32 command (BMI_WRITE_REGISTER)
+ *    u32 address
+ *    u32 value
+ *
+ * Response format: none
+ */
+
+#define BMI_GET_TARGET_ID		8
+#define BMI_GET_TARGET_INFO		8
+/*
+ * Semantics: Fetch the 4-byte Target information
+ * Request format:
+ *    u32 command (BMI_GET_TARGET_ID/INFO)
+ * Response format1 (old firmware):
+ *    u32 TargetVersionID
+ * Response format2 (newer firmware):
+ *    u32 TARGET_VERSION_SENTINAL
+ *    struct bmi_target_info;
+ */
+
+#define TARGET_VERSION_SENTINAL		0xffffffff
+#define TARGET_TYPE_AR6003		3
+
+#define BMI_ROMPATCH_INSTALL		9
+/*
+ * Semantics: Install a ROM Patch.
+ * Request format:
+ *    u32 command (BMI_ROMPATCH_INSTALL)
+ *    u32 Target ROM Address
+ *    u32 Target RAM Address or Value (depending on Target Type)
+ *    u32 Size, in bytes
+ *    u32 Activate? 1 --> activate;
+ *                  0 --> install but do not activate
+ * Response format:
+ *    u32 PatchID
+ */
+
+#define BMI_ROMPATCH_UNINSTALL		10
+/*
+ * Semantics: Uninstall a previously-installed ROM Patch,
+ * automatically deactivating, if necessary.
+ * Request format:
+ *    u32 command (BMI_ROMPATCH_UNINSTALL)
+ *    u32 PatchID
+ *
+ * Response format: none
+ */
+
+#define BMI_ROMPATCH_ACTIVATE		11
+/*
+ * Semantics: Activate a list of previously-installed ROM Patches.
+ * Request format:
+ *    u32 command (BMI_ROMPATCH_ACTIVATE)
+ *    u32 rompatch_count
+ *    u32 PatchID[rompatch_count]
+ *
+ * Response format: none
+ */
+
+#define BMI_ROMPATCH_DEACTIVATE		12
+/*
+ * Semantics: Deactivate a list of active ROM Patches.
+ * Request format:
+ *    u32 command (BMI_ROMPATCH_DEACTIVATE)
+ *    u32 rompatch_count
+ *    u32 PatchID[rompatch_count]
+ *
+ * Response format: none
+ */
+
+
+#define BMI_LZ_STREAM_START		13
+/*
+ * Semantics: Begin an LZ-compressed stream of input
+ * which is to be uncompressed by the Target to an
+ * output buffer at address. The output buffer must
+ * be sufficiently large to hold the uncompressed
+ * output from the compressed input stream. This BMI
+ * command should be followed by a series of 1 or more
+ * BMI_LZ_DATA commands.
+ *    u32 command (BMI_LZ_STREAM_START)
+ *    u32 address
+ * Note: Not supported on all versions of ROM firmware.
+ */
+
+#define BMI_LZ_DATA			14
+/*
+ * Semantics: Host writes ATH6KL memory with LZ-compressed
+ * data which is uncompressed by the Target. This command
+ * must be preceded by a BMI_LZ_STREAM_START command. A series
+ * of BMI_LZ_DATA commands are considered part of a single
+ * input stream until another BMI_LZ_STREAM_START is issued.
+ * Request format:
+ *    u32 command (BMI_LZ_DATA)
+ *    u32 length (of compressed data),
+ *                  at most BMI_DATASZ_MAX
+ *    u8 CompressedData[length]
+ * Response format: none
+ * Note: Not supported on all versions of ROM firmware.
+ */
+
+#define BMI_COMMUNICATION_TIMEOUT	1000 /* in msec */
+
+struct ath6kl;
+struct ath6kl_bmi_target_info {
+	__le32 byte_count;   /* size of this structure */
+	__le32 version;      /* target version id */
+	__le32 type;         /* target type */
+} __packed;
+
+int ath6kl_bmi_init(struct ath6kl *ar);
+void ath6kl_bmi_cleanup(struct ath6kl *ar);
+int ath6kl_bmi_done(struct ath6kl *ar);
+int ath6kl_bmi_get_target_info(struct ath6kl *ar,
+			       struct ath6kl_bmi_target_info *targ_info);
+int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len);
+int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len);
+int ath6kl_bmi_execute(struct ath6kl *ar,
+		       u32 addr, u32 *param);
+int ath6kl_bmi_set_app_start(struct ath6kl *ar,
+			     u32 addr);
+int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param);
+int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param);
+int ath6kl_bmi_lz_data(struct ath6kl *ar,
+		       u8 *buf, u32 len);
+int ath6kl_bmi_lz_stream_start(struct ath6kl *ar,
+			       u32 addr);
+int ath6kl_bmi_fast_download(struct ath6kl *ar,
+			     u32 addr, u8 *buf, u32 len);
+#endif
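
To make the request framing documented above concrete: each command is a sequence of u32 fields written to mailbox0, gated by one command credit per request. Assuming, as the __le32 fields of the target-info struct suggest, a little-endian wire format, a BMI_WRITE_SOC_REGISTER request that sets a register to 1 could be built like this (the register address is a made-up example):

	__le32 req[3] = {
		cpu_to_le32(BMI_WRITE_SOC_REGISTER),	/* command = 7 */
		cpu_to_le32(0x0000c0a4),		/* address (example only) */
		cpu_to_le32(1),				/* value to write */
	};
	/* 12 bytes on the wire; no response payload -- completion is
	 * signalled by the target returning a fresh command credit. */
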
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
new file mode 100644
index 000000000000..14559ffb1453
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -0,0 +1,1538 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "cfg80211.h"
+#include "debug.h"
+
+#define RATETAB_ENT(_rate, _rateid, _flags) {	\
+	.bitrate	= (_rate),		\
+	.flags		= (_flags),		\
+	.hw_value	= (_rateid),		\
+}
+
+#define CHAN2G(_channel, _freq, _flags) {	\
+	.band			= IEEE80211_BAND_2GHZ,	\
+	.hw_value		= (_channel),		\
+	.center_freq		= (_freq),		\
+	.flags			= (_flags),		\
+	.max_antenna_gain	= 0,			\
+	.max_power		= 30,			\
+}
+
+#define CHAN5G(_channel, _flags) {			\
+	.band			= IEEE80211_BAND_5GHZ,		\
+	.hw_value		= (_channel),			\
+	.center_freq		= 5000 + (5 * (_channel)),	\
+	.flags			= (_flags),			\
+	.max_antenna_gain	= 0,				\
+	.max_power		= 30,				\
+}
+
+static struct ieee80211_rate ath6kl_rates[] = {
+	RATETAB_ENT(10, 0x1, 0),
+	RATETAB_ENT(20, 0x2, 0),
+	RATETAB_ENT(55, 0x4, 0),
+	RATETAB_ENT(110, 0x8, 0),
+	RATETAB_ENT(60, 0x10, 0),
+	RATETAB_ENT(90, 0x20, 0),
+	RATETAB_ENT(120, 0x40, 0),
+	RATETAB_ENT(180, 0x80, 0),
+	RATETAB_ENT(240, 0x100, 0),
+	RATETAB_ENT(360, 0x200, 0),
+	RATETAB_ENT(480, 0x400, 0),
+	RATETAB_ENT(540, 0x800, 0),
+};
+
+#define ath6kl_a_rates		(ath6kl_rates + 4)
+#define ath6kl_a_rates_size	8
+#define ath6kl_g_rates		(ath6kl_rates + 0)
+#define ath6kl_g_rates_size	12
+
+static struct ieee80211_channel ath6kl_2ghz_channels[] = {
+	CHAN2G(1, 2412, 0),
+	CHAN2G(2, 2417, 0),
+	CHAN2G(3, 2422, 0),
+	CHAN2G(4, 2427, 0),
+	CHAN2G(5, 2432, 0),
+	CHAN2G(6, 2437, 0),
+	CHAN2G(7, 2442, 0),
+	CHAN2G(8, 2447, 0),
+	CHAN2G(9, 2452, 0),
+	CHAN2G(10, 2457, 0),
+	CHAN2G(11, 2462, 0),
+	CHAN2G(12, 2467, 0),
+	CHAN2G(13, 2472, 0),
+	CHAN2G(14, 2484, 0),
+};
+
+static struct ieee80211_channel ath6kl_5ghz_a_channels[] = {
+	CHAN5G(34, 0), CHAN5G(36, 0),
+	CHAN5G(38, 0), CHAN5G(40, 0),
+	CHAN5G(42, 0), CHAN5G(44, 0),
+	CHAN5G(46, 0), CHAN5G(48, 0),
+	CHAN5G(52, 0), CHAN5G(56, 0),
+	CHAN5G(60, 0), CHAN5G(64, 0),
+	CHAN5G(100, 0), CHAN5G(104, 0),
+	CHAN5G(108, 0), CHAN5G(112, 0),
+	CHAN5G(116, 0), CHAN5G(120, 0),
+	CHAN5G(124, 0), CHAN5G(128, 0),
+	CHAN5G(132, 0), CHAN5G(136, 0),
+	CHAN5G(140, 0), CHAN5G(149, 0),
+	CHAN5G(153, 0), CHAN5G(157, 0),
+	CHAN5G(161, 0), CHAN5G(165, 0),
+	CHAN5G(184, 0), CHAN5G(188, 0),
+	CHAN5G(192, 0), CHAN5G(196, 0),
+	CHAN5G(200, 0), CHAN5G(204, 0),
+	CHAN5G(208, 0), CHAN5G(212, 0),
+	CHAN5G(216, 0),
+};
+
+static struct ieee80211_supported_band ath6kl_band_2ghz = {
+	.n_channels = ARRAY_SIZE(ath6kl_2ghz_channels),
+	.channels = ath6kl_2ghz_channels,
+	.n_bitrates = ath6kl_g_rates_size,
+	.bitrates = ath6kl_g_rates,
+};
+
+static struct ieee80211_supported_band ath6kl_band_5ghz = {
+	.n_channels = ARRAY_SIZE(ath6kl_5ghz_a_channels),
+	.channels = ath6kl_5ghz_a_channels,
+	.n_bitrates = ath6kl_a_rates_size,
+	.bitrates = ath6kl_a_rates,
+};
+
+static int ath6kl_set_wpa_version(struct ath6kl *ar,
+				  enum nl80211_wpa_versions wpa_version)
+{
+	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: %u\n", __func__, wpa_version);
+
+	if (!wpa_version) {
+		ar->auth_mode = NONE_AUTH;
+	} else if (wpa_version & NL80211_WPA_VERSION_2) {
+		ar->auth_mode = WPA2_AUTH;
+	} else if (wpa_version & NL80211_WPA_VERSION_1) {
+		ar->auth_mode = WPA_AUTH;
+	} else {
+		ath6kl_err("%s: %u not supported\n", __func__, wpa_version);
+		return -ENOTSUPP;
+	}
+
+	return 0;
+}
+
+static int ath6kl_set_auth_type(struct ath6kl *ar,
+				enum nl80211_auth_type auth_type)
+{
+
+	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, auth_type);
+
+	switch (auth_type) {
+	case NL80211_AUTHTYPE_OPEN_SYSTEM:
+		ar->dot11_auth_mode = OPEN_AUTH;
+		break;
+	case NL80211_AUTHTYPE_SHARED_KEY:
+		ar->dot11_auth_mode = SHARED_AUTH;
+		break;
+	case NL80211_AUTHTYPE_NETWORK_EAP:
+		ar->dot11_auth_mode = LEAP_AUTH;
+		break;
+
+	case NL80211_AUTHTYPE_AUTOMATIC:
+		ar->dot11_auth_mode = OPEN_AUTH;
+		ar->auto_auth_stage = AUTH_OPEN_IN_PROGRESS;
+		break;
+
+	default:
+		ath6kl_err("%s: 0x%x not supported\n", __func__, auth_type);
+		return -ENOTSUPP;
+	}
+
+	return 0;
+}
+
+static int ath6kl_set_cipher(struct ath6kl *ar, u32 cipher, bool ucast)
+{
+	u8 *ar_cipher = ucast ? &ar->prwise_crypto : &ar->grp_crypto;
+	u8 *ar_cipher_len = ucast ? &ar->prwise_crypto_len : &ar->grp_crpto_len;
+
+	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: cipher 0x%x, ucast %u\n",
+		   __func__, cipher, ucast);
+
+	switch (cipher) {
+	case 0:
+		/* our own hack to use value 0 as no crypto used */
+		*ar_cipher = NONE_CRYPT;
+		*ar_cipher_len = 0;
+		break;
+	case WLAN_CIPHER_SUITE_WEP40:
+		*ar_cipher = WEP_CRYPT;
+		*ar_cipher_len = 5;
+		break;
+	case WLAN_CIPHER_SUITE_WEP104:
+		*ar_cipher = WEP_CRYPT;
+		*ar_cipher_len = 13;
+		break;
+	case WLAN_CIPHER_SUITE_TKIP:
+		*ar_cipher = TKIP_CRYPT;
+		*ar_cipher_len = 0;
+		break;
+	case WLAN_CIPHER_SUITE_CCMP:
+		*ar_cipher = AES_CRYPT;
+		*ar_cipher_len = 0;
+		break;
+	default:
+		ath6kl_err("cipher 0x%x not supported\n", cipher);
+		return -ENOTSUPP;
+	}
+
+	return 0;
+}
+
+static void ath6kl_set_key_mgmt(struct ath6kl *ar, u32 key_mgmt)
+{
+	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, key_mgmt);
+
+	if (key_mgmt == WLAN_AKM_SUITE_PSK) {
+		if (ar->auth_mode == WPA_AUTH)
+			ar->auth_mode = WPA_PSK_AUTH;
+		else if (ar->auth_mode == WPA2_AUTH)
+			ar->auth_mode = WPA2_PSK_AUTH;
+	} else if (key_mgmt != WLAN_AKM_SUITE_8021X) {
+		ar->auth_mode = NONE_AUTH;
+	}
+}
+
+static bool ath6kl_cfg80211_ready(struct ath6kl *ar)
+{
+	if (!test_bit(WMI_READY, &ar->flag)) {
+		ath6kl_err("wmi is not ready\n");
+		return false;
+	}
+
+	if (!test_bit(WLAN_ENABLED, &ar->flag)) {
+		ath6kl_err("wlan disabled\n");
+		return false;
+	}
+
+	return true;
+}
+
+static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
+				   struct cfg80211_connect_params *sme)
+{
+	struct ath6kl *ar = ath6kl_priv(dev);
+	int status;
+
+	ar->sme_state = SME_CONNECTING;
+
+	if (!ath6kl_cfg80211_ready(ar))
+		return -EIO;
+
+	if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
+		ath6kl_err("destroy in progress\n");
+		return -EBUSY;
+	}
+
+	if (test_bit(SKIP_SCAN, &ar->flag) &&
+	    ((sme->channel && sme->channel->center_freq == 0) ||
+	     (sme->bssid && is_zero_ether_addr(sme->bssid)))) {
+		ath6kl_err("SkipScan: channel or bssid invalid\n");
+		return -EINVAL;
+	}
+
+	if (down_interruptible(&ar->sem)) {
+		ath6kl_err("busy, couldn't get access\n");
+		return -ERESTARTSYS;
+	}
+
+	if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
+		ath6kl_err("busy, destroy in progress\n");
+		up(&ar->sem);
+		return -EBUSY;
+	}
+
+	if (ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)]) {
+		/*
+		 * sleep until the command queue drains
+		 */
+		wait_event_interruptible_timeout(ar->event_wq,
+			ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)] == 0,
+			WMI_TIMEOUT);
+		if (signal_pending(current)) {
+			ath6kl_err("cmd queue drain timeout\n");
+			up(&ar->sem);
+			return -EINTR;
+		}
+	}
+
+	if (test_bit(CONNECTED, &ar->flag) &&
+	    ar->ssid_len == sme->ssid_len &&
+	    !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) {
+		ar->reconnect_flag = true;
+		status = ath6kl_wmi_reconnect_cmd(ar->wmi, ar->req_bssid,
+						  ar->ch_hint);
+
+		up(&ar->sem);
+		if (status) {
+			ath6kl_err("wmi_reconnect_cmd failed\n");
+			return -EIO;
+		}
+		return 0;
+	} else if (ar->ssid_len == sme->ssid_len &&
+		   !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) {
+		ath6kl_disconnect(ar);
+	}
+
+	memset(ar->ssid, 0, sizeof(ar->ssid));
+	ar->ssid_len = sme->ssid_len;
+	memcpy(ar->ssid, sme->ssid, sme->ssid_len);
+
+	if (sme->channel)
+		ar->ch_hint = sme->channel->center_freq;
+
+	memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+	if (sme->bssid && !is_broadcast_ether_addr(sme->bssid))
+		memcpy(ar->req_bssid, sme->bssid, sizeof(ar->req_bssid));
+
+	ath6kl_set_wpa_version(ar, sme->crypto.wpa_versions);
+
+	status = ath6kl_set_auth_type(ar, sme->auth_type);
+	if (status) {
+		up(&ar->sem);
+		return status;
+	}
+
+	if (sme->crypto.n_ciphers_pairwise)
+		ath6kl_set_cipher(ar, sme->crypto.ciphers_pairwise[0], true);
+	else
+		ath6kl_set_cipher(ar, 0, true);
+
+	ath6kl_set_cipher(ar, sme->crypto.cipher_group, false);
+
+	if (sme->crypto.n_akm_suites)
+		ath6kl_set_key_mgmt(ar, sme->crypto.akm_suites[0]);
+
+	if ((sme->key_len) &&
+	    (ar->auth_mode == NONE_AUTH) && (ar->prwise_crypto == WEP_CRYPT)) {
+		struct ath6kl_key *key = NULL;
+
+		if (sme->key_idx < WMI_MIN_KEY_INDEX ||
+		    sme->key_idx > WMI_MAX_KEY_INDEX) {
+			ath6kl_err("key index %d out of bounds\n",
+				   sme->key_idx);
+			up(&ar->sem);
+			return -ENOENT;
+		}
+
+		key = &ar->keys[sme->key_idx];
+		key->key_len = sme->key_len;
+		memcpy(key->key, sme->key, key->key_len);
+		key->cipher = ar->prwise_crypto;
+		ar->def_txkey_index = sme->key_idx;
+
+		ath6kl_wmi_addkey_cmd(ar->wmi, sme->key_idx,
+				      ar->prwise_crypto,
+				      GROUP_USAGE | TX_USAGE,
+				      key->key_len,
+				      NULL,
+				      key->key, KEY_OP_INIT_VAL, NULL,
+				      NO_SYNC_WMIFLAG);
+	}
+
+	if (!ar->usr_bss_filter) {
+		if (ath6kl_wmi_bssfilter_cmd(ar->wmi, ALL_BSS_FILTER, 0) != 0) {
+			ath6kl_err("couldn't set bss filtering\n");
+			up(&ar->sem);
+			return -EIO;
+		}
+	}
+
+	ar->nw_type = ar->next_mode;
+
+	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+		   "%s: connect called with authmode %d dot11 auth %d"
+		   " PW crypto %d PW crypto len %d GRP crypto %d"
+		   " GRP crypto len %d channel hint %u\n",
+		   __func__,
+		   ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto,
+		   ar->prwise_crypto_len, ar->grp_crypto,
+		   ar->grp_crpto_len, ar->ch_hint);
+
+	ar->reconnect_flag = 0;
+	status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type,
+					ar->dot11_auth_mode, ar->auth_mode,
+					ar->prwise_crypto,
+					ar->prwise_crypto_len,
+					ar->grp_crypto, ar->grp_crpto_len,
+					ar->ssid_len, ar->ssid,
+					ar->req_bssid, ar->ch_hint,
+					ar->connect_ctrl_flags);
+
+	up(&ar->sem);
+
+	if (status == -EINVAL) {
+		memset(ar->ssid, 0, sizeof(ar->ssid));
+		ar->ssid_len = 0;
+		ath6kl_err("invalid request\n");
+		return -ENOENT;
+	} else if (status) {
+		ath6kl_err("ath6kl_wmi_connect_cmd failed\n");
+		return -EIO;
+	}
+
+	if ((!(ar->connect_ctrl_flags & CONNECT_DO_WPA_OFFLOAD)) &&
+	    ((ar->auth_mode == WPA_PSK_AUTH)
+	     || (ar->auth_mode == WPA2_PSK_AUTH))) {
+		mod_timer(&ar->disconnect_timer,
+			  jiffies + msecs_to_jiffies(DISCON_TIMER_INTVAL));
+	}
+
+	ar->connect_ctrl_flags &= ~CONNECT_DO_WPA_OFFLOAD;
+	set_bit(CONNECT_PEND, &ar->flag);
+
+	return 0;
+}
+
+void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
+				   u8 *bssid, u16 listen_intvl,
+				   u16 beacon_intvl,
+				   enum network_type nw_type,
+				   u8 beacon_ie_len, u8 assoc_req_len,
+				   u8 assoc_resp_len, u8 *assoc_info)
+{
+	u16 size = 0;
+	u16 capability = 0;
+	struct cfg80211_bss *bss = NULL;
+	struct ieee80211_mgmt *mgmt = NULL;
+	struct ieee80211_channel *ibss_ch = NULL;
+	s32 signal = 50 * 100;
+	u8 ie_buf_len = 0;
+	unsigned char ie_buf[256];
+	unsigned char *ptr_ie_buf = ie_buf;
+	unsigned char *ieeemgmtbuf = NULL;
+	u8 source_mac[ETH_ALEN];
+	u16 capa_mask;
+	u16 capa_val;
+
+	/* capinfo + listen interval */
+	u8 assoc_req_ie_offset = sizeof(u16) + sizeof(u16);
+
+	/* capinfo + status code + associd */
+	u8 assoc_resp_ie_offset = sizeof(u16) + sizeof(u16) + sizeof(u16);
+
+	u8 *assoc_req_ie = assoc_info + beacon_ie_len + assoc_req_ie_offset;
+	u8 *assoc_resp_ie = assoc_info + beacon_ie_len + assoc_req_len +
+		assoc_resp_ie_offset;
+
+	assoc_req_len -= assoc_req_ie_offset;
+	assoc_resp_len -= assoc_resp_ie_offset;
+
+	ar->auto_auth_stage = AUTH_IDLE;
+
+	if (nw_type & ADHOC_NETWORK) {
+		if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) {
+			ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+				   "%s: ath6k not in ibss mode\n", __func__);
+			return;
+		}
+	}
+
+	if (nw_type & INFRA_NETWORK) {
+		if (ar->wdev->iftype != NL80211_IFTYPE_STATION) {
+			ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+				   "%s: ath6k not in station mode\n", __func__);
+			return;
+		}
+	}
+
+	if (nw_type & ADHOC_NETWORK) {
+		capa_mask = WLAN_CAPABILITY_IBSS;
+		capa_val = WLAN_CAPABILITY_IBSS;
+	} else {
+		capa_mask = WLAN_CAPABILITY_ESS;
+		capa_val = WLAN_CAPABILITY_ESS;
+	}
+
+	/* Before informing cfg80211 of the join/connect event, make sure
+	 * that a bss entry is present in the scan list; if it is not,
+	 * construct one and insert it. Otherwise the event will be dropped
+	 * on the way by cfg80211; because of that, keys would not be
+	 * plumbed in the WEP case and the application would not be aware
+	 * of the join/connect status. */
+	bss = cfg80211_get_bss(ar->wdev->wiphy, NULL, bssid,
+			       ar->wdev->ssid, ar->wdev->ssid_len,
+			       capa_mask, capa_val);
+
+	/*
+	 * Earlier we were updating the cfg about bss by making a beacon frame
+	 * only if the entry for bss is not there. This can have some issue if
+	 * ROAM event is generated and a heavy traffic is ongoing. The ROAM
+	 * event is handled through a work queue and by the time it really gets
+	 * handled, BSS would have been aged out. So it is better to update the
+	 * cfg about BSS irrespective of its entry being present right now or
+	 * not.
+	 */
+
+	if (nw_type & ADHOC_NETWORK) {
+		/* construct 802.11 mgmt beacon */
+		if (ptr_ie_buf) {
+			*ptr_ie_buf++ = WLAN_EID_SSID;
+			*ptr_ie_buf++ = ar->ssid_len;
+			memcpy(ptr_ie_buf, ar->ssid, ar->ssid_len);
+			ptr_ie_buf += ar->ssid_len;
+
+			*ptr_ie_buf++ = WLAN_EID_IBSS_PARAMS;
+			*ptr_ie_buf++ = 2;	/* length */
+			*ptr_ie_buf++ = 0;	/* ATIM window */
+			*ptr_ie_buf++ = 0;	/* ATIM window */
+
+			/* TODO: update ibss params and include supported
+			 * rates, DS param set, extended supported rates,
+			 * WMM. */
+
+			ie_buf_len = ptr_ie_buf - ie_buf;
+		}
+
+		capability |= WLAN_CAPABILITY_IBSS;
+
+		if (ar->prwise_crypto == WEP_CRYPT)
+			capability |= WLAN_CAPABILITY_PRIVACY;
+
+		memcpy(source_mac, ar->net_dev->dev_addr, ETH_ALEN);
+		ptr_ie_buf = ie_buf;
+	} else {
+		capability = *(u16 *) (&assoc_info[beacon_ie_len]);
+		memcpy(source_mac, bssid, ETH_ALEN);
+		ptr_ie_buf = assoc_req_ie;
+		ie_buf_len = assoc_req_len;
+	}
+
+	size = offsetof(struct ieee80211_mgmt, u)
+	       + sizeof(mgmt->u.beacon)
+	       + ie_buf_len;
+
+	ieeemgmtbuf = kzalloc(size, GFP_ATOMIC);
+	if (!ieeemgmtbuf) {
+		ath6kl_err("ieee mgmt buf alloc error\n");
+		cfg80211_put_bss(bss);
+		return;
+	}
+
+	mgmt = (struct ieee80211_mgmt *)ieeemgmtbuf;
+	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+					  IEEE80211_STYPE_BEACON);
+	memset(mgmt->da, 0xff, ETH_ALEN);	/* broadcast addr */
+	memcpy(mgmt->sa, source_mac, ETH_ALEN);
+	memcpy(mgmt->bssid, bssid, ETH_ALEN);
+	mgmt->u.beacon.beacon_int = cpu_to_le16(beacon_intvl);
+	mgmt->u.beacon.capab_info = cpu_to_le16(capability);
+	memcpy(mgmt->u.beacon.variable, ptr_ie_buf, ie_buf_len);
+
+	ibss_ch = ieee80211_get_channel(ar->wdev->wiphy, (int)channel);
+
+	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+		   "%s: inform bss with bssid %pM channel %d beacon_intvl %d capability 0x%x\n",
+		   __func__, mgmt->bssid, ibss_ch->hw_value,
+		   beacon_intvl, capability);
+
+	bss = cfg80211_inform_bss_frame(ar->wdev->wiphy,
+					ibss_ch, mgmt,
+					size, signal, GFP_KERNEL);
+	kfree(ieeemgmtbuf);
+	cfg80211_put_bss(bss);
+
+	if (nw_type & ADHOC_NETWORK) {
+		cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL);
+		return;
+	}
+
+	if (ar->sme_state == SME_CONNECTING) {
+		/* inform connect result to cfg80211 */
+		ar->sme_state = SME_CONNECTED;
+		cfg80211_connect_result(ar->net_dev, bssid,
+					assoc_req_ie, assoc_req_len,
+					assoc_resp_ie, assoc_resp_len,
+					WLAN_STATUS_SUCCESS, GFP_KERNEL);
+	} else if (ar->sme_state == SME_CONNECTED) {
+		/* inform roam event to cfg80211 */
+		cfg80211_roamed(ar->net_dev, ibss_ch, bssid,
+				assoc_req_ie, assoc_req_len,
+				assoc_resp_ie, assoc_resp_len, GFP_KERNEL);
+	}
+}
+
+static int ath6kl_cfg80211_disconnect(struct wiphy *wiphy,
+				      struct net_device *dev, u16 reason_code)
+{
+	struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev);
+
+	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: reason=%u\n", __func__,
+		   reason_code);
+
+	if (!ath6kl_cfg80211_ready(ar))
+		return -EIO;
+
+	if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
+		ath6kl_err("busy, destroy in progress\n");
+		return -EBUSY;
+	}
+
+	if (down_interruptible(&ar->sem)) {
+		ath6kl_err("busy, couldn't get access\n");
+		return -ERESTARTSYS;
+	}
+
+	ar->reconnect_flag = 0;
+	ath6kl_disconnect(ar);
+	memset(ar->ssid, 0, sizeof(ar->ssid));
+	ar->ssid_len = 0;
+
+	if (!test_bit(SKIP_SCAN, &ar->flag))
+		memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+
+	up(&ar->sem);
+
+	return 0;
+}
+
+void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason,
+				      u8 *bssid, u8 assoc_resp_len,
+				      u8 *assoc_info, u16 proto_reason)
+{
+	struct ath6kl_key *key = NULL;
+	u16 status;
+
+	if (ar->scan_req) {
+		cfg80211_scan_done(ar->scan_req, true);
+		ar->scan_req = NULL;
+	}
+
+	if (ar->nw_type & ADHOC_NETWORK) {
+		if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) {
+			ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+				   "%s: ath6k not in ibss mode\n", __func__);
+			return;
+		}
+		memset(bssid, 0, ETH_ALEN);
+		cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL);
+		return;
+	}
+
+	if (ar->nw_type & INFRA_NETWORK) {
+		if (ar->wdev->iftype != NL80211_IFTYPE_STATION) {
+			ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+				   "%s: ath6k not in station mode\n", __func__);
+			return;
+		}
+	}
+
+	if (!test_bit(CONNECT_PEND, &ar->flag)) {
+		if (reason != DISCONNECT_CMD)
+			ath6kl_wmi_disconnect_cmd(ar->wmi);
+
+		return;
+	}
+
+	if (reason == NO_NETWORK_AVAIL) {
+		/* connect cmd failed */
+		ath6kl_wmi_disconnect_cmd(ar->wmi);
+		return;
+	}
+
+	if (reason != DISCONNECT_CMD)
+		return;
+
+	if (!ar->auto_auth_stage) {
+		clear_bit(CONNECT_PEND, &ar->flag);
+
+		if (ar->sme_state == SME_CONNECTING) {
+			cfg80211_connect_result(ar->net_dev,
+						bssid, NULL, 0,
+						NULL, 0,
+						WLAN_STATUS_UNSPECIFIED_FAILURE,
+						GFP_KERNEL);
+		} else {
+			cfg80211_disconnected(ar->net_dev, reason,
+					      NULL, 0, GFP_KERNEL);
+		}
+
+		ar->sme_state = SME_DISCONNECTED;
+		return;
+	}
+
+	if (ar->dot11_auth_mode != OPEN_AUTH)
+		return;
+
+	/*
+	 * If the current auth algorithm is open, try shared instead and
+	 * make autoAuthStage idle. We do not try LEAP for the time being.
+	 */
+	key = &ar->keys[ar->def_txkey_index];
+	if (down_interruptible(&ar->sem)) {
+		ath6kl_err("busy, couldn't get access\n");
+		return;
+	}
+
+	ar->dot11_auth_mode = SHARED_AUTH;
+	ar->auto_auth_stage = AUTH_IDLE;
+
+	ath6kl_wmi_addkey_cmd(ar->wmi,
+			      ar->def_txkey_index,
+			      ar->prwise_crypto,
+			      GROUP_USAGE | TX_USAGE,
+			      key->key_len, NULL,
+			      key->key,
+			      KEY_OP_INIT_VAL, NULL,
+			      NO_SYNC_WMIFLAG);
+
+	status = ath6kl_wmi_connect_cmd(ar->wmi,
+					ar->nw_type,
+					ar->dot11_auth_mode,
+					ar->auth_mode,
+					ar->prwise_crypto,
+					ar->prwise_crypto_len,
+					ar->grp_crypto,
+					ar->grp_crpto_len,
+					ar->ssid_len,
+					ar->ssid,
+					ar->req_bssid,
+					ar->ch_hint,
+					ar->connect_ctrl_flags);
+	up(&ar->sem);
+}
+
+static inline bool is_ch_11a(u16 ch)
+{
+	return (!((ch >= 2412) && (ch <= 2484)));
+}
+
+/* struct ath6kl_node_table::nt_nodelock is locked when calling this */
+void ath6kl_cfg80211_scan_node(struct wiphy *wiphy, struct bss *ni)
+{
+	u16 size;
+	unsigned char *ieeemgmtbuf = NULL;
+	struct ieee80211_mgmt *mgmt;
+	struct ieee80211_channel *channel;
+	struct ieee80211_supported_band *band;
+	struct ath6kl_common_ie *cie;
+	s32 signal;
+	int freq;
+
+	cie = &ni->ni_cie;
+
+	if (is_ch_11a(cie->ie_chan))
+		band = wiphy->bands[IEEE80211_BAND_5GHZ]; /* 11a */
+	else if ((cie->ie_erp) || (cie->ie_xrates))
+		band = wiphy->bands[IEEE80211_BAND_2GHZ]; /* 11g */
+	else
+		band = wiphy->bands[IEEE80211_BAND_2GHZ]; /* 11b */
+
+	size = ni->ni_framelen + offsetof(struct ieee80211_mgmt, u);
+	ieeemgmtbuf = kmalloc(size, GFP_ATOMIC);
+	if (!ieeemgmtbuf) {
+		ath6kl_err("ieee mgmt buf alloc error\n");
+		return;
+	}
+
+	/*
+	 * TODO: Update target to include the 802.11 mac header while
+	 * sending bss info. The target strips the 802.11 mac header from
+	 * the bss info it sends to the host, but cfg80211 needs it; for
+	 * the time being just fill in the da, sa and bssid fields.
+	 */
+	mgmt = (struct ieee80211_mgmt *)ieeemgmtbuf;
+	memset(mgmt->da, 0xff, ETH_ALEN);	/* broadcast addr */
+	memcpy(mgmt->sa, ni->ni_macaddr, ETH_ALEN);
+	memcpy(mgmt->bssid, ni->ni_macaddr, ETH_ALEN);
+	memcpy(ieeemgmtbuf + offsetof(struct ieee80211_mgmt, u),
+	       ni->ni_buf, ni->ni_framelen);
+
+	freq = cie->ie_chan;
+	channel = ieee80211_get_channel(wiphy, freq);
+	signal = ni->ni_snr * 100;
+
+	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+		   "%s: bssid %pM ch %d freq %d size %d\n", __func__,
+		   mgmt->bssid, channel->hw_value, freq, size);
+	cfg80211_inform_bss_frame(wiphy, channel, mgmt,
+				  size, signal, GFP_ATOMIC);
+
+	kfree(ieeemgmtbuf);
+}
+
+static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+				struct cfg80211_scan_request *request)
+{
+	struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+	int ret = 0;
+
+	if (!ath6kl_cfg80211_ready(ar))
+		return -EIO;
+
+	if (!ar->usr_bss_filter) {
+		if (ath6kl_wmi_bssfilter_cmd(ar->wmi,
+					     (test_bit(CONNECTED, &ar->flag) ?
+					     ALL_BUT_BSS_FILTER :
+					     ALL_BSS_FILTER), 0) != 0) {
+			ath6kl_err("couldn't set bss filtering\n");
+			return -EIO;
+		}
+	}
+
+	if (request->n_ssids && request->ssids[0].ssid_len) {
+		u8 i;
+
+		if (request->n_ssids > (MAX_PROBED_SSID_INDEX - 1))
+			request->n_ssids = MAX_PROBED_SSID_INDEX - 1;
+
+		for (i = 0; i < request->n_ssids; i++)
+			ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1,
+						  SPECIFIC_SSID_FLAG,
+						  request->ssids[i].ssid_len,
+						  request->ssids[i].ssid);
+	}
+
+	if (ath6kl_wmi_startscan_cmd(ar->wmi, WMI_LONG_SCAN, 0,
+				     false, 0, 0, 0, NULL) != 0) {
+		ath6kl_err("wmi_startscan_cmd failed\n");
+		ret = -EIO;
+	}
+
+	ar->scan_req = request;
+
+	return ret;
+}
+
+void ath6kl_cfg80211_scan_complete_event(struct ath6kl *ar, int status)
+{
+	int i;
+
+	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status %d\n", __func__, status);
+
+	if (!ar->scan_req)
+		return;
+
+	if ((status == -ECANCELED) || (status == -EBUSY)) {
+		cfg80211_scan_done(ar->scan_req, true);
+		goto out;
+	}
+
+	/* Translate data to cfg80211 mgmt format */
+	wlan_iterate_nodes(&ar->scan_table, ar->wdev->wiphy);
+
+	cfg80211_scan_done(ar->scan_req, false);
+
+	if (ar->scan_req->n_ssids && ar->scan_req->ssids[0].ssid_len) {
+		for (i = 0; i < ar->scan_req->n_ssids; i++) {
+			ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1,
+						  DISABLE_SSID_FLAG,
+						  0, NULL);
+		}
+	}
+
+out:
+	ar->scan_req = NULL;
+}
+
+static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
+				   u8 key_index, bool pairwise,
+				   const u8 *mac_addr,
+				   struct key_params *params)
+{
+	struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+	struct ath6kl_key *key = NULL;
+	u8 key_usage;
+	u8 key_type;
+	int status = 0;
+
+	if (!ath6kl_cfg80211_ready(ar))
+		return -EIO;
+
+	if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
+		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+			   "%s: key index %d out of bounds\n", __func__,
+			   key_index);
+		return -ENOENT;
+	}
+
+	key = &ar->keys[key_index];
+	memset(key, 0, sizeof(struct ath6kl_key));
+
+	if (pairwise)
+		key_usage = PAIRWISE_USAGE;
+	else
+		key_usage = GROUP_USAGE;
+
+	if (params) {
+		if (params->key_len > WLAN_MAX_KEY_LEN ||
+		    params->seq_len > sizeof(key->seq))
+			return -EINVAL;
+
+		key->key_len = params->key_len;
+		memcpy(key->key, params->key, key->key_len);
+		key->seq_len = params->seq_len;
+		memcpy(key->seq, params->seq, key->seq_len);
+		key->cipher = params->cipher;
+	}
+
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+	case WLAN_CIPHER_SUITE_WEP104:
+		key_type = WEP_CRYPT;
+		break;
+
+	case WLAN_CIPHER_SUITE_TKIP:
+		key_type = TKIP_CRYPT;
+		break;
+
+	case WLAN_CIPHER_SUITE_CCMP:
+		key_type = AES_CRYPT;
+		break;
+
+	default:
+		return -ENOTSUPP;
+	}
+
+	if (((ar->auth_mode == WPA_PSK_AUTH)
+	     || (ar->auth_mode == WPA2_PSK_AUTH))
+	    && (key_usage & GROUP_USAGE))
+		del_timer(&ar->disconnect_timer);
+
+	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+		   "%s: index %d, key_len %d, key_type 0x%x, key_usage 0x%x, seq_len %d\n",
+		   __func__, key_index, key->key_len, key_type,
+		   key_usage, key->seq_len);
+
+	ar->def_txkey_index = key_index;
+	status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index,
+				       key_type, key_usage, key->key_len,
+				       key->seq, key->key, KEY_OP_INIT_VAL,
+				       (u8 *) mac_addr, SYNC_BOTH_WMIFLAG);
+
+	if (status)
+		return -EIO;
+
+	return 0;
+}
+
+static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
+				   u8 key_index, bool pairwise,
+				   const u8 *mac_addr)
+{
+	struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+
+	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
+
+	if (!ath6kl_cfg80211_ready(ar))
+		return -EIO;
+
+	if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
+		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+			   "%s: key index %d out of bounds\n", __func__,
+			   key_index);
+		return -ENOENT;
+	}
+
+	if (!ar->keys[key_index].key_len) {
+		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
+			   "%s: index %d is empty\n", __func__, key_index);
+		return 0;
+	}
+
+	ar->keys[key_index].key_len = 0;
+
+	return ath6kl_wmi_deletekey_cmd(ar->wmi, key_index);
+}
+
961static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
962 u8 key_index, bool pairwise,
963 const u8 *mac_addr, void *cookie,
964 void (*callback) (void *cookie,
965 struct key_params *))
966{
967 struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
968 struct ath6kl_key *key = NULL;
969 struct key_params params;
970
971 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
972
973 if (!ath6kl_cfg80211_ready(ar))
974 return -EIO;
975
976 if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
977 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
978 "%s: key index %d out of bounds\n", __func__,
979 key_index);
980 return -ENOENT;
981 }
982
983 key = &ar->keys[key_index];
984 memset(&params, 0, sizeof(params));
985 params.cipher = key->cipher;
986 params.key_len = key->key_len;
987 params.seq_len = key->seq_len;
988 params.seq = key->seq;
989 params.key = key->key;
990
991 callback(cookie, &params);
992
993 return key->key_len ? 0 : -ENOENT;
994}
995
996static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy,
997 struct net_device *ndev,
998 u8 key_index, bool unicast,
999 bool multicast)
1000{
1001 struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
1002 struct ath6kl_key *key = NULL;
1003 int status = 0;
1004 u8 key_usage;
1005
1006 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
1007
1008 if (!ath6kl_cfg80211_ready(ar))
1009 return -EIO;
1010
1011 if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
1012 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
1013 "%s: key index %d out of bounds\n",
1014 __func__, key_index);
1015 return -ENOENT;
1016 }
1017
1018 if (!ar->keys[key_index].key_len) {
1019 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: invalid key index %d\n",
1020 __func__, key_index);
1021 return -EINVAL;
1022 }
1023
1024 ar->def_txkey_index = key_index;
1025 key = &ar->keys[ar->def_txkey_index];
1026 key_usage = GROUP_USAGE;
1027 if (ar->prwise_crypto == WEP_CRYPT)
1028 key_usage |= TX_USAGE;
1029
1030 status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index,
1031 ar->prwise_crypto, key_usage,
1032 key->key_len, key->seq, key->key,
1033 KEY_OP_INIT_VAL, NULL,
1034 SYNC_BOTH_WMIFLAG);
1035 if (status)
1036 return -EIO;
1037
1038 return 0;
1039}
1040
1041void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid,
1042 bool ismcast)
1043{
1044 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
1045 "%s: keyid %d, ismcast %d\n", __func__, keyid, ismcast);
1046
1047 cfg80211_michael_mic_failure(ar->net_dev, ar->bssid,
1048 (ismcast ? NL80211_KEYTYPE_GROUP :
1049 NL80211_KEYTYPE_PAIRWISE), keyid, NULL,
1050 GFP_KERNEL);
1051}
1052
1053static int ath6kl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
1054{
1055 struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
1056 int ret;
1057
1058 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: changed 0x%x\n", __func__,
1059 changed);
1060
1061 if (!ath6kl_cfg80211_ready(ar))
1062 return -EIO;
1063
1064 if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
1065 ret = ath6kl_wmi_set_rts_cmd(ar->wmi, wiphy->rts_threshold);
1066 if (ret != 0) {
1067 ath6kl_err("ath6kl_wmi_set_rts_cmd failed\n");
1068 return -EIO;
1069 }
1070 }
1071
1072 return 0;
1073}
1074
1075/*
1076 * The type nl80211_tx_power_setting replaces the earlier tx power
1077 * setting type from 2.6.36 onwards.
1078 */
1079static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
1080 enum nl80211_tx_power_setting type,
1081 int dbm)
1082{
1083 struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
1084 u8 ath6kl_dbm;
1085
1086 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x, dbm %d\n", __func__,
1087 type, dbm);
1088
1089 if (!ath6kl_cfg80211_ready(ar))
1090 return -EIO;
1091
1092 switch (type) {
1093 case NL80211_TX_POWER_AUTOMATIC:
1094 return 0;
1095 case NL80211_TX_POWER_LIMITED:
1096 ar->tx_pwr = ath6kl_dbm = dbm;
1097 break;
1098 default:
1099 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x not supported\n",
1100 __func__, type);
1101 return -EOPNOTSUPP;
1102 }
1103
1104 ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, ath6kl_dbm);
1105
1106 return 0;
1107}
1108
1109static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm)
1110{
1111 struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
1112
1113 if (!ath6kl_cfg80211_ready(ar))
1114 return -EIO;
1115
1116 if (test_bit(CONNECTED, &ar->flag)) {
1117 ar->tx_pwr = 0;
1118
1119 if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi) != 0) {
1120 ath6kl_err("ath6kl_wmi_get_tx_pwr_cmd failed\n");
1121 return -EIO;
1122 }
1123
1124 wait_event_interruptible_timeout(ar->event_wq, ar->tx_pwr != 0,
1125 5 * HZ);
1126
1127 if (signal_pending(current)) {
1128 ath6kl_err("target did not respond\n");
1129 return -EINTR;
1130 }
1131 }
1132
1133 *dbm = ar->tx_pwr;
1134 return 0;
1135}
1136
1137static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
1138 struct net_device *dev,
1139 bool pmgmt, int timeout)
1140{
1141 struct ath6kl *ar = ath6kl_priv(dev);
1142 struct wmi_power_mode_cmd mode;
1143
1144 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: pmgmt %d, timeout %d\n",
1145 __func__, pmgmt, timeout);
1146
1147 if (!ath6kl_cfg80211_ready(ar))
1148 return -EIO;
1149
1150 if (pmgmt) {
1151		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: rec power\n", __func__);
1152 mode.pwr_mode = REC_POWER;
1153 } else {
1154		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: max perf\n", __func__);
1155 mode.pwr_mode = MAX_PERF_POWER;
1156 }
1157
1158 if (ath6kl_wmi_powermode_cmd(ar->wmi, mode.pwr_mode) != 0) {
1159 ath6kl_err("wmi_powermode_cmd failed\n");
1160 return -EIO;
1161 }
1162
1163 return 0;
1164}
1165
1166static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy,
1167 struct net_device *ndev,
1168 enum nl80211_iftype type, u32 *flags,
1169 struct vif_params *params)
1170{
1171 struct ath6kl *ar = ath6kl_priv(ndev);
1172 struct wireless_dev *wdev = ar->wdev;
1173
1174 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type %u\n", __func__, type);
1175
1176 if (!ath6kl_cfg80211_ready(ar))
1177 return -EIO;
1178
1179 switch (type) {
1180 case NL80211_IFTYPE_STATION:
1181 ar->next_mode = INFRA_NETWORK;
1182 break;
1183 case NL80211_IFTYPE_ADHOC:
1184 ar->next_mode = ADHOC_NETWORK;
1185 break;
1186 default:
1187 ath6kl_err("invalid interface type %u\n", type);
1188 return -EOPNOTSUPP;
1189 }
1190
1191 wdev->iftype = type;
1192
1193 return 0;
1194}
1195
1196static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy,
1197 struct net_device *dev,
1198 struct cfg80211_ibss_params *ibss_param)
1199{
1200 struct ath6kl *ar = ath6kl_priv(dev);
1201 int status;
1202
1203 if (!ath6kl_cfg80211_ready(ar))
1204 return -EIO;
1205
1206 ar->ssid_len = ibss_param->ssid_len;
1207 memcpy(ar->ssid, ibss_param->ssid, ar->ssid_len);
1208
1209 if (ibss_param->channel)
1210 ar->ch_hint = ibss_param->channel->center_freq;
1211
1212 if (ibss_param->channel_fixed) {
1213 /*
1214 * TODO: channel_fixed: The channel should be fixed, do not
1215 * search for IBSSs to join on other channels. Target
1216 * firmware does not support this feature, needs to be
1217 * updated.
1218 */
1219 return -EOPNOTSUPP;
1220 }
1221
1222 memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
1223 if (ibss_param->bssid && !is_broadcast_ether_addr(ibss_param->bssid))
1224 memcpy(ar->req_bssid, ibss_param->bssid, sizeof(ar->req_bssid));
1225
1226 ath6kl_set_wpa_version(ar, 0);
1227
1228 status = ath6kl_set_auth_type(ar, NL80211_AUTHTYPE_OPEN_SYSTEM);
1229 if (status)
1230 return status;
1231
1232 if (ibss_param->privacy) {
1233 ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, true);
1234 ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, false);
1235 } else {
1236 ath6kl_set_cipher(ar, 0, true);
1237 ath6kl_set_cipher(ar, 0, false);
1238 }
1239
1240 ar->nw_type = ar->next_mode;
1241
1242 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
1243 "%s: connect called with authmode %d dot11 auth %d"
1244 " PW crypto %d PW crypto len %d GRP crypto %d"
1245 " GRP crypto len %d channel hint %u\n",
1246 __func__,
1247 ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto,
1248 ar->prwise_crypto_len, ar->grp_crypto,
1249 ar->grp_crpto_len, ar->ch_hint);
1250
1251 status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type,
1252 ar->dot11_auth_mode, ar->auth_mode,
1253 ar->prwise_crypto,
1254 ar->prwise_crypto_len,
1255 ar->grp_crypto, ar->grp_crpto_len,
1256 ar->ssid_len, ar->ssid,
1257 ar->req_bssid, ar->ch_hint,
1258 ar->connect_ctrl_flags);
1259 set_bit(CONNECT_PEND, &ar->flag);
1260
1261	return status;
1262}
1263
1264static int ath6kl_cfg80211_leave_ibss(struct wiphy *wiphy,
1265 struct net_device *dev)
1266{
1267 struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev);
1268
1269 if (!ath6kl_cfg80211_ready(ar))
1270 return -EIO;
1271
1272 ath6kl_disconnect(ar);
1273 memset(ar->ssid, 0, sizeof(ar->ssid));
1274 ar->ssid_len = 0;
1275
1276 return 0;
1277}
1278
1279static const u32 cipher_suites[] = {
1280 WLAN_CIPHER_SUITE_WEP40,
1281 WLAN_CIPHER_SUITE_WEP104,
1282 WLAN_CIPHER_SUITE_TKIP,
1283 WLAN_CIPHER_SUITE_CCMP,
1284};
1285
1286static bool is_rate_legacy(s32 rate)
1287{
1288 static const s32 legacy[] = { 1000, 2000, 5500, 11000,
1289 6000, 9000, 12000, 18000, 24000,
1290 36000, 48000, 54000
1291 };
1292 u8 i;
1293
1294 for (i = 0; i < ARRAY_SIZE(legacy); i++)
1295 if (rate == legacy[i])
1296 return true;
1297
1298 return false;
1299}
1300
1301static bool is_rate_ht20(s32 rate, u8 *mcs, bool *sgi)
1302{
1303 static const s32 ht20[] = { 6500, 13000, 19500, 26000, 39000,
1304 52000, 58500, 65000, 72200
1305 };
1306 u8 i;
1307
1308 for (i = 0; i < ARRAY_SIZE(ht20); i++) {
1309 if (rate == ht20[i]) {
1310 if (i == ARRAY_SIZE(ht20) - 1)
1311 /* last rate uses sgi */
1312 *sgi = true;
1313 else
1314 *sgi = false;
1315
1316 *mcs = i;
1317 return true;
1318 }
1319 }
1320 return false;
1321}
1322
1323static bool is_rate_ht40(s32 rate, u8 *mcs, bool *sgi)
1324{
1325 static const s32 ht40[] = { 13500, 27000, 40500, 54000,
1326 81000, 108000, 121500, 135000,
1327 150000
1328 };
1329 u8 i;
1330
1331 for (i = 0; i < ARRAY_SIZE(ht40); i++) {
1332 if (rate == ht40[i]) {
1333 if (i == ARRAY_SIZE(ht40) - 1)
1334 /* last rate uses sgi */
1335 *sgi = true;
1336 else
1337 *sgi = false;
1338
1339 *mcs = i;
1340 return true;
1341 }
1342 }
1343
1344 return false;
1345}
1346
1347static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
1348 u8 *mac, struct station_info *sinfo)
1349{
1350 struct ath6kl *ar = ath6kl_priv(dev);
1351 long left;
1352 bool sgi;
1353 s32 rate;
1354 int ret;
1355 u8 mcs;
1356
1357 if (memcmp(mac, ar->bssid, ETH_ALEN) != 0)
1358 return -ENOENT;
1359
1360 if (down_interruptible(&ar->sem))
1361 return -EBUSY;
1362
1363 set_bit(STATS_UPDATE_PEND, &ar->flag);
1364
1365 ret = ath6kl_wmi_get_stats_cmd(ar->wmi);
1366
1367 if (ret != 0) {
1368 up(&ar->sem);
1369 return -EIO;
1370 }
1371
1372 left = wait_event_interruptible_timeout(ar->event_wq,
1373 !test_bit(STATS_UPDATE_PEND,
1374 &ar->flag),
1375 WMI_TIMEOUT);
1376
1377 up(&ar->sem);
1378
1379 if (left == 0)
1380 return -ETIMEDOUT;
1381 else if (left < 0)
1382 return left;
1383
1384 if (ar->target_stats.rx_byte) {
1385 sinfo->rx_bytes = ar->target_stats.rx_byte;
1386 sinfo->filled |= STATION_INFO_RX_BYTES;
1387 sinfo->rx_packets = ar->target_stats.rx_pkt;
1388 sinfo->filled |= STATION_INFO_RX_PACKETS;
1389 }
1390
1391 if (ar->target_stats.tx_byte) {
1392 sinfo->tx_bytes = ar->target_stats.tx_byte;
1393 sinfo->filled |= STATION_INFO_TX_BYTES;
1394 sinfo->tx_packets = ar->target_stats.tx_pkt;
1395 sinfo->filled |= STATION_INFO_TX_PACKETS;
1396 }
1397
1398 sinfo->signal = ar->target_stats.cs_rssi;
1399 sinfo->filled |= STATION_INFO_SIGNAL;
1400
1401 rate = ar->target_stats.tx_ucast_rate;
1402
1403 if (is_rate_legacy(rate)) {
1404 sinfo->txrate.legacy = rate / 100;
1405 } else if (is_rate_ht20(rate, &mcs, &sgi)) {
1406 if (sgi) {
1407 sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1408 sinfo->txrate.mcs = mcs - 1;
1409 } else {
1410 sinfo->txrate.mcs = mcs;
1411 }
1412
1413 sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
1414 } else if (is_rate_ht40(rate, &mcs, &sgi)) {
1415 if (sgi) {
1416 sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1417 sinfo->txrate.mcs = mcs - 1;
1418 } else {
1419 sinfo->txrate.mcs = mcs;
1420 }
1421
1422 sinfo->txrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
1423 sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
1424 } else {
1425 ath6kl_warn("invalid rate: %d\n", rate);
1426 return 0;
1427 }
1428
1429 sinfo->filled |= STATION_INFO_TX_BITRATE;
1430
1431 return 0;
1432}
1433
1434static int ath6kl_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
1435 struct cfg80211_pmksa *pmksa)
1436{
1437 struct ath6kl *ar = ath6kl_priv(netdev);
1438 return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid,
1439 pmksa->pmkid, true);
1440}
1441
1442static int ath6kl_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
1443 struct cfg80211_pmksa *pmksa)
1444{
1445 struct ath6kl *ar = ath6kl_priv(netdev);
1446 return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid,
1447 pmksa->pmkid, false);
1448}
1449
1450static int ath6kl_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
1451{
1452 struct ath6kl *ar = ath6kl_priv(netdev);
1453 if (test_bit(CONNECTED, &ar->flag))
1454 return ath6kl_wmi_setpmkid_cmd(ar->wmi, ar->bssid, NULL, false);
1455 return 0;
1456}
1457
1458static struct cfg80211_ops ath6kl_cfg80211_ops = {
1459 .change_virtual_intf = ath6kl_cfg80211_change_iface,
1460 .scan = ath6kl_cfg80211_scan,
1461 .connect = ath6kl_cfg80211_connect,
1462 .disconnect = ath6kl_cfg80211_disconnect,
1463 .add_key = ath6kl_cfg80211_add_key,
1464 .get_key = ath6kl_cfg80211_get_key,
1465 .del_key = ath6kl_cfg80211_del_key,
1466 .set_default_key = ath6kl_cfg80211_set_default_key,
1467 .set_wiphy_params = ath6kl_cfg80211_set_wiphy_params,
1468 .set_tx_power = ath6kl_cfg80211_set_txpower,
1469 .get_tx_power = ath6kl_cfg80211_get_txpower,
1470 .set_power_mgmt = ath6kl_cfg80211_set_power_mgmt,
1471 .join_ibss = ath6kl_cfg80211_join_ibss,
1472 .leave_ibss = ath6kl_cfg80211_leave_ibss,
1473 .get_station = ath6kl_get_station,
1474 .set_pmksa = ath6kl_set_pmksa,
1475 .del_pmksa = ath6kl_del_pmksa,
1476 .flush_pmksa = ath6kl_flush_pmksa,
1477};
1478
1479struct wireless_dev *ath6kl_cfg80211_init(struct device *dev)
1480{
1481 int ret = 0;
1482 struct wireless_dev *wdev;
1483
1484 wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
1485 if (!wdev) {
1486 ath6kl_err("couldn't allocate wireless device\n");
1487 return NULL;
1488 }
1489
1490 /* create a new wiphy for use with cfg80211 */
1491 wdev->wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl));
1492 if (!wdev->wiphy) {
1493 ath6kl_err("couldn't allocate wiphy device\n");
1494 kfree(wdev);
1495 return NULL;
1496 }
1497
1498 /* set device pointer for wiphy */
1499 set_wiphy_dev(wdev->wiphy, dev);
1500
1501 wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
1502 BIT(NL80211_IFTYPE_ADHOC);
1503 /* max num of ssids that can be probed during scanning */
1504 wdev->wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX;
1505 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
1506 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;
1507 wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
1508
1509 wdev->wiphy->cipher_suites = cipher_suites;
1510 wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
1511
1512 ret = wiphy_register(wdev->wiphy);
1513 if (ret < 0) {
1514 ath6kl_err("couldn't register wiphy device\n");
1515 wiphy_free(wdev->wiphy);
1516 kfree(wdev);
1517 return NULL;
1518 }
1519
1520 return wdev;
1521}
1522
1523void ath6kl_cfg80211_deinit(struct ath6kl *ar)
1524{
1525 struct wireless_dev *wdev = ar->wdev;
1526
1527 if (ar->scan_req) {
1528 cfg80211_scan_done(ar->scan_req, true);
1529 ar->scan_req = NULL;
1530 }
1531
1532 if (!wdev)
1533 return;
1534
1535 wiphy_unregister(wdev->wiphy);
1536 wiphy_free(wdev->wiphy);
1537 kfree(wdev);
1538}
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h
new file mode 100644
index 000000000000..a84adc249c61
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h
@@ -0,0 +1,39 @@
1/*
2 * Copyright (c) 2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef ATH6KL_CFG80211_H
18#define ATH6KL_CFG80211_H
19
20struct wireless_dev *ath6kl_cfg80211_init(struct device *dev);
21void ath6kl_cfg80211_deinit(struct ath6kl *ar);
22
23void ath6kl_cfg80211_scan_complete_event(struct ath6kl *ar, int status);
24
25void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
26 u8 *bssid, u16 listen_intvl,
27 u16 beacon_intvl,
28 enum network_type nw_type,
29 u8 beacon_ie_len, u8 assoc_req_len,
30 u8 assoc_resp_len, u8 *assoc_info);
31
32void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason,
33 u8 *bssid, u8 assoc_resp_len,
34 u8 *assoc_info, u16 proto_reason);
35
36void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid,
37 bool ismcast);
38
39#endif /* ATH6KL_CFG80211_H */
diff --git a/drivers/net/wireless/ath/ath6kl/common.h b/drivers/net/wireless/ath/ath6kl/common.h
new file mode 100644
index 000000000000..6b0d45642fe3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/common.h
@@ -0,0 +1,180 @@
1/*
2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef COMMON_H
18#define COMMON_H
19
20#include <linux/netdevice.h>
21
22#define ATH6KL_MAX_IE 256
23
24extern int ath6kl_printk(const char *level, const char *fmt, ...);
25
26#define A_CACHE_LINE_PAD 128
27
28/*
29 * Reflects the version of binary interface exposed by ATH6KL target
30 * firmware. Needs to be incremented by 1 for any change in the firmware
31 * that requires upgrade of the driver on the host side for the change to
32 * work correctly
33 */
34#define ATH6KL_ABI_VERSION 1
35
36#define SIGNAL_QUALITY_METRICS_NUM_MAX 2
37
38enum {
39 SIGNAL_QUALITY_METRICS_SNR = 0,
40 SIGNAL_QUALITY_METRICS_RSSI,
41 SIGNAL_QUALITY_METRICS_ALL,
42};
43
44/*
45 * Data Path
46 */
47
48#define WMI_MAX_TX_DATA_FRAME_LENGTH \
49 (1500 + sizeof(struct wmi_data_hdr) + \
50 sizeof(struct ethhdr) + \
51 sizeof(struct ath6kl_llc_snap_hdr))
52
53/* An A-MSDU frame; the max A-MSDU length of AR6003 is 3839 bytes */
54#define WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH \
55 (3840 + sizeof(struct wmi_data_hdr) + \
56 sizeof(struct ethhdr) + \
57 sizeof(struct ath6kl_llc_snap_hdr))
58
59#define EPPING_ALIGNMENT_PAD \
60 (((sizeof(struct htc_frame_hdr) + 3) & (~0x3)) \
61 - sizeof(struct htc_frame_hdr))
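/*
 * Editorial sketch, not part of the original patch: the EPPING pad just
 * rounds struct htc_frame_hdr up to a 4-byte boundary. Assuming the HTC
 * frame header is 6 bytes long, the macro above works out to:
 *
 *   ((6 + 3) & ~0x3) - 6  ==  8 - 6  ==  2 bytes of padding
 */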
62
63struct ath6kl_llc_snap_hdr {
64 u8 dsap;
65 u8 ssap;
66 u8 cntl;
67 u8 org_code[3];
68 __be16 eth_type;
69} __packed;
70
71enum crypto_type {
72 NONE_CRYPT = 0x01,
73 WEP_CRYPT = 0x02,
74 TKIP_CRYPT = 0x04,
75 AES_CRYPT = 0x08,
76};
77
78#define ATH6KL_NODE_HASHSIZE 32
79/* a simple hash is enough to spread varying MAC addresses */
80#define ATH6KL_NODE_HASH(addr) \
81 (((const u8 *)(addr))[ETH_ALEN - 1] % \
82 ATH6KL_NODE_HASHSIZE)
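/*
 * Editorial example, not part of the original patch: the hash keys on
 * the last octet of the MAC address, so a node with address
 * 00:03:7f:aa:bb:2a lands in bucket 0x2a % 32 == 10.
 */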
83
84/*
85 * Table of ath6kl_node instances. Each ieee80211com
86 * has at least one for holding the scan candidates.
87 * When operating as an access point or in ibss mode there
88 * is a second table for associated stations or neighbors.
89 */
90struct ath6kl_node_table {
91 spinlock_t nt_nodelock; /* on node table */
92 struct bss *nt_node_first; /* information of all nodes */
93 struct bss *nt_node_last; /* information of all nodes */
94 struct bss *nt_hash[ATH6KL_NODE_HASHSIZE];
95 const char *nt_name; /* for debugging */
96 u32 nt_node_age; /* node aging time */
97};
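/*
 * Editorial sketch, not part of the original patch: walking the node
 * list under its lock. The visit() helper is hypothetical.
 */
#if 0 /* illustrative only */
	struct bss *ni;

	spin_lock_bh(&nt->nt_nodelock);
	for (ni = nt->nt_node_first; ni; ni = ni->ni_list_next)
		visit(ni);
	spin_unlock_bh(&nt->nt_nodelock);
#endif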
98
99#define WLAN_NODE_INACT_TIMEOUT_MSEC 120000
100#define WLAN_NODE_INACT_CNT 4
101
102struct ath6kl_common_ie {
103 u16 ie_chan;
104 u8 *ie_tstamp;
105 u8 *ie_ssid;
106 u8 *ie_rates;
107 u8 *ie_xrates;
108 u8 *ie_country;
109 u8 *ie_wpa;
110 u8 *ie_rsn;
111 u8 *ie_wmm;
112 u8 *ie_ath;
113 u16 ie_capInfo;
114 u16 ie_beaconInt;
115 u8 *ie_tim;
116 u8 *ie_chswitch;
117 u8 ie_erp;
118 u8 *ie_wsc;
119 u8 *ie_htcap;
120 u8 *ie_htop;
121};
122
123struct bss {
124 u8 ni_macaddr[ETH_ALEN];
125 u8 ni_snr;
126 s16 ni_rssi;
127 struct bss *ni_list_next;
128 struct bss *ni_list_prev;
129 struct bss *ni_hash_next;
130 struct bss *ni_hash_prev;
131 struct ath6kl_common_ie ni_cie;
132 u8 *ni_buf;
133 u16 ni_framelen;
134 struct ath6kl_node_table *ni_table;
135 u32 ni_refcnt;
136
137 u32 ni_tstamp;
138 u32 ni_actcnt;
139};
140
141struct htc_endpoint_credit_dist;
142struct ath6kl;
143enum htc_credit_dist_reason;
144struct htc_credit_state_info;
145
146struct bss *wlan_node_alloc(int wh_size);
147void wlan_node_free(struct bss *ni);
148void wlan_setup_node(struct ath6kl_node_table *nt, struct bss *ni,
149 const u8 *mac_addr);
150struct bss *wlan_find_node(struct ath6kl_node_table *nt,
151 const u8 *mac_addr);
152void wlan_node_reclaim(struct ath6kl_node_table *nt, struct bss *ni);
153void wlan_free_allnodes(struct ath6kl_node_table *nt);
154void wlan_iterate_nodes(struct ath6kl_node_table *nt, void *arg);
155
156void wlan_node_table_init(struct ath6kl_node_table *nt);
157void wlan_node_table_cleanup(struct ath6kl_node_table *nt);
158
159void wlan_refresh_inactive_nodes(struct ath6kl *ar);
160
161struct bss *wlan_find_ssid_node(struct ath6kl_node_table *nt, u8 *ssid,
162 u32 ssid_len, bool is_wpa2, bool match_ssid);
163
164void wlan_node_return(struct ath6kl_node_table *nt, struct bss *ni);
165
166int ath6k_setup_credit_dist(void *htc_handle,
167 struct htc_credit_state_info *cred_info);
168void ath6k_credit_distribute(struct htc_credit_state_info *cred_inf,
169 struct list_head *epdist_list,
170 enum htc_credit_dist_reason reason);
171void ath6k_credit_init(struct htc_credit_state_info *cred_inf,
172 struct list_head *ep_list,
173 int tot_credits);
174void ath6k_seek_credits(struct htc_credit_state_info *cred_inf,
175 struct htc_endpoint_credit_dist *ep_dist);
176struct ath6kl *ath6kl_core_alloc(struct device *sdev);
177int ath6kl_core_init(struct ath6kl *ar);
178int ath6kl_unavail_ev(struct ath6kl *ar);
179struct sk_buff *ath6kl_buf_alloc(int size);
180#endif /* COMMON_H */
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
new file mode 100644
index 000000000000..74170229523f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -0,0 +1,544 @@
1/*
2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef CORE_H
18#define CORE_H
19
20#include <linux/etherdevice.h>
21#include <linux/rtnetlink.h>
22#include <linux/firmware.h>
23#include <linux/sched.h>
24#include <net/cfg80211.h>
25#include "htc.h"
26#include "wmi.h"
27#include "bmi.h"
28
29#define MAX_ATH6KL 1
30#define ATH6KL_MAX_RX_BUFFERS 16
31#define ATH6KL_BUFFER_SIZE 1664
32#define ATH6KL_MAX_AMSDU_RX_BUFFERS 4
33#define ATH6KL_AMSDU_REFILL_THRESHOLD 3
34#define ATH6KL_AMSDU_BUFFER_SIZE (WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH + 128)
35#define MAX_MSDU_SUBFRAME_PAYLOAD_LEN 1508
36#define MIN_MSDU_SUBFRAME_PAYLOAD_LEN 46
37
38#define USER_SAVEDKEYS_STAT_INIT 0
39#define USER_SAVEDKEYS_STAT_RUN 1
40
41#define ATH6KL_TX_TIMEOUT 10
42#define ATH6KL_MAX_ENDPOINTS 4
43#define MAX_NODE_NUM 15
44
45/* MAX_HI_COOKIE_NUM cookies are reserved for high priority traffic */
46#define MAX_DEF_COOKIE_NUM 180
47#define MAX_HI_COOKIE_NUM 18 /* 10% of MAX_DEF_COOKIE_NUM */
48#define MAX_COOKIE_NUM (MAX_DEF_COOKIE_NUM + MAX_HI_COOKIE_NUM)
49
50#define MAX_DEFAULT_SEND_QUEUE_DEPTH (MAX_DEF_COOKIE_NUM / WMM_NUM_AC)
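/*
 * Editorial note, not part of the original patch: with WMM_NUM_AC == 4
 * this works out to 180 / 4 == 45 pending sends per access class, out
 * of a total pool of MAX_COOKIE_NUM == 180 + 18 == 198 cookies.
 */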
51
52#define DISCON_TIMER_INTVAL 10000 /* in msec */
53#define A_DEFAULT_LISTEN_INTERVAL 100
54#define A_MAX_WOW_LISTEN_INTERVAL 1000
55
56/* AR6003 1.0 definitions */
57#define AR6003_REV1_VERSION 0x300002ba
58
59/* AR6003 2.0 definitions */
60#define AR6003_REV2_VERSION 0x30000384
61#define AR6003_REV2_PATCH_DOWNLOAD_ADDRESS 0x57e910
62#define AR6003_REV2_OTP_FILE "ath6k/AR6003/hw2.0/otp.bin.z77"
63#define AR6003_REV2_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athwlan.bin.z77"
64#define AR6003_REV2_PATCH_FILE "ath6k/AR6003/hw2.0/data.patch.bin"
65#define AR6003_REV2_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.bin"
66#define AR6003_REV2_DEFAULT_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.SD31.bin"
67
68/* AR6003 3.0 definitions */
69#define AR6003_REV3_VERSION 0x30000582
70#define AR6003_REV3_OTP_FILE "ath6k/AR6003/hw2.1.1/otp.bin"
71#define AR6003_REV3_FIRMWARE_FILE "ath6k/AR6003/hw2.1.1/athwlan.bin"
72#define AR6003_REV3_PATCH_FILE "ath6k/AR6003/hw2.1.1/data.patch.bin"
73#define AR6003_REV3_BOARD_DATA_FILE "ath6k/AR6003/hw2.1.1/bdata.bin"
74#define AR6003_REV3_DEFAULT_BOARD_DATA_FILE \
75 "ath6k/AR6003/hw2.1.1/bdata.SD31.bin"
76
77/* Per STA data, used in AP mode */
78#define STA_PS_AWAKE BIT(0)
79#define STA_PS_SLEEP BIT(1)
80#define STA_PS_POLLED BIT(2)
81
82/* HTC TX packet tagging definitions */
83#define ATH6KL_CONTROL_PKT_TAG HTC_TX_PACKET_TAG_USER_DEFINED
84#define ATH6KL_DATA_PKT_TAG (ATH6KL_CONTROL_PKT_TAG + 1)
85
86#define AR6003_CUST_DATA_SIZE 16
87
88#define AGGR_WIN_IDX(x, y) ((x) % (y))
89#define AGGR_INCR_IDX(x, y) AGGR_WIN_IDX(((x) + 1), (y))
90#define AGGR_DCRM_IDX(x, y) AGGR_WIN_IDX(((x) - 1), (y))
91#define ATH6KL_MAX_SEQ_NO 0xFFF
92#define ATH6KL_NEXT_SEQ_NO(x) (((x) + 1) & ATH6KL_MAX_SEQ_NO)
93
94#define NUM_OF_TIDS 8
95#define AGGR_SZ_DEFAULT 8
96
97#define AGGR_WIN_SZ_MIN 2
98#define AGGR_WIN_SZ_MAX 8
99
100#define TID_WINDOW_SZ(_x) ((_x) << 1)
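/*
 * Editorial sketch of the aggregation arithmetic above, not part of the
 * original patch. For the largest window, win_sz == AGGR_WIN_SZ_MAX (8):
 *
 *   TID_WINDOW_SZ(8)          == 16  (hold queue entries)
 *   AGGR_INCR_IDX(7, 8)       == 0   (window index wraps modulo win_sz)
 *   ATH6KL_NEXT_SEQ_NO(0xFFF) == 0   (12-bit sequence number wraps)
 */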
101
102#define AGGR_NUM_OF_FREE_NETBUFS 16
103
104#define AGGR_RX_TIMEOUT 400 /* in ms */
105
106#define WMI_TIMEOUT (2 * HZ)
107
108#define MBOX_YIELD_LIMIT 99
109
110/* configuration flags */
111/*
112 * ATH6KL_CONF_IGNORE_ERP_BARKER: Ignore the Barker preamble in
113 * the ERP IE of the beacon when determining short preamble support
114 * while sending the (Re)Assoc req.
115 * ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN: Don't send to the host
116 * the power module state transition failure events that happen
117 * during scan.
118 */
119#define ATH6KL_CONF_IGNORE_ERP_BARKER BIT(0)
120#define ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN BIT(1)
121#define ATH6KL_CONF_ENABLE_11N BIT(2)
122#define ATH6KL_CONF_ENABLE_TX_BURST BIT(3)
123
124enum wlan_low_pwr_state {
125 WLAN_POWER_STATE_ON,
126 WLAN_POWER_STATE_CUT_PWR,
127 WLAN_POWER_STATE_DEEP_SLEEP,
128 WLAN_POWER_STATE_WOW
129};
130
131enum sme_state {
132 SME_DISCONNECTED,
133 SME_CONNECTING,
134 SME_CONNECTED
135};
136
137struct skb_hold_q {
138 struct sk_buff *skb;
139 bool is_amsdu;
140 u16 seq_no;
141};
142
143struct rxtid {
144 bool aggr;
145 bool progress;
146 bool timer_mon;
147 u16 win_sz;
148 u16 seq_next;
149 u32 hold_q_sz;
150 struct skb_hold_q *hold_q;
151 struct sk_buff_head q;
152 spinlock_t lock;
153};
154
155struct rxtid_stats {
156 u32 num_into_aggr;
157 u32 num_dups;
158 u32 num_oow;
159 u32 num_mpdu;
160 u32 num_amsdu;
161 u32 num_delivered;
162 u32 num_timeouts;
163 u32 num_hole;
164 u32 num_bar;
165};
166
167struct aggr_info {
168 u8 aggr_sz;
169 u8 timer_scheduled;
170 struct timer_list timer;
171 struct net_device *dev;
172 struct rxtid rx_tid[NUM_OF_TIDS];
173 struct sk_buff_head free_q;
174 struct rxtid_stats stat[NUM_OF_TIDS];
175};
176
177struct ath6kl_wep_key {
178 u8 key_index;
179 u8 key_len;
180 u8 key[64];
181};
182
183#define ATH6KL_KEY_SEQ_LEN 8
184
185struct ath6kl_key {
186 u8 key[WLAN_MAX_KEY_LEN];
187 u8 key_len;
188 u8 seq[ATH6KL_KEY_SEQ_LEN];
189 u8 seq_len;
190 u32 cipher;
191};
192
193struct ath6kl_node_mapping {
194 u8 mac_addr[ETH_ALEN];
195 u8 ep_id;
196 u8 tx_pend;
197};
198
199struct ath6kl_cookie {
200 struct sk_buff *skb;
201 u32 map_no;
202 struct htc_packet htc_pkt;
203 struct ath6kl_cookie *arc_list_next;
204};
205
206struct ath6kl_sta {
207 u16 sta_flags;
208 u8 mac[ETH_ALEN];
209 u8 aid;
210 u8 keymgmt;
211 u8 ucipher;
212 u8 auth;
213 u8 wpa_ie[ATH6KL_MAX_IE];
214 struct sk_buff_head psq;
215 spinlock_t psq_lock;
216};
217
218struct ath6kl_version {
219 u32 target_ver;
220 u32 wlan_ver;
221 u32 abi_ver;
222};
223
224struct ath6kl_bmi {
225 u32 cmd_credits;
226 bool done_sent;
227 u8 *cmd_buf;
228};
229
230struct target_stats {
231 u64 tx_pkt;
232 u64 tx_byte;
233 u64 tx_ucast_pkt;
234 u64 tx_ucast_byte;
235 u64 tx_mcast_pkt;
236 u64 tx_mcast_byte;
237 u64 tx_bcast_pkt;
238 u64 tx_bcast_byte;
239 u64 tx_rts_success_cnt;
240 u64 tx_pkt_per_ac[4];
241
242 u64 tx_err;
243 u64 tx_fail_cnt;
244 u64 tx_retry_cnt;
245 u64 tx_mult_retry_cnt;
246 u64 tx_rts_fail_cnt;
247
248 u64 rx_pkt;
249 u64 rx_byte;
250 u64 rx_ucast_pkt;
251 u64 rx_ucast_byte;
252 u64 rx_mcast_pkt;
253 u64 rx_mcast_byte;
254 u64 rx_bcast_pkt;
255 u64 rx_bcast_byte;
256 u64 rx_frgment_pkt;
257
258 u64 rx_err;
259 u64 rx_crc_err;
260 u64 rx_key_cache_miss;
261 u64 rx_decrypt_err;
262 u64 rx_dupl_frame;
263
264 u64 tkip_local_mic_fail;
265 u64 tkip_cnter_measures_invoked;
266 u64 tkip_replays;
267 u64 tkip_fmt_err;
268 u64 ccmp_fmt_err;
269 u64 ccmp_replays;
270
271 u64 pwr_save_fail_cnt;
272
273 u64 cs_bmiss_cnt;
274 u64 cs_low_rssi_cnt;
275 u64 cs_connect_cnt;
276 u64 cs_discon_cnt;
277
278 s32 tx_ucast_rate;
279 s32 rx_ucast_rate;
280
281 u32 lq_val;
282
283 u32 wow_pkt_dropped;
284 u16 wow_evt_discarded;
285
286 s16 noise_floor_calib;
287 s16 cs_rssi;
288 s16 cs_ave_beacon_rssi;
289 u8 cs_ave_beacon_snr;
290 u8 cs_last_roam_msec;
291 u8 cs_snr;
292
293 u8 wow_host_pkt_wakeups;
294 u8 wow_host_evt_wakeups;
295
296 u32 arp_received;
297 u32 arp_matched;
298 u32 arp_replied;
299};
300
301struct ath6kl_mbox_info {
302 u32 htc_addr;
303 u32 htc_ext_addr;
304 u32 htc_ext_sz;
305
306 u32 block_size;
307
308 u32 gmbox_addr;
309
310 u32 gmbox_sz;
311};
312
313/*
314 * 802.11i defines an extended IV for use with non-WEP ciphers.
315 * When the EXTIV bit is set in the key id byte an additional
316 * 4 bytes immediately follow the IV for TKIP. For CCMP the
317 * EXTIV bit is likewise set but the 8 bytes represent the
318 * CCMP header rather than IV+extended-IV.
319 */
320
321#define ATH6KL_KEYBUF_SIZE 16
322#define ATH6KL_MICBUF_SIZE (8+8) /* space for both tx and rx */
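/*
 * Editorial note, not part of the original patch: a TKIP key therefore
 * occupies up to ATH6KL_KEYBUF_SIZE (16) bytes of key material plus
 * ATH6KL_MICBUF_SIZE (8 + 8) bytes of tx and rx MIC keys in the
 * ik_keydata buffer below.
 */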
323
324#define ATH6KL_KEY_XMIT 0x01
325#define ATH6KL_KEY_RECV 0x02
326#define ATH6KL_KEY_DEFAULT 0x80 /* default xmit key */
327
328/*
329 * WPA/RSN get/set key request. Specify the key/cipher
330 * type and whether the key is to be used for sending and/or
331 * receiving. The key index should be set only when working
332 * with global keys (use IEEE80211_KEYIX_NONE for ``no index'').
333 * Otherwise a unicast/pairwise key is specified by the bssid
334 * (on a station) or mac address (on an ap). The key length
335 * must include any MIC key data; otherwise it should be no
336 * more than ATH6KL_KEYBUF_SIZE.
337 */
338struct ath6kl_req_key {
339 u8 ik_type; /* key/cipher type */
340 u8 ik_pad;
341 u16 ik_keyix; /* key index */
342 u8 ik_keylen; /* key length in bytes */
343 u8 ik_flags;
344 u8 ik_macaddr[ETH_ALEN];
345 u64 ik_keyrsc; /* key receive sequence counter */
346 u64 ik_keytsc; /* key transmit sequence counter */
347 u8 ik_keydata[ATH6KL_KEYBUF_SIZE + ATH6KL_MICBUF_SIZE];
348};
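/*
 * Minimal usage sketch, not part of the original patch: filling the
 * request for a hypothetical pairwise AES (CCMP) key. The helper name
 * and values are illustrative only.
 */
#if 0 /* illustrative only */
static void example_fill_req_key(struct ath6kl_req_key *ik,
				 const u8 *peer_mac, const u8 *key,
				 u8 key_len)
{
	memset(ik, 0, sizeof(*ik));
	ik->ik_type = AES_CRYPT;	/* key/cipher type */
	ik->ik_keyix = 0;		/* pairwise keys use index 0 */
	ik->ik_keylen = key_len;	/* includes any MIC key data */
	ik->ik_flags = ATH6KL_KEY_XMIT | ATH6KL_KEY_RECV;
	memcpy(ik->ik_macaddr, peer_mac, ETH_ALEN);
	memcpy(ik->ik_keydata, key, key_len);
}
#endif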
349
350/* Flag info */
351#define WMI_ENABLED 0
352#define WMI_READY 1
353#define CONNECTED 2
354#define STATS_UPDATE_PEND 3
355#define CONNECT_PEND 4
356#define WMM_ENABLED 5
357#define NETQ_STOPPED 6
358#define WMI_CTRL_EP_FULL 7
359#define DTIM_EXPIRED 8
360#define DESTROY_IN_PROGRESS 9
361#define NETDEV_REGISTERED 10
362#define SKIP_SCAN 11
363#define WLAN_ENABLED 12
364
365struct ath6kl {
366 struct device *dev;
367 struct net_device *net_dev;
368 struct ath6kl_bmi bmi;
369 const struct ath6kl_hif_ops *hif_ops;
370 struct wmi *wmi;
371 int tx_pending[ENDPOINT_MAX];
372 int total_tx_data_pend;
373 struct htc_target *htc_target;
374 void *hif_priv;
375 spinlock_t lock;
376 struct semaphore sem;
377 int ssid_len;
378 u8 ssid[IEEE80211_MAX_SSID_LEN];
379 u8 next_mode;
380 u8 nw_type;
381 u8 dot11_auth_mode;
382 u8 auth_mode;
383 u8 prwise_crypto;
384 u8 prwise_crypto_len;
385 u8 grp_crypto;
386 u8 grp_crpto_len;
387 u8 def_txkey_index;
388 struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
389 u8 bssid[ETH_ALEN];
390 u8 req_bssid[ETH_ALEN];
391 u16 ch_hint;
392 u16 bss_ch;
393 u16 listen_intvl_b;
394 u16 listen_intvl_t;
395 struct ath6kl_version version;
396 u32 target_type;
397 u8 tx_pwr;
398 struct net_device_stats net_stats;
399 struct target_stats target_stats;
400 struct ath6kl_node_mapping node_map[MAX_NODE_NUM];
401 u8 ibss_ps_enable;
402 u8 node_num;
403 u8 next_ep_id;
404 struct ath6kl_cookie *cookie_list;
405 u32 cookie_count;
406 enum htc_endpoint_id ac2ep_map[WMM_NUM_AC];
407 bool ac_stream_active[WMM_NUM_AC];
408 u8 ac_stream_pri_map[WMM_NUM_AC];
409 u8 hiac_stream_active_pri;
410 u8 ep2ac_map[ENDPOINT_MAX];
411 enum htc_endpoint_id ctrl_ep;
412 struct htc_credit_state_info credit_state_info;
413 u32 connect_ctrl_flags;
414 u32 user_key_ctrl;
415 u8 usr_bss_filter;
416 struct ath6kl_sta sta_list[AP_MAX_NUM_STA];
417 u8 sta_list_index;
418 struct ath6kl_req_key ap_mode_bkey;
419 struct sk_buff_head mcastpsq;
420 spinlock_t mcastpsq_lock;
421 u8 intra_bss;
422 struct aggr_info *aggr_cntxt;
423 struct wmi_ap_mode_stat ap_stats;
424 u8 ap_country_code[3];
425 struct list_head amsdu_rx_buffer_queue;
426 struct timer_list disconnect_timer;
427 u8 rx_meta_ver;
428 struct wireless_dev *wdev;
429 struct cfg80211_scan_request *scan_req;
430 struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
431 enum sme_state sme_state;
432 enum wlan_low_pwr_state wlan_pwr_state;
433 struct wmi_scan_params_cmd sc_params;
434#define AR_MCAST_FILTER_MAC_ADDR_SIZE 4
435 u8 auto_auth_stage;
436
437 u16 conf_flags;
438 wait_queue_head_t event_wq;
439 struct ath6kl_mbox_info mbox_info;
440
441 struct ath6kl_cookie cookie_mem[MAX_COOKIE_NUM];
442 int reconnect_flag;
443 unsigned long flag;
444
445 u8 *fw_board;
446 size_t fw_board_len;
447
448 u8 *fw_otp;
449 size_t fw_otp_len;
450
451 u8 *fw;
452 size_t fw_len;
453
454 u8 *fw_patch;
455 size_t fw_patch_len;
456
457 struct workqueue_struct *ath6kl_wq;
458
459 struct ath6kl_node_table scan_table;
460};
461
462static inline void *ath6kl_priv(struct net_device *dev)
463{
464 return wdev_priv(dev->ieee80211_ptr);
465}
466
467static inline void ath6kl_deposit_credit_to_ep(struct htc_credit_state_info
468 *cred_info,
469 struct htc_endpoint_credit_dist
470 *ep_dist, int credits)
471{
472 ep_dist->credits += credits;
473 ep_dist->cred_assngd += credits;
474 cred_info->cur_free_credits -= credits;
475}
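/*
 * Editorial sketch, not part of the original patch: a credit
 * distribution callback would typically hand out credits through the
 * helper above, after checking that enough free credits remain.
 */
#if 0 /* illustrative only */
	if (cred_info->cur_free_credits >= ep_dist->cred_min)
		ath6kl_deposit_credit_to_ep(cred_info, ep_dist,
					    ep_dist->cred_min);
#endif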
476
477void ath6kl_destroy(struct net_device *dev, unsigned int unregister);
478int ath6kl_configure_target(struct ath6kl *ar);
479void ath6kl_detect_error(unsigned long ptr);
480void disconnect_timer_handler(unsigned long ptr);
481void init_netdev(struct net_device *dev);
482void ath6kl_cookie_init(struct ath6kl *ar);
483void ath6kl_cookie_cleanup(struct ath6kl *ar);
484void ath6kl_rx(struct htc_target *target, struct htc_packet *packet);
485void ath6kl_tx_complete(void *context, struct list_head *packet_queue);
486enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
487 struct htc_packet *packet);
488void ath6kl_stop_txrx(struct ath6kl *ar);
489void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar);
490int ath6kl_access_datadiag(struct ath6kl *ar, u32 address,
491 u8 *data, u32 length, bool read);
492int ath6kl_read_reg_diag(struct ath6kl *ar, u32 *address, u32 *data);
493void ath6kl_init_profile_info(struct ath6kl *ar);
494void ath6kl_tx_data_cleanup(struct ath6kl *ar);
495void ath6kl_stop_endpoint(struct net_device *dev, bool keep_profile,
496 bool get_dbglogs);
497
498struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar);
499void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie);
500int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev);
501
502struct aggr_info *aggr_init(struct net_device *dev);
503void ath6kl_rx_refill(struct htc_target *target,
504 enum htc_endpoint_id endpoint);
505void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count);
506struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
507 enum htc_endpoint_id endpoint,
508 int len);
509void aggr_module_destroy(struct aggr_info *aggr_info);
510void aggr_reset_state(struct aggr_info *aggr_info);
511
512struct ath6kl_sta *ath6kl_find_sta(struct ath6kl *ar, u8 *node_addr);
513struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid);
514
515void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver);
516int ath6kl_control_tx(void *devt, struct sk_buff *skb,
517 enum htc_endpoint_id eid);
518void ath6kl_connect_event(struct ath6kl *ar, u16 channel,
519 u8 *bssid, u16 listen_int,
520 u16 beacon_int, enum network_type net_type,
521 u8 beacon_ie_len, u8 assoc_req_len,
522 u8 assoc_resp_len, u8 *assoc_info);
523void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason,
524 u8 *bssid, u8 assoc_resp_len,
525 u8 *assoc_info, u16 prot_reason_status);
526void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast);
527void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr);
528void ath6kl_scan_complete_evt(struct ath6kl *ar, int status);
529void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len);
530void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active);
531enum htc_endpoint_id ath6kl_ac2_endpoint_id(void *devt, u8 ac);
532
533void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid);
534
535void ath6kl_dtimexpiry_event(struct ath6kl *ar);
536void ath6kl_disconnect(struct ath6kl *ar);
537void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid);
538void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no,
539 u8 win_sz);
540void ath6kl_wakeup_event(void *dev);
541void ath6kl_target_failure(struct ath6kl *ar);
542
543void ath6kl_cfg80211_scan_node(struct wiphy *wiphy, struct bss *ni);
544#endif /* CORE_H */
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
new file mode 100644
index 000000000000..316136c8b903
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -0,0 +1,150 @@
1/*
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "debug.h"
19
20int ath6kl_printk(const char *level, const char *fmt, ...)
21{
22 struct va_format vaf;
23 va_list args;
24 int rtn;
25
26 va_start(args, fmt);
27
28 vaf.fmt = fmt;
29 vaf.va = &args;
30
31 rtn = printk("%sath6kl: %pV", level, &vaf);
32
33 va_end(args);
34
35 return rtn;
36}
37
38#ifdef CONFIG_ATH6KL_DEBUG
39void ath6kl_dump_registers(struct ath6kl_device *dev,
40 struct ath6kl_irq_proc_registers *irq_proc_reg,
41 struct ath6kl_irq_enable_reg *irq_enable_reg)
42{
43
44	ath6kl_dbg(ATH6KL_DBG_ANY, "<------- Register Table -------->\n");
45
46 if (irq_proc_reg != NULL) {
47 ath6kl_dbg(ATH6KL_DBG_ANY,
48 "Host Int status: 0x%x\n",
49 irq_proc_reg->host_int_status);
50 ath6kl_dbg(ATH6KL_DBG_ANY,
51 "CPU Int status: 0x%x\n",
52 irq_proc_reg->cpu_int_status);
53 ath6kl_dbg(ATH6KL_DBG_ANY,
54 "Error Int status: 0x%x\n",
55 irq_proc_reg->error_int_status);
56 ath6kl_dbg(ATH6KL_DBG_ANY,
57 "Counter Int status: 0x%x\n",
58 irq_proc_reg->counter_int_status);
59 ath6kl_dbg(ATH6KL_DBG_ANY,
60 "Mbox Frame: 0x%x\n",
61 irq_proc_reg->mbox_frame);
62 ath6kl_dbg(ATH6KL_DBG_ANY,
63 "Rx Lookahead Valid: 0x%x\n",
64 irq_proc_reg->rx_lkahd_valid);
65 ath6kl_dbg(ATH6KL_DBG_ANY,
66 "Rx Lookahead 0: 0x%x\n",
67 irq_proc_reg->rx_lkahd[0]);
68 ath6kl_dbg(ATH6KL_DBG_ANY,
69 "Rx Lookahead 1: 0x%x\n",
70 irq_proc_reg->rx_lkahd[1]);
71
72 if (dev->ar->mbox_info.gmbox_addr != 0) {
73 /*
74 * If the target supports GMBOX hardware, dump some
75 * additional state.
76 */
77 ath6kl_dbg(ATH6KL_DBG_ANY,
78 "GMBOX Host Int status 2: 0x%x\n",
79 irq_proc_reg->host_int_status2);
80 ath6kl_dbg(ATH6KL_DBG_ANY,
81 "GMBOX RX Avail: 0x%x\n",
82 irq_proc_reg->gmbox_rx_avail);
83 ath6kl_dbg(ATH6KL_DBG_ANY,
84 "GMBOX lookahead alias 0: 0x%x\n",
85 irq_proc_reg->rx_gmbox_lkahd_alias[0]);
86 ath6kl_dbg(ATH6KL_DBG_ANY,
87 "GMBOX lookahead alias 1: 0x%x\n",
88 irq_proc_reg->rx_gmbox_lkahd_alias[1]);
89 }
90
91 }
92
93 if (irq_enable_reg != NULL) {
94 ath6kl_dbg(ATH6KL_DBG_ANY,
95 "Int status Enable: 0x%x\n",
96 irq_enable_reg->int_status_en);
97 ath6kl_dbg(ATH6KL_DBG_ANY, "Counter Int status Enable: 0x%x\n",
98 irq_enable_reg->cntr_int_status_en);
99 }
100 ath6kl_dbg(ATH6KL_DBG_ANY, "<------------------------------->\n");
101}
102
103static void dump_cred_dist(struct htc_endpoint_credit_dist *ep_dist)
104{
105 ath6kl_dbg(ATH6KL_DBG_ANY,
106 "--- endpoint: %d svc_id: 0x%X ---\n",
107 ep_dist->endpoint, ep_dist->svc_id);
108 ath6kl_dbg(ATH6KL_DBG_ANY, " dist_flags : 0x%X\n",
109 ep_dist->dist_flags);
110 ath6kl_dbg(ATH6KL_DBG_ANY, " cred_norm : %d\n",
111 ep_dist->cred_norm);
112 ath6kl_dbg(ATH6KL_DBG_ANY, " cred_min : %d\n",
113 ep_dist->cred_min);
114 ath6kl_dbg(ATH6KL_DBG_ANY, " credits : %d\n",
115 ep_dist->credits);
116 ath6kl_dbg(ATH6KL_DBG_ANY, " cred_assngd : %d\n",
117 ep_dist->cred_assngd);
118 ath6kl_dbg(ATH6KL_DBG_ANY, " seek_cred : %d\n",
119 ep_dist->seek_cred);
120 ath6kl_dbg(ATH6KL_DBG_ANY, " cred_sz : %d\n",
121 ep_dist->cred_sz);
122 ath6kl_dbg(ATH6KL_DBG_ANY, " cred_per_msg : %d\n",
123 ep_dist->cred_per_msg);
124 ath6kl_dbg(ATH6KL_DBG_ANY, " cred_to_dist : %d\n",
125 ep_dist->cred_to_dist);
126 ath6kl_dbg(ATH6KL_DBG_ANY, " txq_depth : %d\n",
127 get_queue_depth(&((struct htc_endpoint *)
128 ep_dist->htc_rsvd)->txq));
129 ath6kl_dbg(ATH6KL_DBG_ANY,
130 "----------------------------------\n");
131}
132
133void dump_cred_dist_stats(struct htc_target *target)
134{
135 struct htc_endpoint_credit_dist *ep_list;
136
137 if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_TRC))
138 return;
139
140 list_for_each_entry(ep_list, &target->cred_dist_list, list)
141 dump_cred_dist(ep_list);
142
143 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:%p dist:%p\n",
144 target->cred_dist_cntxt, NULL);
145 ath6kl_dbg(ATH6KL_DBG_TRC, "credit distribution, total : %d, free : %d\n",
146 target->cred_dist_cntxt->total_avail_credits,
147 target->cred_dist_cntxt->cur_free_credits);
148}
149
150#endif
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
new file mode 100644
index 000000000000..66b399962f01
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -0,0 +1,105 @@
1/*
2 * Copyright (c) 2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef DEBUG_H
18#define DEBUG_H
19
20#include "htc_hif.h"
21
22enum ATH6K_DEBUG_MASK {
23 ATH6KL_DBG_WLAN_CONNECT = BIT(0), /* wlan connect */
24 ATH6KL_DBG_WLAN_SCAN = BIT(1), /* wlan scan */
25 ATH6KL_DBG_WLAN_TX = BIT(2), /* wlan tx */
26 ATH6KL_DBG_WLAN_RX = BIT(3), /* wlan rx */
27 ATH6KL_DBG_BMI = BIT(4), /* bmi tracing */
28 ATH6KL_DBG_HTC_SEND = BIT(5), /* htc send */
29 ATH6KL_DBG_HTC_RECV = BIT(6), /* htc recv */
30 ATH6KL_DBG_IRQ = BIT(7), /* interrupt processing */
31 ATH6KL_DBG_PM = BIT(8), /* power management */
32 ATH6KL_DBG_WLAN_NODE = BIT(9), /* general wlan node tracing */
33 ATH6KL_DBG_WMI = BIT(10), /* wmi tracing */
34 ATH6KL_DBG_TRC = BIT(11), /* generic func tracing */
35 ATH6KL_DBG_SCATTER = BIT(12), /* hif scatter tracing */
36 ATH6KL_DBG_WLAN_CFG = BIT(13), /* cfg80211 i/f file tracing */
37 ATH6KL_DBG_RAW_BYTES = BIT(14), /* dump tx/rx and wmi frames */
38 ATH6KL_DBG_AGGR = BIT(15), /* aggregation */
39 ATH6KL_DBG_ANY = 0xffffffff /* enable all logs */
40};
41
42extern unsigned int debug_mask;
43extern int ath6kl_printk(const char *level, const char *fmt, ...)
44 __attribute__ ((format (printf, 2, 3)));
45
46#define ath6kl_info(fmt, ...) \
47 ath6kl_printk(KERN_INFO, fmt, ##__VA_ARGS__)
48#define ath6kl_err(fmt, ...) \
49 ath6kl_printk(KERN_ERR, fmt, ##__VA_ARGS__)
50#define ath6kl_warn(fmt, ...) \
51 ath6kl_printk(KERN_WARNING, fmt, ##__VA_ARGS__)
52
53#define AR_DBG_LVL_CHECK(mask) (debug_mask & (mask))
54
55#ifdef CONFIG_ATH6KL_DEBUG
56#define ath6kl_dbg(mask, fmt, ...) \
57 ({ \
58 int rtn; \
59 if (debug_mask & mask) \
60 rtn = ath6kl_printk(KERN_DEBUG, fmt, ##__VA_ARGS__); \
61 else \
62 rtn = 0; \
63 \
64 rtn; \
65 })
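/*
 * Usage sketch, not part of the original patch: this only prints when
 * ATH6KL_DBG_WMI is set in debug_mask; cmd_id and len are hypothetical
 * local variables.
 */
#if 0 /* illustrative only */
	ath6kl_dbg(ATH6KL_DBG_WMI, "wmi cmd 0x%x len %d\n", cmd_id, len);
#endif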
66
67static inline void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask,
68 const char *msg, const void *buf,
69 size_t len)
70{
71 if (debug_mask & mask) {
72 ath6kl_dbg(mask, "%s\n", msg);
73 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
74 }
75}
76
77void ath6kl_dump_registers(struct ath6kl_device *dev,
78 struct ath6kl_irq_proc_registers *irq_proc_reg,
79 struct ath6kl_irq_enable_reg *irq_en_reg);
80void dump_cred_dist_stats(struct htc_target *target);
81#else
82static inline int ath6kl_dbg(enum ATH6K_DEBUG_MASK dbg_mask,
83 const char *fmt, ...)
84{
85 return 0;
86}
87
88static inline void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask,
89 const char *msg, const void *buf,
90 size_t len)
91{
92}
93
94static inline void ath6kl_dump_registers(struct ath6kl_device *dev,
95 struct ath6kl_irq_proc_registers *irq_proc_reg,
96 struct ath6kl_irq_enable_reg *irq_en_reg)
97{
98
99}
100static inline void dump_cred_dist_stats(struct htc_target *target)
101{
102}
103#endif
104
105#endif
diff --git a/drivers/net/wireless/ath/ath6kl/hif-ops.h b/drivers/net/wireless/ath/ath6kl/hif-ops.h
new file mode 100644
index 000000000000..c923979776a0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/hif-ops.h
@@ -0,0 +1,72 @@
1/*
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef HIF_OPS_H
18#define HIF_OPS_H
19
20#include "hif.h"
21
22static inline int hif_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
23 u32 len, u32 request)
24{
25 return ar->hif_ops->read_write_sync(ar, addr, buf, len, request);
26}
27
28static inline int hif_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
29 u32 length, u32 request,
30 struct htc_packet *packet)
31{
32 return ar->hif_ops->write_async(ar, address, buffer, length,
33 request, packet);
34}
35static inline void ath6kl_hif_irq_enable(struct ath6kl *ar)
36{
37 return ar->hif_ops->irq_enable(ar);
38}
39
40static inline void ath6kl_hif_irq_disable(struct ath6kl *ar)
41{
42 return ar->hif_ops->irq_disable(ar);
43}
44
45static inline struct hif_scatter_req *hif_scatter_req_get(struct ath6kl *ar)
46{
47 return ar->hif_ops->scatter_req_get(ar);
48}
49
50static inline void hif_scatter_req_add(struct ath6kl *ar,
51 struct hif_scatter_req *s_req)
52{
53 return ar->hif_ops->scatter_req_add(ar, s_req);
54}
55
56static inline int ath6kl_hif_enable_scatter(struct ath6kl *ar)
57{
58 return ar->hif_ops->enable_scatter(ar);
59}
60
61static inline int ath6kl_hif_scat_req_rw(struct ath6kl *ar,
62 struct hif_scatter_req *scat_req)
63{
64 return ar->hif_ops->scat_req_rw(ar, scat_req);
65}
66
67static inline void ath6kl_hif_cleanup_scatter(struct ath6kl *ar)
68{
69 return ar->hif_ops->cleanup_scatter(ar);
70}
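/*
 * Editorial sketch, not part of the original patch: HTC code would
 * issue a synchronous block write to mailbox 0 through the wrapper
 * above; buf and len are hypothetical.
 */
#if 0 /* illustrative only */
	ret = hif_read_write_sync(ar, HIF_MBOX_BASE_ADDR, buf, len,
				  HIF_WR_SYNC_BLOCK_INC);
#endif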
71
72#endif
diff --git a/drivers/net/wireless/ath/ath6kl/hif.h b/drivers/net/wireless/ath/ath6kl/hif.h
new file mode 100644
index 000000000000..5ceff54775a1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/hif.h
@@ -0,0 +1,207 @@
1/*
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef HIF_H
18#define HIF_H
19
20#include "common.h"
21#include "core.h"
22
23#include <linux/scatterlist.h>
24
25#define BUS_REQUEST_MAX_NUM 64
26#define HIF_MBOX_BLOCK_SIZE 128
27#define HIF_MBOX0_BLOCK_SIZE 1
28
29#define HIF_DMA_BUFFER_SIZE (32 * 1024)
30#define CMD53_FIXED_ADDRESS 1
31#define CMD53_INCR_ADDRESS 2
32
33#define MAX_SCATTER_REQUESTS 4
34#define MAX_SCATTER_ENTRIES_PER_REQ 16
35#define MAX_SCATTER_REQ_TRANSFER_SIZE (32 * 1024)
36
37#define MANUFACTURER_ID_AR6003_BASE 0x300
38/* SDIO manufacturer ID and codes */
39#define MANUFACTURER_ID_ATH6KL_BASE_MASK 0xFF00
40#define MANUFACTURER_CODE 0x271 /* Atheros */
41
42/* Mailbox address in SDIO address space */
43#define HIF_MBOX_BASE_ADDR 0x800
44#define HIF_MBOX_WIDTH 0x800
45
46#define HIF_MBOX_END_ADDR (HTC_MAILBOX_NUM_MAX * HIF_MBOX_WIDTH - 1)
47
48/* version 1 of the chip has only a 12K extended mbox range */
49#define HIF_MBOX0_EXT_BASE_ADDR 0x4000
50#define HIF_MBOX0_EXT_WIDTH (12*1024)
51
52/* GMBOX addresses */
53#define HIF_GMBOX_BASE_ADDR 0x7000
54#define HIF_GMBOX_WIDTH 0x4000
55
56/* interrupt mode register */
57#define CCCR_SDIO_IRQ_MODE_REG 0xF0
58
59/* mode to enable special 4-bit interrupt assertion without clock */
60#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ (1 << 0)
61
62struct bus_request {
63 struct list_head list;
64
65 /* request data */
66 u32 address;
67
68 u8 *buffer;
69 u32 length;
70 u32 request;
71 struct htc_packet *packet;
72 int status;
73
74 /* this is a scatter request */
75 struct hif_scatter_req *scat_req;
76};
77
78/* direction of transfer (read/write) */
79#define HIF_READ 0x00000001
80#define HIF_WRITE 0x00000002
81#define HIF_DIR_MASK (HIF_READ | HIF_WRITE)
82
83/*
84 * emode - This indicates the whether the command is to be executed in a
85 * blocking or non-blocking fashion (HIF_SYNCHRONOUS/
86 * HIF_ASYNCHRONOUS). The read/write data paths in HTC have been
87 *	   implemented using the asynchronous mode allowing the bus
88 * driver to indicate the completion of operation through the
89 * registered callback routine. The requirement primarily comes
90 * from the contexts these operations get called from (a driver's
91 * transmit context or the ISR context in case of receive).
92 * Support for both of these modes is essential.
93 */
94#define HIF_SYNCHRONOUS 0x00000010
95#define HIF_ASYNCHRONOUS 0x00000020
96#define HIF_EMODE_MASK (HIF_SYNCHRONOUS | HIF_ASYNCHRONOUS)
97
98/*
99 * dmode - An interface may support different kinds of commands based on
100 * the tradeoff between the amount of data it can carry and the
101 * setup time. Byte and Block modes are supported (HIF_BYTE_BASIS/
102 *         HIF_BLOCK_BASIS). In the case of the latter, the data is rounded up
103 * to the nearest block size by padding. The size of the block is
104 * configurable at compile time using the HIF_BLOCK_SIZE and is
105 * negotiated with the target during initialization after the
106 * ATH6KL interrupts are enabled.
107 */
108#define HIF_BYTE_BASIS 0x00000040
109#define HIF_BLOCK_BASIS 0x00000080
110#define HIF_DMODE_MASK (HIF_BYTE_BASIS | HIF_BLOCK_BASIS)
111
112/*
113 * amode - This indicates if the address has to be incremented on ATH6KL
114 *         after every read/write operation (HIF_FIXED_ADDRESS/
115 * HIF_INCREMENTAL_ADDRESS).
116 */
117#define HIF_FIXED_ADDRESS 0x00000100
118#define HIF_INCREMENTAL_ADDRESS 0x00000200
119#define HIF_AMODE_MASK (HIF_FIXED_ADDRESS | HIF_INCREMENTAL_ADDRESS)
120
121#define HIF_WR_ASYNC_BYTE_INC \
122 (HIF_WRITE | HIF_ASYNCHRONOUS | \
123 HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
124
125#define HIF_WR_ASYNC_BLOCK_INC \
126 (HIF_WRITE | HIF_ASYNCHRONOUS | \
127 HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS)
128
129#define HIF_WR_SYNC_BYTE_FIX \
130 (HIF_WRITE | HIF_SYNCHRONOUS | \
131 HIF_BYTE_BASIS | HIF_FIXED_ADDRESS)
132
133#define HIF_WR_SYNC_BYTE_INC \
134 (HIF_WRITE | HIF_SYNCHRONOUS | \
135 HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
136
137#define HIF_WR_SYNC_BLOCK_INC \
138 (HIF_WRITE | HIF_SYNCHRONOUS | \
139 HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS)
140
141#define HIF_RD_SYNC_BYTE_INC \
142 (HIF_READ | HIF_SYNCHRONOUS | \
143 HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
144
145#define HIF_RD_SYNC_BYTE_FIX \
146 (HIF_READ | HIF_SYNCHRONOUS | \
147 HIF_BYTE_BASIS | HIF_FIXED_ADDRESS)
148
149#define HIF_RD_ASYNC_BLOCK_FIX \
150 (HIF_READ | HIF_ASYNCHRONOUS | \
151 HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS)
152
153#define HIF_RD_SYNC_BLOCK_FIX \
154 (HIF_READ | HIF_SYNCHRONOUS | \
155 HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS)
156
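Any request word is a composition of one bit from each of the dir/emode/dmode/amode groups, so the three masks above recover each attribute independently. A minimal decoding sketch (these helpers are hypothetical illustrations, not part of the driver):

/* Hypothetical decode helpers -- illustration only, not driver API. */
static inline bool hif_req_is_write(u32 request)
{
	return (request & HIF_DIR_MASK) == HIF_WRITE;
}

static inline bool hif_req_is_async(u32 request)
{
	return (request & HIF_EMODE_MASK) == HIF_ASYNCHRONOUS;
}

static inline bool hif_req_is_fixed_addr(u32 request)
{
	return (request & HIF_AMODE_MASK) == HIF_FIXED_ADDRESS;
}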
157struct hif_scatter_item {
158 u8 *buf;
159 int len;
160 struct htc_packet *packet;
161};
162
163struct hif_scatter_req {
164 struct list_head list;
165 /* address for the read/write operation */
166 u32 addr;
167
168 /* request flags */
169 u32 req;
170
171 /* total length of entire transfer */
172 u32 len;
173
174 bool virt_scat;
175
176 void (*complete) (struct htc_target *, struct hif_scatter_req *);
177 int status;
178 int scat_entries;
179
180 struct bus_request *busrequest;
181 struct scatterlist *sgentries;
182
183 /* bounce buffer for upper layers to copy to/from */
184 u8 *virt_dma_buf;
185
186 struct hif_scatter_item scat_list[1];
187};
188
189struct ath6kl_hif_ops {
190 int (*read_write_sync)(struct ath6kl *ar, u32 addr, u8 *buf,
191 u32 len, u32 request);
192 int (*write_async)(struct ath6kl *ar, u32 address, u8 *buffer,
193 u32 length, u32 request, struct htc_packet *packet);
194
195 void (*irq_enable)(struct ath6kl *ar);
196 void (*irq_disable)(struct ath6kl *ar);
197
198 struct hif_scatter_req *(*scatter_req_get)(struct ath6kl *ar);
199 void (*scatter_req_add)(struct ath6kl *ar,
200 struct hif_scatter_req *s_req);
201 int (*enable_scatter)(struct ath6kl *ar);
202 int (*scat_req_rw) (struct ath6kl *ar,
203 struct hif_scatter_req *scat_req);
204 void (*cleanup_scatter)(struct ath6kl *ar);
205};
206
207#endif
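Callers do not normally dereference this ops table themselves; thin static-inline wrappers (hif-ops.h in this series) forward to it, in the style of the cleanup_scatter wrapper visible at the top of this diff. A sketch of that wrapper pattern for the first op:

static inline int hif_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
				      u32 len, u32 request)
{
	/* dispatch through the bus-specific ops table (SDIO here) */
	return ar->hif_ops->read_write_sync(ar, addr, buf, len, request);
}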
diff --git a/drivers/net/wireless/ath/ath6kl/htc.c b/drivers/net/wireless/ath/ath6kl/htc.c
new file mode 100644
index 000000000000..5580e22c19f4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/htc.c
@@ -0,0 +1,2456 @@
1/*
2 * Copyright (c) 2007-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "htc_hif.h"
19#include "debug.h"
20#include "hif-ops.h"
21#include <asm/unaligned.h>
22
23#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))
24
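CALC_TXRX_PADDED_LEN rounds a transfer up to the device block boundary via __ALIGN_MASK. With an illustrative 128-byte block size the mask is 0x7F, so a 100-byte send pads to (100 + 127) & ~127 = 128 bytes, while a length that is already an exact multiple of the block size is left unchanged.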
25static void htc_prep_send_pkt(struct htc_packet *packet, u8 flags, int ctrl0,
26 int ctrl1)
27{
28 struct htc_frame_hdr *hdr;
29
30 packet->buf -= HTC_HDR_LENGTH;
31 hdr = (struct htc_frame_hdr *)packet->buf;
32
33	/* Endianness? */
34 put_unaligned((u16)packet->act_len, &hdr->payld_len);
35 hdr->flags = flags;
36 hdr->eid = packet->endpoint;
37 hdr->ctrl[0] = ctrl0;
38 hdr->ctrl[1] = ctrl1;
39}
40
41static void htc_reclaim_txctrl_buf(struct htc_target *target,
42 struct htc_packet *pkt)
43{
44 spin_lock_bh(&target->htc_lock);
45 list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
46 spin_unlock_bh(&target->htc_lock);
47}
48
49static struct htc_packet *htc_get_control_buf(struct htc_target *target,
50 bool tx)
51{
52 struct htc_packet *packet = NULL;
53 struct list_head *buf_list;
54
55 buf_list = tx ? &target->free_ctrl_txbuf : &target->free_ctrl_rxbuf;
56
57 spin_lock_bh(&target->htc_lock);
58
59 if (list_empty(buf_list)) {
60 spin_unlock_bh(&target->htc_lock);
61 return NULL;
62 }
63
64 packet = list_first_entry(buf_list, struct htc_packet, list);
65 list_del(&packet->list);
66 spin_unlock_bh(&target->htc_lock);
67
68 if (tx)
69 packet->buf = packet->buf_start + HTC_HDR_LENGTH;
70
71 return packet;
72}
73
74static void htc_tx_comp_update(struct htc_target *target,
75 struct htc_endpoint *endpoint,
76 struct htc_packet *packet)
77{
78 packet->completion = NULL;
79 packet->buf += HTC_HDR_LENGTH;
80
81 if (!packet->status)
82 return;
83
84 ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
85 packet->status, packet->endpoint, packet->act_len,
86 packet->info.tx.cred_used);
87
88 /* on failure to submit, reclaim credits for this packet */
89 spin_lock_bh(&target->tx_lock);
90 endpoint->cred_dist.cred_to_dist +=
91 packet->info.tx.cred_used;
92 endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);
93
94 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
95 target->cred_dist_cntxt, &target->cred_dist_list);
96
97 ath6k_credit_distribute(target->cred_dist_cntxt,
98 &target->cred_dist_list,
99 HTC_CREDIT_DIST_SEND_COMPLETE);
100
101 spin_unlock_bh(&target->tx_lock);
102}
103
104static void htc_tx_complete(struct htc_endpoint *endpoint,
105 struct list_head *txq)
106{
107 if (list_empty(txq))
108 return;
109
110 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
111 "send complete ep %d, (%d pkts)\n",
112 endpoint->eid, get_queue_depth(txq));
113
114 ath6kl_tx_complete(endpoint->target->dev->ar, txq);
115}
116
117static void htc_tx_comp_handler(struct htc_target *target,
118 struct htc_packet *packet)
119{
120 struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
121 struct list_head container;
122
123 htc_tx_comp_update(target, endpoint, packet);
124 INIT_LIST_HEAD(&container);
125 list_add_tail(&packet->list, &container);
126 /* do completion */
127 htc_tx_complete(endpoint, &container);
128}
129
130static void htc_async_tx_scat_complete(struct htc_target *target,
131 struct hif_scatter_req *scat_req)
132{
133 struct htc_endpoint *endpoint;
134 struct htc_packet *packet;
135 struct list_head tx_compq;
136 int i;
137
138 INIT_LIST_HEAD(&tx_compq);
139
140 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
141 "htc_async_tx_scat_complete total len: %d entries: %d\n",
142 scat_req->len, scat_req->scat_entries);
143
144 if (scat_req->status)
145 ath6kl_err("send scatter req failed: %d\n", scat_req->status);
146
147 packet = scat_req->scat_list[0].packet;
148 endpoint = &target->endpoint[packet->endpoint];
149
150 /* walk through the scatter list and process */
151 for (i = 0; i < scat_req->scat_entries; i++) {
152 packet = scat_req->scat_list[i].packet;
153 if (!packet) {
154 WARN_ON(1);
155 return;
156 }
157
158 packet->status = scat_req->status;
159 htc_tx_comp_update(target, endpoint, packet);
160 list_add_tail(&packet->list, &tx_compq);
161 }
162
163 /* free scatter request */
164 hif_scatter_req_add(target->dev->ar, scat_req);
165
166 /* complete all packets */
167 htc_tx_complete(endpoint, &tx_compq);
168}
169
170static int htc_issue_send(struct htc_target *target, struct htc_packet *packet)
171{
172 int status;
173 bool sync = false;
174 u32 padded_len, send_len;
175
176 if (!packet->completion)
177 sync = true;
178
179 send_len = packet->act_len + HTC_HDR_LENGTH;
180
181 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s: transmit len : %d (%s)\n",
182 __func__, send_len, sync ? "sync" : "async");
183
184 padded_len = CALC_TXRX_PADDED_LEN(target, send_len);
185
186 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
187 "DevSendPacket, padded len: %d mbox:0x%X (mode:%s)\n",
188 padded_len,
189 target->dev->ar->mbox_info.htc_addr,
190 sync ? "sync" : "async");
191
192 if (sync) {
193 status = hif_read_write_sync(target->dev->ar,
194 target->dev->ar->mbox_info.htc_addr,
195 packet->buf, padded_len,
196 HIF_WR_SYNC_BLOCK_INC);
197
198 packet->status = status;
199 packet->buf += HTC_HDR_LENGTH;
200 } else
201 status = hif_write_async(target->dev->ar,
202 target->dev->ar->mbox_info.htc_addr,
203 packet->buf, padded_len,
204 HIF_WR_ASYNC_BLOCK_INC, packet);
205
206 return status;
207}
208
209static int htc_check_credits(struct htc_target *target,
210 struct htc_endpoint *ep, u8 *flags,
211 enum htc_endpoint_id eid, unsigned int len,
212 int *req_cred)
213{
214
215 *req_cred = (len > target->tgt_cred_sz) ?
216 DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;
217
218 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "creds required:%d got:%d\n",
219 *req_cred, ep->cred_dist.credits);
220
221 if (ep->cred_dist.credits < *req_cred) {
222 if (eid == ENDPOINT_0)
223 return -EINVAL;
224
225 /* Seek more credits */
226 ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;
227
228 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
229 target->cred_dist_cntxt, &ep->cred_dist);
230
231 ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);
232
233 ep->cred_dist.seek_cred = 0;
234
235 if (ep->cred_dist.credits < *req_cred) {
236 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
237 "not enough credits for ep %d - leaving packet in queue\n",
238 eid);
239 return -EINVAL;
240 }
241 }
242
243 ep->cred_dist.credits -= *req_cred;
244 ep->ep_st.cred_cosumd += *req_cred;
245
246 /* When we are getting low on credits, ask for more */
247 if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
248 ep->cred_dist.seek_cred =
249 ep->cred_dist.cred_per_msg - ep->cred_dist.credits;
250
251 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
252 target->cred_dist_cntxt, &ep->cred_dist);
253
254 ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);
255
256 /* see if we were successful in getting more */
257 if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
258 /* tell the target we need credits ASAP! */
259 *flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
260 ep->ep_st.cred_low_indicate += 1;
261 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "host needs credits\n");
262 }
263 }
264
265 return 0;
266}
267
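A worked example of the credit arithmetic, with an illustrative 1664-byte target credit size: a 3000-byte message requires DIV_ROUND_UP(3000, 1664) = 2 credits, while any message that fits within a single credit costs exactly 1; if the endpoint cannot seek enough credits, the packet simply stays queued.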
268static void htc_tx_pkts_get(struct htc_target *target,
269 struct htc_endpoint *endpoint,
270 struct list_head *queue)
271{
272 int req_cred;
273 u8 flags;
274 struct htc_packet *packet;
275 unsigned int len;
276
277 while (true) {
278
279 flags = 0;
280
281 if (list_empty(&endpoint->txq))
282 break;
283 packet = list_first_entry(&endpoint->txq, struct htc_packet,
284 list);
285
286 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
287 "got head pkt:0x%p , queue depth: %d\n",
288 packet, get_queue_depth(&endpoint->txq));
289
290 len = CALC_TXRX_PADDED_LEN(target,
291 packet->act_len + HTC_HDR_LENGTH);
292
293 if (htc_check_credits(target, endpoint, &flags,
294 packet->endpoint, len, &req_cred))
295 break;
296
297 /* now we can fully move onto caller's queue */
298 packet = list_first_entry(&endpoint->txq, struct htc_packet,
299 list);
300 list_move_tail(&packet->list, queue);
301
302 /* save the number of credits this packet consumed */
303 packet->info.tx.cred_used = req_cred;
304
305 /* all TX packets are handled asynchronously */
306 packet->completion = htc_tx_comp_handler;
307 packet->context = target;
308 endpoint->ep_st.tx_issued += 1;
309
310 /* save send flags */
311 packet->info.tx.flags = flags;
312 packet->info.tx.seqno = endpoint->seqno;
313 endpoint->seqno++;
314 }
315}
316
317/* See if the padded tx length falls on a credit boundary */
318static int htc_get_credit_padding(unsigned int cred_sz, int *len,
319 struct htc_endpoint *ep)
320{
321 int rem_cred, cred_pad;
322
323 rem_cred = *len % cred_sz;
324
325 /* No padding needed */
326 if (!rem_cred)
327 return 0;
328
329 if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN))
330 return -1;
331
332 /*
333	 * The transfer consumes a "partial" credit; this
334 * packet cannot be bundled unless we add
335 * additional "dummy" padding (max 255 bytes) to
336 * consume the entire credit.
337 */
338 cred_pad = *len < cred_sz ? (cred_sz - *len) : rem_cred;
339
340 if ((cred_pad > 0) && (cred_pad <= 255))
341 *len += cred_pad;
342 else
343 /* The amount of padding is too large, send as non-bundled */
344 return -1;
345
346 return cred_pad;
347}
348
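A worked example with an illustrative 128-byte credit: a 100-byte padded length gives rem_cred = 100 and, since the length is below the credit size, cred_pad = 128 - 100 = 28, growing the message to exactly one credit; when the dummy padding needed would exceed 255 bytes, the function returns -1 and the packet is sent unbundled.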
349static int htc_setup_send_scat_list(struct htc_target *target,
350 struct htc_endpoint *endpoint,
351 struct hif_scatter_req *scat_req,
352 int n_scat,
353 struct list_head *queue)
354{
355 struct htc_packet *packet;
356 int i, len, rem_scat, cred_pad;
357 int status = 0;
358
359 rem_scat = target->max_tx_bndl_sz;
360
361 for (i = 0; i < n_scat; i++) {
362 scat_req->scat_list[i].packet = NULL;
363
364 if (list_empty(queue))
365 break;
366
367 packet = list_first_entry(queue, struct htc_packet, list);
368 len = CALC_TXRX_PADDED_LEN(target,
369 packet->act_len + HTC_HDR_LENGTH);
370
371 cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
372 &len, endpoint);
373 if (cred_pad < 0) {
374 status = -EINVAL;
375 break;
376 }
377
378 if (rem_scat < len) {
379 /* exceeds what we can transfer */
380 status = -ENOSPC;
381 break;
382 }
383
384 rem_scat -= len;
385 /* now remove it from the queue */
386 packet = list_first_entry(queue, struct htc_packet, list);
387 list_del(&packet->list);
388
389 scat_req->scat_list[i].packet = packet;
390 /* prepare packet and flag message as part of a send bundle */
391 htc_prep_send_pkt(packet,
392 packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE,
393 cred_pad, packet->info.tx.seqno);
394 scat_req->scat_list[i].buf = packet->buf;
395 scat_req->scat_list[i].len = len;
396
397 scat_req->len += len;
398 scat_req->scat_entries++;
399 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
400 "%d, adding pkt : 0x%p len:%d (remaining space:%d)\n",
401 i, packet, len, rem_scat);
402 }
403
404 /* Roll back scatter setup in case of any failure */
405 if (status || (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
406 for (i = scat_req->scat_entries - 1; i >= 0; i--) {
407 packet = scat_req->scat_list[i].packet;
408 if (packet) {
409 packet->buf += HTC_HDR_LENGTH;
410 list_add(&packet->list, queue);
411 }
412 }
413 return -EINVAL;
414 }
415
416 return 0;
417}
418
419/*
420 * htc_issue_send_bundle: drain a queue and send as bundles.
421 * This function may return without fully draining the queue
422 * when:
423 *
424 * 1. scatter resources are exhausted
425 * 2. a message that will consume a partial credit will stop the
426 * bundling process early
427 * 3. we drop below the minimum number of messages for a bundle
428 */
429static void htc_issue_send_bundle(struct htc_endpoint *endpoint,
430 struct list_head *queue,
431 int *sent_bundle, int *n_bundle_pkts)
432{
433 struct htc_target *target = endpoint->target;
434 struct hif_scatter_req *scat_req = NULL;
435 int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;
436
437 while (true) {
438 n_scat = get_queue_depth(queue);
439 n_scat = min(n_scat, target->msg_per_bndl_max);
440
441 if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE)
442 /* not enough to bundle */
443 break;
444
445 scat_req = hif_scatter_req_get(target->dev->ar);
446
447 if (!scat_req) {
448 /* no scatter resources */
449 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
450 "no more scatter resources\n");
451 break;
452 }
453
454 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "pkts to scatter: %d\n",
455 n_scat);
456
457 scat_req->len = 0;
458 scat_req->scat_entries = 0;
459
460 if (htc_setup_send_scat_list(target, endpoint, scat_req,
461 n_scat, queue)) {
462 hif_scatter_req_add(target->dev->ar, scat_req);
463 break;
464 }
465
466 /* send path is always asynchronous */
467 scat_req->complete = htc_async_tx_scat_complete;
468 n_sent_bundle++;
469 tot_pkts_bundle += scat_req->scat_entries;
470
471 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
472 "send scatter total bytes: %d , entries: %d\n",
473 scat_req->len, scat_req->scat_entries);
474 ath6kldev_submit_scat_req(target->dev, scat_req, false);
475 }
476
477 *sent_bundle = n_sent_bundle;
478 *n_bundle_pkts = tot_pkts_bundle;
479 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_issue_send_bundle (sent:%d)\n",
480 n_sent_bundle);
481
482 return;
483}
484
485static void htc_tx_from_ep_txq(struct htc_target *target,
486 struct htc_endpoint *endpoint)
487{
488 struct list_head txq;
489 struct htc_packet *packet;
490 int bundle_sent;
491 int n_pkts_bundle;
492
493 spin_lock_bh(&target->tx_lock);
494
495 endpoint->tx_proc_cnt++;
496 if (endpoint->tx_proc_cnt > 1) {
497 endpoint->tx_proc_cnt--;
498 spin_unlock_bh(&target->tx_lock);
499 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_try_send (busy)\n");
500 return;
501 }
502
503 /*
504 * drain the endpoint TX queue for transmission as long
505 * as we have enough credits.
506 */
507 INIT_LIST_HEAD(&txq);
508
509 while (true) {
510
511 if (list_empty(&endpoint->txq))
512 break;
513
514 htc_tx_pkts_get(target, endpoint, &txq);
515
516 if (list_empty(&txq))
517 break;
518
519 spin_unlock_bh(&target->tx_lock);
520
521 bundle_sent = 0;
522 n_pkts_bundle = 0;
523
524 while (true) {
525 /* try to send a bundle on each pass */
526 if ((target->tx_bndl_enable) &&
527 (get_queue_depth(&txq) >=
528 HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
529 int temp1 = 0, temp2 = 0;
530
531 htc_issue_send_bundle(endpoint, &txq,
532 &temp1, &temp2);
533 bundle_sent += temp1;
534 n_pkts_bundle += temp2;
535 }
536
537 if (list_empty(&txq))
538 break;
539
540 packet = list_first_entry(&txq, struct htc_packet,
541 list);
542 list_del(&packet->list);
543
544 htc_prep_send_pkt(packet, packet->info.tx.flags,
545 0, packet->info.tx.seqno);
546 htc_issue_send(target, packet);
547 }
548
549 spin_lock_bh(&target->tx_lock);
550
551 endpoint->ep_st.tx_bundles += bundle_sent;
552 endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;
553 }
554
555 endpoint->tx_proc_cnt = 0;
556 spin_unlock_bh(&target->tx_lock);
557}
558
559static bool htc_try_send(struct htc_target *target,
560 struct htc_endpoint *endpoint,
561 struct htc_packet *tx_pkt)
562{
563 struct htc_ep_callbacks ep_cb;
564 int txq_depth;
565 bool overflow = false;
566
567 ep_cb = endpoint->ep_cb;
568
569 spin_lock_bh(&target->tx_lock);
570 txq_depth = get_queue_depth(&endpoint->txq);
571 spin_unlock_bh(&target->tx_lock);
572
573 if (txq_depth >= endpoint->max_txq_depth)
574 overflow = true;
575
576 if (overflow)
577 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
578 "ep %d, tx queue will overflow :%d , tx depth:%d, max:%d\n",
579 endpoint->eid, overflow, txq_depth,
580 endpoint->max_txq_depth);
581
582 if (overflow && ep_cb.tx_full) {
583 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
584 "indicating overflowed tx packet: 0x%p\n", tx_pkt);
585
586 if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
587 HTC_SEND_FULL_DROP) {
588 endpoint->ep_st.tx_dropped += 1;
589 return false;
590 }
591 }
592
593 spin_lock_bh(&target->tx_lock);
594 list_add_tail(&tx_pkt->list, &endpoint->txq);
595 spin_unlock_bh(&target->tx_lock);
596
597 htc_tx_from_ep_txq(target, endpoint);
598
599 return true;
600}
601
602static void htc_chk_ep_txq(struct htc_target *target)
603{
604 struct htc_endpoint *endpoint;
605 struct htc_endpoint_credit_dist *cred_dist;
606
607 /*
608 * Run through the credit distribution list to see if there are
609 * packets queued. NOTE: no locks need to be taken since the
610 * distribution list is not dynamic (cannot be re-ordered) and we
611 * are not modifying any state.
612 */
613 list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
614 endpoint = (struct htc_endpoint *)cred_dist->htc_rsvd;
615
616 spin_lock_bh(&target->tx_lock);
617 if (!list_empty(&endpoint->txq)) {
618 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
619 "ep %d has %d credits and %d packets in tx queue\n",
620 cred_dist->endpoint,
621 endpoint->cred_dist.credits,
622 get_queue_depth(&endpoint->txq));
623 spin_unlock_bh(&target->tx_lock);
624 /*
625 * Try to start the stalled queue, this list is
626 * ordered by priority. If there are credits
627 * available the highest priority queue will get a
628 * chance to reclaim credits from lower priority
629 * ones.
630 */
631 htc_tx_from_ep_txq(target, endpoint);
632 spin_lock_bh(&target->tx_lock);
633 }
634 spin_unlock_bh(&target->tx_lock);
635 }
636}
637
638static int htc_setup_tx_complete(struct htc_target *target)
639{
640 struct htc_packet *send_pkt = NULL;
641 int status;
642
643 send_pkt = htc_get_control_buf(target, true);
644
645 if (!send_pkt)
646 return -ENOMEM;
647
648 if (target->htc_tgt_ver >= HTC_VERSION_2P1) {
649 struct htc_setup_comp_ext_msg *setup_comp_ext;
650 u32 flags = 0;
651
652 setup_comp_ext =
653 (struct htc_setup_comp_ext_msg *)send_pkt->buf;
654 memset(setup_comp_ext, 0, sizeof(*setup_comp_ext));
655 setup_comp_ext->msg_id =
656 cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);
657
658 if (target->msg_per_bndl_max > 0) {
659 /* Indicate HTC bundling to the target */
660 flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN;
661 setup_comp_ext->msg_per_rxbndl =
662 target->msg_per_bndl_max;
663 }
664
665 memcpy(&setup_comp_ext->flags, &flags,
666 sizeof(setup_comp_ext->flags));
667 set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
668 sizeof(struct htc_setup_comp_ext_msg),
669 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
670
671 } else {
672 struct htc_setup_comp_msg *setup_comp;
673 setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf;
674 memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
675 setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
676 set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
677 sizeof(struct htc_setup_comp_msg),
678 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
679 }
680
681 /* we want synchronous operation */
682 send_pkt->completion = NULL;
683 htc_prep_send_pkt(send_pkt, 0, 0, 0);
684 status = htc_issue_send(target, send_pkt);
685
686 if (send_pkt != NULL)
687 htc_reclaim_txctrl_buf(target, send_pkt);
688
689 return status;
690}
691
692void htc_set_credit_dist(struct htc_target *target,
693 struct htc_credit_state_info *cred_dist_cntxt,
694 u16 srvc_pri_order[], int list_len)
695{
696 struct htc_endpoint *endpoint;
697 int i, ep;
698
699 target->cred_dist_cntxt = cred_dist_cntxt;
700
701 list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
702 &target->cred_dist_list);
703
704 for (i = 0; i < list_len; i++) {
705 for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
706 endpoint = &target->endpoint[ep];
707 if (endpoint->svc_id == srvc_pri_order[i]) {
708 list_add_tail(&endpoint->cred_dist.list,
709 &target->cred_dist_list);
710 break;
711 }
712 }
713 if (ep >= ENDPOINT_MAX) {
714 WARN_ON(1);
715 return;
716 }
717 }
718}
719
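A sketch of how a caller might build the priority list. The WMI service IDs and the credit_state_info field are assumed from elsewhere in this series; treat this as illustrative rather than the driver's actual init path:

/* Illustrative only: order services highest-priority first. */
static void setup_credit_dist(struct ath6kl *ar, struct htc_target *target)
{
	static u16 ac_srvc_pri_order[] = {
		WMI_DATA_VO_SVC,	/* voice */
		WMI_DATA_VI_SVC,	/* video */
		WMI_DATA_BE_SVC,	/* best effort */
		WMI_DATA_BK_SVC,	/* background */
	};

	htc_set_credit_dist(target, &ar->credit_state_info,
			    ac_srvc_pri_order,
			    ARRAY_SIZE(ac_srvc_pri_order));
}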
720int htc_tx(struct htc_target *target, struct htc_packet *packet)
721{
722 struct htc_endpoint *endpoint;
723 struct list_head queue;
724
725 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
726 "htc_tx: ep id: %d, buf: 0x%p, len: %d\n",
727 packet->endpoint, packet->buf, packet->act_len);
728
729 if (packet->endpoint >= ENDPOINT_MAX) {
730 WARN_ON(1);
731 return -EINVAL;
732 }
733
734 endpoint = &target->endpoint[packet->endpoint];
735
736 if (!htc_try_send(target, endpoint, packet)) {
737 packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
738 -ECANCELED : -ENOSPC;
739 INIT_LIST_HEAD(&queue);
740 list_add(&packet->list, &queue);
741 htc_tx_complete(endpoint, &queue);
742 }
743
744 return 0;
745}
746
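A minimal caller sketch for the contract above: on overflow or shutdown htc_tx() completes the packet itself, so the caller just stages the packet and submits it (all names below are illustrative):

static int send_one(struct htc_target *target, struct htc_packet *packet,
		    void *cookie, u8 *buf, u32 len,
		    enum htc_endpoint_id eid, u16 tag)
{
	/* attach buffer, endpoint and tag to the packet, then queue it */
	set_htc_pkt_info(packet, cookie, buf, len, eid, tag);
	return htc_tx(target, packet);
}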
747/* flush endpoint TX queue */
748void htc_flush_txep(struct htc_target *target,
749 enum htc_endpoint_id eid, u16 tag)
750{
751 struct htc_packet *packet, *tmp_pkt;
752 struct list_head discard_q, container;
753 struct htc_endpoint *endpoint = &target->endpoint[eid];
754
755 if (!endpoint->svc_id) {
756 WARN_ON(1);
757 return;
758 }
759
760 /* initialize the discard queue */
761 INIT_LIST_HEAD(&discard_q);
762
763 spin_lock_bh(&target->tx_lock);
764
765 list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) {
766 if ((tag == HTC_TX_PACKET_TAG_ALL) ||
767 (tag == packet->info.tx.tag))
768 list_move_tail(&packet->list, &discard_q);
769 }
770
771 spin_unlock_bh(&target->tx_lock);
772
773 list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
774 packet->status = -ECANCELED;
775 list_del(&packet->list);
776 ath6kl_dbg(ATH6KL_DBG_TRC,
777 "flushing tx pkt:0x%p, len:%d, ep:%d tag:0x%X\n",
778 packet, packet->act_len,
779 packet->endpoint, packet->info.tx.tag);
780
781 INIT_LIST_HEAD(&container);
782 list_add_tail(&packet->list, &container);
783 htc_tx_complete(endpoint, &container);
784 }
785
786}
787
788static void htc_flush_txep_all(struct htc_target *target)
789{
790 struct htc_endpoint *endpoint;
791 int i;
792
793 dump_cred_dist_stats(target);
794
795 for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
796 endpoint = &target->endpoint[i];
797 if (endpoint->svc_id == 0)
798 /* not in use.. */
799 continue;
800 htc_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
801 }
802}
803
804void htc_indicate_activity_change(struct htc_target *target,
805 enum htc_endpoint_id eid, bool active)
806{
807 struct htc_endpoint *endpoint = &target->endpoint[eid];
808 bool dist = false;
809
810 if (endpoint->svc_id == 0) {
811 WARN_ON(1);
812 return;
813 }
814
815 spin_lock_bh(&target->tx_lock);
816
817 if (active) {
818 if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) {
819 endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE;
820 dist = true;
821 }
822 } else {
823 if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) {
824 endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE;
825 dist = true;
826 }
827 }
828
829 if (dist) {
830 endpoint->cred_dist.txq_depth =
831 get_queue_depth(&endpoint->txq);
832
833 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
834 target->cred_dist_cntxt, &target->cred_dist_list);
835
836 ath6k_credit_distribute(target->cred_dist_cntxt,
837 &target->cred_dist_list,
838 HTC_CREDIT_DIST_ACTIVITY_CHANGE);
839 }
840
841 spin_unlock_bh(&target->tx_lock);
842
843 if (dist && !active)
844 htc_chk_ep_txq(target);
845}
846
847/* HTC Rx */
848
849static inline void htc_update_rx_stats(struct htc_endpoint *endpoint,
850 int n_look_ahds)
851{
852 endpoint->ep_st.rx_pkts++;
853 if (n_look_ahds == 1)
854 endpoint->ep_st.rx_lkahds++;
855 else if (n_look_ahds > 1)
856 endpoint->ep_st.rx_bundle_lkahd++;
857}
858
859static inline bool htc_valid_rx_frame_len(struct htc_target *target,
860 enum htc_endpoint_id eid, int len)
861{
862 return (eid == target->dev->ar->ctrl_ep) ?
863 len <= ATH6KL_BUFFER_SIZE : len <= ATH6KL_AMSDU_BUFFER_SIZE;
864}
865
866static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
867{
868 struct list_head queue;
869
870 INIT_LIST_HEAD(&queue);
871 list_add_tail(&packet->list, &queue);
872 return htc_add_rxbuf_multiple(target, &queue);
873}
874
875static void htc_reclaim_rxbuf(struct htc_target *target,
876 struct htc_packet *packet,
877 struct htc_endpoint *ep)
878{
879 if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) {
880 htc_rxpkt_reset(packet);
881 packet->status = -ECANCELED;
882 ep->ep_cb.rx(ep->target, packet);
883 } else {
884 htc_rxpkt_reset(packet);
885 htc_add_rxbuf((void *)(target), packet);
886 }
887}
888
889static void reclaim_rx_ctrl_buf(struct htc_target *target,
890 struct htc_packet *packet)
891{
892 spin_lock_bh(&target->htc_lock);
893 list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
894 spin_unlock_bh(&target->htc_lock);
895}
896
897static int dev_rx_pkt(struct htc_target *target, struct htc_packet *packet,
898 u32 rx_len)
899{
900 struct ath6kl_device *dev = target->dev;
901 u32 padded_len;
902 int status;
903
904 padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);
905
906 if (padded_len > packet->buf_len) {
907 ath6kl_err("not enough receive space for packet - padlen:%d recvlen:%d bufferlen:%d\n",
908 padded_len, rx_len, packet->buf_len);
909 return -ENOMEM;
910 }
911
912 ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
913 "dev_rx_pkt (0x%p : hdr:0x%X) padded len: %d mbox:0x%X (mode:%s)\n",
914 packet, packet->info.rx.exp_hdr,
915 padded_len, dev->ar->mbox_info.htc_addr, "sync");
916
917 status = hif_read_write_sync(dev->ar,
918 dev->ar->mbox_info.htc_addr,
919 packet->buf, padded_len,
920 HIF_RD_SYNC_BLOCK_FIX);
921
922 packet->status = status;
923
924 return status;
925}
926
927/*
928 * Optimization for recv packets: we can indicate a
929 * "hint" that there are more single-packets to fetch
930 * on this endpoint.
931 */
932static void set_rxpkt_indication_flag(u32 lk_ahd,
933 struct htc_endpoint *endpoint,
934 struct htc_packet *packet)
935{
936 struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;
937
938 if (htc_hdr->eid == packet->endpoint) {
939 if (!list_empty(&endpoint->rx_bufq))
940 packet->info.rx.indicat_flags |=
941 HTC_RX_FLAGS_INDICATE_MORE_PKTS;
942 }
943}
944
945static void chk_rx_water_mark(struct htc_endpoint *endpoint)
946{
947 struct htc_ep_callbacks ep_cb = endpoint->ep_cb;
948
949 if (ep_cb.rx_refill_thresh > 0) {
950 spin_lock_bh(&endpoint->target->rx_lock);
951 if (get_queue_depth(&endpoint->rx_bufq)
952 < ep_cb.rx_refill_thresh) {
953 spin_unlock_bh(&endpoint->target->rx_lock);
954 ep_cb.rx_refill(endpoint->target, endpoint->eid);
955 return;
956 }
957 spin_unlock_bh(&endpoint->target->rx_lock);
958 }
959}
960
961/* This function is called with rx_lock held */
962static int htc_setup_rxpkts(struct htc_target *target, struct htc_endpoint *ep,
963 u32 *lk_ahds, struct list_head *queue, int n_msg)
964{
965 struct htc_packet *packet;
966 /* FIXME: type of lk_ahds can't be right */
967 struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds;
968 struct htc_ep_callbacks ep_cb;
969 int status = 0, j, full_len;
970 bool no_recycle;
971
972 full_len = CALC_TXRX_PADDED_LEN(target,
973 le16_to_cpu(htc_hdr->payld_len) +
974 sizeof(*htc_hdr));
975
976 if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
977 ath6kl_warn("Rx buffer requested with invalid length\n");
978 return -EINVAL;
979 }
980
981 ep_cb = ep->ep_cb;
982 for (j = 0; j < n_msg; j++) {
983
984 /*
985 * Reset flag, any packets allocated using the
986 * rx_alloc() API cannot be recycled on
987		 * cleanup; they must be explicitly returned.
988 */
989 no_recycle = false;
990
991 if (ep_cb.rx_allocthresh &&
992 (full_len > ep_cb.rx_alloc_thresh)) {
993 ep->ep_st.rx_alloc_thresh_hit += 1;
994 ep->ep_st.rxalloc_thresh_byte +=
995 le16_to_cpu(htc_hdr->payld_len);
996
997 spin_unlock_bh(&target->rx_lock);
998 no_recycle = true;
999
1000 packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
1001 full_len);
1002 spin_lock_bh(&target->rx_lock);
1003 } else {
1004 /* refill handler is being used */
1005 if (list_empty(&ep->rx_bufq)) {
1006 if (ep_cb.rx_refill) {
1007 spin_unlock_bh(&target->rx_lock);
1008 ep_cb.rx_refill(ep->target, ep->eid);
1009 spin_lock_bh(&target->rx_lock);
1010 }
1011 }
1012
1013 if (list_empty(&ep->rx_bufq))
1014 packet = NULL;
1015 else {
1016 packet = list_first_entry(&ep->rx_bufq,
1017 struct htc_packet, list);
1018 list_del(&packet->list);
1019 }
1020 }
1021
1022 if (!packet) {
1023 target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
1024 target->ep_waiting = ep->eid;
1025 return -ENOSPC;
1026 }
1027
1028 /* clear flags */
1029 packet->info.rx.rx_flags = 0;
1030 packet->info.rx.indicat_flags = 0;
1031 packet->status = 0;
1032
1033 if (no_recycle)
1034 /*
1035 * flag that these packets cannot be
1036 * recycled, they have to be returned to
1037 * the user
1038 */
1039 packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;
1040
1041 /* Caller needs to free this upon any failure */
1042 list_add_tail(&packet->list, queue);
1043
1044 if (target->htc_flags & HTC_OP_STATE_STOPPING) {
1045 status = -ECANCELED;
1046 break;
1047 }
1048
1049 if (j) {
1050 packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
1051 packet->info.rx.exp_hdr = 0xFFFFFFFF;
1052 } else
1053 /* set expected look ahead */
1054 packet->info.rx.exp_hdr = *lk_ahds;
1055
1056 packet->act_len = le16_to_cpu(htc_hdr->payld_len) +
1057 HTC_HDR_LENGTH;
1058 }
1059
1060 return status;
1061}
1062
1063static int alloc_and_prep_rxpkts(struct htc_target *target,
1064 u32 lk_ahds[], int msg,
1065 struct htc_endpoint *endpoint,
1066 struct list_head *queue)
1067{
1068 int status = 0;
1069 struct htc_packet *packet, *tmp_pkt;
1070 struct htc_frame_hdr *htc_hdr;
1071 int i, n_msg;
1072
1073 spin_lock_bh(&target->rx_lock);
1074
1075 for (i = 0; i < msg; i++) {
1076
1077 htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];
1078
1079 if (htc_hdr->eid >= ENDPOINT_MAX) {
1080 ath6kl_err("invalid ep in look-ahead: %d\n",
1081 htc_hdr->eid);
1082 status = -ENOMEM;
1083 break;
1084 }
1085
1086 if (htc_hdr->eid != endpoint->eid) {
1087 ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
1088 htc_hdr->eid, endpoint->eid, i);
1089 status = -ENOMEM;
1090 break;
1091 }
1092
1093 if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
1094 ath6kl_err("payload len %d exceeds max htc : %d !\n",
1095				   le16_to_cpu(htc_hdr->payld_len),
1096 (u32) HTC_MAX_PAYLOAD_LENGTH);
1097 status = -ENOMEM;
1098 break;
1099 }
1100
1101 if (endpoint->svc_id == 0) {
1102 ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
1103 status = -ENOMEM;
1104 break;
1105 }
1106
1107 if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) {
1108 /*
1109 * HTC header indicates that every packet to follow
1110 * has the same padded length so that it can be
1111 * optimally fetched as a full bundle.
1112 */
1113 n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
1114 HTC_FLG_RX_BNDL_CNT_S;
1115
1116 /* the count doesn't include the starter frame */
1117 n_msg++;
1118 if (n_msg > target->msg_per_bndl_max) {
1119 status = -ENOMEM;
1120 break;
1121 }
1122
1123 endpoint->ep_st.rx_bundle_from_hdr += 1;
1124 ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
1125 "htc hdr indicates :%d msg can be fetched as a bundle\n",
1126 n_msg);
1127 } else
1128 /* HTC header only indicates 1 message to fetch */
1129 n_msg = 1;
1130
1131 /* Setup packet buffers for each message */
1132 status = htc_setup_rxpkts(target, endpoint, &lk_ahds[i], queue,
1133 n_msg);
1134
1135 /*
1136		 * This is due to unavailability of buffers to receive the entire data.
1137 * Return no error so that free buffers from queue can be used
1138 * to receive partial data.
1139 */
1140 if (status == -ENOSPC) {
1141 spin_unlock_bh(&target->rx_lock);
1142 return 0;
1143 }
1144
1145 if (status)
1146 break;
1147 }
1148
1149 spin_unlock_bh(&target->rx_lock);
1150
1151 if (status) {
1152 list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
1153 list_del(&packet->list);
1154 htc_reclaim_rxbuf(target, packet,
1155 &target->endpoint[packet->endpoint]);
1156 }
1157 }
1158
1159 return status;
1160}
1161
1162static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
1163{
1164 if (packets->endpoint != ENDPOINT_0) {
1165 WARN_ON(1);
1166 return;
1167 }
1168
1169 if (packets->status == -ECANCELED) {
1170 reclaim_rx_ctrl_buf(context, packets);
1171 return;
1172 }
1173
1174 if (packets->act_len > 0) {
1175		ath6kl_err("htc_ctrl_rx, got message with len:%u\n",
1176 packets->act_len + HTC_HDR_LENGTH);
1177
1178 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
1179 "Unexpected ENDPOINT 0 Message",
1180 packets->buf - HTC_HDR_LENGTH,
1181 packets->act_len + HTC_HDR_LENGTH);
1182 }
1183
1184 htc_reclaim_rxbuf(context, packets, &context->endpoint[0]);
1185}
1186
1187static void htc_proc_cred_rpt(struct htc_target *target,
1188 struct htc_credit_report *rpt,
1189 int n_entries,
1190 enum htc_endpoint_id from_ep)
1191{
1192 struct htc_endpoint *endpoint;
1193 int tot_credits = 0, i;
1194 bool dist = false;
1195
1196 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
1197 "htc_proc_cred_rpt, credit report entries:%d\n", n_entries);
1198
1199 spin_lock_bh(&target->tx_lock);
1200
1201 for (i = 0; i < n_entries; i++, rpt++) {
1202 if (rpt->eid >= ENDPOINT_MAX) {
1203 WARN_ON(1);
1204 spin_unlock_bh(&target->tx_lock);
1205 return;
1206 }
1207
1208 endpoint = &target->endpoint[rpt->eid];
1209
1210 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, " ep %d got %d credits\n",
1211 rpt->eid, rpt->credits);
1212
1213 endpoint->ep_st.tx_cred_rpt += 1;
1214 endpoint->ep_st.cred_retnd += rpt->credits;
1215
1216 if (from_ep == rpt->eid) {
1217 /*
1218 * This credit report arrived on the same endpoint
1219 * indicating it arrived in an RX packet.
1220 */
1221 endpoint->ep_st.cred_from_rx += rpt->credits;
1222 endpoint->ep_st.cred_rpt_from_rx += 1;
1223 } else if (from_ep == ENDPOINT_0) {
1224 /* credit arrived on endpoint 0 as a NULL message */
1225 endpoint->ep_st.cred_from_ep0 += rpt->credits;
1226 endpoint->ep_st.cred_rpt_ep0 += 1;
1227 } else {
1228 endpoint->ep_st.cred_from_other += rpt->credits;
1229 endpoint->ep_st.cred_rpt_from_other += 1;
1230 }
1231
1232 if (rpt->eid == ENDPOINT_0)
1233 /* always give endpoint 0 credits back */
1234 endpoint->cred_dist.credits += rpt->credits;
1235 else {
1236 endpoint->cred_dist.cred_to_dist += rpt->credits;
1237 dist = true;
1238 }
1239
1240 /*
1241 * Refresh tx depth for distribution function that will
1245		 * recover these credits. NOTE: this is only valid when
1243 * there are credits to recover!
1244 */
1245 endpoint->cred_dist.txq_depth =
1246 get_queue_depth(&endpoint->txq);
1247
1248 tot_credits += rpt->credits;
1249 }
1250
1251 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
1252 "report indicated %d credits to distribute\n",
1253 tot_credits);
1254
1255 if (dist) {
1256 /*
1257 * This was a credit return based on a completed send
1258 * operations note, this is done with the lock held
1259 */
1260 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
1261 target->cred_dist_cntxt, &target->cred_dist_list);
1262
1263 ath6k_credit_distribute(target->cred_dist_cntxt,
1264 &target->cred_dist_list,
1265 HTC_CREDIT_DIST_SEND_COMPLETE);
1266 }
1267
1268 spin_unlock_bh(&target->tx_lock);
1269
1270 if (tot_credits)
1271 htc_chk_ep_txq(target);
1272}
1273
1274static int htc_parse_trailer(struct htc_target *target,
1275 struct htc_record_hdr *record,
1276 u8 *record_buf, u32 *next_lk_ahds,
1277 enum htc_endpoint_id endpoint,
1278 int *n_lk_ahds)
1279{
1280 struct htc_bundle_lkahd_rpt *bundle_lkahd_rpt;
1281 struct htc_lookahead_report *lk_ahd;
1282 int len;
1283
1284 switch (record->rec_id) {
1285 case HTC_RECORD_CREDITS:
1286 len = record->len / sizeof(struct htc_credit_report);
1287 if (!len) {
1288 WARN_ON(1);
1289 return -EINVAL;
1290 }
1291
1292 htc_proc_cred_rpt(target,
1293 (struct htc_credit_report *) record_buf,
1294 len, endpoint);
1295 break;
1296 case HTC_RECORD_LOOKAHEAD:
1297 len = record->len / sizeof(*lk_ahd);
1298 if (!len) {
1299 WARN_ON(1);
1300 return -EINVAL;
1301 }
1302
1303 lk_ahd = (struct htc_lookahead_report *) record_buf;
1304 if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF))
1305 && next_lk_ahds) {
1306
1307 ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
1308 "lk_ahd report found (pre valid:0x%X, post valid:0x%X)\n",
1309 lk_ahd->pre_valid, lk_ahd->post_valid);
1310
1311 /* look ahead bytes are valid, copy them over */
1312 memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);
1313
1314 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Next Look Ahead",
1315 next_lk_ahds, 4);
1316
1317 *n_lk_ahds = 1;
1318 }
1319 break;
1320 case HTC_RECORD_LOOKAHEAD_BUNDLE:
1321 len = record->len / sizeof(*bundle_lkahd_rpt);
1322 if (!len || (len > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
1323 WARN_ON(1);
1324 return -EINVAL;
1325 }
1326
1327 if (next_lk_ahds) {
1328 int i;
1329
1330 bundle_lkahd_rpt =
1331 (struct htc_bundle_lkahd_rpt *) record_buf;
1332
1333 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Bundle lk_ahd",
1334 record_buf, record->len);
1335
1336 for (i = 0; i < len; i++) {
1337 memcpy((u8 *)&next_lk_ahds[i],
1338 bundle_lkahd_rpt->lk_ahd, 4);
1339 bundle_lkahd_rpt++;
1340 }
1341
1342 *n_lk_ahds = i;
1343 }
1344 break;
1345 default:
1346 ath6kl_err("unhandled record: id:%d len:%d\n",
1347 record->rec_id, record->len);
1348 break;
1349 }
1350
1351 return 0;
1352
1353}
1354
1355static int htc_proc_trailer(struct htc_target *target,
1356 u8 *buf, int len, u32 *next_lk_ahds,
1357 int *n_lk_ahds, enum htc_endpoint_id endpoint)
1358{
1359 struct htc_record_hdr *record;
1360 int orig_len;
1361 int status;
1362 u8 *record_buf;
1363 u8 *orig_buf;
1364
1365 ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "+htc_proc_trailer (len:%d)\n", len);
1366
1367 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Recv Trailer", buf, len);
1368
1369 orig_buf = buf;
1370 orig_len = len;
1371 status = 0;
1372
1373 while (len > 0) {
1374
1375 if (len < sizeof(struct htc_record_hdr)) {
1376 status = -ENOMEM;
1377 break;
1378 }
1379 /* these are byte aligned structs */
1380 record = (struct htc_record_hdr *) buf;
1381 len -= sizeof(struct htc_record_hdr);
1382 buf += sizeof(struct htc_record_hdr);
1383
1384 if (record->len > len) {
1385 ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n",
1386 record->len, record->rec_id, len);
1387 status = -ENOMEM;
1388 break;
1389 }
1390 record_buf = buf;
1391
1392 status = htc_parse_trailer(target, record, record_buf,
1393 next_lk_ahds, endpoint, n_lk_ahds);
1394
1395 if (status)
1396 break;
1397
1398 /* advance buffer past this record for next time around */
1399 buf += record->len;
1400 len -= record->len;
1401 }
1402
1403 if (status)
1404 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD Recv Trailer",
1405 orig_buf, orig_len);
1406
1407 return status;
1408}
1409
1410static int htc_proc_rxhdr(struct htc_target *target,
1411 struct htc_packet *packet,
1412 u32 *next_lkahds, int *n_lkahds)
1413{
1414 int status = 0;
1415 u16 payload_len;
1416 u32 lk_ahd;
1417 struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf;
1418
1419 if (n_lkahds != NULL)
1420 *n_lkahds = 0;
1421
1422 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "HTC Recv PKT", packet->buf,
1423 packet->act_len);
1424
1425 /*
1426 * NOTE: we cannot assume the alignment of buf, so we use the safe
1427 * macros to retrieve 16 bit fields.
1428 */
1429 payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));
1430
1431 memcpy((u8 *)&lk_ahd, packet->buf, sizeof(lk_ahd));
1432
1433 if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) {
1434 /*
1435 * Refresh the expected header and the actual length as it
1436 * was unknown when this packet was grabbed as part of the
1437 * bundle.
1438 */
1439 packet->info.rx.exp_hdr = lk_ahd;
1440 packet->act_len = payload_len + HTC_HDR_LENGTH;
1441
1442 /* validate the actual header that was refreshed */
1443 if (packet->act_len > packet->buf_len) {
1444 ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
1445 payload_len, lk_ahd);
1446 /*
1447 * Limit this to max buffer just to print out some
1448 * of the buffer.
1449 */
1450 packet->act_len = min(packet->act_len, packet->buf_len);
1451 status = -ENOMEM;
1452 goto fail_rx;
1453 }
1454
1455 if (packet->endpoint != htc_hdr->eid) {
1456 ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
1457 htc_hdr->eid, packet->endpoint);
1458 status = -ENOMEM;
1459 goto fail_rx;
1460 }
1461 }
1462
1463 if (lk_ahd != packet->info.rx.exp_hdr) {
1464 ath6kl_err("htc_proc_rxhdr, lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
1465 packet, packet->info.rx.rx_flags);
1466 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Expected Message lk_ahd",
1467 &packet->info.rx.exp_hdr, 4);
1468 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Current Frame Header",
1469 (u8 *)&lk_ahd, sizeof(lk_ahd));
1470 status = -ENOMEM;
1471 goto fail_rx;
1472 }
1473
1474 if (htc_hdr->flags & HTC_FLG_RX_TRAILER) {
1475 if (htc_hdr->ctrl[0] < sizeof(struct htc_record_hdr) ||
1476 htc_hdr->ctrl[0] > payload_len) {
1477 ath6kl_err("htc_proc_rxhdr, invalid hdr (payload len should be :%d, CB[0] is:%d)\n",
1478 payload_len, htc_hdr->ctrl[0]);
1479 status = -ENOMEM;
1480 goto fail_rx;
1481 }
1482
1483 if (packet->info.rx.rx_flags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
1484 next_lkahds = NULL;
1485 n_lkahds = NULL;
1486 }
1487
1488 status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH
1489 + payload_len - htc_hdr->ctrl[0],
1490 htc_hdr->ctrl[0], next_lkahds,
1491 n_lkahds, packet->endpoint);
1492
1493 if (status)
1494 goto fail_rx;
1495
1496 packet->act_len -= htc_hdr->ctrl[0];
1497 }
1498
1499 packet->buf += HTC_HDR_LENGTH;
1500 packet->act_len -= HTC_HDR_LENGTH;
1501
1502fail_rx:
1503 if (status)
1504 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD HTC Recv PKT",
1505 packet->buf,
1506 packet->act_len < 256 ? packet->act_len : 256);
1507 else {
1508 if (packet->act_len > 0)
1509 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
1510 "HTC - Application Msg",
1511 packet->buf, packet->act_len);
1512 }
1513
1514 return status;
1515}
1516
1517static void do_rx_completion(struct htc_endpoint *endpoint,
1518 struct htc_packet *packet)
1519{
1520 ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
1521 "htc calling ep %d recv callback on packet 0x%p\n",
1522 endpoint->eid, packet);
1523 endpoint->ep_cb.rx(endpoint->target, packet);
1524}
1525
1526static int htc_issue_rxpkt_bundle(struct htc_target *target,
1527 struct list_head *rxq,
1528 struct list_head *sync_compq,
1529 int *n_pkt_fetched, bool part_bundle)
1530{
1531 struct hif_scatter_req *scat_req;
1532 struct htc_packet *packet;
1533 int rem_space = target->max_rx_bndl_sz;
1534 int n_scat_pkt, status = 0, i, len;
1535
1536 n_scat_pkt = get_queue_depth(rxq);
1537 n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max);
1538
1539 if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
1540 /*
1541		 * We were forced to split this bundle receive operation;
1542		 * all packets in this partial bundle must have their
1543 * lookaheads ignored.
1544 */
1545 part_bundle = true;
1546
1547 /*
1548 * This would only happen if the target ignored our max
1549 * bundle limit.
1550 */
1551 ath6kl_warn("htc_issue_rxpkt_bundle : partial bundle detected num:%d , %d\n",
1552 get_queue_depth(rxq), n_scat_pkt);
1553 }
1554
1555 len = 0;
1556
1557 ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
1558 "htc_issue_rxpkt_bundle (numpackets: %d , actual : %d)\n",
1559 get_queue_depth(rxq), n_scat_pkt);
1560
1561 scat_req = hif_scatter_req_get(target->dev->ar);
1562
1563 if (scat_req == NULL)
1564 goto fail_rx_pkt;
1565
1566 for (i = 0; i < n_scat_pkt; i++) {
1567 int pad_len;
1568
1569 packet = list_first_entry(rxq, struct htc_packet, list);
1570 list_del(&packet->list);
1571
1572 pad_len = CALC_TXRX_PADDED_LEN(target,
1573 packet->act_len);
1574
1575 if ((rem_space - pad_len) < 0) {
1576 list_add(&packet->list, rxq);
1577 break;
1578 }
1579
1580 rem_space -= pad_len;
1581
1582 if (part_bundle || (i < (n_scat_pkt - 1)))
1583 /*
1584			 * Packets 0..n-1 cannot be checked for look-aheads
1585			 * since we are fetching a bundle; the last packet,
1586			 * however, can have its lookahead used.
1587 */
1588 packet->info.rx.rx_flags |=
1589 HTC_RX_PKT_IGNORE_LOOKAHEAD;
1590
1591 /* NOTE: 1 HTC packet per scatter entry */
1592 scat_req->scat_list[i].buf = packet->buf;
1593 scat_req->scat_list[i].len = pad_len;
1594
1595 packet->info.rx.rx_flags |= HTC_RX_PKT_PART_OF_BUNDLE;
1596
1597 list_add_tail(&packet->list, sync_compq);
1598
1599 WARN_ON(!scat_req->scat_list[i].len);
1600 len += scat_req->scat_list[i].len;
1601 }
1602
1603 scat_req->len = len;
1604 scat_req->scat_entries = i;
1605
1606 status = ath6kldev_submit_scat_req(target->dev, scat_req, true);
1607
1608 if (!status)
1609 *n_pkt_fetched = i;
1610
1611 /* free scatter request */
1612 hif_scatter_req_add(target->dev->ar, scat_req);
1613
1614fail_rx_pkt:
1615
1616 return status;
1617}
1618
1619static int htc_proc_fetched_rxpkts(struct htc_target *target,
1620 struct list_head *comp_pktq, u32 lk_ahds[],
1621 int *n_lk_ahd)
1622{
1623 struct htc_packet *packet, *tmp_pkt;
1624 struct htc_endpoint *ep;
1625 int status = 0;
1626
1627 list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
1628 list_del(&packet->list);
1629 ep = &target->endpoint[packet->endpoint];
1630
1631 /* process header for each of the recv packet */
1632 status = htc_proc_rxhdr(target, packet, lk_ahds, n_lk_ahd);
1633 if (status)
1634 return status;
1635
1636 if (list_empty(comp_pktq)) {
1637 /*
1638 * Last packet's more packet flag is set
1639 * based on the lookahead.
1640 */
1641 if (*n_lk_ahd > 0)
1642 set_rxpkt_indication_flag(lk_ahds[0],
1643 ep, packet);
1644 } else
1645 /*
1646 * Packets in a bundle automatically have
1647 * this flag set.
1648 */
1649 packet->info.rx.indicat_flags |=
1650 HTC_RX_FLAGS_INDICATE_MORE_PKTS;
1651
1652 htc_update_rx_stats(ep, *n_lk_ahd);
1653
1654 if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE)
1655 ep->ep_st.rx_bundl += 1;
1656
1657 do_rx_completion(ep, packet);
1658 }
1659
1660 return status;
1661}
1662
1663static int htc_fetch_rxpkts(struct htc_target *target,
1664 struct list_head *rx_pktq,
1665 struct list_head *comp_pktq)
1666{
1667 int fetched_pkts;
1668 bool part_bundle = false;
1669 int status = 0;
1670
1671 /* now go fetch the list of HTC packets */
1672 while (!list_empty(rx_pktq)) {
1673 fetched_pkts = 0;
1674
1675 if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
1676 /*
1677 * There are enough packets to attempt a
1678 * bundle transfer and recv bundling is
1679 * allowed.
1680 */
1681 status = htc_issue_rxpkt_bundle(target, rx_pktq,
1682 comp_pktq,
1683 &fetched_pkts,
1684 part_bundle);
1685 if (status)
1686 return status;
1687
1688 if (!list_empty(rx_pktq))
1689 part_bundle = true;
1690 }
1691
1692 if (!fetched_pkts) {
1693 struct htc_packet *packet;
1694
1695 packet = list_first_entry(rx_pktq, struct htc_packet,
1696 list);
1697
1698 list_del(&packet->list);
1699
1700 /* fully synchronous */
1701 packet->completion = NULL;
1702
1703 if (!list_empty(rx_pktq))
1704 /*
1705 * look_aheads in all packet
1706 * except the last one in the
1707 * bundle must be ignored
1708 */
1709 packet->info.rx.rx_flags |=
1710 HTC_RX_PKT_IGNORE_LOOKAHEAD;
1711
1712 /* go fetch the packet */
1713 status = dev_rx_pkt(target, packet, packet->act_len);
1714 if (status)
1715 return status;
1716
1717 list_add_tail(&packet->list, comp_pktq);
1718 }
1719 }
1720
1721 return status;
1722}
1723
1724int htc_rxmsg_pending_handler(struct htc_target *target, u32 msg_look_ahead[],
1725 int *num_pkts)
1726{
1727 struct htc_packet *packets, *tmp_pkt;
1728 struct htc_endpoint *endpoint;
1729 struct list_head rx_pktq, comp_pktq;
1730 int status = 0;
1731 u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
1732 int num_look_ahead = 1;
1733 enum htc_endpoint_id id;
1734 int n_fetched = 0;
1735
1736 *num_pkts = 0;
1737
1738 /*
1739 * On first entry copy the look_aheads into our temp array for
1740 * processing
1741 */
1742 memcpy(look_aheads, msg_look_ahead, sizeof(look_aheads));
1743
1744 while (true) {
1745
1746 /*
1747 * First lookahead sets the expected endpoint IDs for all
1748 * packets in a bundle.
1749 */
1750 id = ((struct htc_frame_hdr *)&look_aheads[0])->eid;
1751 endpoint = &target->endpoint[id];
1752
1753 if (id >= ENDPOINT_MAX) {
1754 ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n",
1755 id);
1756 status = -ENOMEM;
1757 break;
1758 }
1759
1760 INIT_LIST_HEAD(&rx_pktq);
1761 INIT_LIST_HEAD(&comp_pktq);
1762
1763 /*
1764		 * Try to allocate as many HTC RX packets as indicated by the
1765 * look_aheads.
1766 */
1767 status = alloc_and_prep_rxpkts(target, look_aheads,
1768 num_look_ahead, endpoint,
1769 &rx_pktq);
1770 if (status)
1771 break;
1772
1773 if (get_queue_depth(&rx_pktq) >= 2)
1774 /*
1775			 * A recv bundle was detected; force an IRQ status
1776			 * re-check
1777 */
1778 target->chk_irq_status_cnt = 1;
1779
1780 n_fetched += get_queue_depth(&rx_pktq);
1781
1782 num_look_ahead = 0;
1783
1784 status = htc_fetch_rxpkts(target, &rx_pktq, &comp_pktq);
1785
1786 if (!status)
1787 chk_rx_water_mark(endpoint);
1788
1789 /* Process fetched packets */
1790 status = htc_proc_fetched_rxpkts(target, &comp_pktq,
1791 look_aheads, &num_look_ahead);
1792
1793 if (!num_look_ahead || status)
1794 break;
1795
1796 /*
1797 * For SYNCH processing, if we get here, we are running
1798 * through the loop again due to a detected lookahead. Set
1799 * flag that we should re-check IRQ status registers again
1800 * before leaving IRQ processing, this can net better
1801 * performance in high throughput situations.
1802 */
1803 target->chk_irq_status_cnt = 1;
1804 }
1805
1806 if (status) {
1807 ath6kl_err("failed to get pending recv messages: %d\n",
1808 status);
1809 /*
1810 * Cleanup any packets we allocated but didn't use to
1811 * actually fetch any packets.
1812 */
1813 list_for_each_entry_safe(packets, tmp_pkt, &rx_pktq, list) {
1814 list_del(&packets->list);
1815 htc_reclaim_rxbuf(target, packets,
1816 &target->endpoint[packets->endpoint]);
1817 }
1818
1819 /* cleanup any packets in sync completion queue */
1820 list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
1821 list_del(&packets->list);
1822 htc_reclaim_rxbuf(target, packets,
1823 &target->endpoint[packets->endpoint]);
1824 }
1825
1826 if (target->htc_flags & HTC_OP_STATE_STOPPING) {
1827 ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
1828 ath6kldev_rx_control(target->dev, false);
1829 }
1830 }
1831
1832 /*
1833 * Before leaving, check to see if host ran out of buffers and
1834 * needs to stop the receiver.
1835 */
1836 if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
1837 ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
1838 ath6kldev_rx_control(target->dev, false);
1839 }
1840 *num_pkts = n_fetched;
1841
1842 return status;
1843}
1844
1845/*
1846 * Synchronously wait for a control message from the target.
1847 * This function is used at initialization time ONLY. At init,
1848 * messages on ENDPOINT 0 are expected.
1849 */
1850static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
1851{
1852 struct htc_packet *packet = NULL;
1853 struct htc_frame_hdr *htc_hdr;
1854 u32 look_ahead;
1855
1856 if (ath6kldev_poll_mboxmsg_rx(target->dev, &look_ahead,
1857 HTC_TARGET_RESPONSE_TIMEOUT))
1858 return NULL;
1859
1860 ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
1861 "htc_wait_for_ctrl_msg: look_ahead : 0x%X\n", look_ahead);
1862
1863 htc_hdr = (struct htc_frame_hdr *)&look_ahead;
1864
1865 if (htc_hdr->eid != ENDPOINT_0)
1866 return NULL;
1867
1868 packet = htc_get_control_buf(target, false);
1869
1870 if (!packet)
1871 return NULL;
1872
1873 packet->info.rx.rx_flags = 0;
1874 packet->info.rx.exp_hdr = look_ahead;
1875 packet->act_len = le16_to_cpu(htc_hdr->payld_len) + HTC_HDR_LENGTH;
1876
1877 if (packet->act_len > packet->buf_len)
1878 goto fail_ctrl_rx;
1879
1880 /* we want synchronous operation */
1881 packet->completion = NULL;
1882
1883 /* get the message from the device, this will block */
1884 if (dev_rx_pkt(target, packet, packet->act_len))
1885 goto fail_ctrl_rx;
1886
1887 /* process receive header */
1888 packet->status = htc_proc_rxhdr(target, packet, NULL, NULL);
1889
1890 if (packet->status) {
1891 ath6kl_err("htc_wait_for_ctrl_msg, htc_proc_rxhdr failed (status = %d)\n",
1892 packet->status);
1893 goto fail_ctrl_rx;
1894 }
1895
1896 return packet;
1897
1898fail_ctrl_rx:
1899 if (packet != NULL) {
1900 htc_rxpkt_reset(packet);
1901 reclaim_rx_ctrl_buf(target, packet);
1902 }
1903
1904 return NULL;
1905}
1906
1907int htc_add_rxbuf_multiple(struct htc_target *target,
1908 struct list_head *pkt_queue)
1909{
1910 struct htc_endpoint *endpoint;
1911 struct htc_packet *first_pkt;
1912 bool rx_unblock = false;
1913 int status = 0, depth;
1914
1915 if (list_empty(pkt_queue))
1916 return -ENOMEM;
1917
1918 first_pkt = list_first_entry(pkt_queue, struct htc_packet, list);
1919
1920 if (first_pkt->endpoint >= ENDPOINT_MAX)
1921 return status;
1922
1923 depth = get_queue_depth(pkt_queue);
1924
1925 ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
1926 "htc_add_rxbuf_multiple: ep id: %d, cnt:%d, len: %d\n",
1927 first_pkt->endpoint, depth, first_pkt->buf_len);
1928
1929 endpoint = &target->endpoint[first_pkt->endpoint];
1930
1931 if (target->htc_flags & HTC_OP_STATE_STOPPING) {
1932 struct htc_packet *packet, *tmp_pkt;
1933
1934 /* walk through queue and mark each one canceled */
1935 list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
1936 packet->status = -ECANCELED;
1937 list_del(&packet->list);
1938 do_rx_completion(endpoint, packet);
1939 }
1940
1941 return status;
1942 }
1943
1944 spin_lock_bh(&target->rx_lock);
1945
1946 list_splice_tail_init(pkt_queue, &endpoint->rx_bufq);
1947
1948 /* check if we are blocked waiting for a new buffer */
1949 if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
1950 if (target->ep_waiting == first_pkt->endpoint) {
1951 ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
1952 "receiver was blocked on ep:%d, unblocking.\n",
1953 target->ep_waiting);
1954 target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
1955 target->ep_waiting = ENDPOINT_MAX;
1956 rx_unblock = true;
1957 }
1958 }
1959
1960 spin_unlock_bh(&target->rx_lock);
1961
1962 if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
1963 /* TODO : implement a buffer threshold count? */
1964 ath6kldev_rx_control(target->dev, true);
1965
1966 return status;
1967}
1968
1969void htc_flush_rx_buf(struct htc_target *target)
1970{
1971 struct htc_endpoint *endpoint;
1972 struct htc_packet *packet, *tmp_pkt;
1973 int i;
1974
1975 for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
1976 endpoint = &target->endpoint[i];
1977 if (!endpoint->svc_id)
1978 /* not in use.. */
1979 continue;
1980
1981 spin_lock_bh(&target->rx_lock);
1982 list_for_each_entry_safe(packet, tmp_pkt,
1983 &endpoint->rx_bufq, list) {
1984 list_del(&packet->list);
1985 spin_unlock_bh(&target->rx_lock);
1986 ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
1987 "flushing rx pkt:0x%p, len:%d, ep:%d\n",
1988 packet, packet->buf_len,
1989 packet->endpoint);
1990 dev_kfree_skb(packet->pkt_cntxt);
1991 spin_lock_bh(&target->rx_lock);
1992 }
1993 spin_unlock_bh(&target->rx_lock);
1994 }
1995}
1996
1997int htc_conn_service(struct htc_target *target,
1998 struct htc_service_connect_req *conn_req,
1999 struct htc_service_connect_resp *conn_resp)
2000{
2001 struct htc_packet *rx_pkt = NULL;
2002 struct htc_packet *tx_pkt = NULL;
2003 struct htc_conn_service_resp *resp_msg;
2004 struct htc_conn_service_msg *conn_msg;
2005 struct htc_endpoint *endpoint;
2006 enum htc_endpoint_id assigned_ep = ENDPOINT_MAX;
2007 unsigned int max_msg_sz = 0;
2008 int status = 0;
2009
2010 ath6kl_dbg(ATH6KL_DBG_TRC,
2011 "htc_conn_service, target:0x%p service id:0x%X\n",
2012 target, conn_req->svc_id);
2013
2014 if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
2015 /* special case for pseudo control service */
2016 assigned_ep = ENDPOINT_0;
2017 max_msg_sz = HTC_MAX_CTRL_MSG_LEN;
2018 } else {
2019 /* allocate a packet to send to the target */
2020 tx_pkt = htc_get_control_buf(target, true);
2021
2022 if (!tx_pkt)
2023 return -ENOMEM;
2024
2025 conn_msg = (struct htc_conn_service_msg *)tx_pkt->buf;
2026 memset(conn_msg, 0, sizeof(*conn_msg));
2027 conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
2028 conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
2029 conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags);
2030
2031 set_htc_pkt_info(tx_pkt, NULL, (u8 *) conn_msg,
2032 sizeof(*conn_msg) + conn_msg->svc_meta_len,
2033 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
2034
2035 /* we want synchronous operation */
2036 tx_pkt->completion = NULL;
2037 htc_prep_send_pkt(tx_pkt, 0, 0, 0);
2038 status = htc_issue_send(target, tx_pkt);
2039
2040 if (status)
2041 goto fail_tx;
2042
2043 /* wait for response */
2044 rx_pkt = htc_wait_for_ctrl_msg(target);
2045
2046 if (!rx_pkt) {
2047 status = -ENOMEM;
2048 goto fail_tx;
2049 }
2050
2051 resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf;
2052
2053 if ((le16_to_cpu(resp_msg->msg_id) != HTC_MSG_CONN_SVC_RESP_ID)
2054 || (rx_pkt->act_len < sizeof(*resp_msg))) {
2055 status = -ENOMEM;
2056 goto fail_tx;
2057 }
2058
2059 conn_resp->resp_code = resp_msg->status;
2060 /* check response status */
2061 if (resp_msg->status != HTC_SERVICE_SUCCESS) {
2062 ath6kl_err("target failed service 0x%X connect request (status:%d)\n",
2063 resp_msg->svc_id, resp_msg->status);
2064 status = -ENOMEM;
2065 goto fail_tx;
2066 }
2067
2068 assigned_ep = (enum htc_endpoint_id)resp_msg->eid;
2069 max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz);
2070 }
2071
2072 if (assigned_ep >= ENDPOINT_MAX || !max_msg_sz) {
2073 status = -ENOMEM;
2074 goto fail_tx;
2075 }
2076
2077 endpoint = &target->endpoint[assigned_ep];
2078 endpoint->eid = assigned_ep;
2079 if (endpoint->svc_id) {
2080 status = -ENOMEM;
2081 goto fail_tx;
2082 }
2083
2084 /* return assigned endpoint to caller */
2085 conn_resp->endpoint = assigned_ep;
2086 conn_resp->len_max = max_msg_sz;
2087
2088 /* setup the endpoint */
2089
2090 /* this marks the endpoint in use */
2091 endpoint->svc_id = conn_req->svc_id;
2092
2093 endpoint->max_txq_depth = conn_req->max_txq_depth;
2094 endpoint->len_max = max_msg_sz;
2095 endpoint->ep_cb = conn_req->ep_cb;
2096 endpoint->cred_dist.svc_id = conn_req->svc_id;
2097 endpoint->cred_dist.htc_rsvd = endpoint;
2098 endpoint->cred_dist.endpoint = assigned_ep;
2099 endpoint->cred_dist.cred_sz = target->tgt_cred_sz;
2100
2101 if (conn_req->max_rxmsg_sz) {
2102 /*
2103		 * Override the cred_per_msg calculation; this optimizes
2104 * the credit-low indications since the host will actually
2105 * issue smaller messages in the Send path.
2106 */
2107 if (conn_req->max_rxmsg_sz > max_msg_sz) {
2108 status = -ENOMEM;
2109 goto fail_tx;
2110 }
2111 endpoint->cred_dist.cred_per_msg =
2112 conn_req->max_rxmsg_sz / target->tgt_cred_sz;
2113 } else
2114 endpoint->cred_dist.cred_per_msg =
2115 max_msg_sz / target->tgt_cred_sz;
2116
2117 if (!endpoint->cred_dist.cred_per_msg)
2118 endpoint->cred_dist.cred_per_msg = 1;
2119
2120 /* save local connection flags */
2121 endpoint->conn_flags = conn_req->flags;
2122
2123fail_tx:
2124 if (tx_pkt)
2125 htc_reclaim_txctrl_buf(target, tx_pkt);
2126
2127 if (rx_pkt) {
2128 htc_rxpkt_reset(rx_pkt);
2129 reclaim_rx_ctrl_buf(target, rx_pkt);
2130 }
2131
2132 return status;
2133}
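
As an aside on the cred_per_msg computation above: the integer division truncates, and the clamp a few lines up guards against a zero result. A minimal sketch of the arithmetic with hypothetical values (the sketch_ helper name is not part of the driver):

/* Illustrative sketch only; mirrors the cred_per_msg logic above. */
static int sketch_cred_per_msg(unsigned int msg_sz, int cred_sz)
{
	int cred_per_msg = msg_sz / cred_sz;	/* e.g. 1664 / 128 = 13 */

	if (!cred_per_msg)
		cred_per_msg = 1;	/* a message always costs at least one credit */

	return cred_per_msg;
}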
2134
2135static void reset_ep_state(struct htc_target *target)
2136{
2137 struct htc_endpoint *endpoint;
2138 int i;
2139
2140 for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
2141 endpoint = &target->endpoint[i];
2142 memset(&endpoint->cred_dist, 0, sizeof(endpoint->cred_dist));
2143 endpoint->svc_id = 0;
2144 endpoint->len_max = 0;
2145 endpoint->max_txq_depth = 0;
2146 memset(&endpoint->ep_st, 0,
2147 sizeof(endpoint->ep_st));
2148 INIT_LIST_HEAD(&endpoint->rx_bufq);
2149 INIT_LIST_HEAD(&endpoint->txq);
2150 endpoint->target = target;
2151 }
2152
2153 /* reset distribution list */
2154 INIT_LIST_HEAD(&target->cred_dist_list);
2155}
2156
2157int htc_get_rxbuf_num(struct htc_target *target, enum htc_endpoint_id endpoint)
2158{
2159 int num;
2160
2161 spin_lock_bh(&target->rx_lock);
2162 num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
2163 spin_unlock_bh(&target->rx_lock);
2164 return num;
2165}
2166
2167static void htc_setup_msg_bndl(struct htc_target *target)
2168{
2169 /* limit what HTC can handle */
2170 target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE,
2171 target->msg_per_bndl_max);
2172
2173 if (ath6kl_hif_enable_scatter(target->dev->ar)) {
2174 target->msg_per_bndl_max = 0;
2175 return;
2176 }
2177
2178	/* limit the bundle to what the device layer can handle */
2179 target->msg_per_bndl_max = min(target->max_scat_entries,
2180 target->msg_per_bndl_max);
2181
2182 ath6kl_dbg(ATH6KL_DBG_TRC,
2183 "htc bundling allowed. max msg per htc bundle: %d\n",
2184 target->msg_per_bndl_max);
2185
2186 /* Max rx bundle size is limited by the max tx bundle size */
2187 target->max_rx_bndl_sz = target->max_xfer_szper_scatreq;
2188	/* Max tx bundle size is limited by the extended mbox address range */
2189 target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
2190 target->max_xfer_szper_scatreq);
2191
2192 ath6kl_dbg(ATH6KL_DBG_ANY, "max recv: %d max send: %d\n",
2193 target->max_rx_bndl_sz, target->max_tx_bndl_sz);
2194
2195 if (target->max_tx_bndl_sz)
2196 target->tx_bndl_enable = true;
2197
2198 if (target->max_rx_bndl_sz)
2199 target->rx_bndl_enable = true;
2200
2201 if ((target->tgt_cred_sz % target->block_sz) != 0) {
2202 ath6kl_warn("credit size: %d is not block aligned! Disabling send bundling\n",
2203 target->tgt_cred_sz);
2204
2205 /*
2206		 * Disallow send bundling since the credit size is
2207		 * not aligned to a block size; the I/O block
2208		 * padding will spill into the next credit buffer,
2209		 * which is fatal.
2210 */
2211 target->tx_bndl_enable = false;
2212 }
2213}
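
The alignment check above is the entire safety condition for send bundling: padding each message to the I/O block size must not run past its credit-sized slot. A small hedged helper expressing the same predicate (hypothetical, not in the driver):

/* Illustrative: tx bundling is safe only when the credit size is block aligned. */
static inline bool sketch_tx_bundling_ok(unsigned int cred_sz, u32 block_sz)
{
	return (cred_sz % block_sz) == 0;	/* e.g. 256 % 128 == 0 -> safe */
}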
2214
2215int htc_wait_target(struct htc_target *target)
2216{
2217 struct htc_packet *packet = NULL;
2218 struct htc_ready_ext_msg *rdy_msg;
2219 struct htc_service_connect_req connect;
2220 struct htc_service_connect_resp resp;
2221 int status;
2222
2223 /* we should be getting 1 control message that the target is ready */
2224 packet = htc_wait_for_ctrl_msg(target);
2225
2226 if (!packet)
2227 return -ENOMEM;
2228
2229 /* we controlled the buffer creation so it's properly aligned */
2230 rdy_msg = (struct htc_ready_ext_msg *)packet->buf;
2231
2232 if ((le16_to_cpu(rdy_msg->ver2_0_info.msg_id) != HTC_MSG_READY_ID) ||
2233 (packet->act_len < sizeof(struct htc_ready_msg))) {
2234 status = -ENOMEM;
2235 goto fail_wait_target;
2236 }
2237
2238 if (!rdy_msg->ver2_0_info.cred_cnt || !rdy_msg->ver2_0_info.cred_sz) {
2239 status = -ENOMEM;
2240 goto fail_wait_target;
2241 }
2242
2243 target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
2244 target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);
2245
2246 ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
2247 "target ready: credits: %d credit size: %d\n",
2248 target->tgt_creds, target->tgt_cred_sz);
2249
2250 /* check if this is an extended ready message */
2251 if (packet->act_len >= sizeof(struct htc_ready_ext_msg)) {
2252 /* this is an extended message */
2253 target->htc_tgt_ver = rdy_msg->htc_ver;
2254 target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl;
2255 } else {
2256 /* legacy */
2257 target->htc_tgt_ver = HTC_VERSION_2P0;
2258 target->msg_per_bndl_max = 0;
2259 }
2260
2261 ath6kl_dbg(ATH6KL_DBG_TRC, "using htc protocol version : %s (%d)\n",
2262 (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
2263 target->htc_tgt_ver);
2264
2265 if (target->msg_per_bndl_max > 0)
2266 htc_setup_msg_bndl(target);
2267
2268 /* setup our pseudo HTC control endpoint connection */
2269 memset(&connect, 0, sizeof(connect));
2270 memset(&resp, 0, sizeof(resp));
2271 connect.ep_cb.rx = htc_ctrl_rx;
2272 connect.ep_cb.rx_refill = NULL;
2273 connect.ep_cb.tx_full = NULL;
2274 connect.max_txq_depth = NUM_CONTROL_BUFFERS;
2275 connect.svc_id = HTC_CTRL_RSVD_SVC;
2276
2277 /* connect fake service */
2278 status = htc_conn_service((void *)target, &connect, &resp);
2279
2280 if (status)
2281 ath6kl_hif_cleanup_scatter(target->dev->ar);
2282
2283fail_wait_target:
2284 if (packet) {
2285 htc_rxpkt_reset(packet);
2286 reclaim_rx_ctrl_buf(target, packet);
2287 }
2288
2289 return status;
2290}
2291
2292/*
2293 * Start HTC, enable interrupts and let the target know
2294 * host has finished setup.
2295 */
2296int htc_start(struct htc_target *target)
2297{
2298 struct htc_packet *packet;
2299 int status;
2300
2301 /* Disable interrupts at the chip level */
2302 ath6kldev_disable_intrs(target->dev);
2303
2304 target->htc_flags = 0;
2305 target->rx_st_flags = 0;
2306
2307 /* Push control receive buffers into htc control endpoint */
2308 while ((packet = htc_get_control_buf(target, false)) != NULL) {
2309 status = htc_add_rxbuf(target, packet);
2310 if (status)
2311 return status;
2312 }
2313
2314 /* NOTE: the first entry in the distribution list is ENDPOINT_0 */
2315 ath6k_credit_init(target->cred_dist_cntxt, &target->cred_dist_list,
2316 target->tgt_creds);
2317
2318 dump_cred_dist_stats(target);
2319
2320	/* Indicate setup completion to the target */
2321 status = htc_setup_tx_complete(target);
2322
2323 if (status)
2324 return status;
2325
2326 /* unmask interrupts */
2327 status = ath6kldev_unmask_intrs(target->dev);
2328
2329 if (status)
2330 htc_stop(target);
2331
2332 return status;
2333}
2334
2335/* htc_stop: stop interrupt reception, and flush all queued buffers */
2336void htc_stop(struct htc_target *target)
2337{
2338 spin_lock_bh(&target->htc_lock);
2339 target->htc_flags |= HTC_OP_STATE_STOPPING;
2340 spin_unlock_bh(&target->htc_lock);
2341
2342 /*
2343	 * Masking interrupts is a synchronous operation; when this
2344	 * function returns, all pending HIF I/O has completed and we can
2345	 * safely flush the queues.
2346 */
2347 ath6kldev_mask_intrs(target->dev);
2348
2349 htc_flush_txep_all(target);
2350
2351 htc_flush_rx_buf(target);
2352
2353 reset_ep_state(target);
2354}
2355
2356void *htc_create(struct ath6kl *ar)
2357{
2358 struct htc_target *target = NULL;
2359 struct htc_packet *packet;
2360 int status = 0, i = 0;
2361 u32 block_size, ctrl_bufsz;
2362
2363 target = kzalloc(sizeof(*target), GFP_KERNEL);
2364 if (!target) {
2365 ath6kl_err("unable to allocate memory\n");
2366 return NULL;
2367 }
2368
2369 target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
2370 if (!target->dev) {
2371 ath6kl_err("unable to allocate memory\n");
2372 status = -ENOMEM;
2373 goto fail_create_htc;
2374 }
2375
2376 spin_lock_init(&target->htc_lock);
2377 spin_lock_init(&target->rx_lock);
2378 spin_lock_init(&target->tx_lock);
2379
2380 INIT_LIST_HEAD(&target->free_ctrl_txbuf);
2381 INIT_LIST_HEAD(&target->free_ctrl_rxbuf);
2382 INIT_LIST_HEAD(&target->cred_dist_list);
2383
2384 target->dev->ar = ar;
2385 target->dev->htc_cnxt = target;
2386 target->ep_waiting = ENDPOINT_MAX;
2387
2388 reset_ep_state(target);
2389
2390 status = ath6kldev_setup(target->dev);
2391
2392 if (status)
2393 goto fail_create_htc;
2394
2395 block_size = ar->mbox_info.block_size;
2396
2397 ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
2398 (block_size + HTC_HDR_LENGTH) :
2399 (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);
2400
2401 for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
2402 packet = kzalloc(sizeof(*packet), GFP_KERNEL);
2403 if (!packet)
2404 break;
2405
2406 packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
2407 if (!packet->buf_start) {
2408 kfree(packet);
2409 break;
2410 }
2411
2412 packet->buf_len = ctrl_bufsz;
2413 if (i < NUM_CONTROL_RX_BUFFERS) {
2414 packet->act_len = 0;
2415 packet->buf = packet->buf_start;
2416 packet->endpoint = ENDPOINT_0;
2417 list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
2418 } else
2419 list_add_tail(&packet->list, &target->free_ctrl_txbuf);
2420 }
2421
2422fail_create_htc:
2423 if (i != NUM_CONTROL_BUFFERS || status) {
2424 if (target) {
2425 htc_cleanup(target);
2426 target = NULL;
2427 }
2428 }
2429
2430 return target;
2431}
2432
2433/* cleanup the HTC instance */
2434void htc_cleanup(struct htc_target *target)
2435{
2436 struct htc_packet *packet, *tmp_packet;
2437
2438 ath6kl_hif_cleanup_scatter(target->dev->ar);
2439
2440 list_for_each_entry_safe(packet, tmp_packet,
2441 &target->free_ctrl_txbuf, list) {
2442 list_del(&packet->list);
2443 kfree(packet->buf_start);
2444 kfree(packet);
2445 }
2446
2447 list_for_each_entry_safe(packet, tmp_packet,
2448 &target->free_ctrl_rxbuf, list) {
2449 list_del(&packet->list);
2450 kfree(packet->buf_start);
2451 kfree(packet);
2452 }
2453
2454 kfree(target->dev);
2455 kfree(target);
2456}
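
Read together, the functions in this file imply a bring-up order: htc_create() allocates state and control buffers, htc_wait_target() consumes the target's ready message, services are connected, and htc_start() pushes rx buffers and unmasks interrupts. A hedged sketch of how a caller might drive that sequence (illustrative only; the init code later in this patch is the authoritative version):

static int sketch_htc_bringup(struct ath6kl *ar)
{
	struct htc_target *target;
	int ret;

	target = htc_create(ar);	/* allocate HTC state + control buffers */
	if (!target)
		return -ENOMEM;

	ret = htc_wait_target(target);	/* block for the target ready message */
	if (ret)
		goto err;

	/* ... one htc_conn_service() call per WMI service would go here ... */

	ret = htc_start(target);	/* queue rx buffers, unmask interrupts */
	if (ret)
		goto err;

	return 0;

err:
	htc_stop(target);
	htc_cleanup(target);
	return ret;
}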
diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h
new file mode 100644
index 000000000000..d844d36e40cf
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/htc.h
@@ -0,0 +1,604 @@
1/*
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef HTC_H
18#define HTC_H
19
20#include "common.h"
21
22/* frame header flags */
23
24/* send direction */
25#define HTC_FLAGS_NEED_CREDIT_UPDATE (1 << 0)
26#define HTC_FLAGS_SEND_BUNDLE (1 << 1)
27
28/* receive direction */
29#define HTC_FLG_RX_UNUSED (1 << 0)
30#define HTC_FLG_RX_TRAILER (1 << 1)
31/* Bundle count mask and shift */
32#define HTC_FLG_RX_BNDL_CNT (0xF0)
33#define HTC_FLG_RX_BNDL_CNT_S 4
34
35#define HTC_HDR_LENGTH (sizeof(struct htc_frame_hdr))
36#define HTC_MAX_PAYLOAD_LENGTH (4096 - sizeof(struct htc_frame_hdr))
37
38/* HTC control message IDs */
39
40#define HTC_MSG_READY_ID 1
41#define HTC_MSG_CONN_SVC_ID 2
42#define HTC_MSG_CONN_SVC_RESP_ID 3
43#define HTC_MSG_SETUP_COMPLETE_ID 4
44#define HTC_MSG_SETUP_COMPLETE_EX_ID 5
45
46#define HTC_MAX_CTRL_MSG_LEN 256
47
48#define HTC_VERSION_2P0 0x00
49#define HTC_VERSION_2P1 0x01
50
51#define HTC_SERVICE_META_DATA_MAX_LENGTH 128
52
53#define HTC_CONN_FLGS_THRESH_LVL_QUAT 0x0
54#define HTC_CONN_FLGS_THRESH_LVL_HALF 0x1
55#define HTC_CONN_FLGS_THRESH_LVL_THREE_QUAT 0x2
56#define HTC_CONN_FLGS_REDUCE_CRED_DRIB 0x4
57#define HTC_CONN_FLGS_THRESH_MASK 0x3
58
59/* connect response status codes */
60#define HTC_SERVICE_SUCCESS 0
61#define HTC_SERVICE_NOT_FOUND 1
62#define HTC_SERVICE_FAILED 2
63
64/* no resources (i.e. no more endpoints) */
65#define HTC_SERVICE_NO_RESOURCES 3
66
67/* specific service is not allowing any more endpoints */
68#define HTC_SERVICE_NO_MORE_EP 4
69
70/* report record IDs */
71#define HTC_RECORD_NULL 0
72#define HTC_RECORD_CREDITS 1
73#define HTC_RECORD_LOOKAHEAD 2
74#define HTC_RECORD_LOOKAHEAD_BUNDLE 3
75
76#define HTC_SETUP_COMP_FLG_RX_BNDL_EN (1 << 0)
77
78#define MAKE_SERVICE_ID(group, index) \
79 (int)(((int)group << 8) | (int)(index))
80
81/* NOTE: service ID of 0x0000 is reserved and should never be used */
82#define HTC_CTRL_RSVD_SVC MAKE_SERVICE_ID(RSVD_SERVICE_GROUP, 1)
83#define WMI_CONTROL_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 0)
84#define WMI_DATA_BE_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 1)
85#define WMI_DATA_BK_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 2)
86#define WMI_DATA_VI_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 3)
87#define WMI_DATA_VO_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 4)
88#define WMI_MAX_SERVICES 5
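
For concreteness, MAKE_SERVICE_ID() packs the group into the high byte and the index into the low byte, so with the group enum values defined later in this header the service IDs above work out to fixed 16-bit constants (editor's worked expansion, not driver code):

/*
 * HTC_CTRL_RSVD_SVC = MAKE_SERVICE_ID(0, 1) = (0 << 8) | 1 = 0x0001
 * WMI_CONTROL_SVC   = MAKE_SERVICE_ID(1, 0) = (1 << 8) | 0 = 0x0100
 * WMI_DATA_VO_SVC   = MAKE_SERVICE_ID(1, 4) = (1 << 8) | 4 = 0x0104
 */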
89
90/* reserved and used to flush ALL packets */
91#define HTC_TX_PACKET_TAG_ALL 0
92#define HTC_SERVICE_TX_PACKET_TAG 1
93#define HTC_TX_PACKET_TAG_USER_DEFINED (HTC_SERVICE_TX_PACKET_TAG + 9)
94
95/* more packets on this endpoint are being fetched */
96#define HTC_RX_FLAGS_INDICATE_MORE_PKTS (1 << 0)
97
98/* TODO.. for BMI */
99#define ENDPOINT1 0
100/* TODO - remove me, but we have to fix BMI first */
101#define HTC_MAILBOX_NUM_MAX 4
102
103/* enable send bundle padding for this endpoint */
104#define HTC_FLGS_TX_BNDL_PAD_EN (1 << 0)
105#define HTC_EP_ACTIVE ((u32) (1u << 31))
106
107/* HTC operational parameters */
108#define HTC_TARGET_RESPONSE_TIMEOUT 2000 /* in ms */
109#define HTC_TARGET_DEBUG_INTR_MASK 0x01
110#define HTC_TARGET_CREDIT_INTR_MASK 0xF0
111
112#define HTC_HOST_MAX_MSG_PER_BUNDLE 8
113#define HTC_MIN_HTC_MSGS_TO_BUNDLE 2
114
115/* packet flags */
116
117#define HTC_RX_PKT_IGNORE_LOOKAHEAD (1 << 0)
118#define HTC_RX_PKT_REFRESH_HDR (1 << 1)
119#define HTC_RX_PKT_PART_OF_BUNDLE (1 << 2)
120#define HTC_RX_PKT_NO_RECYCLE (1 << 3)
121
122#define NUM_CONTROL_BUFFERS 8
123#define NUM_CONTROL_TX_BUFFERS 2
124#define NUM_CONTROL_RX_BUFFERS (NUM_CONTROL_BUFFERS - NUM_CONTROL_TX_BUFFERS)
125
126#define HTC_RECV_WAIT_BUFFERS (1 << 0)
127#define HTC_OP_STATE_STOPPING (1 << 0)
128
129/*
130 * The frame header length and message formats defined herein were selected
131 * to accommodate optimal alignment for target processing. This reduces
132 * code size and improves performance. Any changes to the header length may
133 * alter the alignment and cause exceptions on the target. When adding to
134 * the message structures, ensure that fields are properly aligned.
135 */
136
137/* HTC frame header
138 *
139 * NOTE: do not remove or re-arrange the fields, these are minimally
140 * required to take advantage of 4-byte lookaheads in some hardware
141 * implementations.
142 */
143struct htc_frame_hdr {
144 u8 eid;
145 u8 flags;
146
147 /* length of data (including trailer) that follows the header */
148 __le16 payld_len;
149
150 /* end of 4-byte lookahead */
151
152 u8 ctrl[2];
153} __packed;
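
The layout matters because the first four bytes of this header are exactly the 4-byte lookahead word the hardware exposes, which is why htc_wait_for_ctrl_msg() in htc.c can cast a u32 lookahead to a header pointer. A hedged illustration of that peek (hypothetical helper name):

static inline u16 sketch_lookahead_payld_len(u32 look_ahead)
{
	/* eid, flags and payld_len all fall within the 4-byte lookahead */
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&look_ahead;

	return le16_to_cpu(htc_hdr->payld_len);
}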
154
155/* HTC ready message */
156struct htc_ready_msg {
157 __le16 msg_id;
158 __le16 cred_cnt;
159 __le16 cred_sz;
160 u8 max_ep;
161 u8 pad;
162} __packed;
163
164/* extended HTC ready message */
165struct htc_ready_ext_msg {
166 struct htc_ready_msg ver2_0_info;
167 u8 htc_ver;
168 u8 msg_per_htc_bndl;
169} __packed;
170
171/* connect service */
172struct htc_conn_service_msg {
173 __le16 msg_id;
174 __le16 svc_id;
175 __le16 conn_flags;
176 u8 svc_meta_len;
177 u8 pad;
178} __packed;
179
180/* connect response */
181struct htc_conn_service_resp {
182 __le16 msg_id;
183 __le16 svc_id;
184 u8 status;
185 u8 eid;
186 __le16 max_msg_sz;
187 u8 svc_meta_len;
188 u8 pad;
189} __packed;
190
191struct htc_setup_comp_msg {
192 __le16 msg_id;
193} __packed;
194
195/* extended setup completion message */
196struct htc_setup_comp_ext_msg {
197 __le16 msg_id;
198 __le32 flags;
199 u8 msg_per_rxbndl;
200 u8 Rsvd[3];
201} __packed;
202
203struct htc_record_hdr {
204 u8 rec_id;
205 u8 len;
206} __packed;
207
208struct htc_credit_report {
209 u8 eid;
210 u8 credits;
211} __packed;
212
213/*
214 * NOTE: The lk_ahd array is guarded by pre_valid
215 * and post_valid guard bytes. The pre_valid byte must
216 * equal the inverse of the post_valid byte.
217 */
218struct htc_lookahead_report {
219 u8 pre_valid;
220 u8 lk_ahd[4];
221 u8 post_valid;
222} __packed;
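
Per the NOTE above, a report is trusted only when the guard bytes agree. A minimal validity check along those lines (hypothetical helper; the driver open-codes this test when parsing trailers):

static inline bool sketch_lk_ahd_valid(const struct htc_lookahead_report *rpt)
{
	/* pre_valid must be the bitwise inverse of post_valid */
	return rpt->pre_valid == ((~rpt->post_valid) & 0xFF);
}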
223
224struct htc_bundle_lkahd_rpt {
225 u8 lk_ahd[4];
226} __packed;
227
228/* Current service IDs */
229
230enum htc_service_grp_ids {
231 RSVD_SERVICE_GROUP = 0,
232 WMI_SERVICE_GROUP = 1,
233
234 HTC_TEST_GROUP = 254,
235 HTC_SERVICE_GROUP_LAST = 255
236};
237
238/* ------ endpoint IDS ------ */
239
240enum htc_endpoint_id {
241 ENDPOINT_UNUSED = -1,
242 ENDPOINT_0 = 0,
243 ENDPOINT_1 = 1,
244 ENDPOINT_2 = 2,
245 ENDPOINT_3,
246 ENDPOINT_4,
247 ENDPOINT_5,
248 ENDPOINT_6,
249 ENDPOINT_7,
250 ENDPOINT_8,
251 ENDPOINT_MAX,
252};
253
254struct htc_tx_packet_info {
255 u16 tag;
256 int cred_used;
257 u8 flags;
258 int seqno;
259};
260
261struct htc_rx_packet_info {
262 u32 exp_hdr;
263 u32 rx_flags;
264 u32 indicat_flags;
265};
266
267struct htc_target;
268
269/* wrapper around endpoint-specific packets */
270struct htc_packet {
271 struct list_head list;
272
273 /* caller's per packet specific context */
274 void *pkt_cntxt;
275
276 /*
277	 * the true buffer start; the caller can store the real
278 * buffer start here. In receive callbacks, the HTC layer
279 * sets buf to the start of the payload past the header.
280 * This field allows the caller to reset buf when it recycles
281 * receive packets back to HTC.
282 */
283 u8 *buf_start;
284
285 /*
286 * Pointer to the start of the buffer. In the transmit
287 * direction this points to the start of the payload. In the
288 * receive direction, however, the buffer when queued up
289 * points to the start of the HTC header but when returned
290 * to the caller points to the start of the payload
291 */
292 u8 *buf;
293 u32 buf_len;
294
295 /* actual length of payload */
296 u32 act_len;
297
298 /* endpoint that this packet was sent/recv'd from */
299 enum htc_endpoint_id endpoint;
300
301 /* completion status */
302
303 int status;
304 union {
305 struct htc_tx_packet_info tx;
306 struct htc_rx_packet_info rx;
307 } info;
308
309 void (*completion) (struct htc_target *, struct htc_packet *);
310 struct htc_target *context;
311};
312
313enum htc_send_full_action {
314 HTC_SEND_FULL_KEEP = 0,
315 HTC_SEND_FULL_DROP = 1,
316};
317
318struct htc_ep_callbacks {
319 void (*rx) (struct htc_target *, struct htc_packet *);
320 void (*rx_refill) (struct htc_target *, enum htc_endpoint_id endpoint);
321 enum htc_send_full_action (*tx_full) (struct htc_target *,
322 struct htc_packet *);
323 struct htc_packet *(*rx_allocthresh) (struct htc_target *,
324 enum htc_endpoint_id, int);
325 int rx_alloc_thresh;
326 int rx_refill_thresh;
327};
328
329/* service connection information */
330struct htc_service_connect_req {
331 u16 svc_id;
332 u16 conn_flags;
333 struct htc_ep_callbacks ep_cb;
334 int max_txq_depth;
335 u32 flags;
336 unsigned int max_rxmsg_sz;
337};
338
339/* service connection response information */
340struct htc_service_connect_resp {
341 u8 buf_len;
342 u8 act_len;
343 enum htc_endpoint_id endpoint;
344 unsigned int len_max;
345 u8 resp_code;
346};
347
348/* endpoint distribution structure */
349struct htc_endpoint_credit_dist {
350 struct list_head list;
351
352 /* Service ID (set by HTC) */
353 u16 svc_id;
354
355	/* endpoint for this distribution struct (set by HTC) */
356 enum htc_endpoint_id endpoint;
357
358 u32 dist_flags;
359
360 /*
361 * credits for normal operation, anything above this
362 * indicates the endpoint is over-subscribed.
363 */
364 int cred_norm;
365
366 /* floor for credit distribution */
367 int cred_min;
368
369 int cred_assngd;
370
371 /* current credits available */
372 int credits;
373
374 /*
375	 * pending credits to distribute on this endpoint; this
376	 * is set by HTC when credit reports arrive. The credit
377	 * distribution function sets this to zero when it distributes
378 * the credits.
379 */
380 int cred_to_dist;
381
382 /*
383 * the number of credits that the current pending TX packet needs
384	 * to transmit. This is set by HTC when the endpoint needs credits in
385 * order to transmit.
386 */
387 int seek_cred;
388
389 /* size in bytes of each credit */
390 int cred_sz;
391
392	/* credits required for a maximum-sized message */
393 int cred_per_msg;
394
395 /* reserved for HTC use */
396 void *htc_rsvd;
397
398 /*
399	 * current depth of the TX queue, i.e. messages waiting for credits.
400 * This field is valid only when HTC_CREDIT_DIST_ACTIVITY_CHANGE
401 * or HTC_CREDIT_DIST_SEND_COMPLETE is indicated on an endpoint
402 * that has non-zero credits to recover.
403 */
404 int txq_depth;
405};
406
407/*
408 * credit distribution code that is passed into the distribution function;
409 * there are mandatory and optional codes that must be handled
410 */
411enum htc_credit_dist_reason {
412 HTC_CREDIT_DIST_SEND_COMPLETE = 0,
413 HTC_CREDIT_DIST_ACTIVITY_CHANGE = 1,
414 HTC_CREDIT_DIST_SEEK_CREDITS,
415};
416
417struct htc_credit_state_info {
418 int total_avail_credits;
419 int cur_free_credits;
420 struct list_head lowestpri_ep_dist;
421};
422
423/* endpoint statistics */
424struct htc_endpoint_stats {
425 /*
426 * number of times the host set the credit-low flag in a send
427 * message on this endpoint
428 */
429 u32 cred_low_indicate;
430
431 u32 tx_issued;
432 u32 tx_pkt_bundled;
433 u32 tx_bundles;
434 u32 tx_dropped;
435
436 /* running count of total credit reports received for this endpoint */
437 u32 tx_cred_rpt;
438
439 /* credit reports received from this endpoint's RX packets */
440 u32 cred_rpt_from_rx;
441
442 /* credit reports received from RX packets of other endpoints */
443 u32 cred_rpt_from_other;
444
445 /* credit reports received from endpoint 0 RX packets */
446 u32 cred_rpt_ep0;
447
448 /* count of credits received via Rx packets on this endpoint */
449 u32 cred_from_rx;
450
451 /* count of credits received via another endpoint */
452 u32 cred_from_other;
453
454	/* count of credits received via endpoint 0 */
455 u32 cred_from_ep0;
456
457	/* count of consumed credits */
458 u32 cred_cosumd;
459
460 /* count of credits returned */
461 u32 cred_retnd;
462
463 u32 rx_pkts;
464
465 /* count of lookahead records found in Rx msg */
466 u32 rx_lkahds;
467
468 /* count of recv packets received in a bundle */
469 u32 rx_bundl;
470
471 /* count of number of bundled lookaheads */
472 u32 rx_bundle_lkahd;
473
474 /* count of the number of bundle indications from the HTC header */
475 u32 rx_bundle_from_hdr;
476
477 /* the number of times the recv allocation threshold was hit */
478 u32 rx_alloc_thresh_hit;
479
480 /* total number of bytes */
481 u32 rxalloc_thresh_byte;
482};
483
484struct htc_endpoint {
485 enum htc_endpoint_id eid;
486 u16 svc_id;
487 struct list_head txq;
488 struct list_head rx_bufq;
489 struct htc_endpoint_credit_dist cred_dist;
490 struct htc_ep_callbacks ep_cb;
491 int max_txq_depth;
492 int len_max;
493 int tx_proc_cnt;
494 int rx_proc_cnt;
495 struct htc_target *target;
496 u8 seqno;
497 u32 conn_flags;
498 struct htc_endpoint_stats ep_st;
499};
500
501struct htc_control_buffer {
502 struct htc_packet packet;
503 u8 *buf;
504};
505
506struct ath6kl_device;
507
508/* our HTC target state */
509struct htc_target {
510 struct htc_endpoint endpoint[ENDPOINT_MAX];
511 struct list_head cred_dist_list;
512 struct list_head free_ctrl_txbuf;
513 struct list_head free_ctrl_rxbuf;
514 struct htc_credit_state_info *cred_dist_cntxt;
515 int tgt_creds;
516 unsigned int tgt_cred_sz;
517 spinlock_t htc_lock;
518 spinlock_t rx_lock;
519 spinlock_t tx_lock;
520 struct ath6kl_device *dev;
521 u32 htc_flags;
522 u32 rx_st_flags;
523 enum htc_endpoint_id ep_waiting;
524 u8 htc_tgt_ver;
525
526 /* max messages per bundle for HTC */
527 int msg_per_bndl_max;
528
529 bool tx_bndl_enable;
530 int rx_bndl_enable;
531 int max_rx_bndl_sz;
532 int max_tx_bndl_sz;
533
534 u32 block_sz;
535 u32 block_mask;
536
537 int max_scat_entries;
538 int max_xfer_szper_scatreq;
539
540 int chk_irq_status_cnt;
541};
542
543void *htc_create(struct ath6kl *ar);
544void htc_set_credit_dist(struct htc_target *target,
545 struct htc_credit_state_info *cred_info,
546 u16 svc_pri_order[], int len);
547int htc_wait_target(struct htc_target *target);
548int htc_start(struct htc_target *target);
549int htc_conn_service(struct htc_target *target,
550 struct htc_service_connect_req *req,
551 struct htc_service_connect_resp *resp);
552int htc_tx(struct htc_target *target, struct htc_packet *packet);
553void htc_stop(struct htc_target *target);
554void htc_cleanup(struct htc_target *target);
555void htc_flush_txep(struct htc_target *target,
556 enum htc_endpoint_id endpoint, u16 tag);
557void htc_flush_rx_buf(struct htc_target *target);
558void htc_indicate_activity_change(struct htc_target *target,
559 enum htc_endpoint_id endpoint, bool active);
560int htc_get_rxbuf_num(struct htc_target *target, enum htc_endpoint_id endpoint);
561int htc_add_rxbuf_multiple(struct htc_target *target, struct list_head *pktq);
562int htc_rxmsg_pending_handler(struct htc_target *target, u32 msg_look_ahead[],
563 int *n_pkts);
564
565static inline void set_htc_pkt_info(struct htc_packet *packet, void *context,
566 u8 *buf, unsigned int len,
567 enum htc_endpoint_id eid, u16 tag)
568{
569 packet->pkt_cntxt = context;
570 packet->buf = buf;
571 packet->act_len = len;
572 packet->endpoint = eid;
573 packet->info.tx.tag = tag;
574}
575
576static inline void htc_rxpkt_reset(struct htc_packet *packet)
577{
578 packet->buf = packet->buf_start;
579 packet->act_len = 0;
580}
581
582static inline void set_htc_rxpkt_info(struct htc_packet *packet, void *context,
583 u8 *buf, unsigned long len,
584 enum htc_endpoint_id eid)
585{
586 packet->pkt_cntxt = context;
587 packet->buf = buf;
588 packet->buf_start = buf;
589 packet->buf_len = len;
590 packet->endpoint = eid;
591}
592
593static inline int get_queue_depth(struct list_head *queue)
594{
595 struct list_head *tmp_list;
596 int depth = 0;
597
598 list_for_each(tmp_list, queue)
599 depth++;
600
601 return depth;
602}
603
604#endif
diff --git a/drivers/net/wireless/ath/ath6kl/htc_hif.c b/drivers/net/wireless/ath/ath6kl/htc_hif.c
new file mode 100644
index 000000000000..5d397b5c5efb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/htc_hif.c
@@ -0,0 +1,641 @@
1/*
2 * Copyright (c) 2007-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "target.h"
19#include "hif-ops.h"
20#include "htc_hif.h"
21#include "debug.h"
22
23#define MAILBOX_FOR_BLOCK_SIZE 1
24
25#define ATH6KL_TIME_QUANTUM 10 /* in ms */
26
27static int ath6kldev_cp_scat_dma_buf(struct hif_scatter_req *req, bool from_dma)
28{
29 u8 *buf;
30 int i;
31
32 buf = req->virt_dma_buf;
33
34 for (i = 0; i < req->scat_entries; i++) {
35
36 if (from_dma)
37 memcpy(req->scat_list[i].buf, buf,
38 req->scat_list[i].len);
39 else
40 memcpy(buf, req->scat_list[i].buf,
41 req->scat_list[i].len);
42
43 buf += req->scat_list[i].len;
44 }
45
46 return 0;
47}
48
49int ath6kldev_rw_comp_handler(void *context, int status)
50{
51 struct htc_packet *packet = context;
52
53 ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
54 "ath6kldev_rw_comp_handler (pkt:0x%p , status: %d\n",
55 packet, status);
56
57 packet->status = status;
58 packet->completion(packet->context, packet);
59
60 return 0;
61}
62
63static int ath6kldev_proc_dbg_intr(struct ath6kl_device *dev)
64{
65 u32 dummy;
66 int status;
67
68 ath6kl_err("target debug interrupt\n");
69
70 ath6kl_target_failure(dev->ar);
71
72 /*
73	 * read the counter to clear the interrupt; the debug error interrupt is
74 * counter 0.
75 */
76 status = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS,
77 (u8 *)&dummy, 4, HIF_RD_SYNC_BYTE_INC);
78 if (status)
79 WARN_ON(1);
80
81 return status;
82}
83
84/* mailbox recv message polling */
85int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
86 int timeout)
87{
88 struct ath6kl_irq_proc_registers *rg;
89 int status = 0, i;
90 u8 htc_mbox = 1 << HTC_MAILBOX;
91
92 for (i = timeout / ATH6KL_TIME_QUANTUM; i > 0; i--) {
93 /* this is the standard HIF way, load the reg table */
94 status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS,
95 (u8 *) &dev->irq_proc_reg,
96 sizeof(dev->irq_proc_reg),
97 HIF_RD_SYNC_BYTE_INC);
98
99 if (status) {
100 ath6kl_err("failed to read reg table\n");
101 return status;
102 }
103
104 /* check for MBOX data and valid lookahead */
105 if (dev->irq_proc_reg.host_int_status & htc_mbox) {
106 if (dev->irq_proc_reg.rx_lkahd_valid &
107 htc_mbox) {
108 /*
109 * Mailbox has a message and the look ahead
110 * is valid.
111 */
112 rg = &dev->irq_proc_reg;
113 *lk_ahd =
114 le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]);
115 break;
116 }
117 }
118
119 /* delay a little */
120 mdelay(ATH6KL_TIME_QUANTUM);
121 ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "retry mbox poll : %d\n", i);
122 }
123
124 if (i == 0) {
125 ath6kl_err("timeout waiting for recv message\n");
126 status = -ETIME;
127 /* check if the target asserted */
128 if (dev->irq_proc_reg.counter_int_status &
129 ATH6KL_TARGET_DEBUG_INTR_MASK)
130 /*
131 * Target failure handler will be called in case of
132 * an assert.
133 */
134 ath6kldev_proc_dbg_intr(dev);
135 }
136
137 return status;
138}
139
140/*
141 * Disable packet reception (used in case the host runs out of buffers)
142 * using the interrupt enable registers through the host I/F
143 */
144int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx)
145{
146 struct ath6kl_irq_enable_reg regs;
147 int status = 0;
148
149 /* take the lock to protect interrupt enable shadows */
150 spin_lock_bh(&dev->lock);
151
152 if (enable_rx)
153 dev->irq_en_reg.int_status_en |=
154 SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
155 else
156 dev->irq_en_reg.int_status_en &=
157 ~SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
158
159 memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
160
161 spin_unlock_bh(&dev->lock);
162
163 status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
164 &regs.int_status_en,
165 sizeof(struct ath6kl_irq_enable_reg),
166 HIF_WR_SYNC_BYTE_INC);
167
168 return status;
169}
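
Note the idiom here, repeated in ath6kldev_enable_intrs() and ath6kldev_disable_intrs() below: the shadow copy in dev->irq_en_reg is updated under dev->lock, snapshotted to a stack buffer, and the (potentially sleeping) bus write runs outside the spinlock. A condensed sketch of the pattern (hypothetical helper, not in the driver):

static int sketch_flush_irq_shadow(struct ath6kl_device *dev)
{
	struct ath6kl_irq_enable_reg regs;

	spin_lock_bh(&dev->lock);
	memcpy(&regs, &dev->irq_en_reg, sizeof(regs));	/* snapshot the shadow */
	spin_unlock_bh(&dev->lock);

	/* the bus transaction may sleep, so it must not hold the spinlock */
	return hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
				   &regs.int_status_en, sizeof(regs),
				   HIF_WR_SYNC_BYTE_INC);
}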
170
171int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
172 struct hif_scatter_req *scat_req, bool read)
173{
174 int status = 0;
175
176 if (read) {
177 scat_req->req = HIF_RD_SYNC_BLOCK_FIX;
178 scat_req->addr = dev->ar->mbox_info.htc_addr;
179 } else {
180 scat_req->req = HIF_WR_ASYNC_BLOCK_INC;
181
182 scat_req->addr =
183 (scat_req->len > HIF_MBOX_WIDTH) ?
184 dev->ar->mbox_info.htc_ext_addr :
185 dev->ar->mbox_info.htc_addr;
186 }
187
188 ath6kl_dbg((ATH6KL_DBG_HTC_RECV | ATH6KL_DBG_HTC_SEND),
189 "ath6kldev_submit_scat_req, entries: %d, total len: %d mbox:0x%X (mode: %s : %s)\n",
190 scat_req->scat_entries, scat_req->len,
191 scat_req->addr, !read ? "async" : "sync",
192 (read) ? "rd" : "wr");
193
194 if (!read && scat_req->virt_scat) {
195 status = ath6kldev_cp_scat_dma_buf(scat_req, false);
196 if (status) {
197 scat_req->status = status;
198 scat_req->complete(dev->ar->htc_target, scat_req);
199 return 0;
200 }
201 }
202
203 status = ath6kl_hif_scat_req_rw(dev->ar, scat_req);
204
205 if (read) {
206 /* in sync mode, we can touch the scatter request */
207 scat_req->status = status;
208 if (!status && scat_req->virt_scat)
209 scat_req->status =
210 ath6kldev_cp_scat_dma_buf(scat_req, true);
211 }
212
213 return status;
214}
215
216static int ath6kldev_proc_counter_intr(struct ath6kl_device *dev)
217{
218 u8 counter_int_status;
219
220 ath6kl_dbg(ATH6KL_DBG_IRQ, "counter interrupt\n");
221
222 counter_int_status = dev->irq_proc_reg.counter_int_status &
223 dev->irq_en_reg.cntr_int_status_en;
224
225 ath6kl_dbg(ATH6KL_DBG_IRQ,
226 "valid interrupt source(s) in COUNTER_INT_STATUS: 0x%x\n",
227 counter_int_status);
228
229 /*
230 * NOTE: other modules like GMBOX may use the counter interrupt for
231 * credit flow control on other counters, we only need to check for
232 * the debug assertion counter interrupt.
233 */
234 if (counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK)
235 return ath6kldev_proc_dbg_intr(dev);
236
237 return 0;
238}
239
240static int ath6kldev_proc_err_intr(struct ath6kl_device *dev)
241{
242 int status;
243 u8 error_int_status;
244 u8 reg_buf[4];
245
246 ath6kl_dbg(ATH6KL_DBG_IRQ, "error interrupt\n");
247
248 error_int_status = dev->irq_proc_reg.error_int_status & 0x0F;
249 if (!error_int_status) {
250 WARN_ON(1);
251 return -EIO;
252 }
253
254 ath6kl_dbg(ATH6KL_DBG_IRQ,
255 "valid interrupt source(s) in ERROR_INT_STATUS: 0x%x\n",
256 error_int_status);
257
258 if (MS(ERROR_INT_STATUS_WAKEUP, error_int_status))
259 ath6kl_dbg(ATH6KL_DBG_IRQ, "error : wakeup\n");
260
261 if (MS(ERROR_INT_STATUS_RX_UNDERFLOW, error_int_status))
262 ath6kl_err("rx underflow\n");
263
264 if (MS(ERROR_INT_STATUS_TX_OVERFLOW, error_int_status))
265 ath6kl_err("tx overflow\n");
266
267 /* Clear the interrupt */
268 dev->irq_proc_reg.error_int_status &= ~error_int_status;
269
270 /* set W1C value to clear the interrupt, this hits the register first */
271 reg_buf[0] = error_int_status;
272 reg_buf[1] = 0;
273 reg_buf[2] = 0;
274 reg_buf[3] = 0;
275
276 status = hif_read_write_sync(dev->ar, ERROR_INT_STATUS_ADDRESS,
277 reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);
278
279 if (status)
280 WARN_ON(1);
281
282 return status;
283}
284
285static int ath6kldev_proc_cpu_intr(struct ath6kl_device *dev)
286{
287 int status;
288 u8 cpu_int_status;
289 u8 reg_buf[4];
290
291 ath6kl_dbg(ATH6KL_DBG_IRQ, "cpu interrupt\n");
292
293 cpu_int_status = dev->irq_proc_reg.cpu_int_status &
294 dev->irq_en_reg.cpu_int_status_en;
295 if (!cpu_int_status) {
296 WARN_ON(1);
297 return -EIO;
298 }
299
300 ath6kl_dbg(ATH6KL_DBG_IRQ,
301 "valid interrupt source(s) in CPU_INT_STATUS: 0x%x\n",
302 cpu_int_status);
303
304 /* Clear the interrupt */
305 dev->irq_proc_reg.cpu_int_status &= ~cpu_int_status;
306
307 /*
308	 * Set up the register transfer buffer to hit the register 4 times;
309	 * this is done to make the access 4-byte aligned to mitigate issues
310	 * with host bus interconnects that restrict bus transfer lengths to
311	 * be a multiple of 4 bytes.
312 */
313
314 /* set W1C value to clear the interrupt, this hits the register first */
315 reg_buf[0] = cpu_int_status;
316	/* the remaining bytes are set to zero and have no effect */
317 reg_buf[1] = 0;
318 reg_buf[2] = 0;
319 reg_buf[3] = 0;
320
321 status = hif_read_write_sync(dev->ar, CPU_INT_STATUS_ADDRESS,
322 reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);
323
324 if (status)
325 WARN_ON(1);
326
327 return status;
328}
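
Both interrupt-clearing paths above (error and CPU) rely on the same write-one-to-clear (W1C) idiom: a 4-byte buffer whose first byte carries the bits to clear and whose remaining bytes are zero, purely so the transfer stays 4-byte aligned. A hedged helper capturing the shared pattern (the driver deliberately open-codes it):

static int sketch_w1c_clear(struct ath6kl *ar, u32 reg_addr, u8 bits)
{
	u8 reg_buf[4] = { bits, 0, 0, 0 };	/* pad to a 4-byte transfer */

	/* writing 1s clears the corresponding W1C status bits */
	return hif_read_write_sync(ar, reg_addr, reg_buf, 4,
				   HIF_WR_SYNC_BYTE_FIX);
}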
329
330/* process pending interrupts synchronously */
331static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
332{
333 struct ath6kl_irq_proc_registers *rg;
334 int status = 0;
335 u8 host_int_status = 0;
336 u32 lk_ahd = 0;
337 u8 htc_mbox = 1 << HTC_MAILBOX;
338
339 ath6kl_dbg(ATH6KL_DBG_IRQ, "proc_pending_irqs: (dev: 0x%p)\n", dev);
340
341 /*
342 * NOTE: HIF implementation guarantees that the context of this
343 * call allows us to perform SYNCHRONOUS I/O, that is we can block,
344 * sleep or call any API that can block or switch thread/task
345 * contexts. This is a fully schedulable context.
346 */
347
348 /*
349	 * Process pending interrupts only when the interrupt status
350	 * enables are set; otherwise polling results in unnecessary bus
351	 * transactions, and the target may be unresponsive at the time.
352 */
353 if (dev->irq_en_reg.int_status_en) {
354 /*
355 * Read the first 28 bytes of the HTC register table. This
356 * will yield us the value of different int status
357 * registers and the lookahead registers.
358 *
359 * length = sizeof(int_status) + sizeof(cpu_int_status)
360 * + sizeof(error_int_status) +
361 * sizeof(counter_int_status) +
362 * sizeof(mbox_frame) + sizeof(rx_lkahd_valid)
363 * + sizeof(hole) + sizeof(rx_lkahd) +
364 * sizeof(int_status_en) +
365 * sizeof(cpu_int_status_en) +
366 * sizeof(err_int_status_en) +
367 * sizeof(cntr_int_status_en);
368 */
369 status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS,
370 (u8 *) &dev->irq_proc_reg,
371 sizeof(dev->irq_proc_reg),
372 HIF_RD_SYNC_BYTE_INC);
373 if (status)
374 goto out;
375
376 if (AR_DBG_LVL_CHECK(ATH6KL_DBG_IRQ))
377 ath6kl_dump_registers(dev, &dev->irq_proc_reg,
378 &dev->irq_en_reg);
379
380 /* Update only those registers that are enabled */
381 host_int_status = dev->irq_proc_reg.host_int_status &
382 dev->irq_en_reg.int_status_en;
383
384 /* Look at mbox status */
385 if (host_int_status & htc_mbox) {
386 /*
387			 * Mask out the pending mbox value; we use the lookahead
388			 * as the real flag for mbox processing.
389 */
390 host_int_status &= ~htc_mbox;
391 if (dev->irq_proc_reg.rx_lkahd_valid &
392 htc_mbox) {
393 rg = &dev->irq_proc_reg;
394 lk_ahd = le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]);
395 if (!lk_ahd)
396 ath6kl_err("lookAhead is zero!\n");
397 }
398 }
399 }
400
401 if (!host_int_status && !lk_ahd) {
402 *done = true;
403 goto out;
404 }
405
406 if (lk_ahd) {
407 int fetched = 0;
408
409 ath6kl_dbg(ATH6KL_DBG_IRQ,
410 "pending mailbox msg, lk_ahd: 0x%X\n", lk_ahd);
411 /*
412 * Mailbox Interrupt, the HTC layer may issue async
413 * requests to empty the mailbox. When emptying the recv
414 * mailbox we use the async handler above called from the
415		 * completion routine of the caller's read request. This can
416 * improve performance by reducing context switching when
417 * we rapidly pull packets.
418 */
419 status = htc_rxmsg_pending_handler(dev->htc_cnxt,
420 &lk_ahd, &fetched);
421 if (status)
422 goto out;
423
424 if (!fetched)
425 /*
426 * HTC could not pull any messages out due to lack
427 * of resources.
428 */
429 dev->htc_cnxt->chk_irq_status_cnt = 0;
430 }
431
432 /* now handle the rest of them */
433 ath6kl_dbg(ATH6KL_DBG_IRQ,
434 "valid interrupt source(s) for other interrupts: 0x%x\n",
435 host_int_status);
436
437 if (MS(HOST_INT_STATUS_CPU, host_int_status)) {
438 /* CPU Interrupt */
439 status = ath6kldev_proc_cpu_intr(dev);
440 if (status)
441 goto out;
442 }
443
444 if (MS(HOST_INT_STATUS_ERROR, host_int_status)) {
445 /* Error Interrupt */
446 status = ath6kldev_proc_err_intr(dev);
447 if (status)
448 goto out;
449 }
450
451 if (MS(HOST_INT_STATUS_COUNTER, host_int_status))
452 /* Counter Interrupt */
453 status = ath6kldev_proc_counter_intr(dev);
454
455out:
456 /*
457	 * An optimization to bypass reading the IRQ status registers
458	 * unnecessarily, which can re-wake the target: if upper layers
459	 * determine that we are in a low-throughput mode, we can rely on
460	 * taking another interrupt rather than re-checking the status
461	 * registers, which can re-wake the target.
462	 *
463	 * NOTE: host interfaces that make use of detecting pending
464	 * mbox messages at the HIF cannot use this optimization due to
465	 * possible side effects; SPI requires the host to drain all
466	 * messages from the mailbox before exiting the ISR routine.
467 */
468
469 ath6kl_dbg(ATH6KL_DBG_IRQ,
470 "bypassing irq status re-check, forcing done\n");
471
472 if (!dev->htc_cnxt->chk_irq_status_cnt)
473 *done = true;
474
475 ath6kl_dbg(ATH6KL_DBG_IRQ,
476 "proc_pending_irqs: (done:%d, status=%d\n", *done, status);
477
478 return status;
479}
480
481/* interrupt handler, kicks off all interrupt processing */
482int ath6kldev_intr_bh_handler(struct ath6kl *ar)
483{
484 struct ath6kl_device *dev = ar->htc_target->dev;
485 int status = 0;
486 bool done = false;
487
488 /*
489 * Reset counter used to flag a re-scan of IRQ status registers on
490 * the target.
491 */
492 dev->htc_cnxt->chk_irq_status_cnt = 0;
493
494 /*
495 * IRQ processing is synchronous, interrupt status registers can be
496 * re-read.
497 */
498 while (!done) {
499 status = proc_pending_irqs(dev, &done);
500 if (status)
501 break;
502 }
503
504 return status;
505}
506
507static int ath6kldev_enable_intrs(struct ath6kl_device *dev)
508{
509 struct ath6kl_irq_enable_reg regs;
510 int status;
511
512 spin_lock_bh(&dev->lock);
513
514 /* Enable all but ATH6KL CPU interrupts */
515 dev->irq_en_reg.int_status_en =
516 SM(INT_STATUS_ENABLE_ERROR, 0x01) |
517 SM(INT_STATUS_ENABLE_CPU, 0x01) |
518 SM(INT_STATUS_ENABLE_COUNTER, 0x01);
519
520 /*
521 * NOTE: There are some cases where HIF can do detection of
522 * pending mbox messages which is disabled now.
523 */
524 dev->irq_en_reg.int_status_en |= SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
525
526 /* Set up the CPU Interrupt status Register */
527 dev->irq_en_reg.cpu_int_status_en = 0;
528
529 /* Set up the Error Interrupt status Register */
530 dev->irq_en_reg.err_int_status_en =
531 SM(ERROR_STATUS_ENABLE_RX_UNDERFLOW, 0x01) |
532 SM(ERROR_STATUS_ENABLE_TX_OVERFLOW, 0x1);
533
534 /*
535 * Enable Counter interrupt status register to get fatal errors for
536 * debugging.
537 */
538 dev->irq_en_reg.cntr_int_status_en = SM(COUNTER_INT_STATUS_ENABLE_BIT,
539 ATH6KL_TARGET_DEBUG_INTR_MASK);
540 memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
541
542 spin_unlock_bh(&dev->lock);
543
544 status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
545 &regs.int_status_en, sizeof(regs),
546 HIF_WR_SYNC_BYTE_INC);
547
548 if (status)
549 ath6kl_err("failed to update interrupt ctl reg err: %d\n",
550 status);
551
552 return status;
553}
554
555int ath6kldev_disable_intrs(struct ath6kl_device *dev)
556{
557 struct ath6kl_irq_enable_reg regs;
558
559 spin_lock_bh(&dev->lock);
560 /* Disable all interrupts */
561 dev->irq_en_reg.int_status_en = 0;
562 dev->irq_en_reg.cpu_int_status_en = 0;
563 dev->irq_en_reg.err_int_status_en = 0;
564 dev->irq_en_reg.cntr_int_status_en = 0;
565 memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
566 spin_unlock_bh(&dev->lock);
567
568 return hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
569 &regs.int_status_en, sizeof(regs),
570 HIF_WR_SYNC_BYTE_INC);
571}
572
573/* enable device interrupts */
574int ath6kldev_unmask_intrs(struct ath6kl_device *dev)
575{
576 int status = 0;
577
578 /*
579	 * Make sure interrupts are disabled before unmasking at the HIF
580 * layer. The rationale here is that between device insertion
581 * (where we clear the interrupts the first time) and when HTC
582 * is finally ready to handle interrupts, other software can perform
583 * target "soft" resets. The ATH6KL interrupt enables reset back to an
584 * "enabled" state when this happens.
585 */
586 ath6kldev_disable_intrs(dev);
587
588 /* unmask the host controller interrupts */
589 ath6kl_hif_irq_enable(dev->ar);
590 status = ath6kldev_enable_intrs(dev);
591
592 return status;
593}
594
595/* disable all device interrupts */
596int ath6kldev_mask_intrs(struct ath6kl_device *dev)
597{
598 /*
599 * Mask the interrupt at the HIF layer to avoid any stray interrupt
600 * taken while we zero out our shadow registers in
601 * ath6kldev_disable_intrs().
602 */
603 ath6kl_hif_irq_disable(dev->ar);
604
605 return ath6kldev_disable_intrs(dev);
606}
607
608int ath6kldev_setup(struct ath6kl_device *dev)
609{
610 int status = 0;
611
612 spin_lock_init(&dev->lock);
613
614 /*
615	 * NOTE: we actually get the block size of a mailbox other than 0;
616	 * for SDIO the block size on mailbox 0 is artificially set to 1.
617 * So we use the block size that is set for the other 3 mailboxes.
618 */
619 dev->htc_cnxt->block_sz = dev->ar->mbox_info.block_size;
620
621 /* must be a power of 2 */
622 if ((dev->htc_cnxt->block_sz & (dev->htc_cnxt->block_sz - 1)) != 0) {
623 WARN_ON(1);
624 goto fail_setup;
625 }
626
627 /* assemble mask, used for padding to a block */
628 dev->htc_cnxt->block_mask = dev->htc_cnxt->block_sz - 1;
629
630 ath6kl_dbg(ATH6KL_DBG_TRC, "block size: %d, mbox addr:0x%X\n",
631 dev->htc_cnxt->block_sz, dev->ar->mbox_info.htc_addr);
632
633 ath6kl_dbg(ATH6KL_DBG_TRC,
634 "hif interrupt processing is sync only\n");
635
636 status = ath6kldev_disable_intrs(dev);
637
638fail_setup:
639 return status;
640
641}
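
The block_mask assembled in ath6kldev_setup() is what makes block padding cheap: because the block size is verified to be a power of two, rounding a transfer length up to a block boundary is two bit operations. An illustrative helper (not part of the driver):

/* Round len up to the next block boundary, e.g. len 100, block 128 -> 128. */
static inline u32 sketch_pad_to_block(u32 len, u32 block_mask)
{
	return (len + block_mask) & ~block_mask;
}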
diff --git a/drivers/net/wireless/ath/ath6kl/htc_hif.h b/drivers/net/wireless/ath/ath6kl/htc_hif.h
new file mode 100644
index 000000000000..171ad63d89b0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/htc_hif.h
@@ -0,0 +1,92 @@
1/*
2 * Copyright (c) 2007-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef HTC_HIF_H
18#define HTC_HIF_H
19
20#include "htc.h"
21#include "hif.h"
22
23#define ATH6KL_MAILBOXES 4
24
25/* HTC runs over mailbox 0 */
26#define HTC_MAILBOX 0
27
28#define ATH6KL_TARGET_DEBUG_INTR_MASK 0x01
29
30#define OTHER_INTS_ENABLED (INT_STATUS_ENABLE_ERROR_MASK | \
31 INT_STATUS_ENABLE_CPU_MASK | \
32 INT_STATUS_ENABLE_COUNTER_MASK)
33
34#define ATH6KL_REG_IO_BUFFER_SIZE 32
35#define ATH6KL_MAX_REG_IO_BUFFERS 8
36#define ATH6KL_SCATTER_ENTRIES_PER_REQ 16
37#define ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER (16 * 1024)
38#define ATH6KL_SCATTER_REQS 4
39
40#ifndef A_CACHE_LINE_PAD
41#define A_CACHE_LINE_PAD 128
42#endif
43#define ATH6KL_MIN_SCATTER_ENTRIES_PER_REQ 2
44#define ATH6KL_MIN_TRANSFER_SIZE_PER_SCATTER (4 * 1024)
45
46struct ath6kl_irq_proc_registers {
47 u8 host_int_status;
48 u8 cpu_int_status;
49 u8 error_int_status;
50 u8 counter_int_status;
51 u8 mbox_frame;
52 u8 rx_lkahd_valid;
53 u8 host_int_status2;
54 u8 gmbox_rx_avail;
55 __le32 rx_lkahd[2];
56 __le32 rx_gmbox_lkahd_alias[2];
57} __packed;
58
59struct ath6kl_irq_enable_reg {
60 u8 int_status_en;
61 u8 cpu_int_status_en;
62 u8 err_int_status_en;
63 u8 cntr_int_status_en;
64} __packed;
65
66struct ath6kl_device {
67 spinlock_t lock;
68 u8 pad1[A_CACHE_LINE_PAD];
69 struct ath6kl_irq_proc_registers irq_proc_reg;
70 u8 pad2[A_CACHE_LINE_PAD];
71 struct ath6kl_irq_enable_reg irq_en_reg;
72 u8 pad3[A_CACHE_LINE_PAD];
73 struct htc_target *htc_cnxt;
74 struct ath6kl *ar;
75};
76
77int ath6kldev_setup(struct ath6kl_device *dev);
78int ath6kldev_unmask_intrs(struct ath6kl_device *dev);
79int ath6kldev_mask_intrs(struct ath6kl_device *dev);
80int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev,
81 u32 *lk_ahd, int timeout);
82int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx);
83int ath6kldev_disable_intrs(struct ath6kl_device *dev);
84
85int ath6kldev_rw_comp_handler(void *context, int status);
86int ath6kldev_intr_bh_handler(struct ath6kl *ar);
87
88/* Scatter Function and Definitions */
89int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
90 struct hif_scatter_req *scat_req, bool read);
91
92#endif /* HTC_HIF_H */
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
new file mode 100644
index 000000000000..99ff2f94b6ce
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -0,0 +1,1303 @@
1
2/*
3 * Copyright (c) 2011 Atheros Communications Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include <linux/mmc/sdio_func.h>
19#include "core.h"
20#include "cfg80211.h"
21#include "target.h"
22#include "debug.h"
23#include "hif-ops.h"
24
25unsigned int debug_mask;
26
27module_param(debug_mask, uint, 0644);
28
29/*
30 * Include definitions here that can be used to tune the WLAN module
31 * behavior. Different customers can tune the behavior as per their needs,
32 * here.
33 */
34
35/*
36 * This configuration item enables/disables keepalive support.
37 * Keepalive support: in the absence of any data traffic to the AP, null
38 * frames will be sent to the AP at a periodic interval, to keep the
39 * association active. This configuration item defines that interval.
40 * Use a value of zero to disable keepalive support.
41 * Default: 60 seconds
42 */
43#define WLAN_CONFIG_KEEP_ALIVE_INTERVAL 60
44
45/*
46 * This configuration item sets the value of the disconnect timeout.
47 * Firmware delays sending the disconnect event to the host for this
48 * timeout after it gets disconnected from the current AP.
49 * If the firmware successfully roams within the disconnect timeout,
50 * it sends a new connect event.
51 */
52#define WLAN_CONFIG_DISCONNECT_TIMEOUT 10
53
54#define CONFIG_AR600x_DEBUG_UART_TX_PIN 8
55
56enum addr_type {
57 DATASET_PATCH_ADDR,
58 APP_LOAD_ADDR,
59 APP_START_OVERRIDE_ADDR,
60};
61
62#define ATH6KL_DATA_OFFSET 64
63struct sk_buff *ath6kl_buf_alloc(int size)
64{
65 struct sk_buff *skb;
66 u16 reserved;
67
68	/* Add cacheline space at the front and back of the buffer */
69 reserved = (2 * L1_CACHE_BYTES) + ATH6KL_DATA_OFFSET +
70 sizeof(struct htc_packet);
71 skb = dev_alloc_skb(size + reserved);
72
73 if (skb)
74 skb_reserve(skb, reserved - L1_CACHE_BYTES);
75 return skb;
76}
77
78void ath6kl_init_profile_info(struct ath6kl *ar)
79{
80 ar->ssid_len = 0;
81 memset(ar->ssid, 0, sizeof(ar->ssid));
82
83 ar->dot11_auth_mode = OPEN_AUTH;
84 ar->auth_mode = NONE_AUTH;
85 ar->prwise_crypto = NONE_CRYPT;
86 ar->prwise_crypto_len = 0;
87 ar->grp_crypto = NONE_CRYPT;
88 ar->grp_crpto_len = 0;
89 memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list));
90 memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
91 memset(ar->bssid, 0, sizeof(ar->bssid));
92 ar->bss_ch = 0;
93 ar->nw_type = ar->next_mode = INFRA_NETWORK;
94}
95
96static u8 ath6kl_get_fw_iftype(struct ath6kl *ar)
97{
98 switch (ar->nw_type) {
99 case INFRA_NETWORK:
100 return HI_OPTION_FW_MODE_BSS_STA;
101 case ADHOC_NETWORK:
102 return HI_OPTION_FW_MODE_IBSS;
103 case AP_NETWORK:
104 return HI_OPTION_FW_MODE_AP;
105 default:
106		ath6kl_err("Unsupported interface type: %d\n", ar->nw_type);
107 return 0xff;
108 }
109}
110
111static inline u32 ath6kl_get_hi_item_addr(struct ath6kl *ar,
112 u32 item_offset)
113{
114 u32 addr = 0;
115
116 if (ar->target_type == TARGET_TYPE_AR6003)
117 addr = ATH6KL_HI_START_ADDR + item_offset;
118
119 return addr;
120}
121
122static int ath6kl_set_host_app_area(struct ath6kl *ar)
123{
124 u32 address, data;
125 struct host_app_area host_app_area;
126
127 /* Fetch the address of the host_app_area_s
128 * instance in the host interest area */
129 address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_app_host_interest));
130 address = TARG_VTOP(address);
131
132 if (ath6kl_read_reg_diag(ar, &address, &data))
133 return -EIO;
134
135 address = TARG_VTOP(data);
136 host_app_area.wmi_protocol_ver = WMI_PROTOCOL_VERSION;
137 if (ath6kl_access_datadiag(ar, address,
138 (u8 *)&host_app_area,
139 sizeof(struct host_app_area), false))
140 return -EIO;
141
142 return 0;
143}
144
145static inline void set_ac2_ep_map(struct ath6kl *ar,
146 u8 ac,
147 enum htc_endpoint_id ep)
148{
149 ar->ac2ep_map[ac] = ep;
150 ar->ep2ac_map[ep] = ac;
151}
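
Since the helper records the mapping in both arrays, lookups resolve in O(1) in either direction; the reverse table lets, for example, a completion handler recover the access class from an endpoint id. A sketch of the two lookups (WMM_AC_VI chosen arbitrarily):

	enum htc_endpoint_id ep = ar->ac2ep_map[WMM_AC_VI];	/* AC -> endpoint */
	u8 ac = ar->ep2ac_map[ep];				/* endpoint -> AC */
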
152
153/* connect to a service */
154static int ath6kl_connectservice(struct ath6kl *ar,
155 struct htc_service_connect_req *con_req,
156 char *desc)
157{
158 int status;
159 struct htc_service_connect_resp response;
160
161 memset(&response, 0, sizeof(response));
162
163 status = htc_conn_service(ar->htc_target, con_req, &response);
164 if (status) {
165 ath6kl_err("failed to connect to %s service status:%d\n",
166 desc, status);
167 return status;
168 }
169
170 switch (con_req->svc_id) {
171 case WMI_CONTROL_SVC:
172 if (test_bit(WMI_ENABLED, &ar->flag))
173 ath6kl_wmi_set_control_ep(ar->wmi, response.endpoint);
174 ar->ctrl_ep = response.endpoint;
175 break;
176 case WMI_DATA_BE_SVC:
177 set_ac2_ep_map(ar, WMM_AC_BE, response.endpoint);
178 break;
179 case WMI_DATA_BK_SVC:
180 set_ac2_ep_map(ar, WMM_AC_BK, response.endpoint);
181 break;
182 case WMI_DATA_VI_SVC:
183 set_ac2_ep_map(ar, WMM_AC_VI, response.endpoint);
184 break;
185 case WMI_DATA_VO_SVC:
186 set_ac2_ep_map(ar, WMM_AC_VO, response.endpoint);
187 break;
188 default:
189 ath6kl_err("service id is not mapped %d\n", con_req->svc_id);
190 return -EINVAL;
191 }
192
193 return 0;
194}
195
196static int ath6kl_init_service_ep(struct ath6kl *ar)
197{
198 struct htc_service_connect_req connect;
199
200 memset(&connect, 0, sizeof(connect));
201
202 /* these fields are the same for all service endpoints */
203 connect.ep_cb.rx = ath6kl_rx;
204 connect.ep_cb.rx_refill = ath6kl_rx_refill;
205 connect.ep_cb.tx_full = ath6kl_tx_queue_full;
206
207 /*
208 * Set the max queue depth so that our ath6kl_tx_queue_full handler
209 * gets called.
210 */
211 connect.max_txq_depth = MAX_DEFAULT_SEND_QUEUE_DEPTH;
212 connect.ep_cb.rx_refill_thresh = ATH6KL_MAX_RX_BUFFERS / 4;
213 if (!connect.ep_cb.rx_refill_thresh)
214 connect.ep_cb.rx_refill_thresh++;
215
216 /* connect to control service */
217 connect.svc_id = WMI_CONTROL_SVC;
218 if (ath6kl_connectservice(ar, &connect, "WMI CONTROL"))
219 return -EIO;
220
221 connect.flags |= HTC_FLGS_TX_BNDL_PAD_EN;
222
223 /*
224	 * Limit the HTC message size on the send path: although we can
225	 * receive A-MSDU frames of 4K, we will only send ethernet-sized
226	 * (802.3) frames on the send path.
227 */
228 connect.max_rxmsg_sz = WMI_MAX_TX_DATA_FRAME_LENGTH;
229
230 /*
231 * To reduce the amount of committed memory for larger A_MSDU
232 * frames, use the recv-alloc threshold mechanism for larger
233 * packets.
234 */
235 connect.ep_cb.rx_alloc_thresh = ATH6KL_BUFFER_SIZE;
236 connect.ep_cb.rx_allocthresh = ath6kl_alloc_amsdu_rxbuf;
237
238 /*
239 * For the remaining data services set the connection flag to
240 * reduce dribbling, if configured to do so.
241 */
242 connect.conn_flags |= HTC_CONN_FLGS_REDUCE_CRED_DRIB;
243 connect.conn_flags &= ~HTC_CONN_FLGS_THRESH_MASK;
244 connect.conn_flags |= HTC_CONN_FLGS_THRESH_LVL_HALF;
245
246 connect.svc_id = WMI_DATA_BE_SVC;
247
248 if (ath6kl_connectservice(ar, &connect, "WMI DATA BE"))
249 return -EIO;
250
251	/* connect to the background service; map this to WMI LOW_PRI */
252 connect.svc_id = WMI_DATA_BK_SVC;
253 if (ath6kl_connectservice(ar, &connect, "WMI DATA BK"))
254 return -EIO;
255
256	/* connect to the Video service, map this to HI PRI */
257 connect.svc_id = WMI_DATA_VI_SVC;
258 if (ath6kl_connectservice(ar, &connect, "WMI DATA VI"))
259 return -EIO;
260
261 /*
262	 * Connect to the VO service; this is currently not mapped to a WMI
263	 * priority stream due to historical reasons. WMI originally
264	 * defined 3 priorities over 3 mailboxes. We can change this when
265 * WMI is reworked so that priorities are not dependent on
266 * mailboxes.
267 */
268 connect.svc_id = WMI_DATA_VO_SVC;
269 if (ath6kl_connectservice(ar, &connect, "WMI DATA VO"))
270 return -EIO;
271
272 return 0;
273}
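
For reference, once this function succeeds the service-to-endpoint wiring is as follows (the actual endpoint numbers are assigned by the target in each connect response):

	/* WMI_CONTROL_SVC -> ar->ctrl_ep
	 * WMI_DATA_BE_SVC -> ar->ac2ep_map[WMM_AC_BE]
	 * WMI_DATA_BK_SVC -> ar->ac2ep_map[WMM_AC_BK]
	 * WMI_DATA_VI_SVC -> ar->ac2ep_map[WMM_AC_VI]
	 * WMI_DATA_VO_SVC -> ar->ac2ep_map[WMM_AC_VO]
	 */
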
274
275static void ath6kl_init_control_info(struct ath6kl *ar)
276{
277 u8 ctr;
278
279 clear_bit(WMI_ENABLED, &ar->flag);
280 ath6kl_init_profile_info(ar);
281 ar->def_txkey_index = 0;
282 memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list));
283 ar->ch_hint = 0;
284 ar->listen_intvl_t = A_DEFAULT_LISTEN_INTERVAL;
285 ar->listen_intvl_b = 0;
286 ar->tx_pwr = 0;
287 clear_bit(SKIP_SCAN, &ar->flag);
288 set_bit(WMM_ENABLED, &ar->flag);
289 ar->intra_bss = 1;
290 memset(&ar->sc_params, 0, sizeof(ar->sc_params));
291 ar->sc_params.short_scan_ratio = WMI_SHORTSCANRATIO_DEFAULT;
292 ar->sc_params.scan_ctrl_flags = DEFAULT_SCAN_CTRL_FLAGS;
293
294 memset((u8 *)ar->sta_list, 0,
295 AP_MAX_NUM_STA * sizeof(struct ath6kl_sta));
296
297 spin_lock_init(&ar->mcastpsq_lock);
298
299 /* Init the PS queues */
300 for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
301 spin_lock_init(&ar->sta_list[ctr].psq_lock);
302 skb_queue_head_init(&ar->sta_list[ctr].psq);
303 }
304
305 skb_queue_head_init(&ar->mcastpsq);
306
307 memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3);
308}
309
310/*
311 * Set HTC/Mbox operational parameters; this can only be called when the
312 * target is in the BMI phase.
313 */
314static int ath6kl_set_htc_params(struct ath6kl *ar, u32 mbox_isr_yield_val,
315 u8 htc_ctrl_buf)
316{
317 int status;
318 u32 blk_size;
319
320 blk_size = ar->mbox_info.block_size;
321
322 if (htc_ctrl_buf)
323 blk_size |= ((u32)htc_ctrl_buf) << 16;
324
325 /* set the host interest area for the block size */
326 status = ath6kl_bmi_write(ar,
327 ath6kl_get_hi_item_addr(ar,
328 HI_ITEM(hi_mbox_io_block_sz)),
329 (u8 *)&blk_size,
330 4);
331 if (status) {
332 ath6kl_err("bmi_write_memory for IO block size failed\n");
333 goto out;
334 }
335
336 ath6kl_dbg(ATH6KL_DBG_TRC, "block size set: %d (target addr:0x%X)\n",
337 blk_size,
338 ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_mbox_io_block_sz)));
339
340 if (mbox_isr_yield_val) {
341 /* set the host interest area for the mbox ISR yield limit */
342 status = ath6kl_bmi_write(ar,
343 ath6kl_get_hi_item_addr(ar,
344 HI_ITEM(hi_mbox_isr_yield_limit)),
345 (u8 *)&mbox_isr_yield_val,
346 4);
347 if (status) {
348 ath6kl_err("bmi_write_memory for yield limit failed\n");
349 goto out;
350 }
351 }
352
353out:
354 return status;
355}
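
The single 32-bit value written to hi_mbox_io_block_sz carries two fields: the mailbox block size in the low 16 bits and, when non-zero, the HTC control-buffer count in bits 16-23. A packing/unpacking sketch; the field widths are inferred from the code above rather than quoted from a hardware spec:

	u32 val = blk_size;			/* low 16 bits: block size */

	val |= ((u32)htc_ctrl_buf) << 16;	/* bits 16-23: ctrl buffers */

	/* unpacking on the target side */
	u16 size   = val & 0xffff;		/* block size */
	u8  n_bufs = (val >> 16) & 0xff;	/* 0 means "use the default" */
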
356
357#define REG_DUMP_COUNT_AR6003 60
358#define REGISTER_DUMP_LEN_MAX 60
359
360static void ath6kl_dump_target_assert_info(struct ath6kl *ar)
361{
362 u32 address;
363 u32 regdump_loc = 0;
364 int status;
365 u32 regdump_val[REGISTER_DUMP_LEN_MAX];
366 u32 i;
367
368 if (ar->target_type != TARGET_TYPE_AR6003)
369 return;
370
371 /* the reg dump pointer is copied to the host interest area */
372 address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state));
373 address = TARG_VTOP(address);
374
375 /* read RAM location through diagnostic window */
376 status = ath6kl_read_reg_diag(ar, &address, &regdump_loc);
377
378 if (status || !regdump_loc) {
379 ath6kl_err("failed to get ptr to register dump area\n");
380 return;
381 }
382
383 ath6kl_dbg(ATH6KL_DBG_TRC, "location of register dump data: 0x%X\n",
384 regdump_loc);
385
386 regdump_loc = TARG_VTOP(regdump_loc);
387
388 /* fetch register dump data */
389 status = ath6kl_access_datadiag(ar,
390 regdump_loc,
391 (u8 *)&regdump_val[0],
392 REG_DUMP_COUNT_AR6003 * (sizeof(u32)),
393 true);
394
395 if (status) {
396 ath6kl_err("failed to get register dump\n");
397 return;
398 }
399 ath6kl_dbg(ATH6KL_DBG_TRC, "Register Dump:\n");
400
401 for (i = 0; i < REG_DUMP_COUNT_AR6003; i++)
402 ath6kl_dbg(ATH6KL_DBG_TRC, " %d : 0x%8.8X\n",
403 i, regdump_val[i]);
404
405}
406
407void ath6kl_target_failure(struct ath6kl *ar)
408{
409 ath6kl_err("target asserted\n");
410
411 /* try dumping target assertion information (if any) */
412 ath6kl_dump_target_assert_info(ar);
413
414}
415
416static int ath6kl_target_config_wlan_params(struct ath6kl *ar)
417{
418 int status = 0;
419
420 /*
421 * Configure the device for rx dot11 header rules. "0,0" are the
422 * default values. Required if checksum offload is needed. Set
423 * RxMetaVersion to 2.
424 */
425 if (ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi,
426 ar->rx_meta_ver, 0, 0)) {
427 ath6kl_err("unable to set the rx frame format\n");
428 status = -EIO;
429 }
430
431 if (ar->conf_flags & ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN)
432 if ((ath6kl_wmi_pmparams_cmd(ar->wmi, 0, 1, 0, 0, 1,
433 IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN)) != 0) {
434 ath6kl_err("unable to set power save fail event policy\n");
435 status = -EIO;
436 }
437
438 if (!(ar->conf_flags & ATH6KL_CONF_IGNORE_ERP_BARKER))
439 if ((ath6kl_wmi_set_lpreamble_cmd(ar->wmi, 0,
440 WMI_DONOT_IGNORE_BARKER_IN_ERP)) != 0) {
441 ath6kl_err("unable to set barker preamble policy\n");
442 status = -EIO;
443 }
444
445 if (ath6kl_wmi_set_keepalive_cmd(ar->wmi,
446 WLAN_CONFIG_KEEP_ALIVE_INTERVAL)) {
447 ath6kl_err("unable to set keep alive interval\n");
448 status = -EIO;
449 }
450
451 if (ath6kl_wmi_disctimeout_cmd(ar->wmi,
452 WLAN_CONFIG_DISCONNECT_TIMEOUT)) {
453 ath6kl_err("unable to set disconnect timeout\n");
454 status = -EIO;
455 }
456
457 if (!(ar->conf_flags & ATH6KL_CONF_ENABLE_TX_BURST))
458 if (ath6kl_wmi_set_wmm_txop(ar->wmi, WMI_TXOP_DISABLED)) {
459 ath6kl_err("unable to set txop bursting\n");
460 status = -EIO;
461 }
462
463 return status;
464}
465
466int ath6kl_configure_target(struct ath6kl *ar)
467{
468 u32 param, ram_reserved_size;
469 u8 fw_iftype;
470
471 fw_iftype = ath6kl_get_fw_iftype(ar);
472 if (fw_iftype == 0xff)
473 return -EINVAL;
474
475	/* Tell the target which HTC version is being used */
476 param = HTC_PROTOCOL_VERSION;
477 if (ath6kl_bmi_write(ar,
478 ath6kl_get_hi_item_addr(ar,
479 HI_ITEM(hi_app_host_interest)),
480 (u8 *)&param, 4) != 0) {
481 ath6kl_err("bmi_write_memory for htc version failed\n");
482 return -EIO;
483 }
484
485 /* set the firmware mode to STA/IBSS/AP */
486 param = 0;
487
488 if (ath6kl_bmi_read(ar,
489 ath6kl_get_hi_item_addr(ar,
490 HI_ITEM(hi_option_flag)),
491 (u8 *)&param, 4) != 0) {
492 ath6kl_err("bmi_read_memory for setting fwmode failed\n");
493 return -EIO;
494 }
495
496 param |= (1 << HI_OPTION_NUM_DEV_SHIFT);
497 param |= (fw_iftype << HI_OPTION_FW_MODE_SHIFT);
498 param |= (0 << HI_OPTION_MAC_ADDR_METHOD_SHIFT);
499 param |= (0 << HI_OPTION_FW_BRIDGE_SHIFT);
500
501 if (ath6kl_bmi_write(ar,
502 ath6kl_get_hi_item_addr(ar,
503 HI_ITEM(hi_option_flag)),
504 (u8 *)&param,
505 4) != 0) {
506 ath6kl_err("bmi_write_memory for setting fwmode failed\n");
507 return -EIO;
508 }
509
510 ath6kl_dbg(ATH6KL_DBG_TRC, "firmware mode set\n");
511
512 /*
513	 * Hardcode the address used for the extended board data.
514	 * Ideally this should be pre-allocated by the OS at boot time,
515	 * but since it is a new feature and board data is loaded
516	 * at init time, we have to work around this from the host.
517	 * It is difficult to patch the firmware boot code,
518	 * but possible in theory.
519 */
520
521 if (ar->target_type == TARGET_TYPE_AR6003) {
522 if (ar->version.target_ver == AR6003_REV2_VERSION) {
523 param = AR6003_REV2_BOARD_EXT_DATA_ADDRESS;
524 ram_reserved_size = AR6003_REV2_RAM_RESERVE_SIZE;
525 } else {
526 param = AR6003_REV3_BOARD_EXT_DATA_ADDRESS;
527 ram_reserved_size = AR6003_REV3_RAM_RESERVE_SIZE;
528 }
529
530 if (ath6kl_bmi_write(ar,
531 ath6kl_get_hi_item_addr(ar,
532 HI_ITEM(hi_board_ext_data)),
533 (u8 *)&param, 4) != 0) {
534 ath6kl_err("bmi_write_memory for hi_board_ext_data failed\n");
535 return -EIO;
536 }
537 if (ath6kl_bmi_write(ar,
538 ath6kl_get_hi_item_addr(ar,
539 HI_ITEM(hi_end_ram_reserve_sz)),
540 (u8 *)&ram_reserved_size, 4) != 0) {
541 ath6kl_err("bmi_write_memory for hi_end_ram_reserve_sz failed\n");
542 return -EIO;
543 }
544 }
545
546 /* set the block size for the target */
547 if (ath6kl_set_htc_params(ar, MBOX_YIELD_LIMIT, 0))
548 /* use default number of control buffers */
549 return -EIO;
550
551 return 0;
552}
553
554struct ath6kl *ath6kl_core_alloc(struct device *sdev)
555{
556 struct net_device *dev;
557 struct ath6kl *ar;
558 struct wireless_dev *wdev;
559
560 wdev = ath6kl_cfg80211_init(sdev);
561 if (!wdev) {
562 ath6kl_err("ath6kl_cfg80211_init failed\n");
563 return NULL;
564 }
565
566 ar = wdev_priv(wdev);
567 ar->dev = sdev;
568 ar->wdev = wdev;
569 wdev->iftype = NL80211_IFTYPE_STATION;
570
571 dev = alloc_netdev(0, "wlan%d", ether_setup);
572 if (!dev) {
573 ath6kl_err("no memory for network device instance\n");
574 ath6kl_cfg80211_deinit(ar);
575 return NULL;
576 }
577
578 dev->ieee80211_ptr = wdev;
579 SET_NETDEV_DEV(dev, wiphy_dev(wdev->wiphy));
580 wdev->netdev = dev;
581 ar->sme_state = SME_DISCONNECTED;
582 ar->auto_auth_stage = AUTH_IDLE;
583
584 init_netdev(dev);
585
586 ar->net_dev = dev;
587 set_bit(WLAN_ENABLED, &ar->flag);
588
589 ar->wlan_pwr_state = WLAN_POWER_STATE_ON;
590
591 spin_lock_init(&ar->lock);
592
593 ath6kl_init_control_info(ar);
594 init_waitqueue_head(&ar->event_wq);
595 sema_init(&ar->sem, 1);
596 clear_bit(DESTROY_IN_PROGRESS, &ar->flag);
597
598 INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue);
599
600 setup_timer(&ar->disconnect_timer, disconnect_timer_handler,
601 (unsigned long) dev);
602
603 return ar;
604}
605
606int ath6kl_unavail_ev(struct ath6kl *ar)
607{
608 ath6kl_destroy(ar->net_dev, 1);
609
610 return 0;
611}
612
613/* firmware upload */
614static u32 ath6kl_get_load_address(u32 target_ver, enum addr_type type)
615{
616 WARN_ON(target_ver != AR6003_REV2_VERSION &&
617 target_ver != AR6003_REV3_VERSION);
618
619 switch (type) {
620 case DATASET_PATCH_ADDR:
621 return (target_ver == AR6003_REV2_VERSION) ?
622 AR6003_REV2_DATASET_PATCH_ADDRESS :
623 AR6003_REV3_DATASET_PATCH_ADDRESS;
624 case APP_LOAD_ADDR:
625 return (target_ver == AR6003_REV2_VERSION) ?
626 AR6003_REV2_APP_LOAD_ADDRESS :
627 0x1234;
628 case APP_START_OVERRIDE_ADDR:
629 return (target_ver == AR6003_REV2_VERSION) ?
630 AR6003_REV2_APP_START_OVERRIDE :
631 AR6003_REV3_APP_START_OVERRIDE;
632 default:
633 return 0;
634 }
635}
636
637static int ath6kl_get_fw(struct ath6kl *ar, const char *filename,
638 u8 **fw, size_t *fw_len)
639{
640 const struct firmware *fw_entry;
641 int ret;
642
643 ret = request_firmware(&fw_entry, filename, ar->dev);
644 if (ret)
645 return ret;
646
647 *fw_len = fw_entry->size;
648 *fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
649
650 if (*fw == NULL)
651 ret = -ENOMEM;
652
653 release_firmware(fw_entry);
654
655 return ret;
656}
657
658static int ath6kl_fetch_board_file(struct ath6kl *ar)
659{
660 const char *filename;
661 int ret;
662
663 switch (ar->version.target_ver) {
664 case AR6003_REV2_VERSION:
665 filename = AR6003_REV2_BOARD_DATA_FILE;
666 break;
667 default:
668 filename = AR6003_REV3_BOARD_DATA_FILE;
669 break;
670 }
671
672 ret = ath6kl_get_fw(ar, filename, &ar->fw_board,
673 &ar->fw_board_len);
674 if (ret == 0) {
675 /* managed to get proper board file */
676 return 0;
677 }
678
679 /* there was no proper board file, try to use default instead */
680 ath6kl_warn("Failed to get board file %s (%d), trying to find default board file.\n",
681 filename, ret);
682
683 switch (ar->version.target_ver) {
684 case AR6003_REV2_VERSION:
685 filename = AR6003_REV2_DEFAULT_BOARD_DATA_FILE;
686 break;
687 default:
688 filename = AR6003_REV3_DEFAULT_BOARD_DATA_FILE;
689 break;
690 }
691
692 ret = ath6kl_get_fw(ar, filename, &ar->fw_board,
693 &ar->fw_board_len);
694 if (ret) {
695 ath6kl_err("Failed to get default board file %s: %d\n",
696 filename, ret);
697 return ret;
698 }
699
700	ath6kl_warn("WARNING! No proper board file was found; using a default board file instead.\n");
701	ath6kl_warn("Most likely your hardware won't work as specified. Install the correct board file!\n");
702
703 return 0;
704}
705
706
707static int ath6kl_upload_board_file(struct ath6kl *ar)
708{
709 u32 board_address, board_ext_address, param;
710 int ret;
711
712 if (ar->fw_board == NULL) {
713 ret = ath6kl_fetch_board_file(ar);
714 if (ret)
715 return ret;
716 }
717
718 /* Determine where in Target RAM to write Board Data */
719 ath6kl_bmi_read(ar,
720 ath6kl_get_hi_item_addr(ar,
721 HI_ITEM(hi_board_data)),
722 (u8 *) &board_address, 4);
723 ath6kl_dbg(ATH6KL_DBG_TRC, "board data download addr: 0x%x\n",
724 board_address);
725
726 /* determine where in target ram to write extended board data */
727 ath6kl_bmi_read(ar,
728 ath6kl_get_hi_item_addr(ar,
729 HI_ITEM(hi_board_ext_data)),
730 (u8 *) &board_ext_address, 4);
731
732 ath6kl_dbg(ATH6KL_DBG_TRC, "board file download addr: 0x%x\n",
733 board_ext_address);
734
735 if (board_ext_address == 0) {
736 ath6kl_err("Failed to get board file target address.\n");
737 return -EINVAL;
738 }
739
740 if (ar->fw_board_len == (AR6003_BOARD_DATA_SZ +
741 AR6003_BOARD_EXT_DATA_SZ)) {
742 /* write extended board data */
743 ret = ath6kl_bmi_write(ar, board_ext_address,
744 ar->fw_board + AR6003_BOARD_DATA_SZ,
745 AR6003_BOARD_EXT_DATA_SZ);
746
747 if (ret) {
748 ath6kl_err("Failed to write extended board data: %d\n",
749 ret);
750 return ret;
751 }
752
753 /* record that extended board data is initialized */
754 param = (AR6003_BOARD_EXT_DATA_SZ << 16) | 1;
755 ath6kl_bmi_write(ar,
756 ath6kl_get_hi_item_addr(ar,
757 HI_ITEM(hi_board_ext_data_config)),
758 (unsigned char *) &param, 4);
759 }
760
761 if (ar->fw_board_len < AR6003_BOARD_DATA_SZ) {
762 ath6kl_err("Too small board file: %zu\n", ar->fw_board_len);
763 ret = -EINVAL;
764 return ret;
765 }
766
767 ret = ath6kl_bmi_write(ar, board_address, ar->fw_board,
768 AR6003_BOARD_DATA_SZ);
769
770 if (ret) {
771 ath6kl_err("Board file bmi write failed: %d\n", ret);
772 return ret;
773 }
774
775 /* record the fact that Board Data IS initialized */
776 param = 1;
777 ath6kl_bmi_write(ar,
778 ath6kl_get_hi_item_addr(ar,
779 HI_ITEM(hi_board_data_initialized)),
780 (u8 *)&param, 4);
781
782 return ret;
783}
784
785static int ath6kl_upload_otp(struct ath6kl *ar)
786{
787 const char *filename;
788 u32 address, param;
789 int ret;
790
791 switch (ar->version.target_ver) {
792 case AR6003_REV2_VERSION:
793 filename = AR6003_REV2_OTP_FILE;
794 break;
795 default:
796 filename = AR6003_REV3_OTP_FILE;
797 break;
798 }
799
800 if (ar->fw_otp == NULL) {
801 ret = ath6kl_get_fw(ar, filename, &ar->fw_otp,
802 &ar->fw_otp_len);
803 if (ret) {
804 ath6kl_err("Failed to get OTP file %s: %d\n",
805 filename, ret);
806 return ret;
807 }
808 }
809
810 address = ath6kl_get_load_address(ar->version.target_ver,
811 APP_LOAD_ADDR);
812
813 ret = ath6kl_bmi_fast_download(ar, address, ar->fw_otp,
814 ar->fw_otp_len);
815 if (ret) {
816 ath6kl_err("Failed to upload OTP file: %d\n", ret);
817 return ret;
818 }
819
820 /* execute the OTP code */
821 param = 0;
822 address = ath6kl_get_load_address(ar->version.target_ver,
823 APP_START_OVERRIDE_ADDR);
824 ath6kl_bmi_execute(ar, address, &param);
825
826 return ret;
827}
828
829static int ath6kl_upload_firmware(struct ath6kl *ar)
830{
831 const char *filename;
832 u32 address;
833 int ret;
834
835 switch (ar->version.target_ver) {
836 case AR6003_REV2_VERSION:
837 filename = AR6003_REV2_FIRMWARE_FILE;
838 break;
839 default:
840 filename = AR6003_REV3_FIRMWARE_FILE;
841 break;
842 }
843
844 if (ar->fw == NULL) {
845 ret = ath6kl_get_fw(ar, filename, &ar->fw, &ar->fw_len);
846 if (ret) {
847 ath6kl_err("Failed to get firmware file %s: %d\n",
848 filename, ret);
849 return ret;
850 }
851 }
852
853 address = ath6kl_get_load_address(ar->version.target_ver,
854 APP_LOAD_ADDR);
855
856 ret = ath6kl_bmi_fast_download(ar, address, ar->fw, ar->fw_len);
857
858 if (ret) {
859 ath6kl_err("Failed to write firmware: %d\n", ret);
860 return ret;
861 }
862
863 /* Set starting address for firmware */
864 address = ath6kl_get_load_address(ar->version.target_ver,
865 APP_START_OVERRIDE_ADDR);
866 ath6kl_bmi_set_app_start(ar, address);
867
868 return ret;
869}
870
871static int ath6kl_upload_patch(struct ath6kl *ar)
872{
873 const char *filename;
874 u32 address, param;
875 int ret;
876
877 switch (ar->version.target_ver) {
878 case AR6003_REV2_VERSION:
879 filename = AR6003_REV2_PATCH_FILE;
880 break;
881 default:
882 filename = AR6003_REV3_PATCH_FILE;
883 break;
884 }
885
886 if (ar->fw_patch == NULL) {
887 ret = ath6kl_get_fw(ar, filename, &ar->fw_patch,
888 &ar->fw_patch_len);
889 if (ret) {
890 ath6kl_err("Failed to get patch file %s: %d\n",
891 filename, ret);
892 return ret;
893 }
894 }
895
896 address = ath6kl_get_load_address(ar->version.target_ver,
897 DATASET_PATCH_ADDR);
898
899 ret = ath6kl_bmi_write(ar, address, ar->fw_patch, ar->fw_patch_len);
900 if (ret) {
901 ath6kl_err("Failed to write patch file: %d\n", ret);
902 return ret;
903 }
904
905 param = address;
906 ath6kl_bmi_write(ar,
907 ath6kl_get_hi_item_addr(ar,
908 HI_ITEM(hi_dset_list_head)),
909 (unsigned char *) &param, 4);
910
911 return 0;
912}
913
914static int ath6kl_init_upload(struct ath6kl *ar)
915{
916 u32 param, options, sleep, address;
917 int status = 0;
918
919 if (ar->target_type != TARGET_TYPE_AR6003)
920 return -EINVAL;
921
922 /* temporarily disable system sleep */
923 address = MBOX_BASE_ADDRESS + LOCAL_SCRATCH_ADDRESS;
924 status = ath6kl_bmi_reg_read(ar, address, &param);
925 if (status)
926 return status;
927
928 options = param;
929
930 param |= ATH6KL_OPTION_SLEEP_DISABLE;
931 status = ath6kl_bmi_reg_write(ar, address, param);
932 if (status)
933 return status;
934
935 address = RTC_BASE_ADDRESS + SYSTEM_SLEEP_ADDRESS;
936 status = ath6kl_bmi_reg_read(ar, address, &param);
937 if (status)
938 return status;
939
940 sleep = param;
941
942 param |= SM(SYSTEM_SLEEP_DISABLE, 1);
943 status = ath6kl_bmi_reg_write(ar, address, param);
944 if (status)
945 return status;
946
947 ath6kl_dbg(ATH6KL_DBG_TRC, "old options: %d, old sleep: %d\n",
948 options, sleep);
949
950 /* program analog PLL register */
951 status = ath6kl_bmi_reg_write(ar, ATH6KL_ANALOG_PLL_REGISTER,
952 0xF9104001);
953 if (status)
954 return status;
955
956 /* Run at 80/88MHz by default */
957 param = SM(CPU_CLOCK_STANDARD, 1);
958
959 address = RTC_BASE_ADDRESS + CPU_CLOCK_ADDRESS;
960 status = ath6kl_bmi_reg_write(ar, address, param);
961 if (status)
962 return status;
963
964 param = 0;
965 address = RTC_BASE_ADDRESS + LPO_CAL_ADDRESS;
966 param = SM(LPO_CAL_ENABLE, 1);
967 status = ath6kl_bmi_reg_write(ar, address, param);
968 if (status)
969 return status;
970
971 /* WAR to avoid SDIO CRC err */
972 if (ar->version.target_ver == AR6003_REV2_VERSION) {
973 ath6kl_err("temporary war to avoid sdio crc error\n");
974
975 param = 0x20;
976
977 address = GPIO_BASE_ADDRESS + GPIO_PIN10_ADDRESS;
978 status = ath6kl_bmi_reg_write(ar, address, param);
979 if (status)
980 return status;
981
982 address = GPIO_BASE_ADDRESS + GPIO_PIN11_ADDRESS;
983 status = ath6kl_bmi_reg_write(ar, address, param);
984 if (status)
985 return status;
986
987 address = GPIO_BASE_ADDRESS + GPIO_PIN12_ADDRESS;
988 status = ath6kl_bmi_reg_write(ar, address, param);
989 if (status)
990 return status;
991
992 address = GPIO_BASE_ADDRESS + GPIO_PIN13_ADDRESS;
993 status = ath6kl_bmi_reg_write(ar, address, param);
994 if (status)
995 return status;
996 }
997
998 /* write EEPROM data to Target RAM */
999 status = ath6kl_upload_board_file(ar);
1000 if (status)
1001 return status;
1002
1003 /* transfer One time Programmable data */
1004 status = ath6kl_upload_otp(ar);
1005 if (status)
1006 return status;
1007
1008 /* Download Target firmware */
1009 status = ath6kl_upload_firmware(ar);
1010 if (status)
1011 return status;
1012
1013 status = ath6kl_upload_patch(ar);
1014 if (status)
1015 return status;
1016
1017 /* Restore system sleep */
1018 address = RTC_BASE_ADDRESS + SYSTEM_SLEEP_ADDRESS;
1019 status = ath6kl_bmi_reg_write(ar, address, sleep);
1020 if (status)
1021 return status;
1022
1023 address = MBOX_BASE_ADDRESS + LOCAL_SCRATCH_ADDRESS;
1024 param = options | 0x20;
1025 status = ath6kl_bmi_reg_write(ar, address, param);
1026 if (status)
1027 return status;
1028
1029 /* Configure GPIO AR6003 UART */
1030 param = CONFIG_AR600x_DEBUG_UART_TX_PIN;
1031 status = ath6kl_bmi_write(ar,
1032 ath6kl_get_hi_item_addr(ar,
1033 HI_ITEM(hi_dbg_uart_txpin)),
1034 (u8 *)&param, 4);
1035
1036 return status;
1037}
1038
1039static int ath6kl_init(struct net_device *dev)
1040{
1041 struct ath6kl *ar = ath6kl_priv(dev);
1042 int status = 0;
1043 s32 timeleft;
1044
1045 if (!ar)
1046 return -EIO;
1047
1048	/* Do we need to finish the BMI phase? */
1049 if (ath6kl_bmi_done(ar)) {
1050 status = -EIO;
1051 goto ath6kl_init_done;
1052 }
1053
1054 /* Indicate that WMI is enabled (although not ready yet) */
1055 set_bit(WMI_ENABLED, &ar->flag);
1056 ar->wmi = ath6kl_wmi_init(ar);
1057 if (!ar->wmi) {
1058 ath6kl_err("failed to initialize wmi\n");
1059 status = -EIO;
1060 goto ath6kl_init_done;
1061 }
1062
1063 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi);
1064
1065 wlan_node_table_init(&ar->scan_table);
1066
1067 /*
1068 * The reason we have to wait for the target here is that the
1069 * driver layer has to init BMI in order to set the host block
1070 * size.
1071 */
1072 if (htc_wait_target(ar->htc_target)) {
1073 status = -EIO;
1074 goto err_node_cleanup;
1075 }
1076
1077 if (ath6kl_init_service_ep(ar)) {
1078 status = -EIO;
1079 goto err_cleanup_scatter;
1080 }
1081
1082 /* setup access class priority mappings */
1083 ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest */
1084 ar->ac_stream_pri_map[WMM_AC_BE] = 1;
1085 ar->ac_stream_pri_map[WMM_AC_VI] = 2;
1086 ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */
1087
1088 /* give our connected endpoints some buffers */
1089 ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep);
1090 ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]);
1091
1092 /* allocate some buffers that handle larger AMSDU frames */
1093 ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS);
1094
1095 /* setup credit distribution */
1096 ath6k_setup_credit_dist(ar->htc_target, &ar->credit_state_info);
1097
1098 ath6kl_cookie_init(ar);
1099
1100 /* start HTC */
1101 status = htc_start(ar->htc_target);
1102
1103 if (status) {
1104 ath6kl_cookie_cleanup(ar);
1105 goto err_rxbuf_cleanup;
1106 }
1107
1108	/* Wait for the WMI ready event */
1109 timeleft = wait_event_interruptible_timeout(ar->event_wq,
1110 test_bit(WMI_READY,
1111 &ar->flag),
1112 WMI_TIMEOUT);
1113
1114 if (ar->version.abi_ver != ATH6KL_ABI_VERSION) {
1115 ath6kl_err("abi version mismatch: host(0x%x), target(0x%x)\n",
1116 ATH6KL_ABI_VERSION, ar->version.abi_ver);
1117 status = -EIO;
1118 goto err_htc_stop;
1119 }
1120
1121 if (!timeleft || signal_pending(current)) {
1122 ath6kl_err("wmi is not ready or wait was interrupted\n");
1123 status = -EIO;
1124 goto err_htc_stop;
1125 }
1126
1127 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: wmi is ready\n", __func__);
1128
1129	/* communicate the wmi protocol version to the target */
1130	if (ath6kl_set_host_app_area(ar) != 0)
1131 ath6kl_err("unable to set the host app area\n");
1132
1133 ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER |
1134 ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST;
1135
1136 status = ath6kl_target_config_wlan_params(ar);
1137 if (!status)
1138 goto ath6kl_init_done;
1139
1140err_htc_stop:
1141 htc_stop(ar->htc_target);
1142err_rxbuf_cleanup:
1143 htc_flush_rx_buf(ar->htc_target);
1144 ath6kl_cleanup_amsdu_rxbufs(ar);
1145err_cleanup_scatter:
1146 ath6kl_hif_cleanup_scatter(ar);
1147err_node_cleanup:
1148 wlan_node_table_cleanup(&ar->scan_table);
1149 ath6kl_wmi_shutdown(ar->wmi);
1150 clear_bit(WMI_ENABLED, &ar->flag);
1151 ar->wmi = NULL;
1152
1153ath6kl_init_done:
1154 return status;
1155}
1156
1157int ath6kl_core_init(struct ath6kl *ar)
1158{
1159 int ret = 0;
1160 struct ath6kl_bmi_target_info targ_info;
1161
1162 ar->ath6kl_wq = create_singlethread_workqueue("ath6kl");
1163 if (!ar->ath6kl_wq)
1164 return -ENOMEM;
1165
1166 ret = ath6kl_bmi_init(ar);
1167 if (ret)
1168 goto err_wq;
1169
1170 ret = ath6kl_bmi_get_target_info(ar, &targ_info);
1171 if (ret)
1172 goto err_bmi_cleanup;
1173
1174 ar->version.target_ver = le32_to_cpu(targ_info.version);
1175 ar->target_type = le32_to_cpu(targ_info.type);
1176 ar->wdev->wiphy->hw_version = le32_to_cpu(targ_info.version);
1177
1178 ret = ath6kl_configure_target(ar);
1179 if (ret)
1180 goto err_bmi_cleanup;
1181
1182 ar->htc_target = htc_create(ar);
1183
1184 if (!ar->htc_target) {
1185 ret = -ENOMEM;
1186 goto err_bmi_cleanup;
1187 }
1188
1189 ar->aggr_cntxt = aggr_init(ar->net_dev);
1190 if (!ar->aggr_cntxt) {
1191 ath6kl_err("failed to initialize aggr\n");
1192 ret = -ENOMEM;
1193 goto err_htc_cleanup;
1194 }
1195
1196 ret = ath6kl_init_upload(ar);
1197 if (ret)
1198 goto err_htc_cleanup;
1199
1200 ret = ath6kl_init(ar->net_dev);
1201 if (ret)
1202 goto err_htc_cleanup;
1203
1204 /* This runs the init function if registered */
1205 ret = register_netdev(ar->net_dev);
1206 if (ret) {
1207 ath6kl_err("register_netdev failed\n");
1208 ath6kl_destroy(ar->net_dev, 0);
1209 return ret;
1210 }
1211
1212 set_bit(NETDEV_REGISTERED, &ar->flag);
1213
1214 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n",
1215 __func__, ar->net_dev->name, ar->net_dev, ar);
1216
1217 return ret;
1218
1219err_htc_cleanup:
1220 htc_cleanup(ar->htc_target);
1221err_bmi_cleanup:
1222 ath6kl_bmi_cleanup(ar);
1223err_wq:
1224 destroy_workqueue(ar->ath6kl_wq);
1225 return ret;
1226}
1227
1228void ath6kl_stop_txrx(struct ath6kl *ar)
1229{
1230 struct net_device *ndev = ar->net_dev;
1231
1232 if (!ndev)
1233 return;
1234
1235 set_bit(DESTROY_IN_PROGRESS, &ar->flag);
1236
1237 if (down_interruptible(&ar->sem)) {
1238 ath6kl_err("down_interruptible failed\n");
1239 return;
1240 }
1241
1242 if (ar->wlan_pwr_state != WLAN_POWER_STATE_CUT_PWR)
1243 ath6kl_stop_endpoint(ndev, false, true);
1244
1245 clear_bit(WLAN_ENABLED, &ar->flag);
1246}
1247
1248/*
1249 * We need to differentiate between the surprise and planned removal of the
1250 * device because of the following considerations:
1251 *
1252 * - In case of surprise removal, the hcd already frees up the pending
1253 * requests for the device, so there is no need to unregister the function
1254 * driver in order to get these requests back. For planned removal, the
1255 * function driver has to explicitly unregister itself to have the hcd
1256 * return all pending requests before the device data structures are freed.
1257 * Note that as per the current implementation, the function driver will
1258 * end up releasing all the devices since there is no API to selectively
1259 * release a particular device.
1260 *
1261 * - Certain commands issued to the target can be skipped for surprise
1262 * removal since they will not go through anyway.
1263 */
1264void ath6kl_destroy(struct net_device *dev, unsigned int unregister)
1265{
1266 struct ath6kl *ar;
1267
1268 if (!dev || !ath6kl_priv(dev)) {
1269 ath6kl_err("failed to get device structure\n");
1270 return;
1271 }
1272
1273 ar = ath6kl_priv(dev);
1274
1275 destroy_workqueue(ar->ath6kl_wq);
1276
1277 if (ar->htc_target)
1278 htc_cleanup(ar->htc_target);
1279
1280 aggr_module_destroy(ar->aggr_cntxt);
1281
1282 ath6kl_cookie_cleanup(ar);
1283
1284 ath6kl_cleanup_amsdu_rxbufs(ar);
1285
1286 ath6kl_bmi_cleanup(ar);
1287
1288 if (unregister && test_bit(NETDEV_REGISTERED, &ar->flag)) {
1289 unregister_netdev(dev);
1290 clear_bit(NETDEV_REGISTERED, &ar->flag);
1291 }
1292
1293 free_netdev(dev);
1294
1295 wlan_node_table_cleanup(&ar->scan_table);
1296
1297 kfree(ar->fw_board);
1298 kfree(ar->fw_otp);
1299 kfree(ar->fw);
1300 kfree(ar->fw_patch);
1301
1302 ath6kl_cfg80211_deinit(ar);
1303}
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
new file mode 100644
index 000000000000..284e3e96ff3e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -0,0 +1,1337 @@
1/*
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "hif-ops.h"
19#include "cfg80211.h"
20#include "target.h"
21#include "debug.h"
22
23struct ath6kl_sta *ath6kl_find_sta(struct ath6kl *ar, u8 *node_addr)
24{
25 struct ath6kl_sta *conn = NULL;
26 u8 i, max_conn;
27
28 max_conn = (ar->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0;
29
30 for (i = 0; i < max_conn; i++) {
31 if (memcmp(node_addr, ar->sta_list[i].mac, ETH_ALEN) == 0) {
32 conn = &ar->sta_list[i];
33 break;
34 }
35 }
36
37 return conn;
38}
39
40struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid)
41{
42 struct ath6kl_sta *conn = NULL;
43 u8 ctr;
44
45 for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
46 if (ar->sta_list[ctr].aid == aid) {
47 conn = &ar->sta_list[ctr];
48 break;
49 }
50 }
51 return conn;
52}
53
54static void ath6kl_add_new_sta(struct ath6kl *ar, u8 *mac, u16 aid, u8 *wpaie,
55 u8 ielen, u8 keymgmt, u8 ucipher, u8 auth)
56{
57 struct ath6kl_sta *sta;
58 u8 free_slot;
59
60 free_slot = aid - 1;
61
62 sta = &ar->sta_list[free_slot];
63 memcpy(sta->mac, mac, ETH_ALEN);
64 memcpy(sta->wpa_ie, wpaie, ielen);
65 sta->aid = aid;
66 sta->keymgmt = keymgmt;
67 sta->ucipher = ucipher;
68 sta->auth = auth;
69
70 ar->sta_list_index = ar->sta_list_index | (1 << free_slot);
71 ar->ap_stats.sta[free_slot].aid = cpu_to_le32(aid);
72}
73
74static void ath6kl_sta_cleanup(struct ath6kl *ar, u8 i)
75{
76 struct ath6kl_sta *sta = &ar->sta_list[i];
77
78 /* empty the queued pkts in the PS queue if any */
79 spin_lock_bh(&sta->psq_lock);
80 skb_queue_purge(&sta->psq);
81 spin_unlock_bh(&sta->psq_lock);
82
83 memset(&ar->ap_stats.sta[sta->aid - 1], 0,
84 sizeof(struct wmi_per_sta_stat));
85 memset(sta->mac, 0, ETH_ALEN);
86 memset(sta->wpa_ie, 0, ATH6KL_MAX_IE);
87 sta->aid = 0;
88 sta->sta_flags = 0;
89
90 ar->sta_list_index = ar->sta_list_index & ~(1 << i);
91
92}
93
94static u8 ath6kl_remove_sta(struct ath6kl *ar, u8 *mac, u16 reason)
95{
96 u8 i, removed = 0;
97
98 if (is_zero_ether_addr(mac))
99 return removed;
100
101 if (is_broadcast_ether_addr(mac)) {
102		ath6kl_dbg(ATH6KL_DBG_TRC, "deleting all stations\n");
103
104 for (i = 0; i < AP_MAX_NUM_STA; i++) {
105 if (!is_zero_ether_addr(ar->sta_list[i].mac)) {
106 ath6kl_sta_cleanup(ar, i);
107 removed = 1;
108 }
109 }
110 } else {
111 for (i = 0; i < AP_MAX_NUM_STA; i++) {
112 if (memcmp(ar->sta_list[i].mac, mac, ETH_ALEN) == 0) {
113 ath6kl_dbg(ATH6KL_DBG_TRC,
114 "deleting station %pM aid=%d reason=%d\n",
115 mac, ar->sta_list[i].aid, reason);
116 ath6kl_sta_cleanup(ar, i);
117 removed = 1;
118 break;
119 }
120 }
121 }
122
123 return removed;
124}
125
126enum htc_endpoint_id ath6kl_ac2_endpoint_id(void *devt, u8 ac)
127{
128 struct ath6kl *ar = devt;
129 return ar->ac2ep_map[ac];
130}
131
132struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar)
133{
134 struct ath6kl_cookie *cookie;
135
136 cookie = ar->cookie_list;
137 if (cookie != NULL) {
138 ar->cookie_list = cookie->arc_list_next;
139 ar->cookie_count--;
140 }
141
142 return cookie;
143}
144
145void ath6kl_cookie_init(struct ath6kl *ar)
146{
147 u32 i;
148
149 ar->cookie_list = NULL;
150 ar->cookie_count = 0;
151
152 memset(ar->cookie_mem, 0, sizeof(ar->cookie_mem));
153
154 for (i = 0; i < MAX_COOKIE_NUM; i++)
155 ath6kl_free_cookie(ar, &ar->cookie_mem[i]);
156}
157
158void ath6kl_cookie_cleanup(struct ath6kl *ar)
159{
160 ar->cookie_list = NULL;
161 ar->cookie_count = 0;
162}
163
164void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie)
165{
166 /* Insert first */
167
168 if (!ar || !cookie)
169 return;
170
171 cookie->arc_list_next = ar->cookie_list;
172 ar->cookie_list = cookie;
173 ar->cookie_count++;
174}
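
Taken together, these functions implement a fixed-size LIFO free list threaded through arc_list_next: init pushes all MAX_COOKIE_NUM entries, alloc pops the head, free pushes back. A minimal sketch of the cycle a send path would follow; the skb assignment is illustrative of attaching TX context, not quoted from the driver:

	struct ath6kl_cookie *cookie = ath6kl_alloc_cookie(ar);

	if (cookie) {
		cookie->skb = skb;	/* illustrative: attach tx context */
		/* ... hand the packet to HTC; then, on tx completion: */
		ath6kl_free_cookie(ar, cookie);
	}	/* NULL means all MAX_COOKIE_NUM cookies are in flight */
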
175
176/* set the window address register (using 4-byte register access). */
177static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr)
178{
179 int status;
180 u8 addr_val[4];
181 s32 i;
182
183 /*
184	 * Write bytes 1,2,3 of the register to set the upper address bytes;
185	 * the LSB is written last to initiate the access cycle.
186 */
187
188 for (i = 1; i <= 3; i++) {
189 /*
190 * Fill the buffer with the address byte value we want to
191 * hit 4 times.
192 */
193 memset(addr_val, ((u8 *)&addr)[i], 4);
194
195 /*
196 * Hit each byte of the register address with a 4-byte
197 * write operation to the same address, this is a harmless
198 * operation.
199 */
200 status = hif_read_write_sync(ar, reg_addr + i, addr_val,
201 4, HIF_WR_SYNC_BYTE_FIX);
202 if (status)
203 break;
204 }
205
206 if (status) {
207 ath6kl_err("failed to write initial bytes of 0x%x to window reg: 0x%X\n",
208 addr, reg_addr);
209 return status;
210 }
211
212 /*
213	 * Write the address register again, this time writing the whole
214	 * 4-byte value. The effect here is that the LSB write causes the
215	 * cycle to start; the extra writes to bytes 1, 2 and 3 have no
216	 * effect since we are writing the same values again.
217 */
218 status = hif_read_write_sync(ar, reg_addr, (u8 *)(&addr),
219 4, HIF_WR_SYNC_BYTE_INC);
220
221 if (status) {
222 ath6kl_err("failed to write 0x%x to window reg: 0x%X\n",
223 addr, reg_addr);
224 return status;
225 }
226
227 return 0;
228}
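
To make the ordering concrete: pointing the window at target address 0x0052000c on a little-endian host produces the sequence below (the address is purely illustrative). The byte-fixed writes park bytes 1-3 without side effects; only the final incrementing write, which touches the LSB, latches the address and starts the cycle.

	u32 addr = 0x0052000c;
	u8 *b = (u8 *)&addr;	/* little-endian: b[0]=0x0c ... b[3]=0x00 */

	/* loop: b[1]=0x00 -> reg+1, b[2]=0x52 -> reg+2, b[3]=0x00 -> reg+3,
	 * each replicated across a 4-byte fixed-address write;
	 * then the whole word is written LSB-first to trigger the access.
	 */
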
229
230/*
231 * Read from the ATH6KL through its diagnostic window. No cooperation from
232 * the Target is required for this.
233 */
234int ath6kl_read_reg_diag(struct ath6kl *ar, u32 *address, u32 *data)
235{
236 int status;
237
238 /* set window register to start read cycle */
239 status = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS,
240 *address);
241
242 if (status)
243 return status;
244
245 /* read the data */
246 status = hif_read_write_sync(ar, WINDOW_DATA_ADDRESS, (u8 *)data,
247 sizeof(u32), HIF_RD_SYNC_BYTE_INC);
248 if (status) {
249 ath6kl_err("failed to read from window data addr\n");
250 return status;
251 }
252
253 return status;
254}
255
256
257/*
258 * Write to the ATH6KL through its diagnostic window. No cooperation from
259 * the Target is required for this.
260 */
261static int ath6kl_write_reg_diag(struct ath6kl *ar, u32 *address, u32 *data)
262{
263 int status;
264
265 /* set write data */
266 status = hif_read_write_sync(ar, WINDOW_DATA_ADDRESS, (u8 *)data,
267 sizeof(u32), HIF_WR_SYNC_BYTE_INC);
268 if (status) {
269 ath6kl_err("failed to write 0x%x to window data addr\n", *data);
270 return status;
271 }
272
273 /* set window register, which starts the write cycle */
274 return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS,
275 *address);
276}
277
278int ath6kl_access_datadiag(struct ath6kl *ar, u32 address,
279 u8 *data, u32 length, bool read)
280{
281 u32 count;
282 int status = 0;
283
284 for (count = 0; count < length; count += 4, address += 4) {
285 if (read) {
286 status = ath6kl_read_reg_diag(ar, &address,
287 (u32 *) &data[count]);
288 if (status)
289 break;
290 } else {
291 status = ath6kl_write_reg_diag(ar, &address,
292 (u32 *) &data[count]);
293 if (status)
294 break;
295 }
296 }
297
298 return status;
299}
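
Since the copy advances four bytes per iteration, callers keep length a multiple of sizeof(u32), as the register-dump fetch in init.c does. A minimal single-word read sketch; the target address here is an assumption for illustration:

	u32 val;
	u32 addr = TARG_VTOP(0x540000);	/* illustrative target address */

	if (ath6kl_access_datadiag(ar, addr, (u8 *)&val, sizeof(val), true))
		ath6kl_err("diag read failed\n");
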
300
301static void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
302				bool wait_for_compltn, bool cold_reset)
303{
304 int status = 0;
305 u32 address;
306 u32 data;
307
308 if (target_type != TARGET_TYPE_AR6003)
309 return;
310
311 data = cold_reset ? RESET_CONTROL_COLD_RST : RESET_CONTROL_MBOX_RST;
312
313 address = RTC_BASE_ADDRESS;
314 status = ath6kl_write_reg_diag(ar, &address, &data);
315
316 if (status)
317 ath6kl_err("failed to reset target\n");
318}
319
320void ath6kl_stop_endpoint(struct net_device *dev, bool keep_profile,
321 bool get_dbglogs)
322{
323 struct ath6kl *ar = ath6kl_priv(dev);
324 static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
325 bool discon_issued;
326
327 netif_stop_queue(dev);
328
329 /* disable the target and the interrupts associated with it */
330 if (test_bit(WMI_READY, &ar->flag)) {
331 discon_issued = (test_bit(CONNECTED, &ar->flag) ||
332 test_bit(CONNECT_PEND, &ar->flag));
333 ath6kl_disconnect(ar);
334 if (!keep_profile)
335 ath6kl_init_profile_info(ar);
336
337 del_timer(&ar->disconnect_timer);
338
339 clear_bit(WMI_READY, &ar->flag);
340 ath6kl_wmi_shutdown(ar->wmi);
341 clear_bit(WMI_ENABLED, &ar->flag);
342 ar->wmi = NULL;
343
344 /*
345		 * After wmi_shutdown all WMI events will be dropped. We
346		 * need to clean up the buffers allocated in AP mode and
347		 * give a disconnect notification to the stack, which usually
348 * happens in the disconnect_event. Simulate the disconnect
349 * event by calling the function directly. Sometimes
350 * disconnect_event will be received when the debug logs
351 * are collected.
352 */
353 if (discon_issued)
354 ath6kl_disconnect_event(ar, DISCONNECT_CMD,
355 (ar->nw_type & AP_NETWORK) ?
356 bcast_mac : ar->bssid,
357 0, NULL, 0);
358
359 ar->user_key_ctrl = 0;
360
361 } else {
362 ath6kl_dbg(ATH6KL_DBG_TRC,
363 "%s: wmi is not ready 0x%p 0x%p\n",
364 __func__, ar, ar->wmi);
365
366 /* Shut down WMI if we have started it */
367 if (test_bit(WMI_ENABLED, &ar->flag)) {
368 ath6kl_dbg(ATH6KL_DBG_TRC,
369 "%s: shut down wmi\n", __func__);
370 ath6kl_wmi_shutdown(ar->wmi);
371 clear_bit(WMI_ENABLED, &ar->flag);
372 ar->wmi = NULL;
373 }
374 }
375
376 if (ar->htc_target) {
377 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: shut down htc\n", __func__);
378 htc_stop(ar->htc_target);
379 }
380
381 /*
382 * Try to reset the device if we can. The driver may have been
383	 * configured NOT to reset the target during a debug session.
384 */
385 ath6kl_dbg(ATH6KL_DBG_TRC,
386 "attempting to reset target on instance destroy\n");
387 ath6kl_reset_device(ar, ar->target_type, true, true);
388}
389
390static void ath6kl_install_static_wep_keys(struct ath6kl *ar)
391{
392 u8 index;
393 u8 keyusage;
394
395 for (index = WMI_MIN_KEY_INDEX; index <= WMI_MAX_KEY_INDEX; index++) {
396 if (ar->wep_key_list[index].key_len) {
397 keyusage = GROUP_USAGE;
398 if (index == ar->def_txkey_index)
399 keyusage |= TX_USAGE;
400
401 ath6kl_wmi_addkey_cmd(ar->wmi,
402 index,
403 WEP_CRYPT,
404 keyusage,
405 ar->wep_key_list[index].key_len,
406 NULL,
407 ar->wep_key_list[index].key,
408 KEY_OP_INIT_VAL, NULL,
409 NO_SYNC_WMIFLAG);
410 }
411 }
412}
413
414static void ath6kl_connect_ap_mode(struct ath6kl *ar, u16 channel, u8 *bssid,
415 u16 listen_int, u16 beacon_int,
416 u8 assoc_resp_len, u8 *assoc_info)
417{
418 struct net_device *dev = ar->net_dev;
419 struct station_info sinfo;
420 struct ath6kl_req_key *ik;
421 enum crypto_type keyType = NONE_CRYPT;
422
423 if (memcmp(dev->dev_addr, bssid, ETH_ALEN) == 0) {
424 ik = &ar->ap_mode_bkey;
425
426 switch (ar->auth_mode) {
427 case NONE_AUTH:
428 if (ar->prwise_crypto == WEP_CRYPT)
429 ath6kl_install_static_wep_keys(ar);
430 break;
431 case WPA_PSK_AUTH:
432 case WPA2_PSK_AUTH:
433 case (WPA_PSK_AUTH|WPA2_PSK_AUTH):
434 switch (ik->ik_type) {
435 case ATH6KL_CIPHER_TKIP:
436 keyType = TKIP_CRYPT;
437 break;
438 case ATH6KL_CIPHER_AES_CCM:
439 keyType = AES_CRYPT;
440 break;
441 default:
442 goto skip_key;
443 }
444 ath6kl_wmi_addkey_cmd(ar->wmi, ik->ik_keyix, keyType,
445 GROUP_USAGE, ik->ik_keylen,
446 (u8 *)&ik->ik_keyrsc,
447 ik->ik_keydata,
448 KEY_OP_INIT_VAL, ik->ik_macaddr,
449 SYNC_BOTH_WMIFLAG);
450 break;
451 }
452skip_key:
453 set_bit(CONNECTED, &ar->flag);
454 return;
455 }
456
457 ath6kl_dbg(ATH6KL_DBG_TRC, "new station %pM aid=%d\n",
458 bssid, channel);
459
460 ath6kl_add_new_sta(ar, bssid, channel, assoc_info, assoc_resp_len,
461 listen_int & 0xFF, beacon_int,
462 (listen_int >> 8) & 0xFF);
463
464 /* send event to application */
465 memset(&sinfo, 0, sizeof(sinfo));
466
467 /* TODO: sinfo.generation */
468 /* TODO: need to deliver (Re)AssocReq IEs somehow.. change in
469 * cfg80211 needed, e.g., by adding those into sinfo
470 */
471 cfg80211_new_sta(ar->net_dev, bssid, &sinfo, GFP_KERNEL);
472
473 netif_wake_queue(ar->net_dev);
474
475 return;
476}
477
478/* Functions for Tx credit handling */
479void ath6k_credit_init(struct htc_credit_state_info *cred_info,
480 struct list_head *ep_list,
481 int tot_credits)
482{
483 struct htc_endpoint_credit_dist *cur_ep_dist;
484 int count;
485
486 cred_info->cur_free_credits = tot_credits;
487 cred_info->total_avail_credits = tot_credits;
488
489 list_for_each_entry(cur_ep_dist, ep_list, list) {
490 if (cur_ep_dist->endpoint == ENDPOINT_0)
491 continue;
492
493 cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;
494
495 if (tot_credits > 4)
496 if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
497 (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
498 ath6kl_deposit_credit_to_ep(cred_info,
499 cur_ep_dist,
500 cur_ep_dist->cred_min);
501 cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
502 }
503
504 if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
505 ath6kl_deposit_credit_to_ep(cred_info, cur_ep_dist,
506 cur_ep_dist->cred_min);
507 /*
508			 * The control service is always marked active; it
509 * never goes inactive EVER.
510 */
511 cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
512 } else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC)
513 /* this is the lowest priority data endpoint */
514 cred_info->lowestpri_ep_dist = cur_ep_dist->list;
515
516 /*
517		 * Streams have to be created (explicitly or implicitly) for all
518		 * kinds of traffic. BE endpoints are also inactive in the
519		 * beginning. When BE traffic starts, it creates implicit
520		 * streams that redistribute credits.
521		 *
522		 * Note: all other endpoints have minimums set but are
523		 * initially given NO credits. Credits will be distributed
524		 * as traffic activity demands.
525 */
526 }
527
528 WARN_ON(cred_info->cur_free_credits <= 0);
529
530 list_for_each_entry(cur_ep_dist, ep_list, list) {
531 if (cur_ep_dist->endpoint == ENDPOINT_0)
532 continue;
533
534 if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
535 cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
536 else {
537 /*
538			 * For the remaining data endpoints, we assume that
539			 * each cred_per_msg is the same. We use a simple
540			 * calculation here: we take the remaining credits,
541			 * determine the maximum number of messages this can
542			 * cover, and then set each endpoint's normal value
543			 * equal to 3/4 of this amount.
544 */
545 count = (cred_info->cur_free_credits /
546 cur_ep_dist->cred_per_msg)
547 * cur_ep_dist->cred_per_msg;
548 count = (count * 3) >> 2;
549 count = max(count, cur_ep_dist->cred_per_msg);
550 cur_ep_dist->cred_norm = count;
551
552 }
553 }
554}
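
A worked instance of the normal-value arithmetic above, with invented numbers: 26 free credits and cred_per_msg = 4.

	count = (26 / 4) * 4;		/* 24: credits covering whole messages */
	count = (count * 3) >> 2;	/* 18: three quarters of that */
	count = max(count, 4);		/* at least one message's worth */
	/* cred_norm becomes 18 for each data endpoint */
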
555
556/* initialize and setup credit distribution */
557int ath6k_setup_credit_dist(void *htc_handle,
558 struct htc_credit_state_info *cred_info)
559{
560 u16 servicepriority[5];
561
562 memset(cred_info, 0, sizeof(struct htc_credit_state_info));
563
564 servicepriority[0] = WMI_CONTROL_SVC; /* highest */
565 servicepriority[1] = WMI_DATA_VO_SVC;
566 servicepriority[2] = WMI_DATA_VI_SVC;
567 servicepriority[3] = WMI_DATA_BE_SVC;
568 servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
569
570 /* set priority list */
571 htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5);
572
573 return 0;
574}
575
576/* reduce an ep's credits back to a set limit */
577static void ath6k_reduce_credits(struct htc_credit_state_info *cred_info,
578 struct htc_endpoint_credit_dist *ep_dist,
579 int limit)
580{
581 int credits;
582
583 ep_dist->cred_assngd = limit;
584
585 if (ep_dist->credits <= limit)
586 return;
587
588 credits = ep_dist->credits - limit;
589 ep_dist->credits -= credits;
590 cred_info->cur_free_credits += credits;
591}
592
593static void ath6k_credit_update(struct htc_credit_state_info *cred_info,
594 struct list_head *epdist_list)
595{
596 struct htc_endpoint_credit_dist *cur_dist_list;
597
598 list_for_each_entry(cur_dist_list, epdist_list, list) {
599 if (cur_dist_list->endpoint == ENDPOINT_0)
600 continue;
601
602 if (cur_dist_list->cred_to_dist > 0) {
603 cur_dist_list->credits +=
604 cur_dist_list->cred_to_dist;
605 cur_dist_list->cred_to_dist = 0;
606 if (cur_dist_list->credits >
607 cur_dist_list->cred_assngd)
608 ath6k_reduce_credits(cred_info,
609 cur_dist_list,
610 cur_dist_list->cred_assngd);
611
612 if (cur_dist_list->credits >
613 cur_dist_list->cred_norm)
614 ath6k_reduce_credits(cred_info, cur_dist_list,
615 cur_dist_list->cred_norm);
616
617 if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) {
618 if (cur_dist_list->txq_depth == 0)
619 ath6k_reduce_credits(cred_info,
620 cur_dist_list, 0);
621 }
622 }
623 }
624}
625
626/*
627 * HTC has an endpoint that needs credits, ep_dist is the endpoint in
628 * question.
629 */
630void ath6k_seek_credits(struct htc_credit_state_info *cred_info,
631 struct htc_endpoint_credit_dist *ep_dist)
632{
633 struct htc_endpoint_credit_dist *curdist_list;
634 int credits = 0;
635 int need;
636
637 if (ep_dist->svc_id == WMI_CONTROL_SVC)
638 goto out;
639
640 if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
641 (ep_dist->svc_id == WMI_DATA_VO_SVC))
642		if (ep_dist->cred_assngd >= ep_dist->cred_norm)
643 goto out;
644
645 /*
646 * For all other services, we follow a simple algorithm of:
647 *
648 * 1. checking the free pool for credits
649 * 2. checking lower priority endpoints for credits to take
650 */
651
652 credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
653
654 if (credits >= ep_dist->seek_cred)
655 goto out;
656
657 /*
658	 * We don't have enough in the free pool; try taking away from
659	 * lower-priority services. The rules for taking away credits:
660 *
661 * 1. Only take from lower priority endpoints
662 * 2. Only take what is allocated above the minimum (never
663 * starve an endpoint completely)
664 * 3. Only take what you need.
665 */
666
667 list_for_each_entry_reverse(curdist_list,
668 &cred_info->lowestpri_ep_dist,
669 list) {
670 if (curdist_list == ep_dist)
671 break;
672
673 need = ep_dist->seek_cred - cred_info->cur_free_credits;
674
675 if ((curdist_list->cred_assngd - need) >=
676 curdist_list->cred_min) {
677 /*
678			 * The current one has been allocated more than
679			 * its minimum and it has enough credits assigned
680			 * above its minimum to fulfill our need; try to
681			 * take away just enough to fulfill our need.
682 */
683 ath6k_reduce_credits(cred_info, curdist_list,
684 curdist_list->cred_assngd - need);
685
686 if (cred_info->cur_free_credits >=
687 ep_dist->seek_cred)
688 break;
689 }
690
691 if (curdist_list->endpoint == ENDPOINT_0)
692 break;
693 }
694
695 credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
696
697out:
698 /* did we find some credits? */
699 if (credits)
700 ath6kl_deposit_credit_to_ep(cred_info, ep_dist, credits);
701
702 ep_dist->seek_cred = 0;
703}
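
A walk-through of the rules above with invented numbers: an endpoint seeks 6 credits, the free pool holds 2, and a lower-priority endpoint has 10 credits assigned against a minimum of 4.

	int need, credits;

	need = 6 - 2;			/* seek 6, pool has 2: need 4 more */
	/* lower-priority ep: 10 assigned, min 4; 10 - 4 >= 4, so it is
	 * reduced to 6 and returns 4 credits, growing the pool to 6
	 */
	credits = min(6, 6);		/* the seeker receives all 6 it wanted */
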
704
705/* redistribute credits based on activity change */
706static void ath6k_redistribute_credits(struct htc_credit_state_info *info,
707 struct list_head *ep_dist_list)
708{
709 struct htc_endpoint_credit_dist *curdist_list;
710
711 list_for_each_entry(curdist_list, ep_dist_list, list) {
712 if (curdist_list->endpoint == ENDPOINT_0)
713 continue;
714
715 if ((curdist_list->svc_id == WMI_DATA_BK_SVC) ||
716 (curdist_list->svc_id == WMI_DATA_BE_SVC))
717 curdist_list->dist_flags |= HTC_EP_ACTIVE;
718
719 if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
720 !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
721 if (curdist_list->txq_depth == 0)
722 ath6k_reduce_credits(info,
723 curdist_list, 0);
724 else
725 ath6k_reduce_credits(info,
726 curdist_list,
727 curdist_list->cred_min);
728 }
729 }
730}
731
732/*
733 *
734 * This function is invoked whenever endpoints require credit
735 * distributions. A lock is held while this function is invoked, so this
736 * function shall NOT block. The ep_dist_list is a list of distribution
737 * structures in prioritized order as defined by the call to the
738 * htc_set_credit_dist() api.
739 */
740void ath6k_credit_distribute(struct htc_credit_state_info *cred_info,
741 struct list_head *ep_dist_list,
742 enum htc_credit_dist_reason reason)
743{
744 switch (reason) {
745 case HTC_CREDIT_DIST_SEND_COMPLETE:
746 ath6k_credit_update(cred_info, ep_dist_list);
747 break;
748 case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
749 ath6k_redistribute_credits(cred_info, ep_dist_list);
750 break;
751 default:
752 break;
753 }
754
755 WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
756 WARN_ON(cred_info->cur_free_credits < 0);
757}
758
759void disconnect_timer_handler(unsigned long ptr)
760{
761 struct net_device *dev = (struct net_device *)ptr;
762 struct ath6kl *ar = ath6kl_priv(dev);
763
764 ath6kl_init_profile_info(ar);
765 ath6kl_disconnect(ar);
766}
767
768void ath6kl_disconnect(struct ath6kl *ar)
769{
770 if (test_bit(CONNECTED, &ar->flag) ||
771 test_bit(CONNECT_PEND, &ar->flag)) {
772 ath6kl_wmi_disconnect_cmd(ar->wmi);
773 /*
774		 * A disconnect command has been issued; clear the connect pending
775		 * flag. The connected flag will be cleared in the
776		 * disconnect event notification.
777 */
778 clear_bit(CONNECT_PEND, &ar->flag);
779 }
780}
781
782/* WMI Event handlers */
783
784static const char *get_hw_id_string(u32 id)
785{
786 switch (id) {
787 case AR6003_REV1_VERSION:
788 return "1.0";
789 case AR6003_REV2_VERSION:
790 return "2.0";
791 case AR6003_REV3_VERSION:
792 return "2.1.1";
793 default:
794 return "unknown";
795 }
796}
797
798void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver)
799{
800 struct ath6kl *ar = devt;
801 struct net_device *dev = ar->net_dev;
802
803 memcpy(dev->dev_addr, datap, ETH_ALEN);
804 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: mac addr = %pM\n",
805 __func__, dev->dev_addr);
806
807 ar->version.wlan_ver = sw_ver;
808 ar->version.abi_ver = abi_ver;
809
810 snprintf(ar->wdev->wiphy->fw_version,
811 sizeof(ar->wdev->wiphy->fw_version),
812 "%u.%u.%u.%u",
813 (ar->version.wlan_ver & 0xf0000000) >> 28,
814 (ar->version.wlan_ver & 0x0f000000) >> 24,
815 (ar->version.wlan_ver & 0x00ff0000) >> 16,
816 (ar->version.wlan_ver & 0x0000ffff));
817
818 /* indicate to the waiting thread that the ready event was received */
819 set_bit(WMI_READY, &ar->flag);
820 wake_up(&ar->event_wq);
821
822 ath6kl_info("hw %s fw %s\n",
823 get_hw_id_string(ar->wdev->wiphy->hw_version),
824 ar->wdev->wiphy->fw_version);
825}
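
A worked example of the version split above, with an invented value: wlan_ver = 0x31000020 formats as "3.1.0.32".

	/* 0x31000020: top nibble 3, next nibble 1, next byte 0x00,
	 * low 16 bits 0x0020 = 32  ->  "3.1.0.32"
	 */
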
826
827void ath6kl_scan_complete_evt(struct ath6kl *ar, int status)
828{
829 ath6kl_cfg80211_scan_complete_event(ar, status);
830
831 if (!ar->usr_bss_filter)
832 ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
833
834 ath6kl_dbg(ATH6KL_DBG_WLAN_SCAN, "scan complete: %d\n", status);
835}
836
837void ath6kl_connect_event(struct ath6kl *ar, u16 channel, u8 *bssid,
838 u16 listen_int, u16 beacon_int,
839 enum network_type net_type, u8 beacon_ie_len,
840 u8 assoc_req_len, u8 assoc_resp_len,
841 u8 *assoc_info)
842{
843 unsigned long flags;
844
845 if (ar->nw_type == AP_NETWORK) {
846 ath6kl_connect_ap_mode(ar, channel, bssid, listen_int,
847 beacon_int, assoc_resp_len,
848 assoc_info);
849 return;
850 }
851
852 ath6kl_cfg80211_connect_event(ar, channel, bssid,
853 listen_int, beacon_int,
854 net_type, beacon_ie_len,
855 assoc_req_len, assoc_resp_len,
856 assoc_info);
857
858 memcpy(ar->bssid, bssid, sizeof(ar->bssid));
859 ar->bss_ch = channel;
860
861	if (ar->nw_type == INFRA_NETWORK)
862 ath6kl_wmi_listeninterval_cmd(ar->wmi, ar->listen_intvl_t,
863 ar->listen_intvl_b);
864
865 netif_wake_queue(ar->net_dev);
866
867 /* Update connect & link status atomically */
868 spin_lock_irqsave(&ar->lock, flags);
869 set_bit(CONNECTED, &ar->flag);
870 clear_bit(CONNECT_PEND, &ar->flag);
871 netif_carrier_on(ar->net_dev);
872 spin_unlock_irqrestore(&ar->lock, flags);
873
874 aggr_reset_state(ar->aggr_cntxt);
875 ar->reconnect_flag = 0;
876
877 if ((ar->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) {
878 memset(ar->node_map, 0, sizeof(ar->node_map));
879 ar->node_num = 0;
880 ar->next_ep_id = ENDPOINT_2;
881 }
882
883 if (!ar->usr_bss_filter)
884 ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
885}
886
887void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast)
888{
889 struct ath6kl_sta *sta;
890 u8 tsc[6];
891 /*
892	 * In the AP case, keyid holds the AID of the STA which sent the pkt
893	 * with the MIC error. Use this AID to get the MAC & send it to hostapd.
894 */
895 if (ar->nw_type == AP_NETWORK) {
896 sta = ath6kl_find_sta_by_aid(ar, (keyid >> 2));
897 if (!sta)
898 return;
899
900 ath6kl_dbg(ATH6KL_DBG_TRC,
901 "ap tkip mic error received from aid=%d\n", keyid);
902
903 memset(tsc, 0, sizeof(tsc)); /* FIX: get correct TSC */
904 cfg80211_michael_mic_failure(ar->net_dev, sta->mac,
905 NL80211_KEYTYPE_PAIRWISE, keyid,
906 tsc, GFP_KERNEL);
907 } else
908 ath6kl_cfg80211_tkip_micerr_event(ar, keyid, ismcast);
909
910}
911
912static void ath6kl_update_target_stats(struct ath6kl *ar, u8 *ptr, u32 len)
913{
914 struct wmi_target_stats *tgt_stats =
915 (struct wmi_target_stats *) ptr;
916 struct target_stats *stats = &ar->target_stats;
917 struct tkip_ccmp_stats *ccmp_stats;
918 struct bss *conn_bss = NULL;
919 struct cserv_stats *c_stats;
920 u8 ac;
921
922 if (len < sizeof(*tgt_stats))
923 return;
924
925 /* update the RSSI of the connected bss */
926 if (test_bit(CONNECTED, &ar->flag)) {
927 conn_bss = ath6kl_wmi_find_node(ar->wmi, ar->bssid);
928 if (conn_bss) {
929 c_stats = &tgt_stats->cserv_stats;
930 conn_bss->ni_rssi =
931 a_sle16_to_cpu(c_stats->cs_ave_beacon_rssi);
932 conn_bss->ni_snr =
933 tgt_stats->cserv_stats.cs_ave_beacon_snr;
934 ath6kl_wmi_node_return(ar->wmi, conn_bss);
935 }
936 }
937
938 ath6kl_dbg(ATH6KL_DBG_TRC, "updating target stats\n");
939
940 stats->tx_pkt += le32_to_cpu(tgt_stats->stats.tx.pkt);
941 stats->tx_byte += le32_to_cpu(tgt_stats->stats.tx.byte);
942 stats->tx_ucast_pkt += le32_to_cpu(tgt_stats->stats.tx.ucast_pkt);
943 stats->tx_ucast_byte += le32_to_cpu(tgt_stats->stats.tx.ucast_byte);
944 stats->tx_mcast_pkt += le32_to_cpu(tgt_stats->stats.tx.mcast_pkt);
945 stats->tx_mcast_byte += le32_to_cpu(tgt_stats->stats.tx.mcast_byte);
946 stats->tx_bcast_pkt += le32_to_cpu(tgt_stats->stats.tx.bcast_pkt);
947 stats->tx_bcast_byte += le32_to_cpu(tgt_stats->stats.tx.bcast_byte);
948 stats->tx_rts_success_cnt +=
949 le32_to_cpu(tgt_stats->stats.tx.rts_success_cnt);
950
951 for (ac = 0; ac < WMM_NUM_AC; ac++)
952 stats->tx_pkt_per_ac[ac] +=
953 le32_to_cpu(tgt_stats->stats.tx.pkt_per_ac[ac]);
954
955 stats->tx_err += le32_to_cpu(tgt_stats->stats.tx.err);
956 stats->tx_fail_cnt += le32_to_cpu(tgt_stats->stats.tx.fail_cnt);
957 stats->tx_retry_cnt += le32_to_cpu(tgt_stats->stats.tx.retry_cnt);
958 stats->tx_mult_retry_cnt +=
959 le32_to_cpu(tgt_stats->stats.tx.mult_retry_cnt);
960 stats->tx_rts_fail_cnt +=
961 le32_to_cpu(tgt_stats->stats.tx.rts_fail_cnt);
962 stats->tx_ucast_rate =
963 ath6kl_wmi_get_rate(a_sle32_to_cpu(tgt_stats->stats.tx.ucast_rate));
964
965 stats->rx_pkt += le32_to_cpu(tgt_stats->stats.rx.pkt);
966 stats->rx_byte += le32_to_cpu(tgt_stats->stats.rx.byte);
967 stats->rx_ucast_pkt += le32_to_cpu(tgt_stats->stats.rx.ucast_pkt);
968 stats->rx_ucast_byte += le32_to_cpu(tgt_stats->stats.rx.ucast_byte);
969 stats->rx_mcast_pkt += le32_to_cpu(tgt_stats->stats.rx.mcast_pkt);
970 stats->rx_mcast_byte += le32_to_cpu(tgt_stats->stats.rx.mcast_byte);
971 stats->rx_bcast_pkt += le32_to_cpu(tgt_stats->stats.rx.bcast_pkt);
972 stats->rx_bcast_byte += le32_to_cpu(tgt_stats->stats.rx.bcast_byte);
973 stats->rx_frgment_pkt += le32_to_cpu(tgt_stats->stats.rx.frgment_pkt);
974 stats->rx_err += le32_to_cpu(tgt_stats->stats.rx.err);
975 stats->rx_crc_err += le32_to_cpu(tgt_stats->stats.rx.crc_err);
976 stats->rx_key_cache_miss +=
977 le32_to_cpu(tgt_stats->stats.rx.key_cache_miss);
978 stats->rx_decrypt_err += le32_to_cpu(tgt_stats->stats.rx.decrypt_err);
979 stats->rx_dupl_frame += le32_to_cpu(tgt_stats->stats.rx.dupl_frame);
980 stats->rx_ucast_rate =
981 ath6kl_wmi_get_rate(a_sle32_to_cpu(tgt_stats->stats.rx.ucast_rate));
982
983 ccmp_stats = &tgt_stats->stats.tkip_ccmp_stats;
984
985 stats->tkip_local_mic_fail +=
986 le32_to_cpu(ccmp_stats->tkip_local_mic_fail);
987 stats->tkip_cnter_measures_invoked +=
988 le32_to_cpu(ccmp_stats->tkip_cnter_measures_invoked);
989 stats->tkip_fmt_err += le32_to_cpu(ccmp_stats->tkip_fmt_err);
990
991 stats->ccmp_fmt_err += le32_to_cpu(ccmp_stats->ccmp_fmt_err);
992 stats->ccmp_replays += le32_to_cpu(ccmp_stats->ccmp_replays);
993
994 stats->pwr_save_fail_cnt +=
995 le32_to_cpu(tgt_stats->pm_stats.pwr_save_failure_cnt);
996 stats->noise_floor_calib =
997 a_sle32_to_cpu(tgt_stats->noise_floor_calib);
998
999 stats->cs_bmiss_cnt +=
1000 le32_to_cpu(tgt_stats->cserv_stats.cs_bmiss_cnt);
1001 stats->cs_low_rssi_cnt +=
1002 le32_to_cpu(tgt_stats->cserv_stats.cs_low_rssi_cnt);
1003 stats->cs_connect_cnt +=
1004 le16_to_cpu(tgt_stats->cserv_stats.cs_connect_cnt);
1005 stats->cs_discon_cnt +=
1006 le16_to_cpu(tgt_stats->cserv_stats.cs_discon_cnt);
1007
1008 stats->cs_ave_beacon_rssi =
1009 a_sle16_to_cpu(tgt_stats->cserv_stats.cs_ave_beacon_rssi);
1010
1011 stats->cs_last_roam_msec =
1012 tgt_stats->cserv_stats.cs_last_roam_msec;
1013 stats->cs_snr = tgt_stats->cserv_stats.cs_snr;
1014 stats->cs_rssi = a_sle16_to_cpu(tgt_stats->cserv_stats.cs_rssi);
1015
1016 stats->lq_val = le32_to_cpu(tgt_stats->lq_val);
1017
1018 stats->wow_pkt_dropped +=
1019 le32_to_cpu(tgt_stats->wow_stats.wow_pkt_dropped);
1020 stats->wow_host_pkt_wakeups +=
1021 tgt_stats->wow_stats.wow_host_pkt_wakeups;
1022 stats->wow_host_evt_wakeups +=
1023 tgt_stats->wow_stats.wow_host_evt_wakeups;
1024 stats->wow_evt_discarded +=
1025 le16_to_cpu(tgt_stats->wow_stats.wow_evt_discarded);
1026
1027 if (test_bit(STATS_UPDATE_PEND, &ar->flag)) {
1028 clear_bit(STATS_UPDATE_PEND, &ar->flag);
1029 wake_up(&ar->event_wq);
1030 }
1031}
1032
1033static void ath6kl_add_le32(__le32 *var, __le32 val)
1034{
1035 *var = cpu_to_le32(le32_to_cpu(*var) + le32_to_cpu(val));
1036}
1037
1038void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len)
1039{
1040 struct wmi_ap_mode_stat *p = (struct wmi_ap_mode_stat *) ptr;
1041 struct wmi_ap_mode_stat *ap = &ar->ap_stats;
1042 struct wmi_per_sta_stat *st_ap, *st_p;
1043 u8 ac;
1044
1045 if (ar->nw_type == AP_NETWORK) {
1046 if (len < sizeof(*p))
1047 return;
1048
1049 for (ac = 0; ac < AP_MAX_NUM_STA; ac++) {
1050 st_ap = &ap->sta[ac];
1051 st_p = &p->sta[ac];
1052
1053 ath6kl_add_le32(&st_ap->tx_bytes, st_p->tx_bytes);
1054 ath6kl_add_le32(&st_ap->tx_pkts, st_p->tx_pkts);
1055 ath6kl_add_le32(&st_ap->tx_error, st_p->tx_error);
1056 ath6kl_add_le32(&st_ap->tx_discard, st_p->tx_discard);
1057 ath6kl_add_le32(&st_ap->rx_bytes, st_p->rx_bytes);
1058 ath6kl_add_le32(&st_ap->rx_pkts, st_p->rx_pkts);
1059 ath6kl_add_le32(&st_ap->rx_error, st_p->rx_error);
1060 ath6kl_add_le32(&st_ap->rx_discard, st_p->rx_discard);
1061 }
1062
1063 } else {
1064 ath6kl_update_target_stats(ar, ptr, len);
1065 }
1066}
1067
1068void ath6kl_wakeup_event(void *dev)
1069{
1070 struct ath6kl *ar = (struct ath6kl *) dev;
1071
1072 wake_up(&ar->event_wq);
1073}
1074
1075void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr)
1076{
1077 struct ath6kl *ar = (struct ath6kl *) devt;
1078
1079 ar->tx_pwr = tx_pwr;
1080 wake_up(&ar->event_wq);
1081}
1082
1083void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid)
1084{
1085 struct ath6kl_sta *conn;
1086 struct sk_buff *skb;
1087 bool psq_empty = false;
1088
1089 conn = ath6kl_find_sta_by_aid(ar, aid);
1090
1091 if (!conn)
1092 return;
1093 /*
1094	 * Send out a packet queued on the ps queue. When the ps queue
1095	 * becomes empty, update the PVB for this station.
1096 */
1097 spin_lock_bh(&conn->psq_lock);
1098 psq_empty = skb_queue_empty(&conn->psq);
1099 spin_unlock_bh(&conn->psq_lock);
1100
1101 if (psq_empty)
1102 /* TODO: Send out a NULL data frame */
1103 return;
1104
1105 spin_lock_bh(&conn->psq_lock);
1106 skb = skb_dequeue(&conn->psq);
1107 spin_unlock_bh(&conn->psq_lock);
1108
1109 conn->sta_flags |= STA_PS_POLLED;
1110 ath6kl_data_tx(skb, ar->net_dev);
1111 conn->sta_flags &= ~STA_PS_POLLED;
1112
1113 spin_lock_bh(&conn->psq_lock);
1114 psq_empty = skb_queue_empty(&conn->psq);
1115 spin_unlock_bh(&conn->psq_lock);
1116
1117 if (psq_empty)
1118 ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0);
1119}
1120
1121void ath6kl_dtimexpiry_event(struct ath6kl *ar)
1122{
1123 bool mcastq_empty = false;
1124 struct sk_buff *skb;
1125
1126 /*
1127 * If there are no associated STAs, ignore the DTIM expiry event.
1128	 * There is a potential race where the last associated STA
1129	 * disconnects and, before the host can clear the 'Indicate
1130	 * DTIM' request to the firmware, the firmware has already
1131	 * indicated a DTIM expiry event. The race is between the 'clear
1132	 * DTIM expiry cmd' going from the host to the firmware and the
1133	 * DTIM expiry event coming from the firmware to the host.
1134 */
1135 if (!ar->sta_list_index)
1136 return;
1137
1138 spin_lock_bh(&ar->mcastpsq_lock);
1139 mcastq_empty = skb_queue_empty(&ar->mcastpsq);
1140 spin_unlock_bh(&ar->mcastpsq_lock);
1141
1142 if (mcastq_empty)
1143 return;
1144
1145 /* set the STA flag to dtim_expired for the frame to go out */
1146 set_bit(DTIM_EXPIRED, &ar->flag);
1147
1148 spin_lock_bh(&ar->mcastpsq_lock);
1149 while ((skb = skb_dequeue(&ar->mcastpsq)) != NULL) {
1150 spin_unlock_bh(&ar->mcastpsq_lock);
1151
1152 ath6kl_data_tx(skb, ar->net_dev);
1153
1154 spin_lock_bh(&ar->mcastpsq_lock);
1155 }
1156 spin_unlock_bh(&ar->mcastpsq_lock);
1157
1158 clear_bit(DTIM_EXPIRED, &ar->flag);
1159
1160 /* clear the LSB of the BitMapCtl field of the TIM IE */
1161 ath6kl_wmi_set_pvb_cmd(ar->wmi, MCAST_AID, 0);
1162}
1163
1164void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid,
1165 u8 assoc_resp_len, u8 *assoc_info,
1166 u16 prot_reason_status)
1167{
1168 struct bss *wmi_ssid_node = NULL;
1169 unsigned long flags;
1170
1171 if (ar->nw_type == AP_NETWORK) {
1172 if (!ath6kl_remove_sta(ar, bssid, prot_reason_status))
1173 return;
1174
1175 /* if no more associated STAs, empty the mcast PS q */
1176 if (ar->sta_list_index == 0) {
1177 spin_lock_bh(&ar->mcastpsq_lock);
1178 skb_queue_purge(&ar->mcastpsq);
1179 spin_unlock_bh(&ar->mcastpsq_lock);
1180
1181 /* clear the LSB of the TIM IE's BitMapCtl field */
1182 if (test_bit(WMI_READY, &ar->flag))
1183 ath6kl_wmi_set_pvb_cmd(ar->wmi, MCAST_AID, 0);
1184 }
1185
1186 if (!is_broadcast_ether_addr(bssid)) {
1187 /* send event to application */
1188 cfg80211_del_sta(ar->net_dev, bssid, GFP_KERNEL);
1189 }
1190
1191 clear_bit(CONNECTED, &ar->flag);
1192 return;
1193 }
1194
1195 ath6kl_cfg80211_disconnect_event(ar, reason, bssid,
1196 assoc_resp_len, assoc_info,
1197 prot_reason_status);
1198
1199 aggr_reset_state(ar->aggr_cntxt);
1200
1201 del_timer(&ar->disconnect_timer);
1202
1203 ath6kl_dbg(ATH6KL_DBG_WLAN_CONNECT,
1204 "disconnect reason is %d\n", reason);
1205
1206 /*
1207	 * If the event is due to a disconnect cmd from the host, only then
1208	 * would the target stop trying to connect. Under any other
1209	 * condition, the target would keep trying to connect.
1210 */
1211 if (reason == DISCONNECT_CMD) {
1212 if (!ar->usr_bss_filter && test_bit(WMI_READY, &ar->flag))
1213 ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
1214 } else {
1215 set_bit(CONNECT_PEND, &ar->flag);
1216 if (((reason == ASSOC_FAILED) &&
1217 (prot_reason_status == 0x11)) ||
1218 ((reason == ASSOC_FAILED) && (prot_reason_status == 0x0)
1219 && (ar->reconnect_flag == 1))) {
1220 set_bit(CONNECTED, &ar->flag);
1221 return;
1222 }
1223 }
1224
1225 if ((reason == NO_NETWORK_AVAIL) && test_bit(WMI_READY, &ar->flag)) {
1226 ath6kl_wmi_node_free(ar->wmi, bssid);
1227
1228 /*
1229	 * In case any other nodes with the same SSID are present, remove
1230	 * them, since those nodes are also not available now.
1231 */
1232 do {
1233 /*
1234 * Find the nodes based on SSID and remove it
1235 *
1236 * Note: This case will not work out for
1237 * Hidden-SSID
1238 */
1239 wmi_ssid_node = ath6kl_wmi_find_ssid_node(ar->wmi,
1240 ar->ssid,
1241 ar->ssid_len,
1242 false,
1243 true);
1244
1245 if (wmi_ssid_node)
1246 ath6kl_wmi_node_free(ar->wmi,
1247 wmi_ssid_node->ni_macaddr);
1248
1249 } while (wmi_ssid_node);
1250 }
1251
1252 /* update connect & link status atomically */
1253 spin_lock_irqsave(&ar->lock, flags);
1254 clear_bit(CONNECTED, &ar->flag);
1255 netif_carrier_off(ar->net_dev);
1256 spin_unlock_irqrestore(&ar->lock, flags);
1257
1258 if ((reason != CSERV_DISCONNECT) || (ar->reconnect_flag != 1))
1259 ar->reconnect_flag = 0;
1260
1261 if (reason != CSERV_DISCONNECT)
1262 ar->user_key_ctrl = 0;
1263
1264 netif_stop_queue(ar->net_dev);
1265 memset(ar->bssid, 0, sizeof(ar->bssid));
1266 ar->bss_ch = 0;
1267
1268 ath6kl_tx_data_cleanup(ar);
1269}
1270
1271static int ath6kl_open(struct net_device *dev)
1272{
1273 struct ath6kl *ar = ath6kl_priv(dev);
1274 unsigned long flags;
1275
1276 spin_lock_irqsave(&ar->lock, flags);
1277
1278 set_bit(WLAN_ENABLED, &ar->flag);
1279
1280 if (test_bit(CONNECTED, &ar->flag)) {
1281 netif_carrier_on(dev);
1282 netif_wake_queue(dev);
1283 } else
1284 netif_carrier_off(dev);
1285
1286 spin_unlock_irqrestore(&ar->lock, flags);
1287
1288 return 0;
1289}
1290
1291static int ath6kl_close(struct net_device *dev)
1292{
1293 struct ath6kl *ar = ath6kl_priv(dev);
1294
1295 netif_stop_queue(dev);
1296
1297 ath6kl_disconnect(ar);
1298
1299 if (test_bit(WMI_READY, &ar->flag)) {
1300 if (ath6kl_wmi_scanparams_cmd(ar->wmi, 0xFFFF, 0, 0, 0, 0, 0, 0,
1301 0, 0, 0))
1302 return -EIO;
1303
1304 clear_bit(WLAN_ENABLED, &ar->flag);
1305 }
1306
1307 ath6kl_cfg80211_scan_complete_event(ar, -ECANCELED);
1308
1309 return 0;
1310}
1311
1312static struct net_device_stats *ath6kl_get_stats(struct net_device *dev)
1313{
1314 struct ath6kl *ar = ath6kl_priv(dev);
1315
1316 return &ar->net_stats;
1317}
1318
1319static const struct net_device_ops ath6kl_netdev_ops = {
1320 .ndo_open = ath6kl_open,
1321 .ndo_stop = ath6kl_close,
1322 .ndo_start_xmit = ath6kl_data_tx,
1323 .ndo_get_stats = ath6kl_get_stats,
1324};
1325
1326void init_netdev(struct net_device *dev)
1327{
1328 dev->netdev_ops = &ath6kl_netdev_ops;
1329 dev->watchdog_timeo = ATH6KL_TX_TIMEOUT;
1330
1331 dev->needed_headroom = ETH_HLEN;
1332 dev->needed_headroom += sizeof(struct ath6kl_llc_snap_hdr) +
1333 sizeof(struct wmi_data_hdr) + HTC_HDR_LENGTH
1334 + WMI_MAX_TX_META_SZ;
1335
1336 return;
1337}
diff --git a/drivers/net/wireless/ath/ath6kl/node.c b/drivers/net/wireless/ath/ath6kl/node.c
new file mode 100644
index 000000000000..131205c610b9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/node.c
@@ -0,0 +1,234 @@
1/*
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "htc.h"
18#include "wmi.h"
19#include "debug.h"
20
21struct bss *wlan_node_alloc(int wh_size)
22{
23 struct bss *ni;
24
25 ni = kzalloc(sizeof(struct bss), GFP_ATOMIC);
26
27 if ((ni != NULL) && wh_size) {
28 ni->ni_buf = kmalloc(wh_size, GFP_ATOMIC);
29 if (ni->ni_buf == NULL) {
30 kfree(ni);
31 return NULL;
32 }
33 }
34
35 return ni;
36}
37
38void wlan_node_free(struct bss *ni)
39{
40 kfree(ni->ni_buf);
41 kfree(ni);
42}
43
44void wlan_setup_node(struct ath6kl_node_table *nt, struct bss *ni,
45 const u8 *mac_addr)
46{
47 int hash;
48
49 memcpy(ni->ni_macaddr, mac_addr, ETH_ALEN);
50 hash = ATH6KL_NODE_HASH(mac_addr);
51 ni->ni_refcnt = 1;
52
53 ni->ni_tstamp = jiffies_to_msecs(jiffies);
54 ni->ni_actcnt = WLAN_NODE_INACT_CNT;
55
56 spin_lock_bh(&nt->nt_nodelock);
57
58 /* insert at the end of the node list */
59 ni->ni_list_next = NULL;
60 ni->ni_list_prev = nt->nt_node_last;
61 if (nt->nt_node_last != NULL)
62 nt->nt_node_last->ni_list_next = ni;
63
64 nt->nt_node_last = ni;
65 if (nt->nt_node_first == NULL)
66 nt->nt_node_first = ni;
67
68 /* insert into the hash list */
69 ni->ni_hash_next = nt->nt_hash[hash];
70 if (ni->ni_hash_next != NULL)
71 nt->nt_hash[hash]->ni_hash_prev = ni;
72
73 ni->ni_hash_prev = NULL;
74 nt->nt_hash[hash] = ni;
75
76 spin_unlock_bh(&nt->nt_nodelock);
77}
78
79struct bss *wlan_find_node(struct ath6kl_node_table *nt,
80 const u8 *mac_addr)
81{
82 struct bss *ni, *found_ni = NULL;
83 int hash;
84
85 spin_lock_bh(&nt->nt_nodelock);
86
87 hash = ATH6KL_NODE_HASH(mac_addr);
88 for (ni = nt->nt_hash[hash]; ni; ni = ni->ni_hash_next) {
89 if (memcmp(ni->ni_macaddr, mac_addr, ETH_ALEN) == 0) {
90 ni->ni_refcnt++;
91 found_ni = ni;
92 break;
93 }
94 }
95
96 spin_unlock_bh(&nt->nt_nodelock);
97
98 return found_ni;
99}
100
101void wlan_node_reclaim(struct ath6kl_node_table *nt, struct bss *ni)
102{
103 int hash;
104
105 spin_lock_bh(&nt->nt_nodelock);
106
107 if (ni->ni_list_prev == NULL)
108 /* fix list head */
109 nt->nt_node_first = ni->ni_list_next;
110 else
111 ni->ni_list_prev->ni_list_next = ni->ni_list_next;
112
113 if (ni->ni_list_next == NULL)
114 /* fix list tail */
115 nt->nt_node_last = ni->ni_list_prev;
116 else
117 ni->ni_list_next->ni_list_prev = ni->ni_list_prev;
118
119 if (ni->ni_hash_prev == NULL) {
120 /* first in list so fix the list head */
121 hash = ATH6KL_NODE_HASH(ni->ni_macaddr);
122 nt->nt_hash[hash] = ni->ni_hash_next;
123 } else {
124 ni->ni_hash_prev->ni_hash_next = ni->ni_hash_next;
125 }
126
127 if (ni->ni_hash_next != NULL)
128 ni->ni_hash_next->ni_hash_prev = ni->ni_hash_prev;
129
130 wlan_node_free(ni);
131
132 spin_unlock_bh(&nt->nt_nodelock);
133}
134
135static void wlan_node_dec_free(struct bss *ni)
136{
137 if ((ni->ni_refcnt--) == 1)
138 wlan_node_free(ni);
139}
140
141void wlan_free_allnodes(struct ath6kl_node_table *nt)
142{
143 struct bss *ni;
144
145 while ((ni = nt->nt_node_first) != NULL)
146 wlan_node_reclaim(nt, ni);
147}
148
149void wlan_iterate_nodes(struct ath6kl_node_table *nt, void *arg)
150{
151 struct bss *ni;
152
153 spin_lock_bh(&nt->nt_nodelock);
154 for (ni = nt->nt_node_first; ni; ni = ni->ni_list_next) {
155 ni->ni_refcnt++;
156 ath6kl_cfg80211_scan_node(arg, ni);
157 wlan_node_dec_free(ni);
158 }
159 spin_unlock_bh(&nt->nt_nodelock);
160}
161
162void wlan_node_table_init(struct ath6kl_node_table *nt)
163{
164 ath6kl_dbg(ATH6KL_DBG_WLAN_NODE, "node table = 0x%lx\n",
165 (unsigned long)nt);
166
167 memset(nt, 0, sizeof(struct ath6kl_node_table));
168
169 spin_lock_init(&nt->nt_nodelock);
170
171 nt->nt_node_age = WLAN_NODE_INACT_TIMEOUT_MSEC;
172}
173
174void wlan_refresh_inactive_nodes(struct ath6kl *ar)
175{
176 struct ath6kl_node_table *nt = &ar->scan_table;
177	struct bss *bss, *next;
178 u32 now;
179
180 now = jiffies_to_msecs(jiffies);
181 bss = nt->nt_node_first;
182	while (bss != NULL) {
183		/* save the next node; reclaiming frees the current one */
184		next = bss->ni_list_next;
185		/* refresh all nodes except the current bss */
186		if (memcmp(ar->bssid, bss->ni_macaddr, ETH_ALEN) != 0 &&
187		    (((now - bss->ni_tstamp) > nt->nt_node_age) ||
188		     --bss->ni_actcnt == 0))
189			wlan_node_reclaim(nt, bss);
190		bss = next;
191	}
192}
193
194void wlan_node_table_cleanup(struct ath6kl_node_table *nt)
195{
196 wlan_free_allnodes(nt);
197}
198
199struct bss *wlan_find_ssid_node(struct ath6kl_node_table *nt, u8 *ssid,
200 u32 ssid_len, bool is_wpa2, bool match_ssid)
201{
202 struct bss *ni, *found_ni = NULL;
203 u8 *ie_ssid;
204
205 spin_lock_bh(&nt->nt_nodelock);
206
207 for (ni = nt->nt_node_first; ni; ni = ni->ni_list_next) {
208
209 ie_ssid = ni->ni_cie.ie_ssid;
210
211		if ((ie_ssid[1] == ssid_len) &&
212		    (memcmp(ssid, &ie_ssid[2], ssid_len) == 0)) {
213
214 if (match_ssid ||
215 (is_wpa2 && ni->ni_cie.ie_rsn != NULL) ||
216 (!is_wpa2 && ni->ni_cie.ie_wpa != NULL)) {
217 ni->ni_refcnt++;
218 found_ni = ni;
219 break;
220 }
221 }
222 }
223
224 spin_unlock_bh(&nt->nt_nodelock);
225
226 return found_ni;
227}
228
229void wlan_node_return(struct ath6kl_node_table *nt, struct bss *ni)
230{
231 spin_lock_bh(&nt->nt_nodelock);
232 wlan_node_dec_free(ni);
233 spin_unlock_bh(&nt->nt_nodelock);
234}
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
new file mode 100644
index 000000000000..34171604cbe4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -0,0 +1,912 @@
1/*
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/mmc/card.h>
18#include <linux/mmc/mmc.h>
19#include <linux/mmc/host.h>
20#include <linux/mmc/sdio_func.h>
21#include <linux/mmc/sdio_ids.h>
22#include <linux/mmc/sdio.h>
23#include <linux/mmc/sd.h>
24#include "htc_hif.h"
25#include "hif-ops.h"
26#include "target.h"
27#include "debug.h"
28
29struct ath6kl_sdio {
30 struct sdio_func *func;
31
32 spinlock_t lock;
33
34 /* free list */
35 struct list_head bus_req_freeq;
36
37 /* available bus requests */
38 struct bus_request bus_req[BUS_REQUEST_MAX_NUM];
39
40 struct ath6kl *ar;
41 u8 *dma_buffer;
42
43 /* scatter request list head */
44 struct list_head scat_req;
45
46 spinlock_t scat_lock;
47 bool is_disabled;
48 atomic_t irq_handling;
49 const struct sdio_device_id *id;
50 struct work_struct wr_async_work;
51 struct list_head wr_asyncq;
52 spinlock_t wr_async_lock;
53};
54
55#define CMD53_ARG_READ 0
56#define CMD53_ARG_WRITE 1
57#define CMD53_ARG_BLOCK_BASIS 1
58#define CMD53_ARG_FIXED_ADDRESS 0
59#define CMD53_ARG_INCR_ADDRESS 1
60
61static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar)
62{
63 return ar->hif_priv;
64}
65
66/*
67 * Check whether a DMA buffer is word-aligned and DMA-able.
68 * Most host controllers assume the buffer is DMA'able and will
69 * bug-check otherwise (i.e. buffers on the stack). virt_addr_valid
70 * check fails on stack memory.
71 */
72static inline bool buf_needs_bounce(u8 *buf)
73{
74 return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf);
75}
76
77static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar)
78{
79 struct ath6kl_mbox_info *mbox_info = &ar->mbox_info;
80
81 /* EP1 has an extended range */
82 mbox_info->htc_addr = HIF_MBOX_BASE_ADDR;
83 mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR;
84 mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH;
85 mbox_info->block_size = HIF_MBOX_BLOCK_SIZE;
86 mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR;
87 mbox_info->gmbox_sz = HIF_GMBOX_WIDTH;
88}
89
90static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func,
91 u8 mode, u8 opcode, u32 addr,
92 u16 blksz)
93{
94 *arg = (((rw & 1) << 31) |
95 ((func & 0x7) << 28) |
96 ((mode & 1) << 27) |
97 ((opcode & 1) << 26) |
98 ((addr & 0x1FFFF) << 9) |
99 (blksz & 0x1FF));
100}
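
/*
 * A minimal usage sketch: assemble the CMD53 argument for a 2-block,
 * fixed-address write to the mailbox base on SDIO function 1. The
 * helper name and the parameter values here are illustrative
 * assumptions, not part of the driver proper.
 */
static inline u32 example_cmd53_write_arg(void)
{
	u32 arg;

	ath6kl_sdio_set_cmd53_arg(&arg, CMD53_ARG_WRITE, 1,
				  CMD53_ARG_BLOCK_BASIS,
				  CMD53_ARG_FIXED_ADDRESS,
				  HIF_MBOX_BASE_ADDR, 2);
	return arg;
}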
101
102static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
103 unsigned int address,
104 unsigned char val)
105{
106 const u8 func = 0;
107
108 *arg = ((write & 1) << 31) |
109 ((func & 0x7) << 28) |
110 ((raw & 1) << 27) |
111 (1 << 26) |
112 ((address & 0x1FFFF) << 9) |
113 (1 << 8) |
114 (val & 0xFF);
115}
116
117static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
118 unsigned int address,
119 unsigned char byte)
120{
121 struct mmc_command io_cmd;
122
123 memset(&io_cmd, 0, sizeof(io_cmd));
124 ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
125 io_cmd.opcode = SD_IO_RW_DIRECT;
126 io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
127
128 return mmc_wait_for_cmd(card->host, &io_cmd, 0);
129}
130
131static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
132 u8 *buf, u32 len)
133{
134 int ret = 0;
135
136 if (request & HIF_WRITE) {
137 if (addr >= HIF_MBOX_BASE_ADDR &&
138 addr <= HIF_MBOX_END_ADDR)
139 addr += (HIF_MBOX_WIDTH - len);
140
141 if (addr == HIF_MBOX0_EXT_BASE_ADDR)
142 addr += HIF_MBOX0_EXT_WIDTH - len;
143
144 if (request & HIF_FIXED_ADDRESS)
145 ret = sdio_writesb(func, addr, buf, len);
146 else
147 ret = sdio_memcpy_toio(func, addr, buf, len);
148 } else {
149 if (request & HIF_FIXED_ADDRESS)
150 ret = sdio_readsb(func, buf, addr, len);
151 else
152 ret = sdio_memcpy_fromio(func, buf, addr, len);
153 }
154
155 return ret;
156}
157
158static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
159{
160 struct bus_request *bus_req;
161 unsigned long flag;
162
163 spin_lock_irqsave(&ar_sdio->lock, flag);
164
165 if (list_empty(&ar_sdio->bus_req_freeq)) {
166 spin_unlock_irqrestore(&ar_sdio->lock, flag);
167 return NULL;
168 }
169
170 bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
171 struct bus_request, list);
172 list_del(&bus_req->list);
173
174 spin_unlock_irqrestore(&ar_sdio->lock, flag);
175 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: bus request 0x%p\n", __func__, bus_req);
176
177 return bus_req;
178}
179
180static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
181 struct bus_request *bus_req)
182{
183 unsigned long flag;
184
185 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: bus request 0x%p\n", __func__, bus_req);
186
187 spin_lock_irqsave(&ar_sdio->lock, flag);
188 list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
189 spin_unlock_irqrestore(&ar_sdio->lock, flag);
190}
191
192static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
193 struct mmc_data *data)
194{
195 struct scatterlist *sg;
196 int i;
197
198 data->blksz = HIF_MBOX_BLOCK_SIZE;
199 data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE;
200
201 ath6kl_dbg(ATH6KL_DBG_SCATTER,
202 "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d) , (tot:%d,sg:%d)\n",
203 (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr,
204 data->blksz, data->blocks, scat_req->len,
205 scat_req->scat_entries);
206
207 data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE :
208 MMC_DATA_READ;
209
210 /* fill SG entries */
211 sg = scat_req->sgentries;
212 sg_init_table(sg, scat_req->scat_entries);
213
214 /* assemble SG list */
215 for (i = 0; i < scat_req->scat_entries; i++, sg++) {
216 if ((unsigned long)scat_req->scat_list[i].buf & 0x3)
217 /*
218 * Some scatter engines can handle unaligned
219 * buffers, print this as informational only.
220 */
221 ath6kl_dbg(ATH6KL_DBG_SCATTER,
222 "(%s) scatter buffer is unaligned 0x%p\n",
223 scat_req->req & HIF_WRITE ? "WR" : "RD",
224 scat_req->scat_list[i].buf);
225
226 ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n",
227 i, scat_req->scat_list[i].buf,
228 scat_req->scat_list[i].len);
229
230 sg_set_buf(sg, scat_req->scat_list[i].buf,
231 scat_req->scat_list[i].len);
232 }
233
234 /* set scatter-gather table for request */
235 data->sg = scat_req->sgentries;
236 data->sg_len = scat_req->scat_entries;
237}
238
239static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
240 struct bus_request *req)
241{
242 struct mmc_request mmc_req;
243 struct mmc_command cmd;
244 struct mmc_data data;
245 struct hif_scatter_req *scat_req;
246 u8 opcode, rw;
247 int status, len;
248
249 scat_req = req->scat_req;
250
251 if (scat_req->virt_scat) {
252 len = scat_req->len;
253 if (scat_req->req & HIF_BLOCK_BASIS)
254 len = round_down(len, HIF_MBOX_BLOCK_SIZE);
255
256 status = ath6kl_sdio_io(ar_sdio->func, scat_req->req,
257 scat_req->addr, scat_req->virt_dma_buf,
258 len);
259 goto scat_complete;
260 }
261
262 memset(&mmc_req, 0, sizeof(struct mmc_request));
263 memset(&cmd, 0, sizeof(struct mmc_command));
264 memset(&data, 0, sizeof(struct mmc_data));
265
266 ath6kl_sdio_setup_scat_data(scat_req, &data);
267
268 opcode = (scat_req->req & HIF_FIXED_ADDRESS) ?
269 CMD53_ARG_FIXED_ADDRESS : CMD53_ARG_INCR_ADDRESS;
270
271 rw = (scat_req->req & HIF_WRITE) ? CMD53_ARG_WRITE : CMD53_ARG_READ;
272
273 /* Fixup the address so that the last byte will fall on MBOX EOM */
274 if (scat_req->req & HIF_WRITE) {
275 if (scat_req->addr == HIF_MBOX_BASE_ADDR)
276 scat_req->addr += HIF_MBOX_WIDTH - scat_req->len;
277 else
278 /* Uses extended address range */
279 scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len;
280 }
281
282 /* set command argument */
283 ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num,
284 CMD53_ARG_BLOCK_BASIS, opcode, scat_req->addr,
285 data.blocks);
286
287 cmd.opcode = SD_IO_RW_EXTENDED;
288 cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
289
290 mmc_req.cmd = &cmd;
291 mmc_req.data = &data;
292
293 mmc_set_data_timeout(&data, ar_sdio->func->card);
294 /* synchronous call to process request */
295 mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);
296
297 status = cmd.error ? cmd.error : data.error;
298
299scat_complete:
300 scat_req->status = status;
301
302 if (scat_req->status)
303 ath6kl_err("Scatter write request failed:%d\n",
304 scat_req->status);
305
306 if (scat_req->req & HIF_ASYNCHRONOUS)
307 scat_req->complete(ar_sdio->ar->htc_target, scat_req);
308
309 return status;
310}
311
312static int ath6kl_sdio_alloc_prep_scat_req(struct ath6kl_sdio *ar_sdio,
313 int n_scat_entry, int n_scat_req,
314 bool virt_scat)
315{
316 struct hif_scatter_req *s_req;
317 struct bus_request *bus_req;
318 int i, scat_req_sz, scat_list_sz, sg_sz, buf_sz;
319 u8 *virt_buf;
320
321 scat_list_sz = (n_scat_entry - 1) * sizeof(struct hif_scatter_item);
322 scat_req_sz = sizeof(*s_req) + scat_list_sz;
323
324 if (!virt_scat)
325 sg_sz = sizeof(struct scatterlist) * n_scat_entry;
326 else
327 buf_sz = 2 * L1_CACHE_BYTES +
328 ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
329
330 for (i = 0; i < n_scat_req; i++) {
331 /* allocate the scatter request */
332 s_req = kzalloc(scat_req_sz, GFP_KERNEL);
333 if (!s_req)
334 return -ENOMEM;
335
336 if (virt_scat) {
337 virt_buf = kzalloc(buf_sz, GFP_KERNEL);
338 if (!virt_buf) {
339 kfree(s_req);
340 return -ENOMEM;
341 }
342
343 s_req->virt_dma_buf =
344 (u8 *)L1_CACHE_ALIGN((unsigned long)virt_buf);
345 } else {
346 /* allocate sglist */
347 s_req->sgentries = kzalloc(sg_sz, GFP_KERNEL);
348
349 if (!s_req->sgentries) {
350 kfree(s_req);
351 return -ENOMEM;
352 }
353 }
354
355 /* allocate a bus request for this scatter request */
356 bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
357 if (!bus_req) {
358 kfree(s_req->sgentries);
359 kfree(s_req->virt_dma_buf);
360 kfree(s_req);
361 return -ENOMEM;
362 }
363
364 /* assign the scatter request to this bus request */
365 bus_req->scat_req = s_req;
366 s_req->busrequest = bus_req;
367
368 s_req->virt_scat = virt_scat;
369
370 /* add it to the scatter pool */
371 hif_scatter_req_add(ar_sdio->ar, s_req);
372 }
373
374 return 0;
375}
376
377static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
378 u32 len, u32 request)
379{
380 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
381 u8 *tbuf = NULL;
382 int ret;
383 bool bounced = false;
384
385 if (request & HIF_BLOCK_BASIS)
386 len = round_down(len, HIF_MBOX_BLOCK_SIZE);
387
388 if (buf_needs_bounce(buf)) {
389 if (!ar_sdio->dma_buffer)
390 return -ENOMEM;
391 tbuf = ar_sdio->dma_buffer;
392 memcpy(tbuf, buf, len);
393 bounced = true;
394 } else
395 tbuf = buf;
396
397 sdio_claim_host(ar_sdio->func);
398 ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
399 if ((request & HIF_READ) && bounced)
400 memcpy(buf, tbuf, len);
401 sdio_release_host(ar_sdio->func);
402
403 return ret;
404}
405
406static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
407 struct bus_request *req)
408{
409 if (req->scat_req)
410 ath6kl_sdio_scat_rw(ar_sdio, req);
411 else {
412 void *context;
413 int status;
414
415 status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address,
416 req->buffer, req->length,
417 req->request);
418 context = req->packet;
419 ath6kl_sdio_free_bus_req(ar_sdio, req);
420 ath6kldev_rw_comp_handler(context, status);
421 }
422}
423
424static void ath6kl_sdio_write_async_work(struct work_struct *work)
425{
426 struct ath6kl_sdio *ar_sdio;
427 unsigned long flags;
428 struct bus_request *req, *tmp_req;
429
430 ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);
431 sdio_claim_host(ar_sdio->func);
432
433 spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
434 list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
435 list_del(&req->list);
436 spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
437 __ath6kl_sdio_write_async(ar_sdio, req);
438 spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
439 }
440 spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
441
442 sdio_release_host(ar_sdio->func);
443}
444
445static void ath6kl_sdio_irq_handler(struct sdio_func *func)
446{
447 int status;
448 struct ath6kl_sdio *ar_sdio;
449
450 ar_sdio = sdio_get_drvdata(func);
451 atomic_set(&ar_sdio->irq_handling, 1);
452
453 /*
454	 * Release the host during interrupts so we can pick it back up when
455 * we process commands.
456 */
457 sdio_release_host(ar_sdio->func);
458
459 status = ath6kldev_intr_bh_handler(ar_sdio->ar);
460 sdio_claim_host(ar_sdio->func);
461 atomic_set(&ar_sdio->irq_handling, 0);
462 WARN_ON(status && status != -ECANCELED);
463}
464
465static int ath6kl_sdio_power_on(struct ath6kl_sdio *ar_sdio)
466{
467 struct sdio_func *func = ar_sdio->func;
468 int ret = 0;
469
470 if (!ar_sdio->is_disabled)
471 return 0;
472
473 sdio_claim_host(func);
474
475 ret = sdio_enable_func(func);
476 if (ret) {
477		ath6kl_err("Unable to enable sdio func: %d\n", ret);
478 sdio_release_host(func);
479 return ret;
480 }
481
482 sdio_release_host(func);
483
484 /*
485 * Wait for hardware to initialise. It should take a lot less than
486 * 10 ms but let's be conservative here.
487 */
488 msleep(10);
489
490 ar_sdio->is_disabled = false;
491
492 return ret;
493}
494
495static int ath6kl_sdio_power_off(struct ath6kl_sdio *ar_sdio)
496{
497 int ret;
498
499 if (ar_sdio->is_disabled)
500 return 0;
501
502 /* Disable the card */
503 sdio_claim_host(ar_sdio->func);
504 ret = sdio_disable_func(ar_sdio->func);
505 sdio_release_host(ar_sdio->func);
506
507 if (ret)
508 return ret;
509
510 ar_sdio->is_disabled = true;
511
512 return ret;
513}
514
515static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
516 u32 length, u32 request,
517 struct htc_packet *packet)
518{
519 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
520 struct bus_request *bus_req;
521 unsigned long flags;
522
523 bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
524
525 if (!bus_req)
526 return -ENOMEM;
527
528 bus_req->address = address;
529 bus_req->buffer = buffer;
530 bus_req->length = length;
531 bus_req->request = request;
532 bus_req->packet = packet;
533
534 spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
535 list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
536 spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
537 queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
538
539 return 0;
540}
541
542static void ath6kl_sdio_irq_enable(struct ath6kl *ar)
543{
544 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
545 int ret;
546
547 sdio_claim_host(ar_sdio->func);
548
549 /* Register the isr */
550 ret = sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler);
551 if (ret)
552 ath6kl_err("Failed to claim sdio irq: %d\n", ret);
553
554 sdio_release_host(ar_sdio->func);
555}
556
557static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
558{
559 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
560 int ret;
561
562 sdio_claim_host(ar_sdio->func);
563
564 /* Mask our function IRQ */
565 while (atomic_read(&ar_sdio->irq_handling)) {
566 sdio_release_host(ar_sdio->func);
567		msleep(100);
568 sdio_claim_host(ar_sdio->func);
569 }
570
571 ret = sdio_release_irq(ar_sdio->func);
572 if (ret)
573 ath6kl_err("Failed to release sdio irq: %d\n", ret);
574
575 sdio_release_host(ar_sdio->func);
576}
577
578static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
579{
580 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
581 struct hif_scatter_req *node = NULL;
582 unsigned long flag;
583
584 spin_lock_irqsave(&ar_sdio->scat_lock, flag);
585
586 if (!list_empty(&ar_sdio->scat_req)) {
587 node = list_first_entry(&ar_sdio->scat_req,
588 struct hif_scatter_req, list);
589 list_del(&node->list);
590 }
591
592 spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
593
594 return node;
595}
596
597static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
598 struct hif_scatter_req *s_req)
599{
600 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
601 unsigned long flag;
602
603 spin_lock_irqsave(&ar_sdio->scat_lock, flag);
604
605 list_add_tail(&s_req->list, &ar_sdio->scat_req);
606
607 spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
608
609}
610
611/* scatter gather read write request */
612static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
613 struct hif_scatter_req *scat_req)
614{
615 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
616 u32 request = scat_req->req;
617 int status = 0;
618 unsigned long flags;
619
620 if (!scat_req->len)
621 return -EINVAL;
622
623 ath6kl_dbg(ATH6KL_DBG_SCATTER,
624 "hif-scatter: total len: %d scatter entries: %d\n",
625 scat_req->len, scat_req->scat_entries);
626
627 if (request & HIF_SYNCHRONOUS) {
628 sdio_claim_host(ar_sdio->func);
629 status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
630 sdio_release_host(ar_sdio->func);
631 } else {
632 spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
633 list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
634 spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
635 queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
636 }
637
638 return status;
639}
640
641/* clean up scatter support */
642static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
643{
644 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
645 struct hif_scatter_req *s_req, *tmp_req;
646 unsigned long flag;
647
648 /* empty the free list */
649 spin_lock_irqsave(&ar_sdio->scat_lock, flag);
650 list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
651 list_del(&s_req->list);
652 spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
653
654 if (s_req->busrequest)
655 ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
656 kfree(s_req->virt_dma_buf);
657 kfree(s_req->sgentries);
658 kfree(s_req);
659
660 spin_lock_irqsave(&ar_sdio->scat_lock, flag);
661 }
662 spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
663}
664
665/* setup of HIF scatter resources */
666static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
667{
668 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
669 struct htc_target *target = ar->htc_target;
670 int ret;
671 bool virt_scat = false;
672
673 /* check if host supports scatter and it meets our requirements */
674 if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
675		ath6kl_err("host only supports scatter of %d entries, need: %d\n",
676 ar_sdio->func->card->host->max_segs,
677 MAX_SCATTER_ENTRIES_PER_REQ);
678 virt_scat = true;
679 }
680
681 if (!virt_scat) {
682 ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
683 MAX_SCATTER_ENTRIES_PER_REQ,
684 MAX_SCATTER_REQUESTS, virt_scat);
685
686 if (!ret) {
687 ath6kl_dbg(ATH6KL_DBG_ANY,
688 "hif-scatter enabled: max scatter req : %d entries: %d\n",
689 MAX_SCATTER_REQUESTS,
690 MAX_SCATTER_ENTRIES_PER_REQ);
691
692 target->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ;
693 target->max_xfer_szper_scatreq =
694 MAX_SCATTER_REQ_TRANSFER_SIZE;
695 } else {
696 ath6kl_sdio_cleanup_scatter(ar);
697 ath6kl_warn("hif scatter resource setup failed, trying virtual scatter method\n");
698 }
699 }
700
701 if (virt_scat || ret) {
702 ret = ath6kl_sdio_alloc_prep_scat_req(ar_sdio,
703 ATH6KL_SCATTER_ENTRIES_PER_REQ,
704 ATH6KL_SCATTER_REQS, virt_scat);
705
706 if (ret) {
707			ath6kl_err("failed to alloc virtual scatter resources!\n");
708 ath6kl_sdio_cleanup_scatter(ar);
709 return ret;
710 }
711
712 ath6kl_dbg(ATH6KL_DBG_ANY,
713 "Vitual scatter enabled, max_scat_req:%d, entries:%d\n",
714 ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ);
715
716 target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ;
717 target->max_xfer_szper_scatreq =
718 ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
719 }
720
721 return 0;
722}
723
724static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
725 .read_write_sync = ath6kl_sdio_read_write_sync,
726 .write_async = ath6kl_sdio_write_async,
727 .irq_enable = ath6kl_sdio_irq_enable,
728 .irq_disable = ath6kl_sdio_irq_disable,
729 .scatter_req_get = ath6kl_sdio_scatter_req_get,
730 .scatter_req_add = ath6kl_sdio_scatter_req_add,
731 .enable_scatter = ath6kl_sdio_enable_scatter,
732 .scat_req_rw = ath6kl_sdio_async_rw_scatter,
733 .cleanup_scatter = ath6kl_sdio_cleanup_scatter,
734};
735
736static int ath6kl_sdio_probe(struct sdio_func *func,
737 const struct sdio_device_id *id)
738{
739 int ret;
740 struct ath6kl_sdio *ar_sdio;
741 struct ath6kl *ar;
742 int count;
743
744 ath6kl_dbg(ATH6KL_DBG_TRC,
745 "%s: func: 0x%X, vendor id: 0x%X, dev id: 0x%X, block size: 0x%X/0x%X\n",
746 __func__, func->num, func->vendor,
747 func->device, func->max_blksize, func->cur_blksize);
748
749 ar_sdio = kzalloc(sizeof(struct ath6kl_sdio), GFP_KERNEL);
750 if (!ar_sdio)
751 return -ENOMEM;
752
753 ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL);
754 if (!ar_sdio->dma_buffer) {
755 ret = -ENOMEM;
756 goto err_hif;
757 }
758
759 ar_sdio->func = func;
760 sdio_set_drvdata(func, ar_sdio);
761
762 ar_sdio->id = id;
763 ar_sdio->is_disabled = true;
764
765 spin_lock_init(&ar_sdio->lock);
766 spin_lock_init(&ar_sdio->scat_lock);
767 spin_lock_init(&ar_sdio->wr_async_lock);
768
769 INIT_LIST_HEAD(&ar_sdio->scat_req);
770 INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
771 INIT_LIST_HEAD(&ar_sdio->wr_asyncq);
772
773 INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work);
774
775 for (count = 0; count < BUS_REQUEST_MAX_NUM; count++)
776 ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]);
777
778 ar = ath6kl_core_alloc(&ar_sdio->func->dev);
779 if (!ar) {
780 ath6kl_err("Failed to alloc ath6kl core\n");
781 ret = -ENOMEM;
782 goto err_dma;
783 }
784
785 ar_sdio->ar = ar;
786 ar->hif_priv = ar_sdio;
787 ar->hif_ops = &ath6kl_sdio_ops;
788
789 ath6kl_sdio_set_mbox_info(ar);
790
791 sdio_claim_host(func);
792
793 if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
794 MANUFACTURER_ID_AR6003_BASE) {
795 /* enable 4-bit ASYNC interrupt on AR6003 or later */
796 ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
797 CCCR_SDIO_IRQ_MODE_REG,
798 SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
799 if (ret) {
800 ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
801 ret);
802 sdio_release_host(func);
803 goto err_dma;
804 }
805
806 ath6kl_dbg(ATH6KL_DBG_TRC, "4-bit async irq mode enabled\n");
807 }
808
809 /* give us some time to enable, in ms */
810 func->enable_timeout = 100;
811
812 sdio_release_host(func);
813
814 ret = ath6kl_sdio_power_on(ar_sdio);
815 if (ret)
816 goto err_dma;
817
818 sdio_claim_host(func);
819
820 ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
821 if (ret) {
822		ath6kl_err("Set sdio block size %d failed: %d\n",
823 HIF_MBOX_BLOCK_SIZE, ret);
824 sdio_release_host(func);
825 goto err_off;
826 }
827
828 sdio_release_host(func);
829
830 ret = ath6kl_core_init(ar);
831 if (ret) {
832 ath6kl_err("Failed to init ath6kl core\n");
833 goto err_off;
834 }
835
836 return ret;
837
838err_off:
839 ath6kl_sdio_power_off(ar_sdio);
840err_dma:
841 kfree(ar_sdio->dma_buffer);
842err_hif:
843 kfree(ar_sdio);
844
845 return ret;
846}
847
848static void ath6kl_sdio_remove(struct sdio_func *func)
849{
850 struct ath6kl_sdio *ar_sdio;
851
852 ar_sdio = sdio_get_drvdata(func);
853
854 ath6kl_stop_txrx(ar_sdio->ar);
855 cancel_work_sync(&ar_sdio->wr_async_work);
856
857 ath6kl_unavail_ev(ar_sdio->ar);
858
859 ath6kl_sdio_power_off(ar_sdio);
860
861 kfree(ar_sdio->dma_buffer);
862 kfree(ar_sdio);
863}
864
865static const struct sdio_device_id ath6kl_sdio_devices[] = {
866 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))},
867 {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))},
868 {},
869};
870
871MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);
872
873static struct sdio_driver ath6kl_sdio_driver = {
874 .name = "ath6kl_sdio",
875 .id_table = ath6kl_sdio_devices,
876 .probe = ath6kl_sdio_probe,
877 .remove = ath6kl_sdio_remove,
878};
879
880static int __init ath6kl_sdio_init(void)
881{
882 int ret;
883
884 ret = sdio_register_driver(&ath6kl_sdio_driver);
885 if (ret)
886 ath6kl_err("sdio driver registration failed: %d\n", ret);
887
888 return ret;
889}
890
891static void __exit ath6kl_sdio_exit(void)
892{
893 sdio_unregister_driver(&ath6kl_sdio_driver);
894}
895
896module_init(ath6kl_sdio_init);
897module_exit(ath6kl_sdio_exit);
898
899MODULE_AUTHOR("Atheros Communications, Inc.");
900MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
901MODULE_LICENSE("Dual BSD/GPL");
902
903MODULE_FIRMWARE(AR6003_REV2_OTP_FILE);
904MODULE_FIRMWARE(AR6003_REV2_FIRMWARE_FILE);
905MODULE_FIRMWARE(AR6003_REV2_PATCH_FILE);
906MODULE_FIRMWARE(AR6003_REV2_BOARD_DATA_FILE);
907MODULE_FIRMWARE(AR6003_REV2_DEFAULT_BOARD_DATA_FILE);
908MODULE_FIRMWARE(AR6003_REV3_OTP_FILE);
909MODULE_FIRMWARE(AR6003_REV3_FIRMWARE_FILE);
910MODULE_FIRMWARE(AR6003_REV3_PATCH_FILE);
911MODULE_FIRMWARE(AR6003_REV3_BOARD_DATA_FILE);
912MODULE_FIRMWARE(AR6003_REV3_DEFAULT_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath6kl/target.h b/drivers/net/wireless/ath/ath6kl/target.h
new file mode 100644
index 000000000000..519a013c9991
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/target.h
@@ -0,0 +1,331 @@
1/*
2 * Copyright (c) 2004-2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef TARGET_H
18#define TARGET_H
19
20#define AR6003_BOARD_DATA_SZ 1024
21#define AR6003_BOARD_EXT_DATA_SZ 768
22
23#define RESET_CONTROL_ADDRESS 0x00000000
24#define RESET_CONTROL_COLD_RST 0x00000100
25#define RESET_CONTROL_MBOX_RST 0x00000004
26
27#define CPU_CLOCK_STANDARD_S 0
28#define CPU_CLOCK_STANDARD 0x00000003
29#define CPU_CLOCK_ADDRESS 0x00000020
30
31#define CLOCK_CONTROL_ADDRESS 0x00000028
32#define CLOCK_CONTROL_LF_CLK32_S 2
33#define CLOCK_CONTROL_LF_CLK32 0x00000004
34
35#define SYSTEM_SLEEP_ADDRESS 0x000000c4
36#define SYSTEM_SLEEP_DISABLE_S 0
37#define SYSTEM_SLEEP_DISABLE 0x00000001
38
39#define LPO_CAL_ADDRESS 0x000000e0
40#define LPO_CAL_ENABLE_S 20
41#define LPO_CAL_ENABLE 0x00100000
42
43#define GPIO_PIN10_ADDRESS 0x00000050
44#define GPIO_PIN11_ADDRESS 0x00000054
45#define GPIO_PIN12_ADDRESS 0x00000058
46#define GPIO_PIN13_ADDRESS 0x0000005c
47
48#define HOST_INT_STATUS_ADDRESS 0x00000400
49#define HOST_INT_STATUS_ERROR_S 7
50#define HOST_INT_STATUS_ERROR 0x00000080
51
52#define HOST_INT_STATUS_CPU_S 6
53#define HOST_INT_STATUS_CPU 0x00000040
54
55#define HOST_INT_STATUS_COUNTER_S 4
56#define HOST_INT_STATUS_COUNTER 0x00000010
57
58#define CPU_INT_STATUS_ADDRESS 0x00000401
59
60#define ERROR_INT_STATUS_ADDRESS 0x00000402
61#define ERROR_INT_STATUS_WAKEUP_S 2
62#define ERROR_INT_STATUS_WAKEUP 0x00000004
63
64#define ERROR_INT_STATUS_RX_UNDERFLOW_S 1
65#define ERROR_INT_STATUS_RX_UNDERFLOW 0x00000002
66
67#define ERROR_INT_STATUS_TX_OVERFLOW_S 0
68#define ERROR_INT_STATUS_TX_OVERFLOW 0x00000001
69
70#define COUNTER_INT_STATUS_ADDRESS 0x00000403
71#define COUNTER_INT_STATUS_COUNTER_S 0
72#define COUNTER_INT_STATUS_COUNTER 0x000000ff
73
74#define RX_LOOKAHEAD_VALID_ADDRESS 0x00000405
75
76#define INT_STATUS_ENABLE_ADDRESS 0x00000418
77#define INT_STATUS_ENABLE_ERROR_S 7
78#define INT_STATUS_ENABLE_ERROR 0x00000080
79
80#define INT_STATUS_ENABLE_CPU_S 6
81#define INT_STATUS_ENABLE_CPU 0x00000040
82
83#define INT_STATUS_ENABLE_INT_S 5
84#define INT_STATUS_ENABLE_INT 0x00000020
85#define INT_STATUS_ENABLE_COUNTER_S 4
86#define INT_STATUS_ENABLE_COUNTER 0x00000010
87
88#define INT_STATUS_ENABLE_MBOX_DATA_S 0
89#define INT_STATUS_ENABLE_MBOX_DATA 0x0000000f
90
91#define CPU_INT_STATUS_ENABLE_ADDRESS 0x00000419
92#define CPU_INT_STATUS_ENABLE_BIT_S 0
93#define CPU_INT_STATUS_ENABLE_BIT 0x000000ff
94
95#define ERROR_STATUS_ENABLE_ADDRESS 0x0000041a
96#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_S 1
97#define ERROR_STATUS_ENABLE_RX_UNDERFLOW 0x00000002
98
99#define ERROR_STATUS_ENABLE_TX_OVERFLOW_S 0
100#define ERROR_STATUS_ENABLE_TX_OVERFLOW 0x00000001
101
102#define COUNTER_INT_STATUS_ENABLE_ADDRESS 0x0000041b
103#define COUNTER_INT_STATUS_ENABLE_BIT_S 0
104#define COUNTER_INT_STATUS_ENABLE_BIT 0x000000ff
105
106#define COUNT_ADDRESS 0x00000420
107
108#define COUNT_DEC_ADDRESS 0x00000440
109
110#define WINDOW_DATA_ADDRESS 0x00000474
111#define WINDOW_WRITE_ADDR_ADDRESS 0x00000478
112#define WINDOW_READ_ADDR_ADDRESS 0x0000047c
113#define CPU_DBG_SEL_ADDRESS 0x00000483
114#define CPU_DBG_ADDRESS 0x00000484
115
116#define LOCAL_SCRATCH_ADDRESS 0x000000c0
117#define ATH6KL_OPTION_SLEEP_DISABLE 0x08
118
119#define RTC_BASE_ADDRESS 0x00004000
120#define GPIO_BASE_ADDRESS 0x00014000
121#define MBOX_BASE_ADDRESS 0x00018000
122#define ANALOG_INTF_BASE_ADDRESS 0x0001c000
123
124/* real name of the register is unknown */
125#define ATH6KL_ANALOG_PLL_REGISTER (ANALOG_INTF_BASE_ADDRESS + 0x284)
126
127#define SM(f, v) (((v) << f##_S) & f)
128#define MS(f, v) (((v) & f) >> f##_S)
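
/*
 * A worked example of the intended SM()/MS() usage with the FIELD and
 * FIELD_S mask/shift pairs above: SM() builds a register value from a
 * field value and MS() extracts the field back, e.g.
 *
 *	SM(INT_STATUS_ENABLE_ERROR, 1)    == 0x00000080
 *	MS(INT_STATUS_ENABLE_ERROR, 0x80) == 1
 */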
129
130/*
131 * xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the
132 * host_interest structure.
133 *
134 * Host Interest is shared between Host and Target in order to coordinate
135 * between the two, and is intended to remain constant (with additions only
136 * at the end).
137 */
138#define ATH6KL_HI_START_ADDR 0x00540600
139
140/*
141 * These are items that the Host may need to access
142 * via BMI or via the Diagnostic Window. The position
143 * of items in this structure must remain constant
144 * across firmware revisions!
145 *
146 * Types for each item must be fixed size across target and host platforms.
147 * The structure is used only to calculate the offset of each field with
148 * the HI_ITEM() macro; no values are stored in it.
149 *
150 * More items may be added at the end.
151 */
152struct host_interest {
153 /*
154 * Pointer to application-defined area, if any.
155 * Set by Target application during startup.
156 */
157 u32 hi_app_host_interest; /* 0x00 */
158
159 /* Pointer to register dump area, valid after Target crash. */
160 u32 hi_failure_state; /* 0x04 */
161
162 /* Pointer to debug logging header */
163 u32 hi_dbglog_hdr; /* 0x08 */
164
165 u32 hi_unused1; /* 0x0c */
166
167 /*
168 * General-purpose flag bits, similar to ATH6KL_OPTION_* flags.
169 * Can be used by application rather than by OS.
170 */
171 u32 hi_option_flag; /* 0x10 */
172
173 /*
174 * Boolean that determines whether or not to
175 * display messages on the serial port.
176 */
177 u32 hi_serial_enable; /* 0x14 */
178
179 /* Start address of DataSet index, if any */
180 u32 hi_dset_list_head; /* 0x18 */
181
182 /* Override Target application start address */
183 u32 hi_app_start; /* 0x1c */
184
185 /* Clock and voltage tuning */
186 u32 hi_skip_clock_init; /* 0x20 */
187 u32 hi_core_clock_setting; /* 0x24 */
188 u32 hi_cpu_clock_setting; /* 0x28 */
189 u32 hi_system_sleep_setting; /* 0x2c */
190 u32 hi_xtal_control_setting; /* 0x30 */
191 u32 hi_pll_ctrl_setting_24ghz; /* 0x34 */
192 u32 hi_pll_ctrl_setting_5ghz; /* 0x38 */
193 u32 hi_ref_voltage_trim_setting; /* 0x3c */
194 u32 hi_clock_info; /* 0x40 */
195
196 /*
197 * Flash configuration overrides, used only
198 * when firmware is not executing from flash.
199 * (When using flash, modify the global variables
200 * with equivalent names.)
201 */
202 u32 hi_bank0_addr_value; /* 0x44 */
203 u32 hi_bank0_read_value; /* 0x48 */
204 u32 hi_bank0_write_value; /* 0x4c */
205 u32 hi_bank0_config_value; /* 0x50 */
206
207 /* Pointer to Board Data */
208 u32 hi_board_data; /* 0x54 */
209 u32 hi_board_data_initialized; /* 0x58 */
210
211 u32 hi_dset_ram_index_tbl; /* 0x5c */
212
213 u32 hi_desired_baud_rate; /* 0x60 */
214 u32 hi_dbglog_config; /* 0x64 */
215 u32 hi_end_ram_reserve_sz; /* 0x68 */
216 u32 hi_mbox_io_block_sz; /* 0x6c */
217
218 u32 hi_num_bpatch_streams; /* 0x70 -- unused */
219 u32 hi_mbox_isr_yield_limit; /* 0x74 */
220
221 u32 hi_refclk_hz; /* 0x78 */
222 u32 hi_ext_clk_detected; /* 0x7c */
223 u32 hi_dbg_uart_txpin; /* 0x80 */
224 u32 hi_dbg_uart_rxpin; /* 0x84 */
225 u32 hi_hci_uart_baud; /* 0x88 */
226 u32 hi_hci_uart_pin_assignments; /* 0x8C */
227 /*
228 * NOTE: byte [0] = tx pin, [1] = rx pin, [2] = rts pin, [3] = cts
229 * pin
230 */
231 u32 hi_hci_uart_baud_scale_val; /* 0x90 */
232 u32 hi_hci_uart_baud_step_val; /* 0x94 */
233
234 u32 hi_allocram_start; /* 0x98 */
235 u32 hi_allocram_sz; /* 0x9c */
236 u32 hi_hci_bridge_flags; /* 0xa0 */
237 u32 hi_hci_uart_support_pins; /* 0xa4 */
238 /*
239 * NOTE: byte [0] = RESET pin (bit 7 is polarity),
240 * bytes[1]..bytes[3] are for future use
241 */
242 u32 hi_hci_uart_pwr_mgmt_params; /* 0xa8 */
243 /*
244 * 0xa8 - [1]: 0 = UART FC active low, 1 = UART FC active high
245 * [31:16]: wakeup timeout in ms
246 */
247
248 /* Pointer to extended board data */
249 u32 hi_board_ext_data; /* 0xac */
250 u32 hi_board_ext_data_config; /* 0xb0 */
251
252 /*
253	 * Bit  [0]    : valid
254	 * Bits [31:16]: size
255 */
256 /*
257	 * hi_reset_flag is used to do some things when the target resets,
258	 * such as restoring app_start after a warm reset, or preserving
259	 * the host interest area, ROM data, literals etc.
260 */
261 u32 hi_reset_flag; /* 0xb4 */
262 /* indicate hi_reset_flag is valid */
263 u32 hi_reset_flag_valid; /* 0xb8 */
264 u32 hi_hci_uart_pwr_mgmt_params_ext; /* 0xbc */
265 /*
266 * 0xbc - [31:0]: idle timeout in ms
267 */
268 /* ACS flags */
269 u32 hi_acs_flags; /* 0xc0 */
270 u32 hi_console_flags; /* 0xc4 */
271 u32 hi_nvram_state; /* 0xc8 */
272 u32 hi_option_flag2; /* 0xcc */
273
274 /* If non-zero, override values sent to Host in WMI_READY event. */
275 u32 hi_sw_version_override; /* 0xd0 */
276 u32 hi_abi_version_override; /* 0xd4 */
277
278 /*
279 * Percentage of high priority RX traffic to total expected RX traffic -
280 * applicable only to ar6004
281 */
282 u32 hi_hp_rx_traffic_ratio; /* 0xd8 */
283
284 /* test applications flags */
285	u32 hi_test_apps_related; /* 0xdc */
286 /* location of test script */
287 u32 hi_ota_testscript; /* 0xe0 */
288 /* location of CAL data */
289 u32 hi_cal_data; /* 0xe4 */
290 /* Number of packet log buffers */
291 u32 hi_pktlog_num_buffers; /* 0xe8 */
292
293} __packed;
294
295#define HI_ITEM(item) offsetof(struct host_interest, item)
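
/*
 * A minimal usage sketch: the target RAM address of a host_interest
 * member is the base address plus the member offset, e.g. for
 * hi_board_data:
 *
 *	u32 addr = ATH6KL_HI_START_ADDR + HI_ITEM(hi_board_data);
 *
 * which evaluates to 0x00540600 + 0x54 here.
 */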
296
297#define HI_OPTION_MAC_ADDR_METHOD_SHIFT 3
298
299#define HI_OPTION_FW_MODE_IBSS 0x0
300#define HI_OPTION_FW_MODE_BSS_STA 0x1
301#define HI_OPTION_FW_MODE_AP 0x2
302
303#define HI_OPTION_NUM_DEV_SHIFT 0x9
304
305#define HI_OPTION_FW_BRIDGE_SHIFT 0x04
306
 307/* Fw Mode/SubMode Mask
 308|-------------------------------------------------------------------------------|
 309|   SUB   |   SUB   |   SUB   |   SUB   |         |         |         |         |
 310| MODE[3] | MODE[2] | MODE[1] | MODE[0] | MODE[3] | MODE[2] | MODE[1] | MODE[0] |
 311|   (2)   |   (2)   |   (2)   |   (2)   |   (2)   |   (2)   |   (2)   |   (2)   |
 312|-------------------------------------------------------------------------------|
 313*/
314#define HI_OPTION_FW_MODE_SHIFT 0xC
315
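The shifts above pack per-device firmware modes (and submodes, per the mask
table) into a single option word. A hedged sketch of how a host might compose
that word for a single STA device ("param" is a hypothetical local; the
actual flag lives in the host interest area):

	u32 param = 0;

	param |= (1 << HI_OPTION_NUM_DEV_SHIFT);	/* one device */
	param |= (HI_OPTION_FW_MODE_BSS_STA <<
		  HI_OPTION_FW_MODE_SHIFT);		/* device 0: STA mode */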
316/* Convert a Target virtual address into a Target physical address */
 317#define TARG_VTOP(vaddr) ((vaddr) & 0x001fffff)
318
319#define AR6003_REV2_APP_START_OVERRIDE 0x944C00
320#define AR6003_REV2_APP_LOAD_ADDRESS 0x543180
321#define AR6003_REV2_BOARD_EXT_DATA_ADDRESS 0x57E500
322#define AR6003_REV2_DATASET_PATCH_ADDRESS 0x57e884
323#define AR6003_REV2_RAM_RESERVE_SIZE 6912
324
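TARG_VTOP() simply masks a target virtual address down to its low 21 bits.
For example, applied to the REV2 application load address defined above:

	u32 vaddr = AR6003_REV2_APP_LOAD_ADDRESS;	/* 0x543180 */
	u32 paddr = TARG_VTOP(vaddr);			/* 0x143180 */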
325#define AR6003_REV3_APP_START_OVERRIDE 0x945d00
326#define AR6003_REV3_APP_LOAD_ADDRESS 0x545000
327#define AR6003_REV3_BOARD_EXT_DATA_ADDRESS 0x542330
328#define AR6003_REV3_DATASET_PATCH_ADDRESS 0x57FF74
329#define AR6003_REV3_RAM_RESERVE_SIZE 512
330
331#endif
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
new file mode 100644
index 000000000000..0cab1c1b6fd1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -0,0 +1,1457 @@
1/*
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "debug.h"
19
20static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
21 u32 *map_no)
22{
23 struct ath6kl *ar = ath6kl_priv(dev);
24 struct ethhdr *eth_hdr;
25 u32 i, ep_map = -1;
26 u8 *datap;
27
28 *map_no = 0;
29 datap = skb->data;
30 eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));
31
32 if (is_multicast_ether_addr(eth_hdr->h_dest))
33 return ENDPOINT_2;
34
35 for (i = 0; i < ar->node_num; i++) {
36 if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
37 ETH_ALEN) == 0) {
38 *map_no = i + 1;
39 ar->node_map[i].tx_pend++;
40 return ar->node_map[i].ep_id;
41 }
42
43 if ((ep_map == -1) && !ar->node_map[i].tx_pend)
44 ep_map = i;
45 }
46
47 if (ep_map == -1) {
48 ep_map = ar->node_num;
49 ar->node_num++;
50 if (ar->node_num > MAX_NODE_NUM)
51 return ENDPOINT_UNUSED;
52 }
53
54 memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);
55
56 for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
57 if (!ar->tx_pending[i]) {
58 ar->node_map[ep_map].ep_id = i;
59 break;
60 }
61
62 /*
63 * No free endpoint is available, start redistribution on
64 * the inuse endpoints.
65 */
66 if (i == ENDPOINT_5) {
67 ar->node_map[ep_map].ep_id = ar->next_ep_id;
68 ar->next_ep_id++;
69 if (ar->next_ep_id > ENDPOINT_5)
70 ar->next_ep_id = ENDPOINT_2;
71 }
72 }
73
74 *map_no = ep_map + 1;
75 ar->node_map[ep_map].tx_pend++;
76
77 return ar->node_map[ep_map].ep_id;
78}
79
80static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
81 bool *more_data)
82{
83 struct ethhdr *datap = (struct ethhdr *) skb->data;
84 struct ath6kl_sta *conn = NULL;
85 bool ps_queued = false, is_psq_empty = false;
86
87 if (is_multicast_ether_addr(datap->h_dest)) {
88 u8 ctr = 0;
89 bool q_mcast = false;
90
91 for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
92 if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
93 q_mcast = true;
94 break;
95 }
96 }
97
98 if (q_mcast) {
99 /*
 100  * If this transmit is not because of a DTIM expiry,
 101  * queue it.
102 */
103 if (!test_bit(DTIM_EXPIRED, &ar->flag)) {
104 bool is_mcastq_empty = false;
105
106 spin_lock_bh(&ar->mcastpsq_lock);
107 is_mcastq_empty =
108 skb_queue_empty(&ar->mcastpsq);
109 skb_queue_tail(&ar->mcastpsq, skb);
110 spin_unlock_bh(&ar->mcastpsq_lock);
111
112 /*
113 * If this is the first Mcast pkt getting
114 * queued indicate to the target to set the
115 * BitmapControl LSB of the TIM IE.
116 */
117 if (is_mcastq_empty)
118 ath6kl_wmi_set_pvb_cmd(ar->wmi,
119 MCAST_AID, 1);
120
121 ps_queued = true;
122 } else {
123 /*
124 * This transmit is because of Dtim expiry.
125 * Determine if MoreData bit has to be set.
126 */
127 spin_lock_bh(&ar->mcastpsq_lock);
128 if (!skb_queue_empty(&ar->mcastpsq))
129 *more_data = true;
130 spin_unlock_bh(&ar->mcastpsq_lock);
131 }
132 }
133 } else {
134 conn = ath6kl_find_sta(ar, datap->h_dest);
135 if (!conn) {
136 dev_kfree_skb(skb);
137
138 /* Inform the caller that the skb is consumed */
139 return true;
140 }
141
142 if (conn->sta_flags & STA_PS_SLEEP) {
143 if (!(conn->sta_flags & STA_PS_POLLED)) {
144 /* Queue the frames if the STA is sleeping */
145 spin_lock_bh(&conn->psq_lock);
146 is_psq_empty = skb_queue_empty(&conn->psq);
147 skb_queue_tail(&conn->psq, skb);
148 spin_unlock_bh(&conn->psq_lock);
149
150 /*
151 * If this is the first pkt getting queued
152 * for this STA, update the PVB for this
153 * STA.
154 */
155 if (is_psq_empty)
156 ath6kl_wmi_set_pvb_cmd(ar->wmi,
157 conn->aid, 1);
158
159 ps_queued = true;
160 } else {
161 /*
162 * This tx is because of a PsPoll.
163 * Determine if MoreData bit has to be set.
164 */
165 spin_lock_bh(&conn->psq_lock);
166 if (!skb_queue_empty(&conn->psq))
167 *more_data = true;
168 spin_unlock_bh(&conn->psq_lock);
169 }
170 }
171 }
172
173 return ps_queued;
174}
175
176/* Tx functions */
177
178int ath6kl_control_tx(void *devt, struct sk_buff *skb,
179 enum htc_endpoint_id eid)
180{
181 struct ath6kl *ar = devt;
182 int status = 0;
183 struct ath6kl_cookie *cookie = NULL;
184
185 spin_lock_bh(&ar->lock);
186
187 ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
 188 "%s: skb=0x%p, len=0x%x eid=%d\n", __func__,
189 skb, skb->len, eid);
190
191 if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
192 /*
193 * Control endpoint is full, don't allocate resources, we
194 * are just going to drop this packet.
195 */
196 cookie = NULL;
197 ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
198 skb, skb->len);
199 } else
200 cookie = ath6kl_alloc_cookie(ar);
201
202 if (cookie == NULL) {
203 spin_unlock_bh(&ar->lock);
204 status = -ENOMEM;
205 goto fail_ctrl_tx;
206 }
207
208 ar->tx_pending[eid]++;
209
210 if (eid != ar->ctrl_ep)
211 ar->total_tx_data_pend++;
212
213 spin_unlock_bh(&ar->lock);
214
215 cookie->skb = skb;
216 cookie->map_no = 0;
217 set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
218 eid, ATH6KL_CONTROL_PKT_TAG);
219
220 /*
221 * This interface is asynchronous, if there is an error, cleanup
222 * will happen in the TX completion callback.
223 */
224 htc_tx(ar->htc_target, &cookie->htc_pkt);
225
226 return 0;
227
228fail_ctrl_tx:
229 dev_kfree_skb(skb);
230 return status;
231}
232
233int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
234{
235 struct ath6kl *ar = ath6kl_priv(dev);
236 struct ath6kl_cookie *cookie = NULL;
237 enum htc_endpoint_id eid = ENDPOINT_UNUSED;
238 u32 map_no = 0;
239 u16 htc_tag = ATH6KL_DATA_PKT_TAG;
 240 u8 ac = 99; /* initialize to unmapped ac */
241 bool chk_adhoc_ps_mapping = false, more_data = false;
242 struct wmi_tx_meta_v2 meta_v2;
243 int ret;
244
245 ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
246 "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
247 skb, skb->data, skb->len);
248
249 /* If target is not associated */
250 if (!test_bit(CONNECTED, &ar->flag)) {
251 dev_kfree_skb(skb);
252 return 0;
253 }
254
255 if (!test_bit(WMI_READY, &ar->flag))
256 goto fail_tx;
257
258 /* AP mode Power saving processing */
259 if (ar->nw_type == AP_NETWORK) {
260 if (ath6kl_powersave_ap(ar, skb, &more_data))
261 return 0;
262 }
263
264 if (test_bit(WMI_ENABLED, &ar->flag)) {
265 memset(&meta_v2, 0, sizeof(meta_v2));
266
267 if (skb_headroom(skb) < dev->needed_headroom) {
268 WARN_ON(1);
269 goto fail_tx;
270 }
271
272 if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
273 ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
274 goto fail_tx;
275 }
276
277 if (ath6kl_wmi_data_hdr_add(ar->wmi, skb, DATA_MSGTYPE,
278 more_data, 0, 0, NULL)) {
279 ath6kl_err("wmi_data_hdr_add failed\n");
280 goto fail_tx;
281 }
282
283 if ((ar->nw_type == ADHOC_NETWORK) &&
284 ar->ibss_ps_enable && test_bit(CONNECTED, &ar->flag))
285 chk_adhoc_ps_mapping = true;
286 else {
287 /* get the stream mapping */
288 ret = ath6kl_wmi_implicit_create_pstream(ar->wmi, skb,
289 0, test_bit(WMM_ENABLED, &ar->flag), &ac);
290 if (ret)
291 goto fail_tx;
292 }
293 } else
294 goto fail_tx;
295
296 spin_lock_bh(&ar->lock);
297
298 if (chk_adhoc_ps_mapping)
299 eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
300 else
301 eid = ar->ac2ep_map[ac];
302
303 if (eid == 0 || eid == ENDPOINT_UNUSED) {
304 ath6kl_err("eid %d is not mapped!\n", eid);
305 spin_unlock_bh(&ar->lock);
306 goto fail_tx;
307 }
308
309 /* allocate resource for this packet */
310 cookie = ath6kl_alloc_cookie(ar);
311
312 if (!cookie) {
313 spin_unlock_bh(&ar->lock);
314 goto fail_tx;
315 }
316
317 /* update counts while the lock is held */
318 ar->tx_pending[eid]++;
319 ar->total_tx_data_pend++;
320
321 spin_unlock_bh(&ar->lock);
322
323 cookie->skb = skb;
324 cookie->map_no = map_no;
325 set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
326 eid, htc_tag);
327
328 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, skb->data, skb->len);
329
330 /*
331 * HTC interface is asynchronous, if this fails, cleanup will
332 * happen in the ath6kl_tx_complete callback.
333 */
334 htc_tx(ar->htc_target, &cookie->htc_pkt);
335
336 return 0;
337
338fail_tx:
339 dev_kfree_skb(skb);
340
341 ar->net_stats.tx_dropped++;
342 ar->net_stats.tx_aborted_errors++;
343
344 return 0;
345}
346
347/* indicate tx activity or inactivity on a WMI stream */
348void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
349{
350 struct ath6kl *ar = devt;
351 enum htc_endpoint_id eid;
352 int i;
353
354 eid = ar->ac2ep_map[traffic_class];
355
356 if (!test_bit(WMI_ENABLED, &ar->flag))
357 goto notify_htc;
358
359 spin_lock_bh(&ar->lock);
360
361 ar->ac_stream_active[traffic_class] = active;
362
363 if (active) {
364 /*
365 * Keep track of the active stream with the highest
366 * priority.
367 */
368 if (ar->ac_stream_pri_map[traffic_class] >
369 ar->hiac_stream_active_pri)
370 /* set the new highest active priority */
371 ar->hiac_stream_active_pri =
372 ar->ac_stream_pri_map[traffic_class];
373
374 } else {
375 /*
376 * We may have to search for the next active stream
377 * that is the highest priority.
378 */
379 if (ar->hiac_stream_active_pri ==
380 ar->ac_stream_pri_map[traffic_class]) {
381 /*
 382  * The highest priority stream just went inactive;
 383  * reset and search for the next highest active
 384  * priority stream.
385 */
386 ar->hiac_stream_active_pri = 0;
387
388 for (i = 0; i < WMM_NUM_AC; i++) {
389 if (ar->ac_stream_active[i] &&
390 (ar->ac_stream_pri_map[i] >
391 ar->hiac_stream_active_pri))
392 /*
393 * Set the new highest active
394 * priority.
395 */
396 ar->hiac_stream_active_pri =
397 ar->ac_stream_pri_map[i];
398 }
399 }
400 }
401
402 spin_unlock_bh(&ar->lock);
403
404notify_htc:
405 /* notify HTC, this may cause credit distribution changes */
406 htc_indicate_activity_change(ar->htc_target, eid, active);
407}
408
409enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
410 struct htc_packet *packet)
411{
412 struct ath6kl *ar = target->dev->ar;
413 enum htc_endpoint_id endpoint = packet->endpoint;
414
415 if (endpoint == ar->ctrl_ep) {
416 /*
 417  * Under normal WMI, if this endpoint is getting full then
 418  * something is running rampant; the host should not be
 419  * exhausting the WMI queue with too many commands. The only
 420  * exception to this is during testing using endpointping.
421 */
422 spin_lock_bh(&ar->lock);
423 set_bit(WMI_CTRL_EP_FULL, &ar->flag);
424 spin_unlock_bh(&ar->lock);
425 ath6kl_err("wmi ctrl ep is full\n");
426 return HTC_SEND_FULL_KEEP;
427 }
428
429 if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
430 return HTC_SEND_FULL_KEEP;
431
432 if (ar->nw_type == ADHOC_NETWORK)
433 /*
 434  * In adhoc mode, we cannot differentiate traffic
 435  * priorities so there is no need to continue; however,
 436  * we should stop the network queues.
437 */
438 goto stop_net_queues;
439
440 /*
441 * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
442 * the highest active stream.
443 */
444 if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
445 ar->hiac_stream_active_pri &&
446 ar->cookie_count <= MAX_HI_COOKIE_NUM)
447 /*
448 * Give preference to the highest priority stream by
449 * dropping the packets which overflowed.
450 */
451 return HTC_SEND_FULL_DROP;
452
453stop_net_queues:
454 spin_lock_bh(&ar->lock);
455 set_bit(NETQ_STOPPED, &ar->flag);
456 spin_unlock_bh(&ar->lock);
457 netif_stop_queue(ar->net_dev);
458
459 return HTC_SEND_FULL_KEEP;
460}
461
462/* TODO this needs to be looked at */
463static void ath6kl_tx_clear_node_map(struct ath6kl *ar,
464 enum htc_endpoint_id eid, u32 map_no)
465{
466 u32 i;
467
468 if (ar->nw_type != ADHOC_NETWORK)
469 return;
470
471 if (!ar->ibss_ps_enable)
472 return;
473
474 if (eid == ar->ctrl_ep)
475 return;
476
477 if (map_no == 0)
478 return;
479
480 map_no--;
481 ar->node_map[map_no].tx_pend--;
482
483 if (ar->node_map[map_no].tx_pend)
484 return;
485
486 if (map_no != (ar->node_num - 1))
487 return;
488
489 for (i = ar->node_num; i > 0; i--) {
490 if (ar->node_map[i - 1].tx_pend)
491 break;
492
493 memset(&ar->node_map[i - 1], 0,
494 sizeof(struct ath6kl_node_mapping));
495 ar->node_num--;
496 }
497}
498
499void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
500{
501 struct ath6kl *ar = context;
502 struct sk_buff_head skb_queue;
503 struct htc_packet *packet;
504 struct sk_buff *skb;
505 struct ath6kl_cookie *ath6kl_cookie;
506 u32 map_no = 0;
507 int status;
508 enum htc_endpoint_id eid;
509 bool wake_event = false;
510 bool flushing = false;
511
512 skb_queue_head_init(&skb_queue);
513
514 /* lock the driver as we update internal state */
515 spin_lock_bh(&ar->lock);
516
517 /* reap completed packets */
518 while (!list_empty(packet_queue)) {
519
520 packet = list_first_entry(packet_queue, struct htc_packet,
521 list);
522 list_del(&packet->list);
523
524 ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
525 if (!ath6kl_cookie)
526 goto fatal;
527
528 status = packet->status;
529 skb = ath6kl_cookie->skb;
530 eid = packet->endpoint;
531 map_no = ath6kl_cookie->map_no;
532
533 if (!skb || !skb->data)
534 goto fatal;
535
536 packet->buf = skb->data;
537
538 __skb_queue_tail(&skb_queue, skb);
539
540 if (!status && (packet->act_len != skb->len))
541 goto fatal;
542
543 ar->tx_pending[eid]--;
544
545 if (eid != ar->ctrl_ep)
546 ar->total_tx_data_pend--;
547
548 if (eid == ar->ctrl_ep) {
549 if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
550 clear_bit(WMI_CTRL_EP_FULL, &ar->flag);
551
552 if (ar->tx_pending[eid] == 0)
553 wake_event = true;
554 }
555
556 if (status) {
557 if (status == -ECANCELED)
558 /* a packet was flushed */
559 flushing = true;
560
561 ar->net_stats.tx_errors++;
562
563 if (status != -ENOSPC)
564 ath6kl_err("tx error, status: 0x%x\n", status);
565 ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
566 "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
567 __func__, skb, packet->buf, packet->act_len,
568 eid, "error!");
569 } else {
570 ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
571 "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
572 __func__, skb, packet->buf, packet->act_len,
573 eid, "OK");
574
575 flushing = false;
576 ar->net_stats.tx_packets++;
577 ar->net_stats.tx_bytes += skb->len;
578 }
579
580 ath6kl_tx_clear_node_map(ar, eid, map_no);
581
582 ath6kl_free_cookie(ar, ath6kl_cookie);
583
584 if (test_bit(NETQ_STOPPED, &ar->flag))
585 clear_bit(NETQ_STOPPED, &ar->flag);
586 }
587
588 spin_unlock_bh(&ar->lock);
589
590 __skb_queue_purge(&skb_queue);
591
592 if (test_bit(CONNECTED, &ar->flag)) {
593 if (!flushing)
594 netif_wake_queue(ar->net_dev);
595 }
596
597 if (wake_event)
598 wake_up(&ar->event_wq);
599
600 return;
601
602fatal:
603 WARN_ON(1);
604 spin_unlock_bh(&ar->lock);
605 return;
606}
607
608void ath6kl_tx_data_cleanup(struct ath6kl *ar)
609{
610 int i;
611
612 /* flush all the data (non-control) streams */
613 for (i = 0; i < WMM_NUM_AC; i++)
614 htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
615 ATH6KL_DATA_PKT_TAG);
616}
617
618/* Rx functions */
619
620static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
621 struct sk_buff *skb)
622{
623 if (!skb)
624 return;
625
626 skb->dev = dev;
627
628 if (!(skb->dev->flags & IFF_UP)) {
629 dev_kfree_skb(skb);
630 return;
631 }
632
633 skb->protocol = eth_type_trans(skb, skb->dev);
634
635 netif_rx_ni(skb);
636}
637
638static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
639{
640 struct sk_buff *skb;
641
642 while (num) {
643 skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
644 if (!skb) {
645 ath6kl_err("netbuf allocation failed\n");
646 return;
647 }
648 skb_queue_tail(q, skb);
649 num--;
650 }
651}
652
653static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
654{
655 struct sk_buff *skb = NULL;
656
657 if (skb_queue_len(&p_aggr->free_q) < (AGGR_NUM_OF_FREE_NETBUFS >> 2))
658 ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);
659
660 skb = skb_dequeue(&p_aggr->free_q);
661
662 return skb;
663}
664
665void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
666{
667 struct ath6kl *ar = target->dev->ar;
668 struct sk_buff *skb;
669 int rx_buf;
670 int n_buf_refill;
671 struct htc_packet *packet;
672 struct list_head queue;
673
674 n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
675 htc_get_rxbuf_num(ar->htc_target, endpoint);
676
677 if (n_buf_refill <= 0)
678 return;
679
680 INIT_LIST_HEAD(&queue);
681
682 ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
683 "%s: providing htc with %d buffers at eid=%d\n",
684 __func__, n_buf_refill, endpoint);
685
686 for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
687 skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
688 if (!skb)
689 break;
690
691 packet = (struct htc_packet *) skb->head;
692 set_htc_rxpkt_info(packet, skb, skb->data,
693 ATH6KL_BUFFER_SIZE, endpoint);
694 list_add_tail(&packet->list, &queue);
695 }
696
697 if (!list_empty(&queue))
698 htc_add_rxbuf_multiple(ar->htc_target, &queue);
699}
700
701void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
702{
703 struct htc_packet *packet;
704 struct sk_buff *skb;
705
706 while (count) {
707 skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
708 if (!skb)
709 return;
710
711 packet = (struct htc_packet *) skb->head;
712 set_htc_rxpkt_info(packet, skb, skb->data,
713 ATH6KL_AMSDU_BUFFER_SIZE, 0);
714 spin_lock_bh(&ar->lock);
715 list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
716 spin_unlock_bh(&ar->lock);
717 count--;
718 }
719}
720
721/*
722 * Callback to allocate a receive buffer for a pending packet. We use a
723 * pre-allocated list of buffers of maximum AMSDU size (4K).
724 */
725struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
726 enum htc_endpoint_id endpoint,
727 int len)
728{
729 struct ath6kl *ar = target->dev->ar;
730 struct htc_packet *packet = NULL;
731 struct list_head *pkt_pos;
732 int refill_cnt = 0, depth = 0;
733
734 ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
735 __func__, endpoint, len);
736
737 if ((len <= ATH6KL_BUFFER_SIZE) ||
738 (len > ATH6KL_AMSDU_BUFFER_SIZE))
739 return NULL;
740
741 spin_lock_bh(&ar->lock);
742
743 if (list_empty(&ar->amsdu_rx_buffer_queue)) {
744 spin_unlock_bh(&ar->lock);
745 refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
746 goto refill_buf;
747 }
748
749 packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
750 struct htc_packet, list);
751 list_del(&packet->list);
752 list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
753 depth++;
754
755 refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
756 spin_unlock_bh(&ar->lock);
757
758 /* set actual endpoint ID */
759 packet->endpoint = endpoint;
760
761refill_buf:
762 if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
763 ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);
764
765 return packet;
766}
767
768static void aggr_slice_amsdu(struct aggr_info *p_aggr,
769 struct rxtid *rxtid, struct sk_buff *skb)
770{
771 struct sk_buff *new_skb;
772 struct ethhdr *hdr;
773 u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
774 u8 *framep;
775
776 mac_hdr_len = sizeof(struct ethhdr);
777 framep = skb->data + mac_hdr_len;
778 amsdu_len = skb->len - mac_hdr_len;
779
780 while (amsdu_len > mac_hdr_len) {
781 hdr = (struct ethhdr *) framep;
782 payload_8023_len = ntohs(hdr->h_proto);
783
784 if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
785 payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
786 ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
787 payload_8023_len);
788 break;
789 }
790
791 frame_8023_len = payload_8023_len + mac_hdr_len;
792 new_skb = aggr_get_free_skb(p_aggr);
793 if (!new_skb) {
794 ath6kl_err("no buffer available\n");
795 break;
796 }
797
798 memcpy(new_skb->data, framep, frame_8023_len);
799 skb_put(new_skb, frame_8023_len);
800 if (ath6kl_wmi_dot3_2_dix(new_skb)) {
801 ath6kl_err("dot3_2_dix error\n");
802 dev_kfree_skb(new_skb);
803 break;
804 }
805
806 skb_queue_tail(&rxtid->q, new_skb);
807
808 /* Is this the last subframe within this aggregate ? */
809 if ((amsdu_len - frame_8023_len) == 0)
810 break;
811
 812 /* Add the length of the A-MSDU subframe padding bytes -
 813  * round up to the nearest 4-byte boundary.
 814  */
 815 frame_8023_len = ALIGN(frame_8023_len, 4);
816
817 framep += frame_8023_len;
818 amsdu_len -= frame_8023_len;
819 }
820
821 dev_kfree_skb(skb);
822}
823
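A-MSDU subframes are padded so that each one starts on a 4-byte boundary;
the stride to the next subframe is the aligned 802.3 frame length. A quick
worked example of the alignment step above (the lengths are made up):

	u16 frame_8023_len = 53;		/* 14-byte hdr + 39-byte payload */
	u16 stride = ALIGN(frame_8023_len, 4);	/* 56 */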
824static void aggr_deque_frms(struct aggr_info *p_aggr, u8 tid,
825 u16 seq_no, u8 order)
826{
827 struct sk_buff *skb;
828 struct rxtid *rxtid;
829 struct skb_hold_q *node;
830 u16 idx, idx_end, seq_end;
831 struct rxtid_stats *stats;
832
833 if (!p_aggr)
834 return;
835
836 rxtid = &p_aggr->rx_tid[tid];
837 stats = &p_aggr->stat[tid];
838
839 idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
840
841 /*
 842  * idx_end is typically the last possible frame in the window,
 843  * but changes to the given seq_no when a BAR comes in. If
 844  * seq_no is non-zero, we will go up to that and stop.
 845  * Note: the last seq_no in the current window occupies the
 846  * same index position as the index just before the start.
 847  * An important point: if win_sz is 7, for a seq_no space of
 848  * 4095 there would be holes when the sequence number wraps
 849  * around. The target should judiciously choose win_sz based
 850  * on this condition (TID_WINDOW_SZ = 2 x win_sz; win_sz of
 851  * 2, 4, 8 or 16 works fine for 4095).
 852  * We must dequeue from "idx" to "idx_end", inclusive.
853 */
854 seq_end = seq_no ? seq_no : rxtid->seq_next;
855 idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);
856
857 spin_lock_bh(&rxtid->lock);
858
859 do {
860 node = &rxtid->hold_q[idx];
861 if ((order == 1) && (!node->skb))
862 break;
863
864 if (node->skb) {
865 if (node->is_amsdu)
866 aggr_slice_amsdu(p_aggr, rxtid, node->skb);
867 else
868 skb_queue_tail(&rxtid->q, node->skb);
869 node->skb = NULL;
870 } else
871 stats->num_hole++;
872
873 rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
874 idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
875 } while (idx != idx_end);
876
877 spin_unlock_bh(&rxtid->lock);
878
879 stats->num_delivered += skb_queue_len(&rxtid->q);
880
881 while ((skb = skb_dequeue(&rxtid->q)))
882 ath6kl_deliver_frames_to_nw_stack(p_aggr->dev, skb);
883}
884
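AGGR_WIN_IDX() maps a sequence number onto the circular hold queue. A minimal
sketch, assuming the usual definition AGGR_WIN_IDX(seq, sz) == (seq) % (sz)
for a hold queue sized at twice the block-ack window:

	u16 hold_q_sz = 16;		/* e.g. TID_WINDOW_SZ(8) */
	u16 idx  = 4090 % hold_q_sz;	/* slot 10 */
	u16 idx2 = 4095 % hold_q_sz;	/* slot 15 */
	u16 idx3 = 0 % hold_q_sz;	/* slot 0, right after slot 15 */

so the window stays contiguous in the hold queue across the 4095 -> 0
sequence-number wrap.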
885static bool aggr_process_recv_frm(struct aggr_info *agg_info, u8 tid,
886 u16 seq_no,
887 bool is_amsdu, struct sk_buff *frame)
888{
889 struct rxtid *rxtid;
890 struct rxtid_stats *stats;
891 struct sk_buff *skb;
892 struct skb_hold_q *node;
893 u16 idx, st, cur, end;
894 bool is_queued = false;
895 u16 extended_end;
896
897 rxtid = &agg_info->rx_tid[tid];
898 stats = &agg_info->stat[tid];
899
900 stats->num_into_aggr++;
901
902 if (!rxtid->aggr) {
903 if (is_amsdu) {
904 aggr_slice_amsdu(agg_info, rxtid, frame);
905 is_queued = true;
906 stats->num_amsdu++;
907 while ((skb = skb_dequeue(&rxtid->q)))
908 ath6kl_deliver_frames_to_nw_stack(agg_info->dev,
909 skb);
910 }
911 return is_queued;
912 }
913
914 /* Check the incoming sequence no, if it's in the window */
915 st = rxtid->seq_next;
916 cur = seq_no;
917 end = (st + rxtid->hold_q_sz-1) & ATH6KL_MAX_SEQ_NO;
918
919 if (((st < end) && (cur < st || cur > end)) ||
920 ((st > end) && (cur > end) && (cur < st))) {
921 extended_end = (end + rxtid->hold_q_sz - 1) &
922 ATH6KL_MAX_SEQ_NO;
923
924 if (((end < extended_end) &&
925 (cur < end || cur > extended_end)) ||
926 ((end > extended_end) && (cur > extended_end) &&
927 (cur < end))) {
928 aggr_deque_frms(agg_info, tid, 0, 0);
929 if (cur >= rxtid->hold_q_sz - 1)
930 rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
931 else
932 rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
933 (rxtid->hold_q_sz - 2 - cur);
934 } else {
935 /*
936 * Dequeue only those frames that are outside the
937 * new shifted window.
938 */
939 if (cur >= rxtid->hold_q_sz - 1)
940 st = cur - (rxtid->hold_q_sz - 1);
941 else
942 st = ATH6KL_MAX_SEQ_NO -
943 (rxtid->hold_q_sz - 2 - cur);
944
945 aggr_deque_frms(agg_info, tid, st, 0);
946 }
947
948 stats->num_oow++;
949 }
950
951 idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);
952
953 node = &rxtid->hold_q[idx];
954
955 spin_lock_bh(&rxtid->lock);
956
957 /*
 958  * Is the current frame a duplicate, or something beyond our
 959  * window (the hold_q, which is already 2x the window size)?
 960  *
 961  * 1. A duplicate is easy - drop the incoming frame.
 962  * 2. Not falling in the current sliding window:
 963  * 2a. Does frame_seq_no precede the current tid_seq_no?
 964  *     -> Drop the frame; perhaps the sender did not get our ACK.
 965  *        This is taken care of above.
 966  * 2b. Is frame_seq_no beyond window(st, TID_WINDOW_SZ)?
 967  *     -> Taken care of above, by moving the window forward.
968 */
969 dev_kfree_skb(node->skb);
970 stats->num_dups++;
971
972 node->skb = frame;
973 is_queued = true;
974 node->is_amsdu = is_amsdu;
975 node->seq_no = seq_no;
976
977 if (node->is_amsdu)
978 stats->num_amsdu++;
979 else
980 stats->num_mpdu++;
981
982 spin_unlock_bh(&rxtid->lock);
983
984 aggr_deque_frms(agg_info, tid, 0, 1);
985
986 if (agg_info->timer_scheduled)
987 rxtid->progress = true;
988 else
989 for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) {
990 if (rxtid->hold_q[idx].skb) {
991 /*
992 * There is a frame in the queue and no
993 * timer so start a timer to ensure that
994 * the frame doesn't remain stuck
995 * forever.
996 */
997 agg_info->timer_scheduled = true;
 998 mod_timer(&agg_info->timer,
 999 	  jiffies +
 1000 	  msecs_to_jiffies(AGGR_RX_TIMEOUT));
1001 rxtid->progress = false;
1002 rxtid->timer_mon = true;
1003 break;
1004 }
1005 }
1006
1007 return is_queued;
1008}
1009
1010void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1011{
1012 struct ath6kl *ar = target->dev->ar;
1013 struct sk_buff *skb = packet->pkt_cntxt;
1014 struct wmi_rx_meta_v2 *meta;
1015 struct wmi_data_hdr *dhdr;
1016 int min_hdr_len;
1017 u8 meta_type, dot11_hdr = 0;
1018 int status = packet->status;
1019 enum htc_endpoint_id ept = packet->endpoint;
1020 bool is_amsdu, prev_ps, ps_state = false;
1021 struct ath6kl_sta *conn = NULL;
1022 struct sk_buff *skb1 = NULL;
1023 struct ethhdr *datap = NULL;
1024 u16 seq_no, offset;
1025 u8 tid;
1026
1027 ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
1028 "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
1029 __func__, ar, ept, skb, packet->buf,
1030 packet->act_len, status);
1031
1032 if (status || !(skb->data + HTC_HDR_LENGTH)) {
1033 ar->net_stats.rx_errors++;
1034 dev_kfree_skb(skb);
1035 return;
1036 }
1037
1038 /*
1039 * Take lock to protect buffer counts and adaptive power throughput
1040 * state.
1041 */
1042 spin_lock_bh(&ar->lock);
1043
1044 ar->net_stats.rx_packets++;
1045 ar->net_stats.rx_bytes += packet->act_len;
1046
1047 skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
1048 skb_pull(skb, HTC_HDR_LENGTH);
1049
1050 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, skb->data, skb->len);
1051
1052 spin_unlock_bh(&ar->lock);
1053
1054 skb->dev = ar->net_dev;
1055
1056 if (!test_bit(WMI_ENABLED, &ar->flag)) {
1057 if (EPPING_ALIGNMENT_PAD > 0)
1058 skb_pull(skb, EPPING_ALIGNMENT_PAD);
1059 ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
1060 return;
1061 }
1062
1063 if (ept == ar->ctrl_ep) {
1064 ath6kl_wmi_control_rx(ar->wmi, skb);
1065 return;
1066 }
1067
1068 min_hdr_len = sizeof(struct ethhdr);
1069 min_hdr_len += sizeof(struct wmi_data_hdr) +
1070 sizeof(struct ath6kl_llc_snap_hdr);
1071
1072 dhdr = (struct wmi_data_hdr *) skb->data;
1073
1074 /*
1075 * In the case of AP mode we may receive NULL data frames
1076 * that do not have LLC hdr. They are 16 bytes in size.
1077 * Allow these frames in the AP mode.
1078 */
1079 if (ar->nw_type != AP_NETWORK &&
1080 ((packet->act_len < min_hdr_len) ||
1081 (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
1082 ath6kl_info("frame len is too short or too long\n");
1083 ar->net_stats.rx_errors++;
1084 ar->net_stats.rx_length_errors++;
1085 dev_kfree_skb(skb);
1086 return;
1087 }
1088
1089 /* Get the Power save state of the STA */
1090 if (ar->nw_type == AP_NETWORK) {
1091 meta_type = wmi_data_hdr_get_meta(dhdr);
1092
1093 ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
1094 WMI_DATA_HDR_PS_MASK);
1095
1096 offset = sizeof(struct wmi_data_hdr);
1097
1098 switch (meta_type) {
1099 case 0:
1100 break;
1101 case WMI_META_VERSION_1:
1102 offset += sizeof(struct wmi_rx_meta_v1);
1103 break;
1104 case WMI_META_VERSION_2:
1105 offset += sizeof(struct wmi_rx_meta_v2);
1106 break;
1107 default:
1108 break;
1109 }
1110
1111 datap = (struct ethhdr *) (skb->data + offset);
1112 conn = ath6kl_find_sta(ar, datap->h_source);
1113
1114 if (!conn) {
1115 dev_kfree_skb(skb);
1116 return;
1117 }
1118
1119 /*
1120 * If there is a change in PS state of the STA,
1121 * take appropriate steps:
1122 *
 1123  * 1. If Sleep-->Awake, flush the psq for the STA and
 1124  *    clear the PVB for the STA.
 1125  * 2. If Awake-->Sleep, start queueing frames for
 1126  *    the STA.
1127 */
1128 prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);
1129
1130 if (ps_state)
1131 conn->sta_flags |= STA_PS_SLEEP;
1132 else
1133 conn->sta_flags &= ~STA_PS_SLEEP;
1134
1135 if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
1136 if (!(conn->sta_flags & STA_PS_SLEEP)) {
1137 struct sk_buff *skbuff = NULL;
1138
1139 spin_lock_bh(&conn->psq_lock);
1140 while ((skbuff = skb_dequeue(&conn->psq))
1141 != NULL) {
1142 spin_unlock_bh(&conn->psq_lock);
1143 ath6kl_data_tx(skbuff, ar->net_dev);
1144 spin_lock_bh(&conn->psq_lock);
1145 }
1146 spin_unlock_bh(&conn->psq_lock);
1147 /* Clear the PVB for this STA */
1148 ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0);
1149 }
1150 }
1151
1152 /* drop NULL data frames here */
1153 if ((packet->act_len < min_hdr_len) ||
1154 (packet->act_len >
1155 WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
1156 dev_kfree_skb(skb);
1157 return;
1158 }
1159 }
1160
1161 is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
1162 tid = wmi_data_hdr_get_up(dhdr);
1163 seq_no = wmi_data_hdr_get_seqno(dhdr);
1164 meta_type = wmi_data_hdr_get_meta(dhdr);
1165 dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
1166
1167 ath6kl_wmi_data_hdr_remove(ar->wmi, skb);
1168
1169 switch (meta_type) {
1170 case WMI_META_VERSION_1:
1171 skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
1172 break;
1173 case WMI_META_VERSION_2:
1174 meta = (struct wmi_rx_meta_v2 *) skb->data;
1175 if (meta->csum_flags & 0x1) {
1176 skb->ip_summed = CHECKSUM_COMPLETE;
1177 skb->csum = (__force __wsum) meta->csum;
1178 }
1179 skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
1180 break;
1181 default:
1182 break;
1183 }
1184
1185 if (dot11_hdr)
1186 status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
1187 else if (!is_amsdu)
1188 status = ath6kl_wmi_dot3_2_dix(skb);
1189
1190 if (status) {
1191 /*
1192 * Drop frames that could not be processed (lack of
1193 * memory, etc.)
1194 */
1195 dev_kfree_skb(skb);
1196 return;
1197 }
1198
1199 if (!(ar->net_dev->flags & IFF_UP)) {
1200 dev_kfree_skb(skb);
1201 return;
1202 }
1203
1204 if (ar->nw_type == AP_NETWORK) {
1205 datap = (struct ethhdr *) skb->data;
1206 if (is_multicast_ether_addr(datap->h_dest))
1207 /*
1208 * Bcast/Mcast frames should be sent to the
1209 * OS stack as well as on the air.
1210 */
1211 skb1 = skb_copy(skb, GFP_ATOMIC);
1212 else {
1213 /*
1214 * Search for a connected STA with dstMac
1215 * as the Mac address. If found send the
1216 * frame to it on the air else send the
1217 * frame up the stack.
1218 */
1219 struct ath6kl_sta *conn = NULL;
1220 conn = ath6kl_find_sta(ar, datap->h_dest);
1221
1222 if (conn && ar->intra_bss) {
1223 skb1 = skb;
1224 skb = NULL;
1225 } else if (conn && !ar->intra_bss) {
1226 dev_kfree_skb(skb);
1227 skb = NULL;
1228 }
1229 }
1230 if (skb1)
1231 ath6kl_data_tx(skb1, ar->net_dev);
1232 }
1233
1234 if (!aggr_process_recv_frm(ar->aggr_cntxt, tid, seq_no,
1235 is_amsdu, skb))
1236 ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
1237}
1238
1239static void aggr_timeout(unsigned long arg)
1240{
1241 u8 i, j;
1242 struct aggr_info *p_aggr = (struct aggr_info *) arg;
1243 struct rxtid *rxtid;
1244 struct rxtid_stats *stats;
1245
1246 for (i = 0; i < NUM_OF_TIDS; i++) {
1247 rxtid = &p_aggr->rx_tid[i];
1248 stats = &p_aggr->stat[i];
1249
1250 if (!rxtid->aggr || !rxtid->timer_mon || rxtid->progress)
1251 continue;
1252
1253 /*
 1254  * FIXME: these timeouts happen quite frequently, something
 1255  * like once every 60 seconds. Investigate why.
1256 */
1257 stats->num_timeouts++;
1258 ath6kl_dbg(ATH6KL_DBG_AGGR,
1259 "aggr timeout (st %d end %d)\n",
1260 rxtid->seq_next,
1261 ((rxtid->seq_next + rxtid->hold_q_sz-1) &
1262 ATH6KL_MAX_SEQ_NO));
1263 aggr_deque_frms(p_aggr, i, 0, 0);
1264 }
1265
1266 p_aggr->timer_scheduled = false;
1267
1268 for (i = 0; i < NUM_OF_TIDS; i++) {
1269 rxtid = &p_aggr->rx_tid[i];
1270
1271 if (rxtid->aggr && rxtid->hold_q) {
1272 for (j = 0; j < rxtid->hold_q_sz; j++) {
1273 if (rxtid->hold_q[j].skb) {
1274 p_aggr->timer_scheduled = true;
1275 rxtid->timer_mon = true;
1276 rxtid->progress = false;
1277 break;
1278 }
1279 }
1280
1281 if (j >= rxtid->hold_q_sz)
1282 rxtid->timer_mon = false;
1283 }
1284 }
1285
1286 if (p_aggr->timer_scheduled)
1287 mod_timer(&p_aggr->timer,
1288 jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
1289}
1290
1291static void aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid)
1292{
1293 struct rxtid *rxtid;
1294 struct rxtid_stats *stats;
1295
1296 if (!p_aggr || tid >= NUM_OF_TIDS)
1297 return;
1298
1299 rxtid = &p_aggr->rx_tid[tid];
1300 stats = &p_aggr->stat[tid];
1301
1302 if (rxtid->aggr)
1303 aggr_deque_frms(p_aggr, tid, 0, 0);
1304
1305 rxtid->aggr = false;
1306 rxtid->progress = false;
1307 rxtid->timer_mon = false;
1308 rxtid->win_sz = 0;
1309 rxtid->seq_next = 0;
1310 rxtid->hold_q_sz = 0;
1311
1312 kfree(rxtid->hold_q);
1313 rxtid->hold_q = NULL;
1314
1315 memset(stats, 0, sizeof(struct rxtid_stats));
1316}
1317
1318void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no, u8 win_sz)
1319{
1320 struct aggr_info *p_aggr = ar->aggr_cntxt;
1321 struct rxtid *rxtid;
1322 struct rxtid_stats *stats;
1323 u16 hold_q_size;
1324
1325 if (!p_aggr)
1326 return;
1327
1328 rxtid = &p_aggr->rx_tid[tid];
1329 stats = &p_aggr->stat[tid];
1330
1331 if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
1332 ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
1333 __func__, win_sz, tid);
1334
1335 if (rxtid->aggr)
1336 aggr_delete_tid_state(p_aggr, tid);
1337
1338 rxtid->seq_next = seq_no;
1339 hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
1340 rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
1341 if (!rxtid->hold_q)
1342 return;
1343
1344 rxtid->win_sz = win_sz;
1345 rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
1346 if (!skb_queue_empty(&rxtid->q))
1347 return;
1348
1349 rxtid->aggr = true;
1350}
1351
1352struct aggr_info *aggr_init(struct net_device *dev)
1353{
1354 struct aggr_info *p_aggr = NULL;
1355 struct rxtid *rxtid;
1356 u8 i;
1357
1358 p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
1359 if (!p_aggr) {
1360 ath6kl_err("failed to alloc memory for aggr_node\n");
1361 return NULL;
1362 }
1363
1364 p_aggr->aggr_sz = AGGR_SZ_DEFAULT;
1365 p_aggr->dev = dev;
1366 init_timer(&p_aggr->timer);
1367 p_aggr->timer.function = aggr_timeout;
1368 p_aggr->timer.data = (unsigned long) p_aggr;
1369
1370 p_aggr->timer_scheduled = false;
1371 skb_queue_head_init(&p_aggr->free_q);
1372
1373 ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);
1374
1375 for (i = 0; i < NUM_OF_TIDS; i++) {
1376 rxtid = &p_aggr->rx_tid[i];
1377 rxtid->aggr = false;
1378 rxtid->progress = false;
1379 rxtid->timer_mon = false;
1380 skb_queue_head_init(&rxtid->q);
1381 spin_lock_init(&rxtid->lock);
1382 }
1383
1384 return p_aggr;
1385}
1386
1387void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid)
1388{
1389 struct aggr_info *p_aggr = ar->aggr_cntxt;
1390 struct rxtid *rxtid;
1391
1392 if (!p_aggr)
1393 return;
1394
1395 rxtid = &p_aggr->rx_tid[tid];
1396
1397 if (rxtid->aggr)
1398 aggr_delete_tid_state(p_aggr, tid);
1399}
1400
1401void aggr_reset_state(struct aggr_info *aggr_info)
1402{
1403 u8 tid;
1404
1405 for (tid = 0; tid < NUM_OF_TIDS; tid++)
1406 aggr_delete_tid_state(aggr_info, tid);
1407}
1408
1409/* clean up our amsdu buffer list */
1410void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
1411{
1412 struct htc_packet *packet, *tmp_pkt;
1413
1414 spin_lock_bh(&ar->lock);
1415 if (list_empty(&ar->amsdu_rx_buffer_queue)) {
1416 spin_unlock_bh(&ar->lock);
1417 return;
1418 }
1419
1420 list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
1421 list) {
1422 list_del(&packet->list);
1423 spin_unlock_bh(&ar->lock);
1424 dev_kfree_skb(packet->pkt_cntxt);
1425 spin_lock_bh(&ar->lock);
1426 }
1427
1428 spin_unlock_bh(&ar->lock);
1429}
1430
1431void aggr_module_destroy(struct aggr_info *aggr_info)
1432{
1433 struct rxtid *rxtid;
1434 u8 i, k;
1435
1436 if (!aggr_info)
1437 return;
1438
1439 if (aggr_info->timer_scheduled) {
1440 del_timer(&aggr_info->timer);
1441 aggr_info->timer_scheduled = false;
1442 }
1443
1444 for (i = 0; i < NUM_OF_TIDS; i++) {
1445 rxtid = &aggr_info->rx_tid[i];
1446 if (rxtid->hold_q) {
1447 for (k = 0; k < rxtid->hold_q_sz; k++)
1448 dev_kfree_skb(rxtid->hold_q[k].skb);
1449 kfree(rxtid->hold_q);
1450 }
1451
1452 skb_queue_purge(&rxtid->q);
1453 }
1454
1455 skb_queue_purge(&aggr_info->free_q);
1456 kfree(aggr_info);
1457}
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
new file mode 100644
index 000000000000..f5aa33dd4c42
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -0,0 +1,2743 @@
1/*
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/ip.h>
18#include "core.h"
19#include "debug.h"
20
21static int ath6kl_wmi_sync_point(struct wmi *wmi);
22
23static const s32 wmi_rate_tbl[][2] = {
24 /* {W/O SGI, with SGI} */
25 {1000, 1000},
26 {2000, 2000},
27 {5500, 5500},
28 {11000, 11000},
29 {6000, 6000},
30 {9000, 9000},
31 {12000, 12000},
32 {18000, 18000},
33 {24000, 24000},
34 {36000, 36000},
35 {48000, 48000},
36 {54000, 54000},
37 {6500, 7200},
38 {13000, 14400},
39 {19500, 21700},
40 {26000, 28900},
41 {39000, 43300},
42 {52000, 57800},
43 {58500, 65000},
44 {65000, 72200},
45 {13500, 15000},
46 {27000, 30000},
47 {40500, 45000},
48 {54000, 60000},
49 {81000, 90000},
50 {108000, 120000},
51 {121500, 135000},
52 {135000, 150000},
53 {0, 0}
54};
55
56/* 802.1d to AC mapping. Refer pg 57 of WMM-test-plan-v1.2 */
57static const u8 up_to_ac[] = {
58 WMM_AC_BE,
59 WMM_AC_BK,
60 WMM_AC_BK,
61 WMM_AC_BE,
62 WMM_AC_VI,
63 WMM_AC_VI,
64 WMM_AC_VO,
65 WMM_AC_VO,
66};
67
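The table folds the eight 802.1D user priorities into the four WMM access
categories: priorities 1 and 2 map to background, 0 and 3 to best effort,
4 and 5 to video, and 6 and 7 to voice. A quick check of the mapping:

	u8 ac_bkgnd = up_to_ac[1];	/* WMM_AC_BK */
	u8 ac_beff  = up_to_ac[3];	/* WMM_AC_BE */
	u8 ac_voice = up_to_ac[7];	/* WMM_AC_VO */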
68void ath6kl_wmi_set_control_ep(struct wmi *wmi, enum htc_endpoint_id ep_id)
69{
70 if (WARN_ON(ep_id == ENDPOINT_UNUSED || ep_id >= ENDPOINT_MAX))
71 return;
72
73 wmi->ep_id = ep_id;
74}
75
76enum htc_endpoint_id ath6kl_wmi_get_control_ep(struct wmi *wmi)
77{
78 return wmi->ep_id;
79}
80
81/* Performs DIX to 802.3 encapsulation for transmit packets.
 82 * Assumes the entire DIX header is contiguous and that there is
 83 * enough room in the buffer for an 802.3 mac header and LLC+SNAP headers.
84 */
85int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb)
86{
87 struct ath6kl_llc_snap_hdr *llc_hdr;
88 struct ethhdr *eth_hdr;
89 size_t new_len;
90 __be16 type;
91 u8 *datap;
92 u16 size;
93
94 if (WARN_ON(skb == NULL))
95 return -EINVAL;
96
97 size = sizeof(struct ath6kl_llc_snap_hdr) + sizeof(struct wmi_data_hdr);
98 if (skb_headroom(skb) < size)
99 return -ENOMEM;
100
101 eth_hdr = (struct ethhdr *) skb->data;
102 type = eth_hdr->h_proto;
103
104 if (!is_ethertype(be16_to_cpu(type))) {
105 ath6kl_dbg(ATH6KL_DBG_WMI,
106 "%s: pkt is already in 802.3 format\n", __func__);
107 return 0;
108 }
109
110 new_len = skb->len - sizeof(*eth_hdr) + sizeof(*llc_hdr);
111
112 skb_push(skb, sizeof(struct ath6kl_llc_snap_hdr));
113 datap = skb->data;
114
115 eth_hdr->h_proto = cpu_to_be16(new_len);
116
117 memcpy(datap, eth_hdr, sizeof(*eth_hdr));
118
119 llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap + sizeof(*eth_hdr));
120 llc_hdr->dsap = 0xAA;
121 llc_hdr->ssap = 0xAA;
122 llc_hdr->cntl = 0x03;
123 llc_hdr->org_code[0] = 0x0;
124 llc_hdr->org_code[1] = 0x0;
125 llc_hdr->org_code[2] = 0x0;
126 llc_hdr->eth_type = type;
127
128 return 0;
129}
130
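The resulting transformation, sketched (an editorial illustration, not part
of the patch):

	/*
	 * DIX in:    | dst(6) | src(6) | ethertype(2) | payload |
	 * 802.3 out: | dst(6) | src(6) | length(2) |
	 *            | AA AA 03 00 00 00 | ethertype(2) | payload |
	 *
	 * where length = payload length + sizeof(LLC/SNAP header).
	 */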
131static int ath6kl_wmi_meta_add(struct wmi *wmi, struct sk_buff *skb,
132 u8 *version, void *tx_meta_info)
133{
134 struct wmi_tx_meta_v1 *v1;
135 struct wmi_tx_meta_v2 *v2;
136
137 if (WARN_ON(skb == NULL || version == NULL))
138 return -EINVAL;
139
140 switch (*version) {
141 case WMI_META_VERSION_1:
142 skb_push(skb, WMI_MAX_TX_META_SZ);
143 v1 = (struct wmi_tx_meta_v1 *) skb->data;
144 v1->pkt_id = 0;
145 v1->rate_plcy_id = 0;
146 *version = WMI_META_VERSION_1;
147 break;
148 case WMI_META_VERSION_2:
149 skb_push(skb, WMI_MAX_TX_META_SZ);
150 v2 = (struct wmi_tx_meta_v2 *) skb->data;
151 memcpy(v2, (struct wmi_tx_meta_v2 *) tx_meta_info,
152 sizeof(struct wmi_tx_meta_v2));
153 break;
154 }
155
156 return 0;
157}
158
159int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
160 u8 msg_type, bool more_data,
161 enum wmi_data_hdr_data_type data_type,
162 u8 meta_ver, void *tx_meta_info)
163{
164 struct wmi_data_hdr *data_hdr;
165 int ret;
166
167 if (WARN_ON(skb == NULL))
168 return -EINVAL;
169
170 ret = ath6kl_wmi_meta_add(wmi, skb, &meta_ver, tx_meta_info);
171 if (ret)
172 return ret;
173
174 skb_push(skb, sizeof(struct wmi_data_hdr));
175
176 data_hdr = (struct wmi_data_hdr *)skb->data;
177 memset(data_hdr, 0, sizeof(struct wmi_data_hdr));
178
179 data_hdr->info = msg_type << WMI_DATA_HDR_MSG_TYPE_SHIFT;
180 data_hdr->info |= data_type << WMI_DATA_HDR_DATA_TYPE_SHIFT;
181
182 if (more_data)
183 data_hdr->info |=
184 WMI_DATA_HDR_MORE_MASK << WMI_DATA_HDR_MORE_SHIFT;
185
186 data_hdr->info2 = cpu_to_le16(meta_ver << WMI_DATA_HDR_META_SHIFT);
187 data_hdr->info3 = 0;
188
189 return 0;
190}
191
192static u8 ath6kl_wmi_determine_user_priority(u8 *pkt, u32 layer2_pri)
193{
194 struct iphdr *ip_hdr = (struct iphdr *) pkt;
195 u8 ip_pri;
196
197 /*
198 * Determine IPTOS priority
199 *
 200  * IP-TOS - 8 bits
 201  *          : DSCP (6 bits) ECN (2 bits)
 202  *          : DSCP - P2 P1 P0 X X X
 203  * where (P2 P1 P0) form the 802.1D priority
204 */
205 ip_pri = ip_hdr->tos >> 5;
206 ip_pri &= 0x7;
207
208 if ((layer2_pri & 0x7) > ip_pri)
209 return (u8) layer2_pri & 0x7;
210 else
211 return ip_pri;
212}
213
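A worked example of the precedence extraction above: a TOS byte of 0xB8
(DSCP 46, Expedited Forwarding) yields priority 5, which then competes with
the layer-2 priority:

	u8 tos = 0xB8;			/* 1011 1000 */
	u8 ip_pri = (tos >> 5) & 0x7;	/* 101 = 5 */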
214int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb,
215 u32 layer2_priority, bool wmm_enabled,
216 u8 *ac)
217{
218 struct wmi_data_hdr *data_hdr;
219 struct ath6kl_llc_snap_hdr *llc_hdr;
220 struct wmi_create_pstream_cmd cmd;
221 u32 meta_size, hdr_size;
222 u16 ip_type = IP_ETHERTYPE;
223 u8 stream_exist, usr_pri;
224 u8 traffic_class = WMM_AC_BE;
225 u8 *datap;
226
227 if (WARN_ON(skb == NULL))
228 return -EINVAL;
229
230 datap = skb->data;
231 data_hdr = (struct wmi_data_hdr *) datap;
232
233 meta_size = ((le16_to_cpu(data_hdr->info2) >> WMI_DATA_HDR_META_SHIFT) &
234 WMI_DATA_HDR_META_MASK) ? WMI_MAX_TX_META_SZ : 0;
235
236 if (!wmm_enabled) {
237 /* If WMM is disabled all traffic goes as BE traffic */
238 usr_pri = 0;
239 } else {
240 hdr_size = sizeof(struct ethhdr);
241
242 llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap +
243 sizeof(struct
244 wmi_data_hdr) +
245 meta_size + hdr_size);
246
247 if (llc_hdr->eth_type == htons(ip_type)) {
248 /*
249 * Extract the endpoint info from the TOS field
250 * in the IP header.
251 */
252 usr_pri =
253 ath6kl_wmi_determine_user_priority(((u8 *) llc_hdr) +
254 sizeof(struct ath6kl_llc_snap_hdr),
255 layer2_priority);
256 } else
257 usr_pri = layer2_priority & 0x7;
258 }
259
260 /* workaround for WMM S5 */
261 if ((wmi->traffic_class == WMM_AC_VI) &&
262 ((usr_pri == 5) || (usr_pri == 4)))
263 usr_pri = 1;
264
265 /* Convert user priority to traffic class */
266 traffic_class = up_to_ac[usr_pri & 0x7];
267
268 wmi_data_hdr_set_up(data_hdr, usr_pri);
269
270 spin_lock_bh(&wmi->lock);
271 stream_exist = wmi->fat_pipe_exist;
272 spin_unlock_bh(&wmi->lock);
273
274 if (!(stream_exist & (1 << traffic_class))) {
275 memset(&cmd, 0, sizeof(cmd));
276 cmd.traffic_class = traffic_class;
277 cmd.user_pri = usr_pri;
278 cmd.inactivity_int =
279 cpu_to_le32(WMI_IMPLICIT_PSTREAM_INACTIVITY_INT);
280 /* Implicit streams are created with TSID 0xFF */
281 cmd.tsid = WMI_IMPLICIT_PSTREAM;
282 ath6kl_wmi_create_pstream_cmd(wmi, &cmd);
283 }
284
285 *ac = traffic_class;
286
287 return 0;
288}
289
290int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb)
291{
292 struct ieee80211_hdr_3addr *pwh, wh;
293 struct ath6kl_llc_snap_hdr *llc_hdr;
294 struct ethhdr eth_hdr;
295 u32 hdr_size;
296 u8 *datap;
297 __le16 sub_type;
298
299 if (WARN_ON(skb == NULL))
300 return -EINVAL;
301
302 datap = skb->data;
303 pwh = (struct ieee80211_hdr_3addr *) datap;
304
305 sub_type = pwh->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
306
307 memcpy((u8 *) &wh, datap, sizeof(struct ieee80211_hdr_3addr));
308
309 /* Strip off the 802.11 header */
310 if (sub_type == cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
311 hdr_size = roundup(sizeof(struct ieee80211_qos_hdr),
312 sizeof(u32));
313 skb_pull(skb, hdr_size);
314 } else if (sub_type == cpu_to_le16(IEEE80211_STYPE_DATA))
315 skb_pull(skb, sizeof(struct ieee80211_hdr_3addr));
316
317 datap = skb->data;
318 llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap);
319
320 memset(&eth_hdr, 0, sizeof(eth_hdr));
321 eth_hdr.h_proto = llc_hdr->eth_type;
322
323 switch ((le16_to_cpu(wh.frame_control)) &
324 (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
325 case 0:
326 memcpy(eth_hdr.h_dest, wh.addr1, ETH_ALEN);
327 memcpy(eth_hdr.h_source, wh.addr2, ETH_ALEN);
328 break;
329 case IEEE80211_FCTL_TODS:
330 memcpy(eth_hdr.h_dest, wh.addr3, ETH_ALEN);
331 memcpy(eth_hdr.h_source, wh.addr2, ETH_ALEN);
332 break;
333 case IEEE80211_FCTL_FROMDS:
334 memcpy(eth_hdr.h_dest, wh.addr1, ETH_ALEN);
335 memcpy(eth_hdr.h_source, wh.addr3, ETH_ALEN);
336 break;
337 case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
338 break;
339 }
340
341 skb_pull(skb, sizeof(struct ath6kl_llc_snap_hdr));
342 skb_push(skb, sizeof(eth_hdr));
343
344 datap = skb->data;
345
346 memcpy(datap, &eth_hdr, sizeof(eth_hdr));
347
348 return 0;
349}
350
351/*
352 * Performs 802.3 to DIX encapsulation for received packets.
 353 * Assumes the entire 802.3 header is contiguous.
354 */
355int ath6kl_wmi_dot3_2_dix(struct sk_buff *skb)
356{
357 struct ath6kl_llc_snap_hdr *llc_hdr;
358 struct ethhdr eth_hdr;
359 u8 *datap;
360
361 if (WARN_ON(skb == NULL))
362 return -EINVAL;
363
364 datap = skb->data;
365
366 memcpy(&eth_hdr, datap, sizeof(eth_hdr));
367
368 llc_hdr = (struct ath6kl_llc_snap_hdr *) (datap + sizeof(eth_hdr));
369 eth_hdr.h_proto = llc_hdr->eth_type;
370
371 skb_pull(skb, sizeof(struct ath6kl_llc_snap_hdr));
372 datap = skb->data;
373
374 memcpy(datap, &eth_hdr, sizeof(eth_hdr));
375
376 return 0;
377}
378
379int ath6kl_wmi_data_hdr_remove(struct wmi *wmi, struct sk_buff *skb)
380{
381 if (WARN_ON(skb == NULL))
382 return -EINVAL;
383
384 skb_pull(skb, sizeof(struct wmi_data_hdr));
385
386 return 0;
387}
388
389static void ath6kl_wmi_convert_bssinfo_hdr2_to_hdr(struct sk_buff *skb,
390 u8 *datap)
391{
392 struct wmi_bss_info_hdr2 bih2;
393 struct wmi_bss_info_hdr *bih;
394
395 memcpy(&bih2, datap, sizeof(struct wmi_bss_info_hdr2));
396
397 skb_push(skb, 4);
398 bih = (struct wmi_bss_info_hdr *) skb->data;
399
400 bih->ch = bih2.ch;
401 bih->frame_type = bih2.frame_type;
402 bih->snr = bih2.snr;
403 bih->rssi = a_cpu_to_sle16(bih2.snr - 95);
404 bih->ie_mask = cpu_to_le32(le16_to_cpu(bih2.ie_mask));
405 memcpy(bih->bssid, bih2.bssid, ETH_ALEN);
406}
407
408static int ath6kl_wmi_tx_complete_event_rx(u8 *datap, int len)
409{
410 struct tx_complete_msg_v1 *msg_v1;
411 struct wmi_tx_complete_event *evt;
412 int index;
413 u16 size;
414
415 evt = (struct wmi_tx_complete_event *) datap;
416
417 ath6kl_dbg(ATH6KL_DBG_WMI, "comp: %d %d %d\n",
418 evt->num_msg, evt->msg_len, evt->msg_type);
419
420 if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_WMI))
421 return 0;
422
423 for (index = 0; index < evt->num_msg; index++) {
424 size = sizeof(struct wmi_tx_complete_event) +
425 (index * sizeof(struct tx_complete_msg_v1));
426 msg_v1 = (struct tx_complete_msg_v1 *)(datap + size);
427
428 ath6kl_dbg(ATH6KL_DBG_WMI, "msg: %d %d %d %d\n",
429 msg_v1->status, msg_v1->pkt_id,
430 msg_v1->rate_idx, msg_v1->ack_failures);
431 }
432
433 return 0;
434}
435
436static inline struct sk_buff *ath6kl_wmi_get_new_buf(u32 size)
437{
438 struct sk_buff *skb;
439
440 skb = ath6kl_buf_alloc(size);
441 if (!skb)
442 return NULL;
443
444 skb_put(skb, size);
445 if (size)
446 memset(skb->data, 0, size);
447
448 return skb;
449}
450
451/* Send a "simple" wmi command -- one with no arguments */
452static int ath6kl_wmi_simple_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id)
453{
454 struct sk_buff *skb;
455 int ret;
456
457 skb = ath6kl_wmi_get_new_buf(0);
458 if (!skb)
459 return -ENOMEM;
460
461 ret = ath6kl_wmi_cmd_send(wmi, skb, cmd_id, NO_SYNC_WMIFLAG);
462
463 return ret;
464}
465
466static int ath6kl_wmi_ready_event_rx(struct wmi *wmi, u8 *datap, int len)
467{
468 struct wmi_ready_event_2 *ev = (struct wmi_ready_event_2 *) datap;
469
470 if (len < sizeof(struct wmi_ready_event_2))
471 return -EINVAL;
472
473 wmi->ready = true;
474 ath6kl_ready_event(wmi->parent_dev, ev->mac_addr,
475 le32_to_cpu(ev->sw_version),
476 le32_to_cpu(ev->abi_version));
477
478 return 0;
479}
480
481static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len)
482{
483 struct wmi_connect_event *ev;
484 u8 *pie, *peie;
485
486 if (len < sizeof(struct wmi_connect_event))
487 return -EINVAL;
488
489 ev = (struct wmi_connect_event *) datap;
490
491 ath6kl_dbg(ATH6KL_DBG_WMI, "%s: freq %d bssid %pM\n",
492 __func__, ev->ch, ev->bssid);
493
494 /* Start of assoc rsp IEs */
495 pie = ev->assoc_info + ev->beacon_ie_len +
496 ev->assoc_req_len + (sizeof(u16) * 3); /* capinfo, status, aid */
497
498 /* End of assoc rsp IEs */
499 peie = ev->assoc_info + ev->beacon_ie_len + ev->assoc_req_len +
500 ev->assoc_resp_len;
501
502 while (pie < peie) {
503 switch (*pie) {
504 case WLAN_EID_VENDOR_SPECIFIC:
505 if (pie[1] > 3 && pie[2] == 0x00 && pie[3] == 0x50 &&
506 pie[4] == 0xf2 && pie[5] == WMM_OUI_TYPE) {
 507 /* WMM OUI (00:50:F2) */
508 if (pie[1] > 5
509 && pie[6] == WMM_PARAM_OUI_SUBTYPE)
510 wmi->is_wmm_enabled = true;
511 }
512 break;
513 }
514
515 if (wmi->is_wmm_enabled)
516 break;
517
518 pie += pie[1] + 2;
519 }
520
521 ath6kl_connect_event(wmi->parent_dev, le16_to_cpu(ev->ch), ev->bssid,
522 le16_to_cpu(ev->listen_intvl),
523 le16_to_cpu(ev->beacon_intvl),
524 le32_to_cpu(ev->nw_type),
525 ev->beacon_ie_len, ev->assoc_req_len,
526 ev->assoc_resp_len, ev->assoc_info);
527
528 return 0;
529}
530
531static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len)
532{
533 struct wmi_disconnect_event *ev;
534 wmi->traffic_class = 100;
535
536 if (len < sizeof(struct wmi_disconnect_event))
537 return -EINVAL;
538
539 ev = (struct wmi_disconnect_event *) datap;
540
541 wmi->is_wmm_enabled = false;
542 wmi->pair_crypto_type = NONE_CRYPT;
543 wmi->grp_crypto_type = NONE_CRYPT;
544
545 ath6kl_disconnect_event(wmi->parent_dev, ev->disconn_reason,
546 ev->bssid, ev->assoc_resp_len, ev->assoc_info,
547 le16_to_cpu(ev->proto_reason_status));
548
549 return 0;
550}
551
552static int ath6kl_wmi_peer_node_event_rx(struct wmi *wmi, u8 *datap, int len)
553{
554 struct wmi_peer_node_event *ev;
555
556 if (len < sizeof(struct wmi_peer_node_event))
557 return -EINVAL;
558
559 ev = (struct wmi_peer_node_event *) datap;
560
561 if (ev->event_code == PEER_NODE_JOIN_EVENT)
562 ath6kl_dbg(ATH6KL_DBG_WMI, "joined node with mac addr: %pM\n",
563 ev->peer_mac_addr);
564 else if (ev->event_code == PEER_NODE_LEAVE_EVENT)
565 ath6kl_dbg(ATH6KL_DBG_WMI, "left node with mac addr: %pM\n",
566 ev->peer_mac_addr);
567
568 return 0;
569}
570
571static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len)
572{
573 struct wmi_tkip_micerr_event *ev;
574
575 if (len < sizeof(struct wmi_tkip_micerr_event))
576 return -EINVAL;
577
578 ev = (struct wmi_tkip_micerr_event *) datap;
579
580 ath6kl_tkip_micerr_event(wmi->parent_dev, ev->key_id, ev->is_mcast);
581
582 return 0;
583}
584
585static int ath6kl_wlan_parse_beacon(u8 *buf, int frame_len,
586 struct ath6kl_common_ie *cie)
587{
588 u8 *frm, *efrm;
589 u8 elemid_ssid = false;
590
591 frm = buf;
592 efrm = (u8 *) (frm + frame_len);
593
594 /*
595 * beacon/probe response frame format
596 * [8] time stamp
597 * [2] beacon interval
598 * [2] capability information
599 * [tlv] ssid
600 * [tlv] supported rates
601 * [tlv] country information
602 * [tlv] parameter set (FH/DS)
603 * [tlv] erp information
604 * [tlv] extended supported rates
605 * [tlv] WMM
606 * [tlv] WPA or RSN
607 * [tlv] Atheros Advanced Capabilities
608 */
609 if ((efrm - frm) < 12)
610 return -EINVAL;
611
612 memset(cie, 0, sizeof(*cie));
613
614 cie->ie_tstamp = frm;
615 frm += 8;
616 cie->ie_beaconInt = *(u16 *) frm;
617 frm += 2;
618 cie->ie_capInfo = *(u16 *) frm;
619 frm += 2;
620 cie->ie_chan = 0;
621
622 while (frm < efrm) {
623 switch (*frm) {
624 case WLAN_EID_SSID:
625 if (!elemid_ssid) {
626 cie->ie_ssid = frm;
627 elemid_ssid = true;
628 }
629 break;
630 case WLAN_EID_SUPP_RATES:
631 cie->ie_rates = frm;
632 break;
633 case WLAN_EID_COUNTRY:
634 cie->ie_country = frm;
635 break;
636 case WLAN_EID_FH_PARAMS:
637 break;
638 case WLAN_EID_DS_PARAMS:
639 cie->ie_chan = frm[2];
640 break;
641 case WLAN_EID_TIM:
642 cie->ie_tim = frm;
643 break;
644 case WLAN_EID_IBSS_PARAMS:
645 break;
646 case WLAN_EID_EXT_SUPP_RATES:
647 cie->ie_xrates = frm;
648 break;
649 case WLAN_EID_ERP_INFO:
650 if (frm[1] != 1)
651 return -EINVAL;
652
653 cie->ie_erp = frm[2];
654 break;
655 case WLAN_EID_RSN:
656 cie->ie_rsn = frm;
657 break;
658 case WLAN_EID_HT_CAPABILITY:
659 cie->ie_htcap = frm;
660 break;
661 case WLAN_EID_HT_INFORMATION:
662 cie->ie_htop = frm;
663 break;
664 case WLAN_EID_VENDOR_SPECIFIC:
665 if (frm[1] > 3 && frm[2] == 0x00 && frm[3] == 0x50 &&
666 frm[4] == 0xf2) {
 667 /* OUI Type (00:50:F2) */
668
669 if (frm[5] == WPA_OUI_TYPE) {
 670 /* WPA OUI */
671 cie->ie_wpa = frm;
672 } else if (frm[5] == WMM_OUI_TYPE) {
 673 /* WMM OUI */
674 cie->ie_wmm = frm;
675 } else if (frm[5] == WSC_OUT_TYPE) {
 676 /* WSC OUI */
677 cie->ie_wsc = frm;
678 }
679
680 } else if (frm[1] > 3 && frm[2] == 0x00
681 && frm[3] == 0x03 && frm[4] == 0x7f
682 && frm[5] == ATH_OUI_TYPE) {
683 /* Atheros OUI (00:03:7f) */
684 cie->ie_ath = frm;
685 }
686 break;
687 default:
688 break;
689 }
690 frm += frm[1] + 2;
691 }
692
693 if ((cie->ie_rates == NULL)
694 || (cie->ie_rates[1] > ATH6KL_RATE_MAXSIZE))
695 return -EINVAL;
696
697 if ((cie->ie_ssid == NULL)
698 || (cie->ie_ssid[1] > IEEE80211_MAX_SSID_LEN))
699 return -EINVAL;
700
701 return 0;
702}
703
704static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len)
705{
706 struct bss *bss = NULL;
707 struct wmi_bss_info_hdr *bih;
708 u8 cached_ssid_len = 0;
709 u8 cached_ssid[IEEE80211_MAX_SSID_LEN] = { 0 };
710 u8 beacon_ssid_len = 0;
711 u8 *buf, *ie_ssid;
712 u8 *ni_buf;
713 int buf_len;
714
715 int ret;
716
717 if (len <= sizeof(struct wmi_bss_info_hdr))
718 return -EINVAL;
719
720 bih = (struct wmi_bss_info_hdr *) datap;
721 bss = wlan_find_node(&wmi->parent_dev->scan_table, bih->bssid);
722
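/*
 * An RSSI above zero is not a plausible signal level; reuse the
 * cached value for a known BSS and drop the event otherwise.
 */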
723 if (a_sle16_to_cpu(bih->rssi) > 0) {
724 if (bss == NULL)
725 return 0;
726 else
727 bih->rssi = a_cpu_to_sle16(bss->ni_rssi);
728 }
729
730 buf = datap + sizeof(struct wmi_bss_info_hdr);
731 len -= sizeof(struct wmi_bss_info_hdr);
732
733 ath6kl_dbg(ATH6KL_DBG_WMI,
734 "bss info evt - ch %u, rssi %02x, bssid \"%pM\"\n",
735 bih->ch, a_sle16_to_cpu(bih->rssi), bih->bssid);
736
737 if (bss != NULL) {
738 /*
739 * Free up the node. We are about to allocate a new node.
740 * In case of hidden AP, beacon will not have ssid,
741 * but a directed probe response will have it,
742 * so cache the probe-resp-ssid if already present.
743 */
744 if (wmi->is_probe_ssid && (bih->frame_type == BEACON_FTYPE)) {
745 ie_ssid = bss->ni_cie.ie_ssid;
746 if (ie_ssid && (ie_ssid[1] <= IEEE80211_MAX_SSID_LEN) &&
747 (ie_ssid[2] != 0)) {
748 cached_ssid_len = ie_ssid[1];
749 memcpy(cached_ssid, ie_ssid + 2,
750 cached_ssid_len);
751 }
752 }
753
754 /*
755 * Use the current average RSSI of the associated AP, based on
756 * two assumptions:
757 * 1. Most OSes with a GUI update the RSSI by calling
758 * ath6kl_wmi_get_stats_cmd() periodically.
759 * 2. ath6kl_wmi_get_stats_cmd() is called whenever
760 * ath6kl_wmi_startscan_cmd() runs.
761 * The averaged RSSI gives the end user a steadier reading than the
762 * instantaneous value in the scan result, and it keeps the scan
763 * result and the RSSI signal icon in the GUI consistent.
764 */
765 if (memcmp(wmi->parent_dev->bssid, bih->bssid, ETH_ALEN) == 0) {
766 bih->rssi = a_cpu_to_sle16(bss->ni_rssi);
767 bih->snr = bss->ni_snr;
768 }
769
770 wlan_node_reclaim(&wmi->parent_dev->scan_table, bss);
771 }
772
773 /*
774 * beacon/probe response frame format
775 * [8] time stamp
776 * [2] beacon interval
777 * [2] capability information
778 * [tlv] ssid
779 */
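/*
 * SSID_IE_LEN_INDEX is the offset of the SSID length byte:
 * timestamp(8) + beacon interval(2) + cap info(2) + SSID IE id(1).
 */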
780 beacon_ssid_len = buf[SSID_IE_LEN_INDEX];
781
782 /*
783 * If ssid is cached for this hidden AP, then change
784 * buffer len accordingly.
785 */
786 if (wmi->is_probe_ssid && (bih->frame_type == BEACON_FTYPE) &&
787 (cached_ssid_len != 0) &&
788 (beacon_ssid_len == 0 || (cached_ssid_len > beacon_ssid_len &&
789 buf[SSID_IE_LEN_INDEX + 1] == 0))) {
790
791 len += (cached_ssid_len - beacon_ssid_len);
792 }
793
794 bss = wlan_node_alloc(len);
795 if (!bss)
796 return -ENOMEM;
797
798 bss->ni_snr = bih->snr;
799 bss->ni_rssi = a_sle16_to_cpu(bih->rssi);
800
801 if (WARN_ON(!bss->ni_buf)) {
wlan_node_free(bss);
802 return -EINVAL;
}
803
804 /*
805 * In case of hidden AP, beacon will not have ssid,
806 * but a directed probe response will have it,
807 * so place the cached-ssid(probe-resp) in the bss info.
808 */
809 if (wmi->is_probe_ssid && (bih->frame_type == BEACON_FTYPE) &&
810 (cached_ssid_len != 0) &&
811 (beacon_ssid_len == 0 || (beacon_ssid_len &&
812 buf[SSID_IE_LEN_INDEX + 1] == 0))) {
813 ni_buf = bss->ni_buf;
814 buf_len = len;
815
816 /*
817 * Copy the first 14 bytes:
818 * time-stamp(8), beacon-interval(2),
819 * cap-info(2), ssid-id(1), ssid-len(1).
820 */
821 memcpy(ni_buf, buf, SSID_IE_LEN_INDEX + 1);
822
823 ni_buf[SSID_IE_LEN_INDEX] = cached_ssid_len;
824 ni_buf += (SSID_IE_LEN_INDEX + 1);
825
826 buf += (SSID_IE_LEN_INDEX + 1);
827 buf_len -= (SSID_IE_LEN_INDEX + 1);
828
829 memcpy(ni_buf, cached_ssid, cached_ssid_len);
830 ni_buf += cached_ssid_len;
831
832 buf += beacon_ssid_len;
833 buf_len -= beacon_ssid_len;
834
835 if (cached_ssid_len > beacon_ssid_len)
836 buf_len -= (cached_ssid_len - beacon_ssid_len);
837
838 memcpy(ni_buf, buf, buf_len);
839 } else {
840 memcpy(bss->ni_buf, buf, len);
}
841
842 bss->ni_framelen = len;
843
844 ret = ath6kl_wlan_parse_beacon(bss->ni_buf, len, &bss->ni_cie);
845 if (ret) {
846 wlan_node_free(bss);
847 return -EINVAL;
848 }
849
850 /*
851 * Store the frequency in ie_chan, overwriting the channel number
852 * that ath6kl_wlan_parse_beacon() put there.
853 */
854 bss->ni_cie.ie_chan = le16_to_cpu(bih->ch);
855 wlan_setup_node(&wmi->parent_dev->scan_table, bss, bih->bssid);
856
857 return 0;
858}
859
860static int ath6kl_wmi_opt_frame_event_rx(struct wmi *wmi, u8 *datap, int len)
861{
862 struct bss *bss;
863 struct wmi_opt_rx_info_hdr *bih;
864 u8 *buf;
865
866 if (len <= sizeof(struct wmi_opt_rx_info_hdr))
867 return -EINVAL;
868
869 bih = (struct wmi_opt_rx_info_hdr *) datap;
870 buf = datap + sizeof(struct wmi_opt_rx_info_hdr);
871 len -= sizeof(struct wmi_opt_rx_info_hdr);
872
873 ath6kl_dbg(ATH6KL_DBG_WMI, "opt frame event %2.2x:%2.2x\n",
874 bih->bssid[4], bih->bssid[5]);
875
876 bss = wlan_find_node(&wmi->parent_dev->scan_table, bih->bssid);
877 if (bss != NULL) {
878 /* Free up the node. We are about to allocate a new node. */
879 wlan_node_reclaim(&wmi->parent_dev->scan_table, bss);
880 }
881
882 bss = wlan_node_alloc(len);
883 if (!bss)
884 return -ENOMEM;
885
886 bss->ni_snr = bih->snr;
887 bss->ni_cie.ie_chan = le16_to_cpu(bih->ch);
888
889 if (WARN_ON(!bss->ni_buf))
890 return -EINVAL;
891
892 memcpy(bss->ni_buf, buf, len);
893 wlan_setup_node(&wmi->parent_dev->scan_table, bss, bih->bssid);
894
895 return 0;
896}
897
898 /* Inactivity timeout of a fat pipe (pstream) at the target */
899static int ath6kl_wmi_pstream_timeout_event_rx(struct wmi *wmi, u8 *datap,
900 int len)
901{
902 struct wmi_pstream_timeout_event *ev;
903
904 if (len < sizeof(struct wmi_pstream_timeout_event))
905 return -EINVAL;
906
907 ev = (struct wmi_pstream_timeout_event *) datap;
908
909 /*
910 * When a pstream (fat pipe == AC) times out, it means there were
911 * no thin streams within it and it was created implicitly by
912 * data flow on this AC. The inactivity timer runs only for
913 * implicitly created pstreams, so just reset the host state.
914 */
915 spin_lock_bh(&wmi->lock);
916 wmi->stream_exist_for_ac[ev->traffic_class] = 0;
917 wmi->fat_pipe_exist &= ~(1 << ev->traffic_class);
918 spin_unlock_bh(&wmi->lock);
919
920 /* Indicate inactivity to driver layer for this fatpipe (pstream) */
921 ath6kl_indicate_tx_activity(wmi->parent_dev, ev->traffic_class, false);
922
923 return 0;
924}
925
926static int ath6kl_wmi_bitrate_reply_rx(struct wmi *wmi, u8 *datap, int len)
927{
928 struct wmi_bit_rate_reply *reply;
929 s32 rate;
930 u32 sgi, index;
931
932 if (len < sizeof(struct wmi_bit_rate_reply))
933 return -EINVAL;
934
935 reply = (struct wmi_bit_rate_reply *) datap;
936
937 ath6kl_dbg(ATH6KL_DBG_WMI, "rateindex %d\n", reply->rate_index);
938
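/*
 * Bit 7 of rate_index carries the short-guard-interval flag; the low
 * seven bits index into wmi_rate_tbl.
 */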
939 if (reply->rate_index == (s8) RATE_AUTO) {
940 rate = RATE_AUTO;
941 } else {
942 index = reply->rate_index & 0x7f;
943 sgi = (reply->rate_index & 0x80) ? 1 : 0;
944 rate = wmi_rate_tbl[index][sgi];
945 }
946
947 ath6kl_wakeup_event(wmi->parent_dev);
948
949 return 0;
950}
951
952static int ath6kl_wmi_ratemask_reply_rx(struct wmi *wmi, u8 *datap, int len)
953{
954 if (len < sizeof(struct wmi_fix_rates_reply))
955 return -EINVAL;
956
957 ath6kl_wakeup_event(wmi->parent_dev);
958
959 return 0;
960}
961
962static int ath6kl_wmi_ch_list_reply_rx(struct wmi *wmi, u8 *datap, int len)
963{
964 if (len < sizeof(struct wmi_channel_list_reply))
965 return -EINVAL;
966
967 ath6kl_wakeup_event(wmi->parent_dev);
968
969 return 0;
970}
971
972static int ath6kl_wmi_tx_pwr_reply_rx(struct wmi *wmi, u8 *datap, int len)
973{
974 struct wmi_tx_pwr_reply *reply;
975
976 if (len < sizeof(struct wmi_tx_pwr_reply))
977 return -EINVAL;
978
979 reply = (struct wmi_tx_pwr_reply *) datap;
980 ath6kl_txpwr_rx_evt(wmi->parent_dev, reply->dbM);
981
982 return 0;
983}
984
985static int ath6kl_wmi_keepalive_reply_rx(struct wmi *wmi, u8 *datap, int len)
986{
987 if (len < sizeof(struct wmi_get_keepalive_cmd))
988 return -EINVAL;
989
990 ath6kl_wakeup_event(wmi->parent_dev);
991
992 return 0;
993}
994
995static int ath6kl_wmi_scan_complete_rx(struct wmi *wmi, u8 *datap, int len)
996{
997 struct wmi_scan_complete_event *ev;
998
if (len < sizeof(struct wmi_scan_complete_event))
return -EINVAL;

999 ev = (struct wmi_scan_complete_event *) datap;
1000
1001 if (a_sle32_to_cpu(ev->status) == 0)
1002 wlan_refresh_inactive_nodes(wmi->parent_dev);
1003
1004 ath6kl_scan_complete_evt(wmi->parent_dev, a_sle32_to_cpu(ev->status));
1005 wmi->is_probe_ssid = false;
1006
1007 return 0;
1008}
1009
1010/*
1011 * The target is reporting a programming error. This is a
1012 * developer aid only: the target checks just a few common
1013 * violations, and it is the host's responsibility to do all
1014 * error checking. Target behavior after a wmi error event is
1015 * undefined; a reset is recommended.
1016 */
1017static int ath6kl_wmi_error_event_rx(struct wmi *wmi, u8 *datap, int len)
1018{
1019 const char *type = "unknown error";
1020 struct wmi_cmd_error_event *ev;

if (len < sizeof(struct wmi_cmd_error_event))
return -EINVAL;

1021 ev = (struct wmi_cmd_error_event *) datap;
1022
1023 switch (ev->err_code) {
1024 case INVALID_PARAM:
1025 type = "invalid parameter";
1026 break;
1027 case ILLEGAL_STATE:
1028 type = "invalid state";
1029 break;
1030 case INTERNAL_ERROR:
1031 type = "internal error";
1032 break;
1033 }
1034
1035 ath6kl_dbg(ATH6KL_DBG_WMI, "programming error, cmd=%d %s\n",
1036 ev->cmd_id, type);
1037
1038 return 0;
1039}
1040
1041static int ath6kl_wmi_stats_event_rx(struct wmi *wmi, u8 *datap, int len)
1042{
1043 ath6kl_tgt_stats_event(wmi->parent_dev, datap, len);
1044
1045 return 0;
1046}
1047
1048static u8 ath6kl_wmi_get_upper_threshold(s16 rssi,
1049 struct sq_threshold_params *sq_thresh,
1050 u32 size)
1051{
1052 u32 index;
1053 u8 threshold = (u8) sq_thresh->upper_threshold[size - 1];
1054
1055 /* The list is already in sorted order. Get the next higher value */
1056 for (index = 0; index < size; index++) {
1057 if (rssi < sq_thresh->upper_threshold[index]) {
1058 threshold = (u8) sq_thresh->upper_threshold[index];
1059 break;
1060 }
1061 }
1062
1063 return threshold;
1064}
1065
1066static u8 ath6kl_wmi_get_lower_threshold(s16 rssi,
1067 struct sq_threshold_params *sq_thresh,
1068 u32 size)
1069{
1070 u32 index;
1071 u8 threshold = (u8) sq_thresh->lower_threshold[size - 1];
1072
1073 /* The list is already in sorted order. Get the next lower value */
1074 for (index = 0; index < size; index++) {
1075 if (rssi > sq_thresh->lower_threshold[index]) {
1076 threshold = (u8) sq_thresh->lower_threshold[index];
1077 break;
1078 }
1079 }
1080
1081 return threshold;
1082}
1083
1084static int ath6kl_wmi_send_rssi_threshold_params(struct wmi *wmi,
1085 struct wmi_rssi_threshold_params_cmd *rssi_cmd)
1086{
1087 struct sk_buff *skb;
1088 struct wmi_rssi_threshold_params_cmd *cmd;
1089
1090 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
1091 if (!skb)
1092 return -ENOMEM;
1093
1094 cmd = (struct wmi_rssi_threshold_params_cmd *) skb->data;
1095 memcpy(cmd, rssi_cmd, sizeof(struct wmi_rssi_threshold_params_cmd));
1096
1097 return ath6kl_wmi_cmd_send(wmi, skb, WMI_RSSI_THRESHOLD_PARAMS_CMDID,
1098 NO_SYNC_WMIFLAG);
1099}
1100
1101static int ath6kl_wmi_rssi_threshold_event_rx(struct wmi *wmi, u8 *datap,
1102 int len)
1103{
1104 struct wmi_rssi_threshold_event *reply;
1105 struct wmi_rssi_threshold_params_cmd cmd;
1106 struct sq_threshold_params *sq_thresh;
1107 enum wmi_rssi_threshold_val new_threshold;
1108 u8 upper_rssi_threshold, lower_rssi_threshold;
1109 s16 rssi;
1110 int ret;
1111
1112 if (len < sizeof(struct wmi_rssi_threshold_event))
1113 return -EINVAL;
1114
1115 reply = (struct wmi_rssi_threshold_event *) datap;
1116 new_threshold = (enum wmi_rssi_threshold_val) reply->range;
1117 rssi = a_sle16_to_cpu(reply->rssi);
1118
1119 sq_thresh = &wmi->sq_threshld[SIGNAL_QUALITY_METRICS_RSSI];
1120
1121 /*
1122 * Identify the threshold breached and communicate that to the app.
1123 * After that install a new set of thresholds based on the signal
1124 * quality reported by the target
1125 */
1126 if (new_threshold) {
1127 /* Upper threshold breached */
1128 if (rssi < sq_thresh->upper_threshold[0]) {
1129 ath6kl_dbg(ATH6KL_DBG_WMI,
1130 "spurious upper rssi threshold event: %d\n",
1131 rssi);
1132 } else if ((rssi < sq_thresh->upper_threshold[1]) &&
1133 (rssi >= sq_thresh->upper_threshold[0])) {
1134 new_threshold = WMI_RSSI_THRESHOLD1_ABOVE;
1135 } else if ((rssi < sq_thresh->upper_threshold[2]) &&
1136 (rssi >= sq_thresh->upper_threshold[1])) {
1137 new_threshold = WMI_RSSI_THRESHOLD2_ABOVE;
1138 } else if ((rssi < sq_thresh->upper_threshold[3]) &&
1139 (rssi >= sq_thresh->upper_threshold[2])) {
1140 new_threshold = WMI_RSSI_THRESHOLD3_ABOVE;
1141 } else if ((rssi < sq_thresh->upper_threshold[4]) &&
1142 (rssi >= sq_thresh->upper_threshold[3])) {
1143 new_threshold = WMI_RSSI_THRESHOLD4_ABOVE;
1144 } else if ((rssi < sq_thresh->upper_threshold[5]) &&
1145 (rssi >= sq_thresh->upper_threshold[4])) {
1146 new_threshold = WMI_RSSI_THRESHOLD5_ABOVE;
1147 } else if (rssi >= sq_thresh->upper_threshold[5]) {
1148 new_threshold = WMI_RSSI_THRESHOLD6_ABOVE;
1149 }
1150 } else {
1151 /* Lower threshold breached */
1152 if (rssi > sq_thresh->lower_threshold[0]) {
1153 ath6kl_dbg(ATH6KL_DBG_WMI,
1154 "spurious lower rssi threshold event: %d %d\n",
1155 rssi, sq_thresh->lower_threshold[0]);
1156 } else if ((rssi > sq_thresh->lower_threshold[1]) &&
1157 (rssi <= sq_thresh->lower_threshold[0])) {
1158 new_threshold = WMI_RSSI_THRESHOLD6_BELOW;
1159 } else if ((rssi > sq_thresh->lower_threshold[2]) &&
1160 (rssi <= sq_thresh->lower_threshold[1])) {
1161 new_threshold = WMI_RSSI_THRESHOLD5_BELOW;
1162 } else if ((rssi > sq_thresh->lower_threshold[3]) &&
1163 (rssi <= sq_thresh->lower_threshold[2])) {
1164 new_threshold = WMI_RSSI_THRESHOLD4_BELOW;
1165 } else if ((rssi > sq_thresh->lower_threshold[4]) &&
1166 (rssi <= sq_thresh->lower_threshold[3])) {
1167 new_threshold = WMI_RSSI_THRESHOLD3_BELOW;
1168 } else if ((rssi > sq_thresh->lower_threshold[5]) &&
1169 (rssi <= sq_thresh->lower_threshold[4])) {
1170 new_threshold = WMI_RSSI_THRESHOLD2_BELOW;
1171 } else if (rssi <= sq_thresh->lower_threshold[5]) {
1172 new_threshold = WMI_RSSI_THRESHOLD1_BELOW;
1173 }
1174 }
1175
1176 /* Calculate and install the next set of thresholds */
1177 lower_rssi_threshold = ath6kl_wmi_get_lower_threshold(rssi, sq_thresh,
1178 sq_thresh->lower_threshold_valid_count);
1179 upper_rssi_threshold = ath6kl_wmi_get_upper_threshold(rssi, sq_thresh,
1180 sq_thresh->upper_threshold_valid_count);
1181
1182 /* Issue a wmi command to install the thresholds */
1183 cmd.thresh_above1_val = a_cpu_to_sle16(upper_rssi_threshold);
1184 cmd.thresh_below1_val = a_cpu_to_sle16(lower_rssi_threshold);
1185 cmd.weight = sq_thresh->weight;
1186 cmd.poll_time = cpu_to_le32(sq_thresh->polling_interval);
1187
1188 ret = ath6kl_wmi_send_rssi_threshold_params(wmi, &cmd);
1189 if (ret) {
1190 ath6kl_err("unable to configure rssi thresholds\n");
1191 return -EIO;
1192 }
1193
1194 return 0;
1195}
1196
1197static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len)
1198{
1199 struct wmi_cac_event *reply;
1200 struct ieee80211_tspec_ie *ts;
1201 u16 active_tsids, tsinfo;
1202 u8 tsid, index;
1203 u8 ts_id;
1204
1205 if (len < sizeof(struct wmi_cac_event))
1206 return -EINVAL;
1207
1208 reply = (struct wmi_cac_event *) datap;
1209
1210 if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) &&
1211 (reply->status_code != IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED)) {
1212
1213 ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
1214 tsinfo = le16_to_cpu(ts->tsinfo);
1215 tsid = (tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
1216 IEEE80211_WMM_IE_TSPEC_TID_MASK;
1217
1218 ath6kl_wmi_delete_pstream_cmd(wmi, reply->ac, tsid);
1219 } else if (reply->cac_indication == CAC_INDICATION_NO_RESP) {
1220 /*
1221 * Following assumes that there is only one outstanding
1222 * ADDTS request when this event is received
1223 */
1224 spin_lock_bh(&wmi->lock);
1225 active_tsids = wmi->stream_exist_for_ac[reply->ac];
1226 spin_unlock_bh(&wmi->lock);
1227
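/* scan for the lowest numbered TSID still marked active */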
1228 for (index = 0; index < sizeof(active_tsids) * 8; index++) {
1229 if ((active_tsids >> index) & 1)
1230 break;
1231 }
1232 if (index < (sizeof(active_tsids) * 8))
1233 ath6kl_wmi_delete_pstream_cmd(wmi, reply->ac, index);
1234 } else if (reply->cac_indication == CAC_INDICATION_DELETE) {
1235 /*
1236 * The AP has deleted a QoS stream; clear its tsid from
1237 * the set of active tsids for this AC.
1238 */
1242 ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
1243 tsinfo = le16_to_cpu(ts->tsinfo);
1244 ts_id = ((tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
1245 IEEE80211_WMM_IE_TSPEC_TID_MASK);
1246
1247 spin_lock_bh(&wmi->lock);
1248 wmi->stream_exist_for_ac[reply->ac] &= ~(1 << ts_id);
1249 active_tsids = wmi->stream_exist_for_ac[reply->ac];
1250 spin_unlock_bh(&wmi->lock);
1251
1252 /* Indicate stream inactivity to driver layer only if all tsids
1253 * within this AC are deleted.
1254 */
1255 if (!active_tsids) {
1256 ath6kl_indicate_tx_activity(wmi->parent_dev, reply->ac,
1257 false);
1258 wmi->fat_pipe_exist &= ~(1 << reply->ac);
1259 }
1260 }
1261
1262 return 0;
1263}
1264
1265static int ath6kl_wmi_send_snr_threshold_params(struct wmi *wmi,
1266 struct wmi_snr_threshold_params_cmd *snr_cmd)
1267{
1268 struct sk_buff *skb;
1269 struct wmi_snr_threshold_params_cmd *cmd;
1270
1271 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
1272 if (!skb)
1273 return -ENOMEM;
1274
1275 cmd = (struct wmi_snr_threshold_params_cmd *) skb->data;
1276 memcpy(cmd, snr_cmd, sizeof(struct wmi_snr_threshold_params_cmd));
1277
1278 return ath6kl_wmi_cmd_send(wmi, skb, WMI_SNR_THRESHOLD_PARAMS_CMDID,
1279 NO_SYNC_WMIFLAG);
1280}
1281
1282static int ath6kl_wmi_snr_threshold_event_rx(struct wmi *wmi, u8 *datap,
1283 int len)
1284{
1285 struct wmi_snr_threshold_event *reply;
1286 struct sq_threshold_params *sq_thresh;
1287 struct wmi_snr_threshold_params_cmd cmd;
1288 enum wmi_snr_threshold_val new_threshold;
1289 u8 upper_snr_threshold, lower_snr_threshold;
1290 s16 snr;
1291 int ret;
1292
1293 if (len < sizeof(struct wmi_snr_threshold_event))
1294 return -EINVAL;
1295
1296 reply = (struct wmi_snr_threshold_event *) datap;
1297
1298 new_threshold = (enum wmi_snr_threshold_val) reply->range;
1299 snr = reply->snr;
1300
1301 sq_thresh = &wmi->sq_threshld[SIGNAL_QUALITY_METRICS_SNR];
1302
1303 /*
1304 * Identify the threshold breached and communicate that to the app.
1305 * After that install a new set of thresholds based on the signal
1306 * quality reported by the target.
1307 */
1308 if (new_threshold) {
1309 /* Upper threshold breached */
1310 if (snr < sq_thresh->upper_threshold[0]) {
1311 ath6kl_dbg(ATH6KL_DBG_WMI,
1312 "spurious upper snr threshold event: %d\n",
1313 snr);
1314 } else if ((snr < sq_thresh->upper_threshold[1]) &&
1315 (snr >= sq_thresh->upper_threshold[0])) {
1316 new_threshold = WMI_SNR_THRESHOLD1_ABOVE;
1317 } else if ((snr < sq_thresh->upper_threshold[2]) &&
1318 (snr >= sq_thresh->upper_threshold[1])) {
1319 new_threshold = WMI_SNR_THRESHOLD2_ABOVE;
1320 } else if ((snr < sq_thresh->upper_threshold[3]) &&
1321 (snr >= sq_thresh->upper_threshold[2])) {
1322 new_threshold = WMI_SNR_THRESHOLD3_ABOVE;
1323 } else if (snr >= sq_thresh->upper_threshold[3]) {
1324 new_threshold = WMI_SNR_THRESHOLD4_ABOVE;
1325 }
1326 } else {
1327 /* Lower threshold breached */
1328 if (snr > sq_thresh->lower_threshold[0]) {
1329 ath6kl_dbg(ATH6KL_DBG_WMI,
1330 "spurious lower snr threshold event: %d\n",
1331 sq_thresh->lower_threshold[0]);
1332 } else if ((snr > sq_thresh->lower_threshold[1]) &&
1333 (snr <= sq_thresh->lower_threshold[0])) {
1334 new_threshold = WMI_SNR_THRESHOLD4_BELOW;
1335 } else if ((snr > sq_thresh->lower_threshold[2]) &&
1336 (snr <= sq_thresh->lower_threshold[1])) {
1337 new_threshold = WMI_SNR_THRESHOLD3_BELOW;
1338 } else if ((snr > sq_thresh->lower_threshold[3]) &&
1339 (snr <= sq_thresh->lower_threshold[2])) {
1340 new_threshold = WMI_SNR_THRESHOLD2_BELOW;
1341 } else if (snr <= sq_thresh->lower_threshold[3]) {
1342 new_threshold = WMI_SNR_THRESHOLD1_BELOW;
1343 }
1344 }
1345
1346 /* Calculate and install the next set of thresholds */
1347 lower_snr_threshold = ath6kl_wmi_get_lower_threshold(snr, sq_thresh,
1348 sq_thresh->lower_threshold_valid_count);
1349 upper_snr_threshold = ath6kl_wmi_get_upper_threshold(snr, sq_thresh,
1350 sq_thresh->upper_threshold_valid_count);
1351
1352 /* Issue a wmi command to install the thresholds */
1353 cmd.thresh_above1_val = upper_snr_threshold;
1354 cmd.thresh_below1_val = lower_snr_threshold;
1355 cmd.weight = sq_thresh->weight;
1356 cmd.poll_time = cpu_to_le32(sq_thresh->polling_interval);
1357
1358 ath6kl_dbg(ATH6KL_DBG_WMI,
1359 "snr: %d, threshold: %d, lower: %d, upper: %d\n",
1360 snr, new_threshold,
1361 lower_snr_threshold, upper_snr_threshold);
1362
1363 ret = ath6kl_wmi_send_snr_threshold_params(wmi, &cmd);
1364 if (ret) {
1365 ath6kl_err("unable to configure snr threshold\n");
1366 return -EIO;
1367 }
1368
1369 return 0;
1370}
1371
1372static int ath6kl_wmi_aplist_event_rx(struct wmi *wmi, u8 *datap, int len)
1373{
1374 u16 ap_info_entry_size;
1375 struct wmi_aplist_event *ev = (struct wmi_aplist_event *) datap;
1376 struct wmi_ap_info_v1 *ap_info_v1;
1377 u8 index;
1378
1379 if (len < sizeof(struct wmi_aplist_event) ||
1380 ev->ap_list_ver != APLIST_VER1)
1381 return -EINVAL;
1382
1383 ap_info_entry_size = sizeof(struct wmi_ap_info_v1);
1384 ap_info_v1 = (struct wmi_ap_info_v1 *) ev->ap_list;
1385
1386 ath6kl_dbg(ATH6KL_DBG_WMI,
1387 "number of APs in aplist event: %d\n", ev->num_ap);
1388
1389 if (len < (int) (sizeof(struct wmi_aplist_event) +
1390 (ev->num_ap - 1) * ap_info_entry_size))
1391 return -EINVAL;
1392
1393 /* AP list version 1 contents */
1394 for (index = 0; index < ev->num_ap; index++) {
1395 ath6kl_dbg(ATH6KL_DBG_WMI, "AP#%d BSSID %pM Channel %d\n",
1396 index, ap_info_v1->bssid, ap_info_v1->channel);
1397 ap_info_v1++;
1398 }
1399
1400 return 0;
1401}
1402
1403int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb,
1404 enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag)
1405{
1406 struct wmi_cmd_hdr *cmd_hdr;
1407 enum htc_endpoint_id ep_id = wmi->ep_id;
1408 int ret;
1409
1410 if (WARN_ON(skb == NULL))
1411 return -EINVAL;
1412
1413 if (sync_flag >= END_WMIFLAG) {
1414 dev_kfree_skb(skb);
1415 return -EINVAL;
1416 }
1417
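/*
 * SYNC_BEFORE_WMIFLAG fences data queued before this command,
 * SYNC_AFTER_WMIFLAG fences data queued after it, and
 * SYNC_BOTH_WMIFLAG does both.
 */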
1418 if ((sync_flag == SYNC_BEFORE_WMIFLAG) ||
1419 (sync_flag == SYNC_BOTH_WMIFLAG)) {
1420 /*
1421 * Make sure all data currently queued is transmitted before
1422 * the cmd execution. Establish a new sync point.
1423 */
1424 ath6kl_wmi_sync_point(wmi);
1425 }
1426
1427 skb_push(skb, sizeof(struct wmi_cmd_hdr));
1428
1429 cmd_hdr = (struct wmi_cmd_hdr *) skb->data;
1430 cmd_hdr->cmd_id = cpu_to_le16(cmd_id);
1431 cmd_hdr->info1 = 0; /* added for virtual interface */
1432
1433 /* Only for OPT_TX_CMD, use BE endpoint. */
1434 if (cmd_id == WMI_OPT_TX_FRAME_CMDID) {
1435 ret = ath6kl_wmi_data_hdr_add(wmi, skb, OPT_MSGTYPE,
1436 false, false, 0, NULL);
1437 if (ret) {
1438 dev_kfree_skb(skb);
1439 return ret;
1440 }
1441 ep_id = ath6kl_ac2_endpoint_id(wmi->parent_dev, WMM_AC_BE);
1442 }
1443
1444 ath6kl_control_tx(wmi->parent_dev, skb, ep_id);
1445
1446 if ((sync_flag == SYNC_AFTER_WMIFLAG) ||
1447 (sync_flag == SYNC_BOTH_WMIFLAG)) {
1448 /*
1449 * Make sure all new data queued waits for the command to
1450 * execute. Establish a new sync point.
1451 */
1452 ath6kl_wmi_sync_point(wmi);
1453 }
1454
1455 return 0;
1456}
1457
1458int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type,
1459 enum dot11_auth_mode dot11_auth_mode,
1460 enum auth_mode auth_mode,
1461 enum crypto_type pairwise_crypto,
1462 u8 pairwise_crypto_len,
1463 enum crypto_type group_crypto,
1464 u8 group_crypto_len, int ssid_len, u8 *ssid,
1465 u8 *bssid, u16 channel, u32 ctrl_flags)
1466{
1467 struct sk_buff *skb;
1468 struct wmi_connect_cmd *cc;
1469 int ret;
1470
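/* 100 appears to serve as an out-of-range "no traffic class" marker */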
1471 wmi->traffic_class = 100;
1472
1473 if ((pairwise_crypto == NONE_CRYPT) && (group_crypto != NONE_CRYPT))
1474 return -EINVAL;
1475
1476 if ((pairwise_crypto != NONE_CRYPT) && (group_crypto == NONE_CRYPT))
1477 return -EINVAL;
1478
1479 skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_connect_cmd));
1480 if (!skb)
1481 return -ENOMEM;
1482
1483 cc = (struct wmi_connect_cmd *) skb->data;
1484
1485 if (ssid_len)
1486 memcpy(cc->ssid, ssid, ssid_len);
1487
1488 cc->ssid_len = ssid_len;
1489 cc->nw_type = nw_type;
1490 cc->dot11_auth_mode = dot11_auth_mode;
1491 cc->auth_mode = auth_mode;
1492 cc->prwise_crypto_type = pairwise_crypto;
1493 cc->prwise_crypto_len = pairwise_crypto_len;
1494 cc->grp_crypto_type = group_crypto;
1495 cc->grp_crypto_len = group_crypto_len;
1496 cc->ch = cpu_to_le16(channel);
1497 cc->ctrl_flags = cpu_to_le32(ctrl_flags);
1498
1499 if (bssid != NULL)
1500 memcpy(cc->bssid, bssid, ETH_ALEN);
1501
1502 wmi->pair_crypto_type = pairwise_crypto;
1503 wmi->grp_crypto_type = group_crypto;
1504
1505 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_CONNECT_CMDID, NO_SYNC_WMIFLAG);
1506
1507 return ret;
1508}
1509
1510int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel)
1511{
1512 struct sk_buff *skb;
1513 struct wmi_reconnect_cmd *cc;
1514 int ret;
1515
1516 wmi->traffic_class = 100;
1517
1518 skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_reconnect_cmd));
1519 if (!skb)
1520 return -ENOMEM;
1521
1522 cc = (struct wmi_reconnect_cmd *) skb->data;
1523 cc->channel = cpu_to_le16(channel);
1524
1525 if (bssid != NULL)
1526 memcpy(cc->bssid, bssid, ETH_ALEN);
1527
1528 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_RECONNECT_CMDID,
1529 NO_SYNC_WMIFLAG);
1530
1531 return ret;
1532}
1533
1534int ath6kl_wmi_disconnect_cmd(struct wmi *wmi)
1535{
1536 int ret;
1537
1538 wmi->traffic_class = 100;
1539
1540 /* Disconnect command does not need to do a SYNC before. */
1541 ret = ath6kl_wmi_simple_cmd(wmi, WMI_DISCONNECT_CMDID);
1542
1543 return ret;
1544}
1545
1546int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type,
1547 u32 force_fgscan, u32 is_legacy,
1548 u32 home_dwell_time, u32 force_scan_interval,
1549 s8 num_chan, u16 *ch_list)
1550{
1551 struct sk_buff *skb;
1552 struct wmi_start_scan_cmd *sc;
1553 int size;
1554 int ret;
1555
1556 size = sizeof(struct wmi_start_scan_cmd);
1557
1558 if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN))
1559 return -EINVAL;
1560
1561 if (num_chan > WMI_MAX_CHANNELS)
1562 return -EINVAL;
1563
1564 if (num_chan)
1565 size += sizeof(u16) * (num_chan - 1);
1566
1567 skb = ath6kl_wmi_get_new_buf(size);
1568 if (!skb)
1569 return -ENOMEM;
1570
1571 sc = (struct wmi_start_scan_cmd *) skb->data;
1572 sc->scan_type = scan_type;
1573 sc->force_fg_scan = cpu_to_le32(force_fgscan);
1574 sc->is_legacy = cpu_to_le32(is_legacy);
1575 sc->home_dwell_time = cpu_to_le32(home_dwell_time);
1576 sc->force_scan_intvl = cpu_to_le32(force_scan_interval);
1577 sc->num_ch = num_chan;
1578
1579 if (num_chan)
1580 memcpy(sc->ch_list, ch_list, num_chan * sizeof(u16));
1581
1582 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_START_SCAN_CMDID,
1583 NO_SYNC_WMIFLAG);
1584
1585 return ret;
1586}
1587
1588int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec,
1589 u16 fg_end_sec, u16 bg_sec,
1590 u16 minact_chdw_msec, u16 maxact_chdw_msec,
1591 u16 pas_chdw_msec, u8 short_scan_ratio,
1592 u8 scan_ctrl_flag, u32 max_dfsch_act_time,
1593 u16 maxact_scan_per_ssid)
1594{
1595 struct sk_buff *skb;
1596 struct wmi_scan_params_cmd *sc;
1597 int ret;
1598
1599 skb = ath6kl_wmi_get_new_buf(sizeof(*sc));
1600 if (!skb)
1601 return -ENOMEM;
1602
1603 sc = (struct wmi_scan_params_cmd *) skb->data;
1604 sc->fg_start_period = cpu_to_le16(fg_start_sec);
1605 sc->fg_end_period = cpu_to_le16(fg_end_sec);
1606 sc->bg_period = cpu_to_le16(bg_sec);
1607 sc->minact_chdwell_time = cpu_to_le16(minact_chdw_msec);
1608 sc->maxact_chdwell_time = cpu_to_le16(maxact_chdw_msec);
1609 sc->pas_chdwell_time = cpu_to_le16(pas_chdw_msec);
1610 sc->short_scan_ratio = short_scan_ratio;
1611 sc->scan_ctrl_flags = scan_ctrl_flag;
1612 sc->max_dfsch_act_time = cpu_to_le32(max_dfsch_act_time);
1613 sc->maxact_scan_per_ssid = cpu_to_le16(maxact_scan_per_ssid);
1614
1615 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_SCAN_PARAMS_CMDID,
1616 NO_SYNC_WMIFLAG);
1617 return ret;
1618}
1619
1620int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask)
1621{
1622 struct sk_buff *skb;
1623 struct wmi_bss_filter_cmd *cmd;
1624 int ret;
1625
1626 if (filter >= LAST_BSS_FILTER)
1627 return -EINVAL;
1628
1629 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
1630 if (!skb)
1631 return -ENOMEM;
1632
1633 cmd = (struct wmi_bss_filter_cmd *) skb->data;
1634 cmd->bss_filter = filter;
1635 cmd->ie_mask = cpu_to_le32(ie_mask);
1636
1637 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_BSS_FILTER_CMDID,
1638 NO_SYNC_WMIFLAG);
1639 return ret;
1640}
1641
1642int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag,
1643 u8 ssid_len, u8 *ssid)
1644{
1645 struct sk_buff *skb;
1646 struct wmi_probed_ssid_cmd *cmd;
1647 int ret;
1648
1649 if (index > MAX_PROBED_SSID_INDEX)
1650 return -EINVAL;
1651
1652 if (ssid_len > sizeof(cmd->ssid))
1653 return -EINVAL;
1654
1655 if ((flag & (DISABLE_SSID_FLAG | ANY_SSID_FLAG)) && (ssid_len > 0))
1656 return -EINVAL;
1657
1658 if ((flag & SPECIFIC_SSID_FLAG) && !ssid_len)
1659 return -EINVAL;
1660
1661 if (flag & SPECIFIC_SSID_FLAG)
1662 wmi->is_probe_ssid = true;
1663
1664 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
1665 if (!skb)
1666 return -ENOMEM;
1667
1668 cmd = (struct wmi_probed_ssid_cmd *) skb->data;
1669 cmd->entry_index = index;
1670 cmd->flag = flag;
1671 cmd->ssid_len = ssid_len;
1672 memcpy(cmd->ssid, ssid, ssid_len);
1673
1674 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_PROBED_SSID_CMDID,
1675 NO_SYNC_WMIFLAG);
1676 return ret;
1677}
1678
1679int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval,
1680 u16 listen_beacons)
1681{
1682 struct sk_buff *skb;
1683 struct wmi_listen_int_cmd *cmd;
1684 int ret;
1685
1686 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
1687 if (!skb)
1688 return -ENOMEM;
1689
1690 cmd = (struct wmi_listen_int_cmd *) skb->data;
1691 cmd->listen_intvl = cpu_to_le16(listen_interval);
1692 cmd->num_beacons = cpu_to_le16(listen_beacons);
1693
1694 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_LISTEN_INT_CMDID,
1695 NO_SYNC_WMIFLAG);
1696 return ret;
1697}
1698
1699int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode)
1700{
1701 struct sk_buff *skb;
1702 struct wmi_power_mode_cmd *cmd;
1703 int ret;
1704
1705 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
1706 if (!skb)
1707 return -ENOMEM;
1708
1709 cmd = (struct wmi_power_mode_cmd *) skb->data;
1710 cmd->pwr_mode = pwr_mode;
1711 wmi->pwr_mode = pwr_mode;
1712
1713 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_POWER_MODE_CMDID,
1714 NO_SYNC_WMIFLAG);
1715 return ret;
1716}
1717
1718int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period,
1719 u16 ps_poll_num, u16 dtim_policy,
1720 u16 tx_wakeup_policy, u16 num_tx_to_wakeup,
1721 u16 ps_fail_event_policy)
1722{
1723 struct sk_buff *skb;
1724 struct wmi_power_params_cmd *pm;
1725 int ret;
1726
1727 skb = ath6kl_wmi_get_new_buf(sizeof(*pm));
1728 if (!skb)
1729 return -ENOMEM;
1730
1731 pm = (struct wmi_power_params_cmd *)skb->data;
1732 pm->idle_period = cpu_to_le16(idle_period);
1733 pm->pspoll_number = cpu_to_le16(ps_poll_num);
1734 pm->dtim_policy = cpu_to_le16(dtim_policy);
1735 pm->tx_wakeup_policy = cpu_to_le16(tx_wakeup_policy);
1736 pm->num_tx_to_wakeup = cpu_to_le16(num_tx_to_wakeup);
1737 pm->ps_fail_event_policy = cpu_to_le16(ps_fail_event_policy);
1738
1739 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_POWER_PARAMS_CMDID,
1740 NO_SYNC_WMIFLAG);
1741 return ret;
1742}
1743
1744int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout)
1745{
1746 struct sk_buff *skb;
1747 struct wmi_disc_timeout_cmd *cmd;
1748 int ret;
1749
1750 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
1751 if (!skb)
1752 return -ENOMEM;
1753
1754 cmd = (struct wmi_disc_timeout_cmd *) skb->data;
1755 cmd->discon_timeout = timeout;
1756
1757 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_DISC_TIMEOUT_CMDID,
1758 NO_SYNC_WMIFLAG);
1759 return ret;
1760}
1761
1762int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index,
1763 enum crypto_type key_type,
1764 u8 key_usage, u8 key_len,
1765 u8 *key_rsc, u8 *key_material,
1766 u8 key_op_ctrl, u8 *mac_addr,
1767 enum wmi_sync_flag sync_flag)
1768{
1769 struct sk_buff *skb;
1770 struct wmi_add_cipher_key_cmd *cmd;
1771 int ret;
1772
1773 if ((key_index > WMI_MAX_KEY_INDEX) || (key_len > WMI_MAX_KEY_LEN) ||
1774 (key_material == NULL))
1775 return -EINVAL;
1776
1777 if ((key_type != WEP_CRYPT) && (key_rsc == NULL))
1778 return -EINVAL;
1779
1780 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
1781 if (!skb)
1782 return -ENOMEM;
1783
1784 cmd = (struct wmi_add_cipher_key_cmd *) skb->data;
1785 cmd->key_index = key_index;
1786 cmd->key_type = key_type;
1787 cmd->key_usage = key_usage;
1788 cmd->key_len = key_len;
1789 memcpy(cmd->key, key_material, key_len);
1790
1791 if (key_rsc != NULL)
1792 memcpy(cmd->key_rsc, key_rsc, sizeof(cmd->key_rsc));
1793
1794 cmd->key_op_ctrl = key_op_ctrl;
1795
1796 if (mac_addr)
1797 memcpy(cmd->key_mac_addr, mac_addr, ETH_ALEN);
1798
1799 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_ADD_CIPHER_KEY_CMDID,
1800 sync_flag);
1801
1802 return ret;
1803}
1804
1805int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk)
1806{
1807 struct sk_buff *skb;
1808 struct wmi_add_krk_cmd *cmd;
1809 int ret;
1810
1811 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
1812 if (!skb)
1813 return -ENOMEM;
1814
1815 cmd = (struct wmi_add_krk_cmd *) skb->data;
1816 memcpy(cmd->krk, krk, WMI_KRK_LEN);
1817
1818 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_ADD_KRK_CMDID, NO_SYNC_WMIFLAG);
1819
1820 return ret;
1821}
1822
1823int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index)
1824{
1825 struct sk_buff *skb;
1826 struct wmi_delete_cipher_key_cmd *cmd;
1827 int ret;
1828
1829 if (key_index > WMI_MAX_KEY_INDEX)
1830 return -EINVAL;
1831
1832 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
1833 if (!skb)
1834 return -ENOMEM;
1835
1836 cmd = (struct wmi_delete_cipher_key_cmd *) skb->data;
1837 cmd->key_index = key_index;
1838
1839 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_DELETE_CIPHER_KEY_CMDID,
1840 NO_SYNC_WMIFLAG);
1841
1842 return ret;
1843}
1844
1845int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid,
1846 const u8 *pmkid, bool set)
1847{
1848 struct sk_buff *skb;
1849 struct wmi_setpmkid_cmd *cmd;
1850 int ret;
1851
1852 if (bssid == NULL)
1853 return -EINVAL;
1854
1855 if (set && pmkid == NULL)
1856 return -EINVAL;
1857
1858 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
1859 if (!skb)
1860 return -ENOMEM;
1861
1862 cmd = (struct wmi_setpmkid_cmd *) skb->data;
1863 memcpy(cmd->bssid, bssid, ETH_ALEN);
1864 if (set) {
1865 memcpy(cmd->pmkid, pmkid, sizeof(cmd->pmkid));
1866 cmd->enable = PMKID_ENABLE;
1867 } else {
1868 memset(cmd->pmkid, 0, sizeof(cmd->pmkid));
1869 cmd->enable = PMKID_DISABLE;
1870 }
1871
1872 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_PMKID_CMDID,
1873 NO_SYNC_WMIFLAG);
1874
1875 return ret;
1876}
1877
1878static int ath6kl_wmi_data_sync_send(struct wmi *wmi, struct sk_buff *skb,
1879 enum htc_endpoint_id ep_id)
1880{
1881 struct wmi_data_hdr *data_hdr;
1882 int ret;
1883
1884 if (WARN_ON(skb == NULL || ep_id == wmi->ep_id))
1885 return -EINVAL;
1886
1887 skb_push(skb, sizeof(struct wmi_data_hdr));
1888
1889 data_hdr = (struct wmi_data_hdr *) skb->data;
1890 data_hdr->info = SYNC_MSGTYPE << WMI_DATA_HDR_MSG_TYPE_SHIFT;
1891 data_hdr->info3 = 0;
1892
1893 ret = ath6kl_control_tx(wmi->parent_dev, skb, ep_id);
1894
1895 return ret;
1896}
1897
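/*
 * A sync point flushes the data path: a SYNCHRONIZE command goes out
 * on the control endpoint carrying a bitmap of the fat pipes in use,
 * followed by one header-only sync data message per active pipe, so
 * the target can order the command against in-flight data.
 */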
1898static int ath6kl_wmi_sync_point(struct wmi *wmi)
1899{
1900 struct sk_buff *skb;
1901 struct wmi_sync_cmd *cmd;
1902 struct wmi_data_sync_bufs data_sync_bufs[WMM_NUM_AC];
1903 enum htc_endpoint_id ep_id;
1904 u8 index, num_pri_streams = 0;
1905 int ret = 0;
1906
1907 memset(data_sync_bufs, 0, sizeof(data_sync_bufs));
1908
1909 spin_lock_bh(&wmi->lock);
1910
1911 for (index = 0; index < WMM_NUM_AC; index++) {
1912 if (wmi->fat_pipe_exist & (1 << index)) {
1913 num_pri_streams++;
1914 data_sync_bufs[num_pri_streams - 1].traffic_class =
1915 index;
1916 }
1917 }
1918
1919 spin_unlock_bh(&wmi->lock);
1920
1921 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
1922 if (!skb) {
1923 ret = -ENOMEM;
1924 goto free_skb;
1925 }
1926
1927 cmd = (struct wmi_sync_cmd *) skb->data;
1928
1929 /*
1930 * In the SYNC cmd sent on the control Ep, send a bitmap
1931 * of the data eps on which the Data Sync will be sent
1932 */
1933 cmd->data_sync_map = wmi->fat_pipe_exist;
1934
1935 for (index = 0; index < num_pri_streams; index++) {
1936 data_sync_bufs[index].skb = ath6kl_buf_alloc(0);
1937 if (data_sync_bufs[index].skb == NULL) {
1938 ret = -ENOMEM;
1939 break;
1940 }
1941 }
1942
1943 /*
1944 * If buffer allocation for any of the dataSync fails,
1945 * then do not send the Synchronize cmd on the control ep
1946 */
1947 if (ret)
1948 goto free_skb;
1949
1950 /*
1951 * Send sync cmd followed by sync data messages on all
1952 * endpoints being used
1953 */
1954 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SYNCHRONIZE_CMDID,
1955 NO_SYNC_WMIFLAG);
1956
1957 if (ret)
1958 goto free_skb;
1959
1960 /* cmd buffer sent, we no longer own it */
1961 skb = NULL;
1962
1963 for (index = 0; index < num_pri_streams; index++) {
1964
1965 if (WARN_ON(!data_sync_bufs[index].skb))
1966 break;
1967
1968 ep_id = ath6kl_ac2_endpoint_id(wmi->parent_dev,
1969 data_sync_bufs[index].traffic_class);
1970 ret = ath6kl_wmi_data_sync_send(wmi, data_sync_bufs[index].skb,
1971 ep_id);
1974
1975 if (ret)
1976 break;
1977
1978 data_sync_bufs[index].skb = NULL;
1979 }
1980
1981free_skb:
1982 /* free up any resources left over (possibly due to an error) */
1983 if (skb)
1984 dev_kfree_skb(skb);
1985
1986 for (index = 0; index < num_pri_streams; index++) {
1987 if (data_sync_bufs[index].skb != NULL) {
1988 dev_kfree_skb(data_sync_bufs[index].skb);
1990 }
1991 }
1992
1993 return ret;
1994}
1995
1996int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi,
1997 struct wmi_create_pstream_cmd *params)
1998{
1999 struct sk_buff *skb;
2000 struct wmi_create_pstream_cmd *cmd;
2001 u8 fatpipe_exist_for_ac = 0;
2002 s32 min_phy = 0;
2003 s32 nominal_phy = 0;
2004 int ret;
2005
2006 if (!((params->user_pri < 8) &&
2008 (up_to_ac[params->user_pri & 0x7] == params->traffic_class) &&
2009 (params->traffic_direc == UPLINK_TRAFFIC ||
2010 params->traffic_direc == DNLINK_TRAFFIC ||
2011 params->traffic_direc == BIDIR_TRAFFIC) &&
2012 (params->traffic_type == TRAFFIC_TYPE_APERIODIC ||
2013 params->traffic_type == TRAFFIC_TYPE_PERIODIC) &&
2014 (params->voice_psc_cap == DISABLE_FOR_THIS_AC ||
2015 params->voice_psc_cap == ENABLE_FOR_THIS_AC ||
2016 params->voice_psc_cap == ENABLE_FOR_ALL_AC) &&
2017 (params->tsid == WMI_IMPLICIT_PSTREAM ||
2018 params->tsid <= WMI_MAX_THINSTREAM))) {
2019 return -EINVAL;
2020 }
2021
2022 /*
2023 * Check that the nominal PHY rate is >= the minimum PHY rate,
2024 * so that the DUT can allow the TSRS IE
2025 */
2026
2027 /* Convert the minimum physical rate from bps to Mbps */
2028 min_phy = ((le32_to_cpu(params->min_phy_rate) / 1000) / 1000);
2029
2030 /* Check nominal phy rate >= minimum phy rate */
2031 if (params->nominal_phy >= min_phy) {
2032 /* convert from Mbps to units of 500 kbps */
2033 nominal_phy = (params->nominal_phy * 1000) / 500;
2034 ath6kl_dbg(ATH6KL_DBG_WMI,
2035 "TSRS IE enabled::MinPhy %x->NominalPhy ===> %x\n",
2036 min_phy, nominal_phy);
2037
2038 params->nominal_phy = nominal_phy;
2039 } else {
2040 params->nominal_phy = 0;
2041 }
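/*
 * Worked example with hypothetical values: min_phy_rate = 6000000 bps
 * gives min_phy = 6 (Mbps); a nominal_phy of 12 Mbps is then encoded
 * as (12 * 1000) / 500 = 24 units of 500 kbps in the TSRS IE.
 */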
2042
2043 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
2044 if (!skb)
2045 return -ENOMEM;
2046
2047 ath6kl_dbg(ATH6KL_DBG_WMI,
2048 "sending create_pstream_cmd: ac=%d tsid:%d\n",
2049 params->traffic_class, params->tsid);
2050
2051 cmd = (struct wmi_create_pstream_cmd *) skb->data;
2052 memcpy(cmd, params, sizeof(*cmd));
2053
2054 /* This is an implicitly created Fat pipe */
2055 if ((u32) params->tsid == (u32) WMI_IMPLICIT_PSTREAM) {
2056 spin_lock_bh(&wmi->lock);
2057 fatpipe_exist_for_ac = (wmi->fat_pipe_exist &
2058 (1 << params->traffic_class));
2059 wmi->fat_pipe_exist |= (1 << params->traffic_class);
2060 spin_unlock_bh(&wmi->lock);
2061 } else {
2062 /* explicitly created thin stream within a fat pipe */
2063 spin_lock_bh(&wmi->lock);
2064 fatpipe_exist_for_ac = (wmi->fat_pipe_exist &
2065 (1 << params->traffic_class));
2066 wmi->stream_exist_for_ac[params->traffic_class] |=
2067 (1 << params->tsid);
2068 /*
2069 * If a thinstream becomes active, the fat pipe automatically
2070 * becomes active
2071 */
2072 wmi->fat_pipe_exist |= (1 << params->traffic_class);
2073 spin_unlock_bh(&wmi->lock);
2074 }
2075
2076 /*
2077 * Indicate activity change to the driver layer only if this is the
2078 * first TSID to get created in this AC explicitly or an implicit
2079 * fat pipe is getting created.
2080 */
2081 if (!fatpipe_exist_for_ac)
2082 ath6kl_indicate_tx_activity(wmi->parent_dev,
2083 params->traffic_class, true);
2084
2085 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_CREATE_PSTREAM_CMDID,
2086 NO_SYNC_WMIFLAG);
2087 return ret;
2088}
2089
2090int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid)
2091{
2092 struct sk_buff *skb;
2093 struct wmi_delete_pstream_cmd *cmd;
2094 u16 active_tsids = 0;
2095 int ret;
2096
2097 if (traffic_class >= WMM_NUM_AC) {
2098 ath6kl_err("invalid traffic class: %d\n", traffic_class);
2099 return -EINVAL;
2100 }
2101
2102 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
2103 if (!skb)
2104 return -ENOMEM;
2105
2106 cmd = (struct wmi_delete_pstream_cmd *) skb->data;
2107 cmd->traffic_class = traffic_class;
2108 cmd->tsid = tsid;
2109
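/* stream_exist_for_ac[] holds one bit per active TSID within this AC */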
2110 spin_lock_bh(&wmi->lock);
2111 active_tsids = wmi->stream_exist_for_ac[traffic_class];
2112 spin_unlock_bh(&wmi->lock);
2113
2114 if (!(active_tsids & (1 << tsid))) {
2115 dev_kfree_skb(skb);
2116 ath6kl_dbg(ATH6KL_DBG_WMI,
2117 "TSID %d doesn't exist for traffic class: %d\n",
2118 tsid, traffic_class);
2119 return -ENODATA;
2120 }
2121
2122 ath6kl_dbg(ATH6KL_DBG_WMI,
2123 "sending delete_pstream_cmd: traffic class: %d tsid=%d\n",
2124 traffic_class, tsid);
2125
2126 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_DELETE_PSTREAM_CMDID,
2127 SYNC_BEFORE_WMIFLAG);
2128
2129 spin_lock_bh(&wmi->lock);
2130 wmi->stream_exist_for_ac[traffic_class] &= ~(1 << tsid);
2131 active_tsids = wmi->stream_exist_for_ac[traffic_class];
2132 spin_unlock_bh(&wmi->lock);
2133
2134 /*
2135 * Indicate stream inactivity to driver layer only if all tsids
2136 * within this AC are deleted.
2137 */
2138 if (!active_tsids) {
2139 ath6kl_indicate_tx_activity(wmi->parent_dev,
2140 traffic_class, false);
2141 wmi->fat_pipe_exist &= ~(1 << traffic_class);
2142 }
2143
2144 return ret;
2145}
2146
2147int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd)
2148{
2149 struct sk_buff *skb;
2150 struct wmi_set_ip_cmd *cmd;
2151 int ret;
2152
2153 /* Multicast addresses (first octet >= 0xE0, i.e. 224.0.0.0/4) are not valid */
2154 if ((*((u8 *) &ip_cmd->ips[0]) >= 0xE0) ||
2155 (*((u8 *) &ip_cmd->ips[1]) >= 0xE0))
2156 return -EINVAL;
2157
2158 skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_ip_cmd));
2159 if (!skb)
2160 return -ENOMEM;
2161
2162 cmd = (struct wmi_set_ip_cmd *) skb->data;
2163 memcpy(cmd, ip_cmd, sizeof(struct wmi_set_ip_cmd));
2164
2165 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_IP_CMDID, NO_SYNC_WMIFLAG);
2166 return ret;
2167}
2168
2169 static int ath6kl_wmi_get_wow_list_event_rx(struct wmi *wmi, u8 *datap,
2170 int len)
2171{
2172 if (len < sizeof(struct wmi_get_wow_list_reply))
2173 return -EINVAL;
2174
2175 return 0;
2176}
2177
2178static int ath6kl_wmi_cmd_send_xtnd(struct wmi *wmi, struct sk_buff *skb,
2179 enum wmix_command_id cmd_id,
2180 enum wmi_sync_flag sync_flag)
2181{
2182 struct wmix_cmd_hdr *cmd_hdr;
2183 int ret;
2184
2185 skb_push(skb, sizeof(struct wmix_cmd_hdr));
2186
2187 cmd_hdr = (struct wmix_cmd_hdr *) skb->data;
2188 cmd_hdr->cmd_id = cpu_to_le32(cmd_id);
2189
2190 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_EXTENSION_CMDID, sync_flag);
2191
2192 return ret;
2193}
2194
2195int ath6kl_wmi_get_challenge_resp_cmd(struct wmi *wmi, u32 cookie, u32 source)
2196{
2197 struct sk_buff *skb;
2198 struct wmix_hb_challenge_resp_cmd *cmd;
2199 int ret;
2200
2201 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
2202 if (!skb)
2203 return -ENOMEM;
2204
2205 cmd = (struct wmix_hb_challenge_resp_cmd *) skb->data;
2206 cmd->cookie = cpu_to_le32(cookie);
2207 cmd->source = cpu_to_le32(source);
2208
2209 ret = ath6kl_wmi_cmd_send_xtnd(wmi, skb, WMIX_HB_CHALLENGE_RESP_CMDID,
2210 NO_SYNC_WMIFLAG);
2211 return ret;
2212}
2213
2214int ath6kl_wmi_get_stats_cmd(struct wmi *wmi)
2215{
2216 return ath6kl_wmi_simple_cmd(wmi, WMI_GET_STATISTICS_CMDID);
2217}
2218
2219int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM)
2220{
2221 struct sk_buff *skb;
2222 struct wmi_set_tx_pwr_cmd *cmd;
2223 int ret;
2224
2225 skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_tx_pwr_cmd));
2226 if (!skb)
2227 return -ENOMEM;
2228
2229 cmd = (struct wmi_set_tx_pwr_cmd *) skb->data;
2230 cmd->dbM = dbM;
2231
2232 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_TX_PWR_CMDID,
2233 NO_SYNC_WMIFLAG);
2234
2235 return ret;
2236}
2237
2238int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi)
2239{
2240 return ath6kl_wmi_simple_cmd(wmi, WMI_GET_TX_PWR_CMDID);
2241}
2242
2243int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status, u8 preamble_policy)
2244{
2245 struct sk_buff *skb;
2246 struct wmi_set_lpreamble_cmd *cmd;
2247 int ret;
2248
2249 skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_lpreamble_cmd));
2250 if (!skb)
2251 return -ENOMEM;
2252
2253 cmd = (struct wmi_set_lpreamble_cmd *) skb->data;
2254 cmd->status = status;
2255 cmd->preamble_policy = preamble_policy;
2256
2257 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_LPREAMBLE_CMDID,
2258 NO_SYNC_WMIFLAG);
2259 return ret;
2260}
2261
2262int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold)
2263{
2264 struct sk_buff *skb;
2265 struct wmi_set_rts_cmd *cmd;
2266 int ret;
2267
2268 skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_rts_cmd));
2269 if (!skb)
2270 return -ENOMEM;
2271
2272 cmd = (struct wmi_set_rts_cmd *) skb->data;
2273 cmd->threshold = cpu_to_le16(threshold);
2274
2275 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_RTS_CMDID, NO_SYNC_WMIFLAG);
2276 return ret;
2277}
2278
2279int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg)
2280{
2281 struct sk_buff *skb;
2282 struct wmi_set_wmm_txop_cmd *cmd;
2283 int ret;
2284
2285 if (!((cfg == WMI_TXOP_DISABLED) || (cfg == WMI_TXOP_ENABLED)))
2286 return -EINVAL;
2287
2288 skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_wmm_txop_cmd));
2289 if (!skb)
2290 return -ENOMEM;
2291
2292 cmd = (struct wmi_set_wmm_txop_cmd *) skb->data;
2293 cmd->txop_enable = cfg;
2294
2295 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_WMM_TXOP_CMDID,
2296 NO_SYNC_WMIFLAG);
2297 return ret;
2298}
2299
2300int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl)
2301{
2302 struct sk_buff *skb;
2303 struct wmi_set_keepalive_cmd *cmd;
2304 int ret;
2305
2306 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
2307 if (!skb)
2308 return -ENOMEM;
2309
2310 cmd = (struct wmi_set_keepalive_cmd *) skb->data;
2311 cmd->keep_alive_intvl = keep_alive_intvl;
2312 wmi->keep_alive_intvl = keep_alive_intvl;
2313
2314 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_KEEPALIVE_CMDID,
2315 NO_SYNC_WMIFLAG);
2316 return ret;
2317}
2318
2319s32 ath6kl_wmi_get_rate(s8 rate_index)
2320{
2321 if (rate_index == RATE_AUTO)
2322 return 0;
2323
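/* per the decode in ath6kl_wmi_bitrate_reply_rx, column 0 is the long-GI rate */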
2324 return wmi_rate_tbl[(u32) rate_index][0];
2325}
2326
2327void ath6kl_wmi_node_return(struct wmi *wmi, struct bss *bss)
2328{
2329 if (bss)
2330 wlan_node_return(&wmi->parent_dev->scan_table, bss);
2331}
2332
2333 struct bss *ath6kl_wmi_find_ssid_node(struct wmi *wmi, u8 *ssid,
2334 u32 ssid_len, bool is_wpa2,
2335 bool match_ssid)
2336{
2337 struct bss *node = NULL;
2338
2339 node = wlan_find_ssid_node(&wmi->parent_dev->scan_table, ssid,
2340 ssid_len, is_wpa2, match_ssid);
2341 return node;
2342}
2343
2344 struct bss *ath6kl_wmi_find_node(struct wmi *wmi, const u8 *mac_addr)
2345{
2346 struct bss *ni = NULL;
2347
2348 ni = wlan_find_node(&wmi->parent_dev->scan_table, mac_addr);
2349
2350 return ni;
2351}
2352
2353 void ath6kl_wmi_node_free(struct wmi *wmi, const u8 *mac_addr)
2354{
2355 struct bss *ni = NULL;
2356
2357 ni = wlan_find_node(&wmi->parent_dev->scan_table, mac_addr);
2358 if (ni != NULL)
2359 wlan_node_reclaim(&wmi->parent_dev->scan_table, ni);
2362}
2363
2364static int ath6kl_wmi_get_pmkid_list_event_rx(struct wmi *wmi, u8 *datap,
2365 u32 len)
2366{
2367 struct wmi_pmkid_list_reply *reply;
2368 u32 expected_len;
2369
2370 if (len < sizeof(struct wmi_pmkid_list_reply))
2371 return -EINVAL;
2372
2373 reply = (struct wmi_pmkid_list_reply *)datap;
2374 expected_len = sizeof(reply->num_pmkid) +
2375 le32_to_cpu(reply->num_pmkid) * WMI_PMKID_LEN;
2376
2377 if (len < expected_len)
2378 return -EINVAL;
2379
2380 return 0;
2381}
2382
2383static int ath6kl_wmi_addba_req_event_rx(struct wmi *wmi, u8 *datap, int len)
2384{
2385 struct wmi_addba_req_event *cmd = (struct wmi_addba_req_event *) datap;

if (len < sizeof(struct wmi_addba_req_event))
return -EINVAL;

2387 aggr_recv_addba_req_evt(wmi->parent_dev, cmd->tid,
2388 le16_to_cpu(cmd->st_seq_no), cmd->win_sz);
2389
2390 return 0;
2391}
2392
2393static int ath6kl_wmi_delba_req_event_rx(struct wmi *wmi, u8 *datap, int len)
2394{
2395 struct wmi_delba_event *cmd = (struct wmi_delba_event *) datap;

if (len < sizeof(struct wmi_delba_event))
return -EINVAL;

2397 aggr_recv_delba_req_evt(wmi->parent_dev, cmd->tid);
2398
2399 return 0;
2400}
2401
2402/* AP mode functions */
2403static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len)
2404{
2405 struct wmi_pspoll_event *ev;
2406
2407 if (len < sizeof(struct wmi_pspoll_event))
2408 return -EINVAL;
2409
2410 ev = (struct wmi_pspoll_event *) datap;
2411
2412 ath6kl_pspoll_event(wmi->parent_dev, le16_to_cpu(ev->aid));
2413
2414 return 0;
2415}
2416
2417static int ath6kl_wmi_dtimexpiry_event_rx(struct wmi *wmi, u8 *datap, int len)
2418{
2419 ath6kl_dtimexpiry_event(wmi->parent_dev);
2420
2421 return 0;
2422}
2423
2424int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag)
2425{
2426 struct sk_buff *skb;
2427 struct wmi_ap_set_pvb_cmd *cmd;
2428 int ret;
2429
2430 skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_ap_set_pvb_cmd));
2431 if (!skb)
2432 return -ENOMEM;
2433
2434 cmd = (struct wmi_ap_set_pvb_cmd *) skb->data;
2435 cmd->aid = cpu_to_le16(aid);
2436 cmd->flag = cpu_to_le32(flag);
2437
2438 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_AP_SET_PVB_CMDID,
2439 NO_SYNC_WMIFLAG);
2440
2441 return ret;
2442}
2443
2444int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_ver,
2445 bool rx_dot11_hdr, bool defrag_on_host)
2446{
2447 struct sk_buff *skb;
2448 struct wmi_rx_frame_format_cmd *cmd;
2449 int ret;
2450
2451 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
2452 if (!skb)
2453 return -ENOMEM;
2454
2455 cmd = (struct wmi_rx_frame_format_cmd *) skb->data;
2456 cmd->dot11_hdr = rx_dot11_hdr ? 1 : 0;
2457 cmd->defrag_on_host = defrag_on_host ? 1 : 0;
2458 cmd->meta_ver = rx_meta_ver;
2459
2460 /* Delete the local aggr state, on host */
2461 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_RX_FRAME_FORMAT_CMDID,
2462 NO_SYNC_WMIFLAG);
2463
2464 return ret;
2465}
2466
2467static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
2468{
2469 struct wmix_cmd_hdr *cmd;
2470 u32 len;
2471 u16 id;
2472 u8 *datap;
2473 int ret = 0;
2474
2475 if (skb->len < sizeof(struct wmix_cmd_hdr)) {
2476 ath6kl_err("bad packet 1\n");
2477 wmi->stat.cmd_len_err++;
2478 return -EINVAL;
2479 }
2480
2481 cmd = (struct wmix_cmd_hdr *) skb->data;
2482 id = le32_to_cpu(cmd->cmd_id);
2483
2484 skb_pull(skb, sizeof(struct wmix_cmd_hdr));
2485
2486 datap = skb->data;
2487 len = skb->len;
2488
2489 switch (id) {
2490 case WMIX_HB_CHALLENGE_RESP_EVENTID:
2491 break;
2492 case WMIX_DBGLOG_EVENTID:
2493 break;
2494 default:
2495 ath6kl_err("unknown cmd id 0x%x\n", id);
2496 wmi->stat.cmd_id_err++;
2497 ret = -EINVAL;
2498 break;
2499 }
2500
2501 return ret;
2502}
2503
2504/* Control Path */
2505int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
2506{
2507 struct wmi_cmd_hdr *cmd;
2508 u32 len;
2509 u16 id;
2510 u8 *datap;
2511 int ret = 0;
2512
2513 if (WARN_ON(skb == NULL))
2514 return -EINVAL;
2515
2516 if (skb->len < sizeof(struct wmi_cmd_hdr)) {
2517 ath6kl_err("bad packet 1\n");
2518 dev_kfree_skb(skb);
2519 wmi->stat.cmd_len_err++;
2520 return -EINVAL;
2521 }
2522
2523 cmd = (struct wmi_cmd_hdr *) skb->data;
2524 id = le16_to_cpu(cmd->cmd_id);
2525
2526 skb_pull(skb, sizeof(struct wmi_cmd_hdr));
2527
2528 datap = skb->data;
2529 len = skb->len;
2530
2531 ath6kl_dbg(ATH6KL_DBG_WMI, "%s: wmi id: %d\n", __func__, id);
2532 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "msg payload ", datap, len);
2533
2534 switch (id) {
2535 case WMI_GET_BITRATE_CMDID:
2536 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_BITRATE_CMDID\n");
2537 ret = ath6kl_wmi_bitrate_reply_rx(wmi, datap, len);
2538 break;
2539 case WMI_GET_CHANNEL_LIST_CMDID:
2540 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_CHANNEL_LIST_CMDID\n");
2541 ret = ath6kl_wmi_ch_list_reply_rx(wmi, datap, len);
2542 break;
2543 case WMI_GET_TX_PWR_CMDID:
2544 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_TX_PWR_CMDID\n");
2545 ret = ath6kl_wmi_tx_pwr_reply_rx(wmi, datap, len);
2546 break;
2547 case WMI_READY_EVENTID:
2548 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_READY_EVENTID\n");
2549 ret = ath6kl_wmi_ready_event_rx(wmi, datap, len);
2550 break;
2551 case WMI_CONNECT_EVENTID:
2552 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CONNECT_EVENTID\n");
2553 ret = ath6kl_wmi_connect_event_rx(wmi, datap, len);
2554 break;
2555 case WMI_DISCONNECT_EVENTID:
2556 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DISCONNECT_EVENTID\n");
2557 ret = ath6kl_wmi_disconnect_event_rx(wmi, datap, len);
2558 break;
2559 case WMI_PEER_NODE_EVENTID:
2560 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PEER_NODE_EVENTID\n");
2561 ret = ath6kl_wmi_peer_node_event_rx(wmi, datap, len);
2562 break;
2563 case WMI_TKIP_MICERR_EVENTID:
2564 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TKIP_MICERR_EVENTID\n");
2565 ret = ath6kl_wmi_tkip_micerr_event_rx(wmi, datap, len);
2566 break;
2567 case WMI_BSSINFO_EVENTID:
2568 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_BSSINFO_EVENTID\n");
2569 ath6kl_wmi_convert_bssinfo_hdr2_to_hdr(skb, datap);
2570 ret = ath6kl_wmi_bssinfo_event_rx(wmi, skb->data, skb->len);
2571 break;
2572 case WMI_REGDOMAIN_EVENTID:
2573 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REGDOMAIN_EVENTID\n");
2574 break;
2575 case WMI_PSTREAM_TIMEOUT_EVENTID:
2576 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PSTREAM_TIMEOUT_EVENTID\n");
2577 ret = ath6kl_wmi_pstream_timeout_event_rx(wmi, datap, len);
2578 break;
2579 case WMI_NEIGHBOR_REPORT_EVENTID:
2580 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_NEIGHBOR_REPORT_EVENTID\n");
2581 break;
2582 case WMI_SCAN_COMPLETE_EVENTID:
2583 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SCAN_COMPLETE_EVENTID\n");
2584 ret = ath6kl_wmi_scan_complete_rx(wmi, datap, len);
2585 break;
2586 case WMI_CMDERROR_EVENTID:
2587 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CMDERROR_EVENTID\n");
2588 ret = ath6kl_wmi_error_event_rx(wmi, datap, len);
2589 break;
2590 case WMI_REPORT_STATISTICS_EVENTID:
2591 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_STATISTICS_EVENTID\n");
2592 ret = ath6kl_wmi_stats_event_rx(wmi, datap, len);
2593 break;
2594 case WMI_RSSI_THRESHOLD_EVENTID:
2595 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RSSI_THRESHOLD_EVENTID\n");
2596 ret = ath6kl_wmi_rssi_threshold_event_rx(wmi, datap, len);
2597 break;
2598 case WMI_ERROR_REPORT_EVENTID:
2599 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ERROR_REPORT_EVENTID\n");
2600 break;
2601 case WMI_OPT_RX_FRAME_EVENTID:
2602 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_OPT_RX_FRAME_EVENTID\n");
2603 ret = ath6kl_wmi_opt_frame_event_rx(wmi, datap, len);
2604 break;
2605 case WMI_REPORT_ROAM_TBL_EVENTID:
2606 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_ROAM_TBL_EVENTID\n");
2607 break;
2608 case WMI_EXTENSION_EVENTID:
2609 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_EXTENSION_EVENTID\n");
2610 ret = ath6kl_wmi_control_rx_xtnd(wmi, skb);
2611 break;
2612 case WMI_CAC_EVENTID:
2613 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CAC_EVENTID\n");
2614 ret = ath6kl_wmi_cac_event_rx(wmi, datap, len);
2615 break;
2616 case WMI_CHANNEL_CHANGE_EVENTID:
2617 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CHANNEL_CHANGE_EVENTID\n");
2618 break;
2619 case WMI_REPORT_ROAM_DATA_EVENTID:
2620 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_ROAM_DATA_EVENTID\n");
2621 break;
2622 case WMI_GET_FIXRATES_CMDID:
2623 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_FIXRATES_CMDID\n");
2624 ret = ath6kl_wmi_ratemask_reply_rx(wmi, datap, len);
2625 break;
2626 case WMI_TX_RETRY_ERR_EVENTID:
2627 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_RETRY_ERR_EVENTID\n");
2628 break;
2629 case WMI_SNR_THRESHOLD_EVENTID:
2630 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SNR_THRESHOLD_EVENTID\n");
2631 ret = ath6kl_wmi_snr_threshold_event_rx(wmi, datap, len);
2632 break;
2633 case WMI_LQ_THRESHOLD_EVENTID:
2634 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_LQ_THRESHOLD_EVENTID\n");
2635 break;
2636 case WMI_APLIST_EVENTID:
2637 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_APLIST_EVENTID\n");
2638 ret = ath6kl_wmi_aplist_event_rx(wmi, datap, len);
2639 break;
2640 case WMI_GET_KEEPALIVE_CMDID:
2641 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_KEEPALIVE_CMDID\n");
2642 ret = ath6kl_wmi_keepalive_reply_rx(wmi, datap, len);
2643 break;
2644 case WMI_GET_WOW_LIST_EVENTID:
2645 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_WOW_LIST_EVENTID\n");
2646 ret = ath6kl_wmi_get_wow_list_event_rx(wmi, datap, len);
2647 break;
2648 case WMI_GET_PMKID_LIST_EVENTID:
2649 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_PMKID_LIST_EVENTID\n");
2650 ret = ath6kl_wmi_get_pmkid_list_event_rx(wmi, datap, len);
2651 break;
2652 case WMI_PSPOLL_EVENTID:
2653 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PSPOLL_EVENTID\n");
2654 ret = ath6kl_wmi_pspoll_event_rx(wmi, datap, len);
2655 break;
2656 case WMI_DTIMEXPIRY_EVENTID:
2657 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DTIMEXPIRY_EVENTID\n");
2658 ret = ath6kl_wmi_dtimexpiry_event_rx(wmi, datap, len);
2659 break;
2660 case WMI_SET_PARAMS_REPLY_EVENTID:
2661 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SET_PARAMS_REPLY_EVENTID\n");
2662 break;
2663 case WMI_ADDBA_REQ_EVENTID:
2664 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_REQ_EVENTID\n");
2665 ret = ath6kl_wmi_addba_req_event_rx(wmi, datap, len);
2666 break;
2667 case WMI_ADDBA_RESP_EVENTID:
2668 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_RESP_EVENTID\n");
2669 break;
2670 case WMI_DELBA_REQ_EVENTID:
2671 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DELBA_REQ_EVENTID\n");
2672 ret = ath6kl_wmi_delba_req_event_rx(wmi, datap, len);
2673 break;
2674 case WMI_REPORT_BTCOEX_CONFIG_EVENTID:
2675 ath6kl_dbg(ATH6KL_DBG_WMI,
2676 "WMI_REPORT_BTCOEX_CONFIG_EVENTID\n");
2677 break;
2678 case WMI_REPORT_BTCOEX_STATS_EVENTID:
2679 ath6kl_dbg(ATH6KL_DBG_WMI,
2680 "WMI_REPORT_BTCOEX_STATS_EVENTID\n");
2681 break;
2682 case WMI_TX_COMPLETE_EVENTID:
2683 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_COMPLETE_EVENTID\n");
2684 ret = ath6kl_wmi_tx_complete_event_rx(datap, len);
2685 break;
2686 default:
2687 ath6kl_dbg(ATH6KL_DBG_WMI, "unknown cmd id 0x%x\n", id);
2688 wmi->stat.cmd_id_err++;
2689 ret = -EINVAL;
2690 break;
2691 }
2692
2693 dev_kfree_skb(skb);
2694
2695 return ret;
2696}
2697
2698static void ath6kl_wmi_qos_state_init(struct wmi *wmi)
2699{
2700 if (!wmi)
2701 return;
2702
2703 spin_lock_bh(&wmi->lock);
2704
2705 wmi->fat_pipe_exist = 0;
2706 memset(wmi->stream_exist_for_ac, 0, sizeof(wmi->stream_exist_for_ac));
2707
2708 spin_unlock_bh(&wmi->lock);
2709}
2710
2711void *ath6kl_wmi_init(struct ath6kl *dev)
2712{
2713 struct wmi *wmi;
2714
2715 wmi = kzalloc(sizeof(struct wmi), GFP_KERNEL);
2716 if (!wmi)
2717 return NULL;
2718
2719 spin_lock_init(&wmi->lock);
2720
2721 wmi->parent_dev = dev;
2722
2723 ath6kl_wmi_qos_state_init(wmi);
2724
2725 wmi->pwr_mode = REC_POWER;
2726 wmi->phy_mode = WMI_11G_MODE;
2727
2728 wmi->pair_crypto_type = NONE_CRYPT;
2729 wmi->grp_crypto_type = NONE_CRYPT;
2730
2731 wmi->ht_allowed[A_BAND_24GHZ] = 1;
2732 wmi->ht_allowed[A_BAND_5GHZ] = 1;
2733
2734 return wmi;
2735}
2736
2737void ath6kl_wmi_shutdown(struct wmi *wmi)
2738{
2739 if (!wmi)
2740 return;
2741
2742 kfree(wmi);
2743}
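
/*
 * Illustrative usage sketch (hypothetical caller, not part of this patch):
 * the WMI context returned by ath6kl_wmi_init() is paired with
 * ath6kl_wmi_shutdown(), with the allocation failure checked in between.
 */
static void __maybe_unused wmi_lifecycle_sketch(struct ath6kl *dev)
{
	struct wmi *wmi = ath6kl_wmi_init(dev);

	if (!wmi)
		return;		/* kzalloc() inside ath6kl_wmi_init() failed */

	/* ... exchange WMI commands and events ... */

	ath6kl_wmi_shutdown(wmi);
}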
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
new file mode 100644
index 000000000000..fe3ddce64087
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -0,0 +1,2018 @@
1/*
2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17/*
18 * This file contains the definitions of the Wireless Module Interface (WMI)
19 * protocol. It includes definitions of all the commands and events.
20 * Commands are messages from the host to the wireless module (WM);
21 * events and replies are messages from the WM to the host.
22 */
23
24#ifndef WMI_H
25#define WMI_H
26
27#include <linux/ieee80211.h>
28
29#include "htc.h"
30
31#define HTC_PROTOCOL_VERSION 0x0002
32#define WMI_PROTOCOL_VERSION 0x0002
33#define WMI_CONTROL_MSG_MAX_LEN 256
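/* Per IEEE 802.3, a length/type field value of 0x0600 or greater is an
 * EtherType rather than a length, hence the threshold in the test below.
 */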
34#define is_ethertype(type_or_len) ((type_or_len) >= 0x0600)
35
36#define IP_ETHERTYPE 0x0800
37
38#define WMI_IMPLICIT_PSTREAM 0xFF
39#define WMI_MAX_THINSTREAM 15
40
41#define SSID_IE_LEN_INDEX 13
42
43/* Host side link management data structures */
44#define SIG_QUALITY_THRESH_LVLS 6
45#define SIG_QUALITY_UPPER_THRESH_LVLS SIG_QUALITY_THRESH_LVLS
46#define SIG_QUALITY_LOWER_THRESH_LVLS SIG_QUALITY_THRESH_LVLS
47
48#define A_BAND_24GHZ 0
49#define A_BAND_5GHZ 1
50#define A_NUM_BANDS 2
51
52/* in ms */
53#define WMI_IMPLICIT_PSTREAM_INACTIVITY_INT 5000
54
55/*
56 * There are no signed versions of __le16 and __le32, so as a temporary
57 * solution we define our own. The idea is from fs/ntfs/types.h.
58 *
59 * Use the a_ prefix so that the names don't conflict if proper support is
60 * ever added to linux/types.h.
61 */
62typedef __s16 __bitwise a_sle16;
63typedef __s32 __bitwise a_sle32;
64
65static inline a_sle32 a_cpu_to_sle32(s32 val)
66{
67 return (__force a_sle32) cpu_to_le32(val);
68}
69
70static inline s32 a_sle32_to_cpu(a_sle32 val)
71{
72 return le32_to_cpu((__force __le32) val);
73}
74
75static inline a_sle16 a_cpu_to_sle16(s16 val)
76{
77 return (__force a_sle16) cpu_to_le16(val);
78}
79
80static inline s16 a_sle16_to_cpu(a_sle16 val)
81{
82 return le16_to_cpu((__force __le16) val);
83}
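
/*
 * Illustrative sketch (hypothetical helper, not used by the driver): a
 * signed host value such as an RSSI reading round-trips through the wire
 * format via the wrappers above.
 */
static inline bool a_sle16_roundtrip_ok(s16 rssi)
{
	a_sle16 wire = a_cpu_to_sle16(rssi);	/* host -> little endian */

	return a_sle16_to_cpu(wire) == rssi;	/* little endian -> host */
}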
84
85struct sq_threshold_params {
86 s16 upper_threshold[SIG_QUALITY_UPPER_THRESH_LVLS];
87 s16 lower_threshold[SIG_QUALITY_LOWER_THRESH_LVLS];
88 u32 upper_threshold_valid_count;
89 u32 lower_threshold_valid_count;
90 u32 polling_interval;
91 u8 weight;
92 u8 last_rssi;
93 u8 last_rssi_poll_event;
94};
95
96struct wmi_stats {
97 u32 cmd_len_err;
98 u32 cmd_id_err;
99};
100
101struct wmi_data_sync_bufs {
102 u8 traffic_class;
103 struct sk_buff *skb;
104};
105
106/* WMM stream classes */
107#define WMM_NUM_AC 4
108#define WMM_AC_BE 0 /* best effort */
109#define WMM_AC_BK 1 /* background */
110#define WMM_AC_VI 2 /* video */
111#define WMM_AC_VO 3 /* voice */
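
/*
 * Illustrative sketch (hypothetical helper, not used by the driver): the
 * conventional 802.1D user-priority to WMM access-class mapping from
 * which the classes above are normally derived.
 */
static inline u8 wmm_up_to_ac_sketch(u8 up)
{
	static const u8 up_to_ac[] = {
		WMM_AC_BE,	/* UP 0 */
		WMM_AC_BK,	/* UP 1 */
		WMM_AC_BK,	/* UP 2 */
		WMM_AC_BE,	/* UP 3 */
		WMM_AC_VI,	/* UP 4 */
		WMM_AC_VI,	/* UP 5 */
		WMM_AC_VO,	/* UP 6 */
		WMM_AC_VO,	/* UP 7 */
	};

	return up_to_ac[up & 0x7];
}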
112
113struct wmi {
114 bool ready;
115 u16 stream_exist_for_ac[WMM_NUM_AC];
116 u8 fat_pipe_exist;
117 struct ath6kl *parent_dev;
118 struct wmi_stats stat;
119 u8 pwr_mode;
120 u8 phy_mode;
121 u8 keep_alive_intvl;
122 spinlock_t lock;
123 enum htc_endpoint_id ep_id;
124 struct sq_threshold_params
125 sq_threshld[SIGNAL_QUALITY_METRICS_NUM_MAX];
126 enum crypto_type pair_crypto_type;
127 enum crypto_type grp_crypto_type;
128 bool is_wmm_enabled;
129 u8 ht_allowed[A_NUM_BANDS];
130 u8 traffic_class;
131 bool is_probe_ssid;
132};
133
134struct host_app_area {
135 u32 wmi_protocol_ver;
136};
137
138enum wmi_msg_type {
139 DATA_MSGTYPE = 0x0,
140 CNTL_MSGTYPE,
141 SYNC_MSGTYPE,
142 OPT_MSGTYPE,
143};
144
145/*
146 * Macros for operating on WMI_DATA_HDR (info) field
147 */
148
149#define WMI_DATA_HDR_MSG_TYPE_MASK 0x03
150#define WMI_DATA_HDR_MSG_TYPE_SHIFT 0
151#define WMI_DATA_HDR_UP_MASK 0x07
152#define WMI_DATA_HDR_UP_SHIFT 2
153
154/* In AP mode, the same bit (b5) is used to indicate the power save state
155 * in the rx direction and the more-data bit state in the tx direction.
156 */
157#define WMI_DATA_HDR_PS_MASK 0x1
158#define WMI_DATA_HDR_PS_SHIFT 5
159
160#define WMI_DATA_HDR_MORE_MASK 0x1
161#define WMI_DATA_HDR_MORE_SHIFT 5
162
163enum wmi_data_hdr_data_type {
164 WMI_DATA_HDR_DATA_TYPE_802_3 = 0,
165 WMI_DATA_HDR_DATA_TYPE_802_11,
166
167 /* used to be used for the PAL */
168 WMI_DATA_HDR_DATA_TYPE_ACL,
169};
170
171#define WMI_DATA_HDR_DATA_TYPE_MASK 0x3
172#define WMI_DATA_HDR_DATA_TYPE_SHIFT 6
173
174/* Macros for operating on WMI_DATA_HDR (info2) field */
175#define WMI_DATA_HDR_SEQNO_MASK 0xFFF
176#define WMI_DATA_HDR_SEQNO_SHIFT 0
177
178#define WMI_DATA_HDR_AMSDU_MASK 0x1
179#define WMI_DATA_HDR_AMSDU_SHIFT 12
180
181#define WMI_DATA_HDR_META_MASK 0x7
182#define WMI_DATA_HDR_META_SHIFT 13
183
184struct wmi_data_hdr {
185 s8 rssi;
186
187 /*
188 * usage of 'info' field(8-bit):
189 *
190 * b1:b0 - WMI_MSG_TYPE
191 * b4:b3:b2 - UP(tid)
192 * b5 - Used in AP mode.
193 * More-data in tx dir, PS in rx.
194 * b7:b6 - Dot3 header(0),
195 * Dot11 Header(1),
196 * ACL data(2)
197 */
198 u8 info;
199
200 /*
201 * usage of 'info2' field(16-bit):
202 *
203 * b11:b0 - seq_no
204 * b12 - A-MSDU?
205 * b15:b13 - META_DATA_VERSION 0 - 7
206 */
207 __le16 info2;
208 __le16 info3;
209} __packed;
210
211static inline u8 wmi_data_hdr_get_up(struct wmi_data_hdr *dhdr)
212{
213 return (dhdr->info >> WMI_DATA_HDR_UP_SHIFT) & WMI_DATA_HDR_UP_MASK;
214}
215
216static inline void wmi_data_hdr_set_up(struct wmi_data_hdr *dhdr,
217 u8 usr_pri)
218{
219 dhdr->info &= ~(WMI_DATA_HDR_UP_MASK << WMI_DATA_HDR_UP_SHIFT);
220 dhdr->info |= usr_pri << WMI_DATA_HDR_UP_SHIFT;
221}
222
223static inline u8 wmi_data_hdr_get_dot11(struct wmi_data_hdr *dhdr)
224{
225 u8 data_type;
226
227 data_type = (dhdr->info >> WMI_DATA_HDR_DATA_TYPE_SHIFT) &
228 WMI_DATA_HDR_DATA_TYPE_MASK;
229 return (data_type == WMI_DATA_HDR_DATA_TYPE_802_11);
230}
231
232static inline u16 wmi_data_hdr_get_seqno(struct wmi_data_hdr *dhdr)
233{
234 return (le16_to_cpu(dhdr->info2) >> WMI_DATA_HDR_SEQNO_SHIFT) &
235 WMI_DATA_HDR_SEQNO_MASK;
236}
237
238static inline u8 wmi_data_hdr_is_amsdu(struct wmi_data_hdr *dhdr)
239{
240 return (le16_to_cpu(dhdr->info2) >> WMI_DATA_HDR_AMSDU_SHIFT) &
241 WMI_DATA_HDR_AMSDU_MASK;
242}
243
244static inline u8 wmi_data_hdr_get_meta(struct wmi_data_hdr *dhdr)
245{
246 return (le16_to_cpu(dhdr->info2) >> WMI_DATA_HDR_META_SHIFT) &
247 WMI_DATA_HDR_META_MASK;
248}
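
/*
 * Illustrative sketch (hypothetical helper, not used by the driver):
 * pulling apart a received data header with the accessors above.
 */
static inline void wmi_data_hdr_decode_sketch(struct wmi_data_hdr *dhdr)
{
	u8 up = wmi_data_hdr_get_up(dhdr);	  /* 802.1D user priority */
	u16 seqno = wmi_data_hdr_get_seqno(dhdr); /* 12-bit sequence number */
	u8 meta = wmi_data_hdr_get_meta(dhdr);	  /* metadata version 0-7 */
	u8 amsdu = wmi_data_hdr_is_amsdu(dhdr);	  /* A-MSDU aggregate? */
	u8 dot11 = wmi_data_hdr_get_dot11(dhdr);  /* .11 vs .3 header */

	(void) up; (void) seqno; (void) meta; (void) amsdu; (void) dot11;
}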
249
250/* Tx meta version definitions */
251#define WMI_MAX_TX_META_SZ 12
252#define WMI_META_VERSION_1 0x01
253#define WMI_META_VERSION_2 0x02
254
255struct wmi_tx_meta_v1 {
256 /* packet ID to identify the tx request */
257 u8 pkt_id;
258
259 /* rate policy to be used for the tx of this frame */
260 u8 rate_plcy_id;
261} __packed;
262
263struct wmi_tx_meta_v2 {
264 /*
265 * Offset from start of the WMI header for csum calculation to
266 * begin.
267 */
268 u8 csum_start;
269
270 /* offset from start of WMI header where final csum goes */
271 u8 csum_dest;
272
273 /* no of bytes over which csum is calculated */
274 u8 csum_flags;
275} __packed;
276
277struct wmi_rx_meta_v1 {
278 u8 status;
279
280 /* rate index mapped to rate at which this packet was received. */
281 u8 rix;
282
283 /* rssi of packet */
284 u8 rssi;
285
286 /* rf channel during packet reception */
287 u8 channel;
288
289 __le16 flags;
290} __packed;
291
292struct wmi_rx_meta_v2 {
293 __le16 csum;
294
295 /* bit 0 set: partial csum valid, bit 1 set: test mode */
296 u8 csum_flags;
297} __packed;
298
299/* Control Path */
300struct wmi_cmd_hdr {
301 __le16 cmd_id;
302
303 /* info1 - 16 bits
304 * b03:b00 - id
305 * b15:b04 - unused */
306 __le16 info1;
307
308 /* for alignment */
309 __le16 reserved;
310} __packed;
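
/*
 * Illustrative sketch (hypothetical helper, not the driver's own framing
 * code): a control message is built by pushing struct wmi_cmd_hdr in
 * front of the command payload before the skb is handed to HTC.
 */
static inline void wmi_cmd_hdr_push_sketch(struct sk_buff *skb, u16 cmd_id)
{
	struct wmi_cmd_hdr *hdr;

	hdr = (struct wmi_cmd_hdr *) skb_push(skb, sizeof(*hdr));
	hdr->cmd_id = cpu_to_le16(cmd_id);
	hdr->info1 = cpu_to_le16(0);	/* unused bits */
	hdr->reserved = cpu_to_le16(0);	/* alignment only */
}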
311
312/* List of WMI commands */
313enum wmi_cmd_id {
314 WMI_CONNECT_CMDID = 0x0001,
315 WMI_RECONNECT_CMDID,
316 WMI_DISCONNECT_CMDID,
317 WMI_SYNCHRONIZE_CMDID,
318 WMI_CREATE_PSTREAM_CMDID,
319 WMI_DELETE_PSTREAM_CMDID,
320 WMI_START_SCAN_CMDID,
321 WMI_SET_SCAN_PARAMS_CMDID,
322 WMI_SET_BSS_FILTER_CMDID,
323 WMI_SET_PROBED_SSID_CMDID, /* 10 */
324 WMI_SET_LISTEN_INT_CMDID,
325 WMI_SET_BMISS_TIME_CMDID,
326 WMI_SET_DISC_TIMEOUT_CMDID,
327 WMI_GET_CHANNEL_LIST_CMDID,
328 WMI_SET_BEACON_INT_CMDID,
329 WMI_GET_STATISTICS_CMDID,
330 WMI_SET_CHANNEL_PARAMS_CMDID,
331 WMI_SET_POWER_MODE_CMDID,
332 WMI_SET_IBSS_PM_CAPS_CMDID,
333 WMI_SET_POWER_PARAMS_CMDID, /* 20 */
334 WMI_SET_POWERSAVE_TIMERS_POLICY_CMDID,
335 WMI_ADD_CIPHER_KEY_CMDID,
336 WMI_DELETE_CIPHER_KEY_CMDID,
337 WMI_ADD_KRK_CMDID,
338 WMI_DELETE_KRK_CMDID,
339 WMI_SET_PMKID_CMDID,
340 WMI_SET_TX_PWR_CMDID,
341 WMI_GET_TX_PWR_CMDID,
342 WMI_SET_ASSOC_INFO_CMDID,
343 WMI_ADD_BAD_AP_CMDID, /* 30 */
344 WMI_DELETE_BAD_AP_CMDID,
345 WMI_SET_TKIP_COUNTERMEASURES_CMDID,
346 WMI_RSSI_THRESHOLD_PARAMS_CMDID,
347 WMI_TARGET_ERROR_REPORT_BITMASK_CMDID,
348 WMI_SET_ACCESS_PARAMS_CMDID,
349 WMI_SET_RETRY_LIMITS_CMDID,
350 WMI_SET_OPT_MODE_CMDID,
351 WMI_OPT_TX_FRAME_CMDID,
352 WMI_SET_VOICE_PKT_SIZE_CMDID,
353 WMI_SET_MAX_SP_LEN_CMDID, /* 40 */
354 WMI_SET_ROAM_CTRL_CMDID,
355 WMI_GET_ROAM_TBL_CMDID,
356 WMI_GET_ROAM_DATA_CMDID,
357 WMI_ENABLE_RM_CMDID,
358 WMI_SET_MAX_OFFHOME_DURATION_CMDID,
359 WMI_EXTENSION_CMDID, /* Non-wireless extensions */
360 WMI_SNR_THRESHOLD_PARAMS_CMDID,
361 WMI_LQ_THRESHOLD_PARAMS_CMDID,
362 WMI_SET_LPREAMBLE_CMDID,
363 WMI_SET_RTS_CMDID, /* 50 */
364 WMI_CLR_RSSI_SNR_CMDID,
365 WMI_SET_FIXRATES_CMDID,
366 WMI_GET_FIXRATES_CMDID,
367 WMI_SET_AUTH_MODE_CMDID,
368 WMI_SET_REASSOC_MODE_CMDID,
369 WMI_SET_WMM_CMDID,
370 WMI_SET_WMM_TXOP_CMDID,
371 WMI_TEST_CMDID,
372
373 /* COEX AR6002 only */
374 WMI_SET_BT_STATUS_CMDID,
375 WMI_SET_BT_PARAMS_CMDID, /* 60 */
376
377 WMI_SET_KEEPALIVE_CMDID,
378 WMI_GET_KEEPALIVE_CMDID,
379 WMI_SET_APPIE_CMDID,
380 WMI_GET_APPIE_CMDID,
381 WMI_SET_WSC_STATUS_CMDID,
382
383 /* Wake on Wireless */
384 WMI_SET_HOST_SLEEP_MODE_CMDID,
385 WMI_SET_WOW_MODE_CMDID,
386 WMI_GET_WOW_LIST_CMDID,
387 WMI_ADD_WOW_PATTERN_CMDID,
388 WMI_DEL_WOW_PATTERN_CMDID, /* 70 */
389
390 WMI_SET_FRAMERATES_CMDID,
391 WMI_SET_AP_PS_CMDID,
392 WMI_SET_QOS_SUPP_CMDID,
393
394 /* WMI_THIN_RESERVED_... mark the start and end
395 * values for WMI_THIN_RESERVED command IDs. These
396 * command IDs can be found in wmi_thin.h */
397 WMI_THIN_RESERVED_START = 0x8000,
398 WMI_THIN_RESERVED_END = 0x8fff,
399
400 /* Developer commands start at 0xF000 */
401 WMI_SET_BITRATE_CMDID = 0xF000,
402 WMI_GET_BITRATE_CMDID,
403 WMI_SET_WHALPARAM_CMDID,
404 WMI_SET_MAC_ADDRESS_CMDID,
405 WMI_SET_AKMP_PARAMS_CMDID,
406 WMI_SET_PMKID_LIST_CMDID,
407 WMI_GET_PMKID_LIST_CMDID,
408 WMI_ABORT_SCAN_CMDID,
409 WMI_SET_TARGET_EVENT_REPORT_CMDID,
410
411 /* Unused */
412 WMI_UNUSED1,
413 WMI_UNUSED2,
414
415 /* AP mode commands */
416 WMI_AP_HIDDEN_SSID_CMDID,
417 WMI_AP_SET_NUM_STA_CMDID,
418 WMI_AP_ACL_POLICY_CMDID,
419 WMI_AP_ACL_MAC_LIST_CMDID,
420 WMI_AP_CONFIG_COMMIT_CMDID,
421 WMI_AP_SET_MLME_CMDID,
422 WMI_AP_SET_PVB_CMDID,
423 WMI_AP_CONN_INACT_CMDID,
424 WMI_AP_PROT_SCAN_TIME_CMDID,
425 WMI_AP_SET_COUNTRY_CMDID,
426 WMI_AP_SET_DTIM_CMDID,
427 WMI_AP_MODE_STAT_CMDID,
428
429 WMI_SET_IP_CMDID,
430 WMI_SET_PARAMS_CMDID,
431 WMI_SET_MCAST_FILTER_CMDID,
432 WMI_DEL_MCAST_FILTER_CMDID,
433
434 WMI_ALLOW_AGGR_CMDID,
435 WMI_ADDBA_REQ_CMDID,
436 WMI_DELBA_REQ_CMDID,
437 WMI_SET_HT_CAP_CMDID,
438 WMI_SET_HT_OP_CMDID,
439 WMI_SET_TX_SELECT_RATES_CMDID,
440 WMI_SET_TX_SGI_PARAM_CMDID,
441 WMI_SET_RATE_POLICY_CMDID,
442
443 WMI_HCI_CMD_CMDID,
444 WMI_RX_FRAME_FORMAT_CMDID,
445 WMI_SET_THIN_MODE_CMDID,
446 WMI_SET_BT_WLAN_CONN_PRECEDENCE_CMDID,
447
448 WMI_AP_SET_11BG_RATESET_CMDID,
449 WMI_SET_PMK_CMDID,
450 WMI_MCAST_FILTER_CMDID,
451
452 /* COEX CMDID AR6003 */
453 WMI_SET_BTCOEX_FE_ANT_CMDID,
454 WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMDID,
455 WMI_SET_BTCOEX_SCO_CONFIG_CMDID,
456 WMI_SET_BTCOEX_A2DP_CONFIG_CMDID,
457 WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMDID,
458 WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG_CMDID,
459 WMI_SET_BTCOEX_DEBUG_CMDID,
460 WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMDID,
461 WMI_GET_BTCOEX_STATS_CMDID,
462 WMI_GET_BTCOEX_CONFIG_CMDID,
463
464 WMI_SET_DFS_ENABLE_CMDID, /* F034 */
465 WMI_SET_DFS_MINRSSITHRESH_CMDID,
466 WMI_SET_DFS_MAXPULSEDUR_CMDID,
467 WMI_DFS_RADAR_DETECTED_CMDID,
468
469 /* P2P commands */
470 WMI_P2P_SET_CONFIG_CMDID, /* F038 */
471 WMI_WPS_SET_CONFIG_CMDID,
472 WMI_SET_REQ_DEV_ATTR_CMDID,
473 WMI_P2P_FIND_CMDID,
474 WMI_P2P_STOP_FIND_CMDID,
475 WMI_P2P_GO_NEG_START_CMDID,
476 WMI_P2P_LISTEN_CMDID,
477
478 WMI_CONFIG_TX_MAC_RULES_CMDID, /* F040 */
479 WMI_SET_PROMISCUOUS_MODE_CMDID,
480 WMI_RX_FRAME_FILTER_CMDID,
481 WMI_SET_CHANNEL_CMDID,
482
483 /* WAC commands */
484 WMI_ENABLE_WAC_CMDID,
485 WMI_WAC_SCAN_REPLY_CMDID,
486 WMI_WAC_CTRL_REQ_CMDID,
487 WMI_SET_DIV_PARAMS_CMDID,
488
489 WMI_GET_PMK_CMDID,
490 WMI_SET_PASSPHRASE_CMDID,
491 WMI_SEND_ASSOC_RES_CMDID,
492 WMI_SET_ASSOC_REQ_RELAY_CMDID,
493 WMI_GET_RFKILL_MODE_CMDID,
494
495 /* ACS command, consists of sub-commands */
496 WMI_ACS_CTRL_CMDID,
497
498 /* Ultra low power store / recall commands */
499 WMI_STORERECALL_CONFIGURE_CMDID,
500 WMI_STORERECALL_RECALL_CMDID,
501 WMI_STORERECALL_HOST_READY_CMDID,
502 WMI_FORCE_TARGET_ASSERT_CMDID,
503 WMI_SET_EXCESS_TX_RETRY_THRES_CMDID,
504};
505
506/* WMI_CONNECT_CMDID */
507enum network_type {
508 INFRA_NETWORK = 0x01,
509 ADHOC_NETWORK = 0x02,
510 ADHOC_CREATOR = 0x04,
511 AP_NETWORK = 0x10,
512};
513
514enum dot11_auth_mode {
515 OPEN_AUTH = 0x01,
516 SHARED_AUTH = 0x02,
517
518 /* different from IEEE_AUTH_MODE definitions */
519 LEAP_AUTH = 0x04,
520};
521
522enum {
523 AUTH_IDLE,
524 AUTH_OPEN_IN_PROGRESS,
525};
526
527enum auth_mode {
528 NONE_AUTH = 0x01,
529 WPA_AUTH = 0x02,
530 WPA2_AUTH = 0x04,
531 WPA_PSK_AUTH = 0x08,
532 WPA2_PSK_AUTH = 0x10,
533 WPA_AUTH_CCKM = 0x20,
534 WPA2_AUTH_CCKM = 0x40,
535};
536
537#define WMI_MIN_CRYPTO_TYPE NONE_CRYPT
538#define WMI_MAX_CRYPTO_TYPE (AES_CRYPT + 1)
539
540#define WMI_MIN_KEY_INDEX 0
541#define WMI_MAX_KEY_INDEX 3
542
543#define WMI_MAX_KEY_LEN 32
544
545/*
546 * NB: these values are ordered carefully; there are lots of
547 * implications in any reordering. In particular beware
548 * that 4 is not used to avoid conflicting with IEEE80211_F_PRIVACY.
549 */
550#define ATH6KL_CIPHER_WEP 0
551#define ATH6KL_CIPHER_TKIP 1
552#define ATH6KL_CIPHER_AES_OCB 2
553#define ATH6KL_CIPHER_AES_CCM 3
554#define ATH6KL_CIPHER_CKIP 5
555#define ATH6KL_CIPHER_CCKM_KRK 6
556#define ATH6KL_CIPHER_NONE 7 /* pseudo value */
557
558/*
559 * 802.11 rate set.
560 */
561#define ATH6KL_RATE_MAXSIZE 15 /* max rates we'll handle */
562
563#define ATH_OUI_TYPE 0x01
564#define WPA_OUI_TYPE 0x01
565#define WMM_PARAM_OUI_SUBTYPE 0x01
566#define WMM_OUI_TYPE 0x02
567#define WSC_OUT_TYPE 0x04
568
569enum wmi_connect_ctrl_flags_bits {
570 CONNECT_ASSOC_POLICY_USER = 0x0001,
571 CONNECT_SEND_REASSOC = 0x0002,
572 CONNECT_IGNORE_WPAx_GROUP_CIPHER = 0x0004,
573 CONNECT_PROFILE_MATCH_DONE = 0x0008,
574 CONNECT_IGNORE_AAC_BEACON = 0x0010,
575 CONNECT_CSA_FOLLOW_BSS = 0x0020,
576 CONNECT_DO_WPA_OFFLOAD = 0x0040,
577 CONNECT_DO_NOT_DEAUTH = 0x0080,
578};
579
580struct wmi_connect_cmd {
581 u8 nw_type;
582 u8 dot11_auth_mode;
583 u8 auth_mode;
584 u8 prwise_crypto_type;
585 u8 prwise_crypto_len;
586 u8 grp_crypto_type;
587 u8 grp_crypto_len;
588 u8 ssid_len;
589 u8 ssid[IEEE80211_MAX_SSID_LEN];
590 __le16 ch;
591 u8 bssid[ETH_ALEN];
592 __le32 ctrl_flags;
593} __packed;
594
595/* WMI_RECONNECT_CMDID */
596struct wmi_reconnect_cmd {
597 /* channel hint */
598 __le16 channel;
599
600 /* mandatory if set */
601 u8 bssid[ETH_ALEN];
602} __packed;
603
604/* WMI_ADD_CIPHER_KEY_CMDID */
605enum key_usage {
606 PAIRWISE_USAGE = 0x00,
607 GROUP_USAGE = 0x01,
608
609 /* default Tx Key - static WEP only */
610 TX_USAGE = 0x02,
611};
612
613/*
614 * Bit Flag
615 * Bit 0 - Initialise TSC - default is Initialize
616 */
617#define KEY_OP_INIT_TSC 0x01
618#define KEY_OP_INIT_RSC 0x02
619
620/* default initialise the TSC & RSC */
621#define KEY_OP_INIT_VAL 0x03
622#define KEY_OP_VALID_MASK 0x03
623
624struct wmi_add_cipher_key_cmd {
625 u8 key_index;
626 u8 key_type;
627
628 /* enum key_usage */
629 u8 key_usage;
630
631 u8 key_len;
632
633 /* key replay sequence counter */
634 u8 key_rsc[8];
635
636 u8 key[WLAN_MAX_KEY_LEN];
637
638 /* additional key control info */
639 u8 key_op_ctrl;
640
641 u8 key_mac_addr[ETH_ALEN];
642} __packed;
643
644/* WMI_DELETE_CIPHER_KEY_CMDID */
645struct wmi_delete_cipher_key_cmd {
646 u8 key_index;
647} __packed;
648
649#define WMI_KRK_LEN 16
650
651/* WMI_ADD_KRK_CMDID */
652struct wmi_add_krk_cmd {
653 u8 krk[WMI_KRK_LEN];
654} __packed;
655
656/* WMI_SETPMKID_CMDID */
657
658#define WMI_PMKID_LEN 16
659
660enum pmkid_enable_flg {
661 PMKID_DISABLE = 0,
662 PMKID_ENABLE = 1,
663};
664
665struct wmi_setpmkid_cmd {
666 u8 bssid[ETH_ALEN];
667
668 /* enum pmkid_enable_flg */
669 u8 enable;
670
671 u8 pmkid[WMI_PMKID_LEN];
672} __packed;
673
674/* WMI_START_SCAN_CMD */
675enum wmi_scan_type {
676 WMI_LONG_SCAN = 0,
677 WMI_SHORT_SCAN = 1,
678};
679
680struct wmi_start_scan_cmd {
681 __le32 force_fg_scan;
682
683 /* for legacy Cisco AP compatibility */
684 __le32 is_legacy;
685
686 /* max duration in the home channel (msec) */
687 __le32 home_dwell_time;
688
689 /* time interval between scans (msec) */
690 __le32 force_scan_intvl;
691
692 /* enum wmi_scan_type */
693 u8 scan_type;
694
695 /* how many channels follow */
696 u8 num_ch;
697
698 /* channels in MHz */
699 __le16 ch_list[1];
700} __packed;
701
702/* WMI_SET_SCAN_PARAMS_CMDID */
703#define WMI_SHORTSCANRATIO_DEFAULT 3
704
705/*
706 * Warning: scan control flag value of 0xFF is used to disable
707 * all flags in WMI_SCAN_PARAMS_CMD. Do not add any more
708 * flags here
709 */
710enum wmi_scan_ctrl_flags_bits {
711
712 /* set if the target can scan in the connect cmd */
713 CONNECT_SCAN_CTRL_FLAGS = 0x01,
714
715 /* set if scan for the SSID it is already connected to */
716 SCAN_CONNECTED_CTRL_FLAGS = 0x02,
717
718 /* set to enable active scan */
719 ACTIVE_SCAN_CTRL_FLAGS = 0x04,
720
721 /* set to enable roam scan on bmiss and low rssi */
722 ROAM_SCAN_CTRL_FLAGS = 0x08,
723
724 /* set to follow the customer BSSINFO reporting rule */
725 REPORT_BSSINFO_CTRL_FLAGS = 0x10,
726
727 /* if disabled, target doesn't scan after a disconnect event */
728 ENABLE_AUTO_CTRL_FLAGS = 0x20,
729
730 /*
731 * A scan complete event with canceled status is generated when a
732 * scan is preempted before it completes.
733 */
734 ENABLE_SCAN_ABORT_EVENT = 0x40
735};
736
737#define DEFAULT_SCAN_CTRL_FLAGS \
738 (CONNECT_SCAN_CTRL_FLAGS | \
739 SCAN_CONNECTED_CTRL_FLAGS | \
740 ACTIVE_SCAN_CTRL_FLAGS | \
741 ROAM_SCAN_CTRL_FLAGS | \
742 ENABLE_AUTO_CTRL_FLAGS)
743
744struct wmi_scan_params_cmd {
745 /* sec */
746 __le16 fg_start_period;
747
748 /* sec */
749 __le16 fg_end_period;
750
751 /* sec */
752 __le16 bg_period;
753
754 /* msec */
755 __le16 maxact_chdwell_time;
756
757 /* msec */
758 __le16 pas_chdwell_time;
759
760 /* how many short scans for one long scan */
761 u8 short_scan_ratio;
762
763 u8 scan_ctrl_flags;
764
765 /* msec */
766 __le16 minact_chdwell_time;
767
768 /* max active scans per ssid */
769 __le16 maxact_scan_per_ssid;
770
771 /* msecs */
772 __le32 max_dfsch_act_time;
773} __packed;
774
775/* WMI_SET_BSS_FILTER_CMDID */
776enum wmi_bss_filter {
777 /* no beacons forwarded */
778 NONE_BSS_FILTER = 0x0,
779
780 /* all beacons forwarded */
781 ALL_BSS_FILTER,
782
783 /* only beacons matching profile */
784 PROFILE_FILTER,
785
786 /* all but beacons matching profile */
787 ALL_BUT_PROFILE_FILTER,
788
789 /* only beacons matching current BSS */
790 CURRENT_BSS_FILTER,
791
792 /* all but beacons matching BSS */
793 ALL_BUT_BSS_FILTER,
794
795 /* beacons matching probed ssid */
796 PROBED_SSID_FILTER,
797
798 /* marker only */
799 LAST_BSS_FILTER,
800};
801
802struct wmi_bss_filter_cmd {
803 /* see, enum wmi_bss_filter */
804 u8 bss_filter;
805
806 /* for alignment */
807 u8 reserved1;
808
809 /* for alignment */
810 __le16 reserved2;
811
812 __le32 ie_mask;
813} __packed;
814
815/* WMI_SET_PROBED_SSID_CMDID */
816#define MAX_PROBED_SSID_INDEX 9
817
818enum wmi_ssid_flag {
819 /* disables entry */
820 DISABLE_SSID_FLAG = 0,
821
822 /* probes specified ssid */
823 SPECIFIC_SSID_FLAG = 0x01,
824
825 /* probes for any ssid */
826 ANY_SSID_FLAG = 0x02,
827};
828
829struct wmi_probed_ssid_cmd {
830 /* 0 to MAX_PROBED_SSID_INDEX */
831 u8 entry_index;
832
833 /* see, enum wmi_ssid_flag */
834 u8 flag;
835
836 u8 ssid_len;
837 u8 ssid[IEEE80211_MAX_SSID_LEN];
838} __packed;
839
840/*
841 * WMI_SET_LISTEN_INT_CMDID
842 * The Listen interval is between 15 and 3000 TUs
843 */
844struct wmi_listen_int_cmd {
845 __le16 listen_intvl;
846 __le16 num_beacons;
847} __packed;
848
849/* WMI_SET_POWER_MODE_CMDID */
850enum wmi_power_mode {
851 REC_POWER = 0x01,
852 MAX_PERF_POWER,
853};
854
855struct wmi_power_mode_cmd {
856 /* see, enum wmi_power_mode */
857 u8 pwr_mode;
858} __packed;
859
860/*
861 * Policy to determine whether a power save failure event should be sent
862 * to the host during scanning
863 */
864enum power_save_fail_event_policy {
865 SEND_POWER_SAVE_FAIL_EVENT_ALWAYS = 1,
866 IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN = 2,
867};
868
869struct wmi_power_params_cmd {
870 /* msec */
871 __le16 idle_period;
872
873 __le16 pspoll_number;
874 __le16 dtim_policy;
875 __le16 tx_wakeup_policy;
876 __le16 num_tx_to_wakeup;
877 __le16 ps_fail_event_policy;
878} __packed;
879
880/* WMI_SET_DISC_TIMEOUT_CMDID */
881struct wmi_disc_timeout_cmd {
882 /* seconds */
883 u8 discon_timeout;
884} __packed;
885
886enum dir_type {
887 UPLINK_TRAFFIC = 0,
888 DNLINK_TRAFFIC = 1,
889 BIDIR_TRAFFIC = 2,
890};
891
892enum voiceps_cap_type {
893 DISABLE_FOR_THIS_AC = 0,
894 ENABLE_FOR_THIS_AC = 1,
895 ENABLE_FOR_ALL_AC = 2,
896};
897
898enum traffic_type {
899 TRAFFIC_TYPE_APERIODIC = 0,
900 TRAFFIC_TYPE_PERIODIC = 1,
901};
902
903/* WMI_SYNCHRONIZE_CMDID */
904struct wmi_sync_cmd {
905 u8 data_sync_map;
906} __packed;
907
908/* WMI_CREATE_PSTREAM_CMDID */
909struct wmi_create_pstream_cmd {
910 /* msec */
911 __le32 min_service_int;
912
913 /* msec */
914 __le32 max_service_int;
915
916 /* msec */
917 __le32 inactivity_int;
918
919 /* msec */
920 __le32 suspension_int;
921
922 __le32 service_start_time;
923
924 /* in bps */
925 __le32 min_data_rate;
926
927 /* in bps */
928 __le32 mean_data_rate;
929
930 /* in bps */
931 __le32 peak_data_rate;
932
933 __le32 max_burst_size;
934 __le32 delay_bound;
935
936 /* in bps */
937 __le32 min_phy_rate;
938
939 __le32 sba;
940 __le32 medium_time;
941
942 /* in octets */
943 __le16 nominal_msdu;
944
945 /* in octets */
946 __le16 max_msdu;
947
948 u8 traffic_class;
949
950 /* see, enum dir_type */
951 u8 traffic_direc;
952
953 u8 rx_queue_num;
954
955 /* see, enum traffic_type */
956 u8 traffic_type;
957
958 /* see, enum voiceps_cap_type */
959 u8 voice_psc_cap;
960 u8 tsid;
961
962 /* 802.1D user priority */
963 u8 user_pri;
964
965 /* nominal phy rate */
966 u8 nominal_phy;
967} __packed;
968
969/* WMI_DELETE_PSTREAM_CMDID */
970struct wmi_delete_pstream_cmd {
971 u8 tx_queue_num;
972 u8 rx_queue_num;
973 u8 traffic_direc;
974 u8 traffic_class;
975 u8 tsid;
976} __packed;
977
978/* WMI_SET_CHANNEL_PARAMS_CMDID */
979enum wmi_phy_mode {
980 WMI_11A_MODE = 0x1,
981 WMI_11G_MODE = 0x2,
982 WMI_11AG_MODE = 0x3,
983 WMI_11B_MODE = 0x4,
984 WMI_11GONLY_MODE = 0x5,
985};
986
987#define WMI_MAX_CHANNELS 32
988
989/*
990 * WMI_RSSI_THRESHOLD_PARAMS_CMDID
991 * Setting the polltime to 0 disables polling. Threshold values must be
992 * in ascending order and satisfy:
993 * (lowThreshold_lowerVal < lowThreshold_upperVal < highThreshold_lowerVal
994 * < highThreshold_upperVal)
995 */
996
997struct wmi_rssi_threshold_params_cmd {
998 /* polling time as a factor of LI */
999 __le32 poll_time;
1000
1001 /* lowest of upper */
1002 a_sle16 thresh_above1_val;
1003
1004 a_sle16 thresh_above2_val;
1005 a_sle16 thresh_above3_val;
1006 a_sle16 thresh_above4_val;
1007 a_sle16 thresh_above5_val;
1008
1009 /* highest of upper */
1010 a_sle16 thresh_above6_val;
1011
1012 /* lowest of below */
1013 a_sle16 thresh_below1_val;
1014
1015 a_sle16 thresh_below2_val;
1016 a_sle16 thresh_below3_val;
1017 a_sle16 thresh_below4_val;
1018 a_sle16 thresh_below5_val;
1019
1020 /* highest of below */
1021 a_sle16 thresh_below6_val;
1022
1023 /* "alpha" */
1024 u8 weight;
1025
1026 u8 reserved[3];
1027} __packed;
1028
1029/*
1030 * WMI_SNR_THRESHOLD_PARAMS_CMDID
1031 * Setting the polltime to 0 would disable polling.
1032 */
1033
1034struct wmi_snr_threshold_params_cmd {
1035 /* polling time as a factor of LI */
1036 __le32 poll_time;
1037
1038 /* "alpha" */
1039 u8 weight;
1040
1041 /* lowest of upper */
1042 u8 thresh_above1_val;
1043
1044 u8 thresh_above2_val;
1045 u8 thresh_above3_val;
1046
1047 /* highest of upper */
1048 u8 thresh_above4_val;
1049
1050 /* lowest of below */
1051 u8 thresh_below1_val;
1052
1053 u8 thresh_below2_val;
1054 u8 thresh_below3_val;
1055
1056 /* highest of below */
1057 u8 thresh_below4_val;
1058
1059 u8 reserved[3];
1060} __packed;
1061
1062enum wmi_preamble_policy {
1063 WMI_IGNORE_BARKER_IN_ERP = 0,
1064 WMI_DONOT_IGNORE_BARKER_IN_ERP
1065};
1066
1067struct wmi_set_lpreamble_cmd {
1068 u8 status;
1069 u8 preamble_policy;
1070} __packed;
1071
1072struct wmi_set_rts_cmd {
1073 __le16 threshold;
1074} __packed;
1075
1076/* WMI_SET_TX_PWR_CMDID */
1077struct wmi_set_tx_pwr_cmd {
1078 /* in dBm units */
1079 u8 dbM;
1080} __packed;
1081
1082struct wmi_tx_pwr_reply {
1083 /* in dBm units */
1084 u8 dbM;
1085} __packed;
1086
1087struct wmi_report_sleep_state_event {
1088 __le32 sleep_state;
1089};
1090
1091enum wmi_report_sleep_status {
1092 WMI_REPORT_SLEEP_STATUS_IS_DEEP_SLEEP = 0,
1093 WMI_REPORT_SLEEP_STATUS_IS_AWAKE
1094};
1095enum target_event_report_config {
1096 /* default */
1097 DISCONN_EVT_IN_RECONN = 0,
1098
1099 NO_DISCONN_EVT_IN_RECONN
1100};
1101
1102/* Command Replies */
1103
1104/* WMI_GET_CHANNEL_LIST_CMDID reply */
1105struct wmi_channel_list_reply {
1106 u8 reserved;
1107
1108 /* number of channels in reply */
1109 u8 num_ch;
1110
1111 /* channel in MHz */
1112 __le16 ch_list[1];
1113} __packed;
1114
1115/* List of Events (target to host) */
1116enum wmi_event_id {
1117 WMI_READY_EVENTID = 0x1001,
1118 WMI_CONNECT_EVENTID,
1119 WMI_DISCONNECT_EVENTID,
1120 WMI_BSSINFO_EVENTID,
1121 WMI_CMDERROR_EVENTID,
1122 WMI_REGDOMAIN_EVENTID,
1123 WMI_PSTREAM_TIMEOUT_EVENTID,
1124 WMI_NEIGHBOR_REPORT_EVENTID,
1125 WMI_TKIP_MICERR_EVENTID,
1126 WMI_SCAN_COMPLETE_EVENTID, /* 0x100a */
1127 WMI_REPORT_STATISTICS_EVENTID,
1128 WMI_RSSI_THRESHOLD_EVENTID,
1129 WMI_ERROR_REPORT_EVENTID,
1130 WMI_OPT_RX_FRAME_EVENTID,
1131 WMI_REPORT_ROAM_TBL_EVENTID,
1132 WMI_EXTENSION_EVENTID,
1133 WMI_CAC_EVENTID,
1134 WMI_SNR_THRESHOLD_EVENTID,
1135 WMI_LQ_THRESHOLD_EVENTID,
1136 WMI_TX_RETRY_ERR_EVENTID, /* 0x1014 */
1137 WMI_REPORT_ROAM_DATA_EVENTID,
1138 WMI_TEST_EVENTID,
1139 WMI_APLIST_EVENTID,
1140 WMI_GET_WOW_LIST_EVENTID,
1141 WMI_GET_PMKID_LIST_EVENTID,
1142 WMI_CHANNEL_CHANGE_EVENTID,
1143 WMI_PEER_NODE_EVENTID,
1144 WMI_PSPOLL_EVENTID,
1145 WMI_DTIMEXPIRY_EVENTID,
1146 WMI_WLAN_VERSION_EVENTID,
1147 WMI_SET_PARAMS_REPLY_EVENTID,
1148 WMI_ADDBA_REQ_EVENTID, /*0x1020 */
1149 WMI_ADDBA_RESP_EVENTID,
1150 WMI_DELBA_REQ_EVENTID,
1151 WMI_TX_COMPLETE_EVENTID,
1152 WMI_HCI_EVENT_EVENTID,
1153 WMI_ACL_DATA_EVENTID,
1154 WMI_REPORT_SLEEP_STATE_EVENTID,
1155 WMI_REPORT_BTCOEX_STATS_EVENTID,
1156 WMI_REPORT_BTCOEX_CONFIG_EVENTID,
1157 WMI_GET_PMK_EVENTID,
1158
1159 /* DFS Events */
1160 WMI_DFS_HOST_ATTACH_EVENTID,
1161 WMI_DFS_HOST_INIT_EVENTID,
1162 WMI_DFS_RESET_DELAYLINES_EVENTID,
1163 WMI_DFS_RESET_RADARQ_EVENTID,
1164 WMI_DFS_RESET_AR_EVENTID,
1165 WMI_DFS_RESET_ARQ_EVENTID,
1166 WMI_DFS_SET_DUR_MULTIPLIER_EVENTID,
1167 WMI_DFS_SET_BANGRADAR_EVENTID,
1168 WMI_DFS_SET_DEBUGLEVEL_EVENTID,
1169 WMI_DFS_PHYERR_EVENTID,
1170
1171 /* CCX events */
1172 WMI_CCX_RM_STATUS_EVENTID,
1173
1174 /* P2P Events */
1175 WMI_P2P_GO_NEG_RESULT_EVENTID,
1176
1177 WMI_WAC_SCAN_DONE_EVENTID,
1178 WMI_WAC_REPORT_BSS_EVENTID,
1179 WMI_WAC_START_WPS_EVENTID,
1180 WMI_WAC_CTRL_REQ_REPLY_EVENTID,
1181
1182 /* RFKILL Events */
1183 WMI_RFKILL_STATE_CHANGE_EVENTID,
1184 WMI_RFKILL_GET_MODE_CMD_EVENTID,
1185 WMI_THIN_RESERVED_START_EVENTID = 0x8000,
1186
1187 /*
1188 * Events in this range are reserved for thinmode
1189 * See wmi_thin.h for actual definitions
1190 */
1191 WMI_THIN_RESERVED_END_EVENTID = 0x8fff,
1192
1193 WMI_SET_CHANNEL_EVENTID,
1194 WMI_ASSOC_REQ_EVENTID,
1195
1196 /* Generic ACS event */
1197 WMI_ACS_EVENTID,
1198 WMI_REPORT_WMM_PARAMS_EVENTID
1199};
1200
1201struct wmi_ready_event_2 {
1202 __le32 sw_version;
1203 __le32 abi_version;
1204 u8 mac_addr[ETH_ALEN];
1205 u8 phy_cap;
1206} __packed;
1207
1208/* Connect Event */
1209struct wmi_connect_event {
1210 __le16 ch;
1211 u8 bssid[ETH_ALEN];
1212 __le16 listen_intvl;
1213 __le16 beacon_intvl;
1214 __le32 nw_type;
1215 u8 beacon_ie_len;
1216 u8 assoc_req_len;
1217 u8 assoc_resp_len;
1218 u8 assoc_info[1];
1219} __packed;
1220
1221/* Disconnect Event */
1222enum wmi_disconnect_reason {
1223 NO_NETWORK_AVAIL = 0x01,
1224
1225 /* bmiss */
1226 LOST_LINK = 0x02,
1227
1228 DISCONNECT_CMD = 0x03,
1229 BSS_DISCONNECTED = 0x04,
1230 AUTH_FAILED = 0x05,
1231 ASSOC_FAILED = 0x06,
1232 NO_RESOURCES_AVAIL = 0x07,
1233 CSERV_DISCONNECT = 0x08,
1234 INVALID_PROFILE = 0x0a,
1235 DOT11H_CHANNEL_SWITCH = 0x0b,
1236 PROFILE_MISMATCH = 0x0c,
1237 CONNECTION_EVICTED = 0x0d,
1238 IBSS_MERGE = 0xe,
1239};
1240
1241struct wmi_disconnect_event {
1242 /* reason code, see 802.11 spec. */
1243 __le16 proto_reason_status;
1244
1245 /* set if known */
1246 u8 bssid[ETH_ALEN];
1247
1248 /* see WMI_DISCONNECT_REASON */
1249 u8 disconn_reason;
1250
1251 u8 assoc_resp_len;
1252 u8 assoc_info[1];
1253} __packed;
1254
1255/*
1256 * BSS Info Event.
1257 * Mechanism used to inform the host of the presence and characteristics
1258 * of wireless networks. Consists of a bss info header followed by
1259 * the beacon or probe-response frame body. The 802.11 header is not included.
1260 */
1261enum wmi_bi_ftype {
1262 BEACON_FTYPE = 0x1,
1263 PROBERESP_FTYPE,
1264 ACTION_MGMT_FTYPE,
1265 PROBEREQ_FTYPE,
1266};
1267
1268struct wmi_bss_info_hdr {
1269 __le16 ch;
1270
1271 /* see, enum wmi_bi_ftype */
1272 u8 frame_type;
1273
1274 u8 snr;
1275 a_sle16 rssi;
1276 u8 bssid[ETH_ALEN];
1277 __le32 ie_mask;
1278} __packed;
1279
1280/*
1281 * BSS INFO HDR version 2.0
1282 * With the 6-byte HTC header and the 6-byte WMI header,
1283 * WMI_BSS_INFO_HDR cannot be accommodated in the space freed by removing
1284 * the 802.11 management header. Hence:
1285 * - Reduce the ie_mask to 2 bytes as only two bit flags are used
1286 * - Remove rssi and compute it on the host. rssi = snr - 95
1287 */
1288struct wmi_bss_info_hdr2 {
1289 __le16 ch;
1290
1291 /* see, enum wmi_bi_ftype */
1292 u8 frame_type;
1293
1294 u8 snr;
1295 u8 bssid[ETH_ALEN];
1296 __le16 ie_mask;
1297} __packed;
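
/*
 * Illustrative sketch (hypothetical helper, not used by the driver):
 * recovering the rssi that the v2 header drops, using the rule quoted
 * above (rssi = snr - 95).
 */
static inline s16 wmi_bss_info_hdr2_rssi_sketch(const struct wmi_bss_info_hdr2 *hdr)
{
	return (s16) hdr->snr - 95;
}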
1298
1299/* Command Error Event */
1300enum wmi_error_code {
1301 INVALID_PARAM = 0x01,
1302 ILLEGAL_STATE = 0x02,
1303 INTERNAL_ERROR = 0x03,
1304};
1305
1306struct wmi_cmd_error_event {
1307 __le16 cmd_id;
1308 u8 err_code;
1309} __packed;
1310
1311struct wmi_pstream_timeout_event {
1312 u8 tx_queue_num;
1313 u8 rx_queue_num;
1314 u8 traffic_direc;
1315 u8 traffic_class;
1316} __packed;
1317
1318/*
1319 * The WMI_NEIGHBOR_REPORT Event is generated by the target to inform
1320 * the host of BSSs it has found that match the current profile.
1321 * It can be used by the host to cache PMKs and/or to initiate
1322 * pre-authentication if the BSS supports it. The first bssid is always
1323 * the currently associated BSS.
1324 * The bssid and bssFlags information repeats according to the number
1325 * of APs reported.
1326 */
1327enum wmi_bss_flags {
1328 WMI_DEFAULT_BSS_FLAGS = 0x00,
1329 WMI_PREAUTH_CAPABLE_BSS = 0x01,
1330 WMI_PMKID_VALID_BSS = 0x02,
1331};
1332
1333/* TKIP MIC Error Event */
1334struct wmi_tkip_micerr_event {
1335 u8 key_id;
1336 u8 is_mcast;
1337} __packed;
1338
1339/* WMI_SCAN_COMPLETE_EVENTID */
1340struct wmi_scan_complete_event {
1341 a_sle32 status;
1342} __packed;
1343
1344#define MAX_OPT_DATA_LEN 1400
1345
1346/*
1347 * Special frame receive Event.
1348 * Mechanism used to inform the host of the reception of special frames.
1349 * Consists of special frame info header followed by special frame body.
1350 * The 802.11 header is not included.
1351 */
1352struct wmi_opt_rx_info_hdr {
1353 __le16 ch;
1354 u8 frame_type;
1355 s8 snr;
1356 u8 src_addr[ETH_ALEN];
1357 u8 bssid[ETH_ALEN];
1358} __packed;
1359
1360/* Reporting statistic */
1361struct tx_stats {
1362 __le32 pkt;
1363 __le32 byte;
1364 __le32 ucast_pkt;
1365 __le32 ucast_byte;
1366 __le32 mcast_pkt;
1367 __le32 mcast_byte;
1368 __le32 bcast_pkt;
1369 __le32 bcast_byte;
1370 __le32 rts_success_cnt;
1371 __le32 pkt_per_ac[4];
1372 __le32 err_per_ac[4];
1373
1374 __le32 err;
1375 __le32 fail_cnt;
1376 __le32 retry_cnt;
1377 __le32 mult_retry_cnt;
1378 __le32 rts_fail_cnt;
1379 a_sle32 ucast_rate;
1380} __packed;
1381
1382struct rx_stats {
1383 __le32 pkt;
1384 __le32 byte;
1385 __le32 ucast_pkt;
1386 __le32 ucast_byte;
1387 __le32 mcast_pkt;
1388 __le32 mcast_byte;
1389 __le32 bcast_pkt;
1390 __le32 bcast_byte;
1391 __le32 frgment_pkt;
1392
1393 __le32 err;
1394 __le32 crc_err;
1395 __le32 key_cache_miss;
1396 __le32 decrypt_err;
1397 __le32 dupl_frame;
1398 a_sle32 ucast_rate;
1399} __packed;
1400
1401struct tkip_ccmp_stats {
1402 __le32 tkip_local_mic_fail;
1403 __le32 tkip_cnter_measures_invoked;
1404 __le32 tkip_replays;
1405 __le32 tkip_fmt_err;
1406 __le32 ccmp_fmt_err;
1407 __le32 ccmp_replays;
1408} __packed;
1409
1410struct pm_stats {
1411 __le32 pwr_save_failure_cnt;
1412 __le16 stop_tx_failure_cnt;
1413 __le16 atim_tx_failure_cnt;
1414 __le16 atim_rx_failure_cnt;
1415 __le16 bcn_rx_failure_cnt;
1416} __packed;
1417
1418struct cserv_stats {
1419 __le32 cs_bmiss_cnt;
1420 __le32 cs_low_rssi_cnt;
1421 __le16 cs_connect_cnt;
1422 __le16 cs_discon_cnt;
1423 a_sle16 cs_ave_beacon_rssi;
1424 __le16 cs_roam_count;
1425 a_sle16 cs_rssi;
1426 u8 cs_snr;
1427 u8 cs_ave_beacon_snr;
1428 u8 cs_last_roam_msec;
1429} __packed;
1430
1431struct wlan_net_stats {
1432 struct tx_stats tx;
1433 struct rx_stats rx;
1434 struct tkip_ccmp_stats tkip_ccmp_stats;
1435} __packed;
1436
1437struct arp_stats {
1438 __le32 arp_received;
1439 __le32 arp_matched;
1440 __le32 arp_replied;
1441} __packed;
1442
1443struct wlan_wow_stats {
1444 __le32 wow_pkt_dropped;
1445 __le16 wow_evt_discarded;
1446 u8 wow_host_pkt_wakeups;
1447 u8 wow_host_evt_wakeups;
1448} __packed;
1449
1450struct wmi_target_stats {
1451 __le32 lq_val;
1452 a_sle32 noise_floor_calib;
1453 struct pm_stats pm_stats;
1454 struct wlan_net_stats stats;
1455 struct wlan_wow_stats wow_stats;
1456 struct arp_stats arp_stats;
1457 struct cserv_stats cserv_stats;
1458} __packed;
1459
1460/*
1461 * WMI_RSSI_THRESHOLD_EVENTID.
1462 * Indicates RSSI events to the host. An event is indicated whenever a
1463 * threshold value is breached.
1464 */
1465enum wmi_rssi_threshold_val {
1466 WMI_RSSI_THRESHOLD1_ABOVE = 0,
1467 WMI_RSSI_THRESHOLD2_ABOVE,
1468 WMI_RSSI_THRESHOLD3_ABOVE,
1469 WMI_RSSI_THRESHOLD4_ABOVE,
1470 WMI_RSSI_THRESHOLD5_ABOVE,
1471 WMI_RSSI_THRESHOLD6_ABOVE,
1472 WMI_RSSI_THRESHOLD1_BELOW,
1473 WMI_RSSI_THRESHOLD2_BELOW,
1474 WMI_RSSI_THRESHOLD3_BELOW,
1475 WMI_RSSI_THRESHOLD4_BELOW,
1476 WMI_RSSI_THRESHOLD5_BELOW,
1477 WMI_RSSI_THRESHOLD6_BELOW
1478};
1479
1480struct wmi_rssi_threshold_event {
1481 a_sle16 rssi;
1482 u8 range;
1483} __packed;
1484
1485enum wmi_snr_threshold_val {
1486 WMI_SNR_THRESHOLD1_ABOVE = 1,
1487 WMI_SNR_THRESHOLD1_BELOW,
1488 WMI_SNR_THRESHOLD2_ABOVE,
1489 WMI_SNR_THRESHOLD2_BELOW,
1490 WMI_SNR_THRESHOLD3_ABOVE,
1491 WMI_SNR_THRESHOLD3_BELOW,
1492 WMI_SNR_THRESHOLD4_ABOVE,
1493 WMI_SNR_THRESHOLD4_BELOW
1494};
1495
1496struct wmi_snr_threshold_event {
1497 /* see, enum wmi_snr_threshold_val */
1498 u8 range;
1499
1500 u8 snr;
1501} __packed;
1502
1503/* WMI_REPORT_ROAM_TBL_EVENTID */
1504#define MAX_ROAM_TBL_CAND 5
1505
1506struct wmi_bss_roam_info {
1507 a_sle32 roam_util;
1508 u8 bssid[ETH_ALEN];
1509 s8 rssi;
1510 s8 rssidt;
1511 s8 last_rssi;
1512 s8 util;
1513 s8 bias;
1514
1515 /* for alignment */
1516 u8 reserved;
1517} __packed;
1518
1519/* WMI_CAC_EVENTID */
1520enum cac_indication {
1521 CAC_INDICATION_ADMISSION = 0x00,
1522 CAC_INDICATION_ADMISSION_RESP = 0x01,
1523 CAC_INDICATION_DELETE = 0x02,
1524 CAC_INDICATION_NO_RESP = 0x03,
1525};
1526
1527#define WMM_TSPEC_IE_LEN 63
1528
1529struct wmi_cac_event {
1530 u8 ac;
1531 u8 cac_indication;
1532 u8 status_code;
1533 u8 tspec_suggestion[WMM_TSPEC_IE_LEN];
1534} __packed;
1535
1536/* WMI_APLIST_EVENTID */
1537
1538enum aplist_ver {
1539 APLIST_VER1 = 1,
1540};
1541
1542struct wmi_ap_info_v1 {
1543 u8 bssid[ETH_ALEN];
1544 __le16 channel;
1545} __packed;
1546
1547union wmi_ap_info {
1548 struct wmi_ap_info_v1 ap_info_v1;
1549} __packed;
1550
1551struct wmi_aplist_event {
1552 u8 ap_list_ver;
1553 u8 num_ap;
1554 union wmi_ap_info ap_list[1];
1555} __packed;
1556
1557/* Developer Commands */
1558
1559/*
1560 * WMI_SET_BITRATE_CMDID
1561 *
1562 * Get bit rate cmd uses same definition as set bit rate cmd
1563 */
1564enum wmi_bit_rate {
1565 RATE_AUTO = -1,
1566 RATE_1Mb = 0,
1567 RATE_2Mb = 1,
1568 RATE_5_5Mb = 2,
1569 RATE_11Mb = 3,
1570 RATE_6Mb = 4,
1571 RATE_9Mb = 5,
1572 RATE_12Mb = 6,
1573 RATE_18Mb = 7,
1574 RATE_24Mb = 8,
1575 RATE_36Mb = 9,
1576 RATE_48Mb = 10,
1577 RATE_54Mb = 11,
1578 RATE_MCS_0_20 = 12,
1579 RATE_MCS_1_20 = 13,
1580 RATE_MCS_2_20 = 14,
1581 RATE_MCS_3_20 = 15,
1582 RATE_MCS_4_20 = 16,
1583 RATE_MCS_5_20 = 17,
1584 RATE_MCS_6_20 = 18,
1585 RATE_MCS_7_20 = 19,
1586 RATE_MCS_0_40 = 20,
1587 RATE_MCS_1_40 = 21,
1588 RATE_MCS_2_40 = 22,
1589 RATE_MCS_3_40 = 23,
1590 RATE_MCS_4_40 = 24,
1591 RATE_MCS_5_40 = 25,
1592 RATE_MCS_6_40 = 26,
1593 RATE_MCS_7_40 = 27,
1594};
1595
1596struct wmi_bit_rate_reply {
1597 /* see, enum wmi_bit_rate */
1598 s8 rate_index;
1599} __packed;
1600
1601/*
1602 * WMI_SET_FIXRATES_CMDID
1603 *
1604 * Get fix rates cmd uses same definition as set fix rates cmd
1605 */
1606struct wmi_fix_rates_reply {
1607 /* see wmi_bit_rate */
1608 __le32 fix_rate_mask;
1609} __packed;
1610
1611enum roam_data_type {
1612 /* get the roam time data */
1613 ROAM_DATA_TIME = 1,
1614};
1615
1616struct wmi_target_roam_time {
1617 __le32 disassoc_time;
1618 __le32 no_txrx_time;
1619 __le32 assoc_time;
1620 __le32 allow_txrx_time;
1621 u8 disassoc_bssid[ETH_ALEN];
1622 s8 disassoc_bss_rssi;
1623 u8 assoc_bssid[ETH_ALEN];
1624 s8 assoc_bss_rssi;
1625} __packed;
1626
1627enum wmi_txop_cfg {
1628 WMI_TXOP_DISABLED = 0,
1629 WMI_TXOP_ENABLED
1630};
1631
1632struct wmi_set_wmm_txop_cmd {
1633 u8 txop_enable;
1634} __packed;
1635
1636struct wmi_set_keepalive_cmd {
1637 u8 keep_alive_intvl;
1638} __packed;
1639
1640struct wmi_get_keepalive_cmd {
1641 __le32 configured;
1642 u8 keep_alive_intvl;
1643} __packed;
1644
1645/* Notify the WSC registration status to the target */
1646#define WSC_REG_ACTIVE 1
1647#define WSC_REG_INACTIVE 0
1648
1649#define WOW_MAX_FILTER_LISTS 1
1650#define WOW_MAX_FILTERS_PER_LIST 4
1651#define WOW_PATTERN_SIZE 64
1652#define WOW_MASK_SIZE 64
1653
1654#define MAC_MAX_FILTERS_PER_LIST 4
1655
1656struct wow_filter {
1657 u8 wow_valid_filter;
1658 u8 wow_filter_id;
1659 u8 wow_filter_size;
1660 u8 wow_filter_offset;
1661 u8 wow_filter_mask[WOW_MASK_SIZE];
1662 u8 wow_filter_pattern[WOW_PATTERN_SIZE];
1663} __packed;
1664
1665#define MAX_IP_ADDRS 2
1666
1667struct wmi_set_ip_cmd {
1668 /* IP in network byte order */
1669 __le32 ips[MAX_IP_ADDRS];
1670} __packed;
1671
1672/* WMI_GET_WOW_LIST_CMD reply */
1673struct wmi_get_wow_list_reply {
1674 /* number of patterns in reply */
1675 u8 num_filters;
1676
1677 /* this is filter # x of total num_filters */
1678 u8 this_filter_num;
1679
1680 u8 wow_mode;
1681 u8 host_mode;
1682 struct wow_filter wow_filters[1];
1683} __packed;
1684
1685/* WMI_SET_AKMP_PARAMS_CMD */
1686
1687struct wmi_pmkid {
1688 u8 pmkid[WMI_PMKID_LEN];
1689} __packed;
1690
1691/* WMI_GET_PMKID_LIST_CMD Reply */
1692struct wmi_pmkid_list_reply {
1693 __le32 num_pmkid;
1694 u8 bssid_list[ETH_ALEN][1];
1695 struct wmi_pmkid pmkid_list[1];
1696} __packed;
1697
1698/* WMI_ADDBA_REQ_EVENTID */
1699struct wmi_addba_req_event {
1700 u8 tid;
1701 u8 win_sz;
1702 __le16 st_seq_no;
1703
1704 /* f/w response for ADDBA Req; OK (0) or failure (!=0) */
1705 u8 status;
1706} __packed;
1707
1708/* WMI_ADDBA_RESP_EVENTID */
1709struct wmi_addba_resp_event {
1710 u8 tid;
1711
1712 /* OK (0), failure (!=0) */
1713 u8 status;
1714
1715 /* three values: not supported(0), 3839, 8k */
1716 __le16 amsdu_sz;
1717} __packed;
1718
1719/* WMI_DELBA_EVENTID
1720 * f/w received a DELBA from the peer and processed it.
1721 * The host is notified of this.
1722 */
1723struct wmi_delba_event {
1724 u8 tid;
1725 u8 is_peer_initiator;
1726 __le16 reason_code;
1727} __packed;
1728
1729#define PEER_NODE_JOIN_EVENT 0x00
1730#define PEER_NODE_LEAVE_EVENT 0x01
1731#define PEER_FIRST_NODE_JOIN_EVENT 0x10
1732#define PEER_LAST_NODE_LEAVE_EVENT 0x11
1733
1734struct wmi_peer_node_event {
1735 u8 event_code;
1736 u8 peer_mac_addr[ETH_ALEN];
1737} __packed;
1738
1739/* Transmit complete event data structure(s) */
1740
1741/* version 1 of tx complete msg */
1742struct tx_complete_msg_v1 {
1743#define TX_COMPLETE_STATUS_SUCCESS 0
1744#define TX_COMPLETE_STATUS_RETRIES 1
1745#define TX_COMPLETE_STATUS_NOLINK 2
1746#define TX_COMPLETE_STATUS_TIMEOUT 3
1747#define TX_COMPLETE_STATUS_OTHER 4
1748
1749 u8 status;
1750
1751 /* packet ID to identify parent packet */
1752 u8 pkt_id;
1753
1754 /* rate index on successful transmission */
1755 u8 rate_idx;
1756
1757 /* number of ACK failures in tx attempt */
1758 u8 ack_failures;
1759} __packed;
1760
1761struct wmi_tx_complete_event {
1762 /* no of tx comp msgs following this struct */
1763 u8 num_msg;
1764
1765 /* length in bytes for each individual msg following this struct */
1766 u8 msg_len;
1767
1768 /* version of tx complete msg data following this struct */
1769 u8 msg_type;
1770
1771 /* individual messages follow this header */
1772 u8 reserved;
1773} __packed;
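
/*
 * Illustrative sketch (hypothetical helper, not used by the driver):
 * walking the per-packet completion records that follow the header,
 * as described by the num_msg/msg_len/msg_type fields above.
 */
static inline void wmi_tx_complete_walk_sketch(u8 *datap)
{
	struct wmi_tx_complete_event *ev = (struct wmi_tx_complete_event *) datap;
	u8 *msg = datap + sizeof(*ev);
	int i;

	for (i = 0; i < ev->num_msg; i++, msg += ev->msg_len) {
		if (ev->msg_type == 1) {	/* assumption: 1 selects v1 */
			struct tx_complete_msg_v1 *v1 =
				(struct tx_complete_msg_v1 *) msg;

			(void) v1; /* inspect v1->status, v1->pkt_id, ... */
		}
	}
}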
1774
1775/*
1776 * ------- AP Mode definitions --------------
1777 */
1778
1779/*
1780 * !!! Warning !!!
1781 * Changing the following values requires recompiling both the driver and the firmware.
1782 */
1783#define AP_MAX_NUM_STA 8
1784
1785/* Special AID used to set the DTIM flag in beacons */
1786#define MCAST_AID 0xFF
1787
1788#define DEF_AP_COUNTRY_CODE "US "
1789
1790/* Used with WMI_AP_SET_PVB_CMDID */
1791
1792struct wmi_ap_set_pvb_cmd {
1793 __le32 flag;
1794 __le16 aid;
1795} __packed;
1796
1797struct wmi_rx_frame_format_cmd {
1798 /* version of meta data for rx packets <0 = default> (0-7 = valid) */
1799 u8 meta_ver;
1800
1801 /*
1802 * 1 == leave .11 header intact,
1803 * 0 == replace .11 header with .3 <default>
1804 */
1805 u8 dot11_hdr;
1806
1807 /*
1808 * 1 == defragmentation is performed by host,
1809 * 0 == performed by target <default>
1810 */
1811 u8 defrag_on_host;
1812
1813 /* for alignment */
1814 u8 reserved[1];
1815} __packed;
1816
1817/* AP mode events */
1818
1819/* WMI_PS_POLL_EVENT */
1820struct wmi_pspoll_event {
1821 __le16 aid;
1822} __packed;
1823
1824struct wmi_per_sta_stat {
1825 __le32 tx_bytes;
1826 __le32 tx_pkts;
1827 __le32 tx_error;
1828 __le32 tx_discard;
1829 __le32 rx_bytes;
1830 __le32 rx_pkts;
1831 __le32 rx_error;
1832 __le32 rx_discard;
1833 __le32 aid;
1834} __packed;
1835
1836struct wmi_ap_mode_stat {
1837 __le32 action;
1838 struct wmi_per_sta_stat sta[AP_MAX_NUM_STA + 1];
1839} __packed;
1840
1841/* End of AP mode definitions */
1842
1843/* Extended WMI (WMIX)
1844 *
1845 * Extended WMIX commands are encapsulated in a WMI message with
1846 * cmd=WMI_EXTENSION_CMDID.
1847 *
1848 * Extended WMI commands are those that are needed during wireless
1849 * operation, but which are not really wireless commands. This allows,
1850 * for instance, platform-specific commands. Extended WMI commands are
1851 * embedded in a WMI command message with WMI_COMMAND_ID=WMI_EXTENSION_CMDID.
1852 * Extended WMI events are similarly embedded in a WMI event message with
1853 * WMI_EVENT_ID=WMI_EXTENSION_EVENTID.
1854 */
1855struct wmix_cmd_hdr {
1856 __le32 cmd_id;
1857} __packed;
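
/*
 * Illustrative sketch (hypothetical helper, not the driver's own code):
 * a WMIX command is an ordinary WMI command whose payload begins with
 * struct wmix_cmd_hdr and is sent under WMI_EXTENSION_CMDID, as
 * described above.
 */
static inline void wmix_cmd_hdr_push_sketch(struct sk_buff *skb, u32 wmix_cmd_id)
{
	struct wmix_cmd_hdr *hdr;

	hdr = (struct wmix_cmd_hdr *) skb_push(skb, sizeof(*hdr));
	hdr->cmd_id = cpu_to_le32(wmix_cmd_id);
	/* the skb then goes out as a normal WMI_EXTENSION_CMDID command */
}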
1858
1859enum wmix_command_id {
1860 WMIX_DSETOPEN_REPLY_CMDID = 0x2001,
1861 WMIX_DSETDATA_REPLY_CMDID,
1862 WMIX_GPIO_OUTPUT_SET_CMDID,
1863 WMIX_GPIO_INPUT_GET_CMDID,
1864 WMIX_GPIO_REGISTER_SET_CMDID,
1865 WMIX_GPIO_REGISTER_GET_CMDID,
1866 WMIX_GPIO_INTR_ACK_CMDID,
1867 WMIX_HB_CHALLENGE_RESP_CMDID,
1868 WMIX_DBGLOG_CFG_MODULE_CMDID,
1869 WMIX_PROF_CFG_CMDID, /* 0x200a */
1870 WMIX_PROF_ADDR_SET_CMDID,
1871 WMIX_PROF_START_CMDID,
1872 WMIX_PROF_STOP_CMDID,
1873 WMIX_PROF_COUNT_GET_CMDID,
1874};
1875
1876enum wmix_event_id {
1877 WMIX_DSETOPENREQ_EVENTID = 0x3001,
1878 WMIX_DSETCLOSE_EVENTID,
1879 WMIX_DSETDATAREQ_EVENTID,
1880 WMIX_GPIO_INTR_EVENTID,
1881 WMIX_GPIO_DATA_EVENTID,
1882 WMIX_GPIO_ACK_EVENTID,
1883 WMIX_HB_CHALLENGE_RESP_EVENTID,
1884 WMIX_DBGLOG_EVENTID,
1885 WMIX_PROF_COUNT_EVENTID,
1886};
1887
1888/*
1889 * ------Error Detection support-------
1890 */
1891
1892/*
1893 * WMIX_HB_CHALLENGE_RESP_CMDID
1894 * Heartbeat Challenge Response command
1895 */
1896struct wmix_hb_challenge_resp_cmd {
1897 __le32 cookie;
1898 __le32 source;
1899} __packed;
1900
1901/* End of Extended WMI (WMIX) */
1902
1903enum wmi_sync_flag {
1904 NO_SYNC_WMIFLAG = 0,
1905
1906 /* transmit all queued data before cmd */
1907 SYNC_BEFORE_WMIFLAG,
1908
1909 /* any new data waits until cmd execs */
1910 SYNC_AFTER_WMIFLAG,
1911
1912 SYNC_BOTH_WMIFLAG,
1913
1914 /* end marker */
1915 END_WMIFLAG
1916};
1917
1918enum htc_endpoint_id ath6kl_wmi_get_control_ep(struct wmi *wmi);
1919void ath6kl_wmi_set_control_ep(struct wmi *wmi, enum htc_endpoint_id ep_id);
1920int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb);
1921int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
1922 u8 msg_type, bool more_data,
1923 enum wmi_data_hdr_data_type data_type,
1924 u8 meta_ver, void *tx_meta_info);
1925
1926int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb);
1927int ath6kl_wmi_dot3_2_dix(struct sk_buff *skb);
1928int ath6kl_wmi_data_hdr_remove(struct wmi *wmi, struct sk_buff *skb);
1929int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb,
1930 u32 layer2_priority, bool wmm_enabled,
1931 u8 *ac);
1932
1933int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb);
1934struct bss *ath6kl_wmi_find_node(struct wmi *wmi, const u8 *mac_addr);
1935void ath6kl_wmi_node_free(struct wmi *wmi, const u8 *mac_addr);
1936
1937int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb,
1938 enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag);
1939
1940int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type,
1941 enum dot11_auth_mode dot11_auth_mode,
1942 enum auth_mode auth_mode,
1943 enum crypto_type pairwise_crypto,
1944 u8 pairwise_crypto_len,
1945 enum crypto_type group_crypto,
1946 u8 group_crypto_len, int ssid_len, u8 *ssid,
1947 u8 *bssid, u16 channel, u32 ctrl_flags);
1948
1949int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel);
1950int ath6kl_wmi_disconnect_cmd(struct wmi *wmi);
1951int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type,
1952 u32 force_fgscan, u32 is_legacy,
1953 u32 home_dwell_time, u32 force_scan_interval,
1954 s8 num_chan, u16 *ch_list);
1955int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec,
1956 u16 fg_end_sec, u16 bg_sec,
1957 u16 minact_chdw_msec, u16 maxact_chdw_msec,
1958 u16 pas_chdw_msec, u8 short_scan_ratio,
1959 u8 scan_ctrl_flag, u32 max_dfsch_act_time,
1960 u16 maxact_scan_per_ssid);
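
/*
 * Illustrative usage sketch (hypothetical helper with made-up dwell
 * times, not from this patch): programming scan behaviour with the
 * default control flags defined earlier in this header.
 */
static inline int wmi_scanparams_defaults_sketch(struct wmi *wmi)
{
	return ath6kl_wmi_scanparams_cmd(wmi, 0, 0, 60, 0, 0, 105,
					 WMI_SHORTSCANRATIO_DEFAULT,
					 DEFAULT_SCAN_CTRL_FLAGS, 0, 0);
}
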
1961int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask);
1962int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag,
1963 u8 ssid_len, u8 *ssid);
1964int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval,
1965 u16 listen_beacons);
1966int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode);
1967int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period,
1968 u16 ps_poll_num, u16 dtim_policy,
1969 u16 tx_wakup_policy, u16 num_tx_to_wakeup,
1970 u16 ps_fail_event_policy);
1971int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout);
1972int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi,
1973 struct wmi_create_pstream_cmd *pstream);
1974int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid);
1975
1976int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold);
1977int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status,
1978 u8 preamble_policy);
1979
1980int ath6kl_wmi_get_challenge_resp_cmd(struct wmi *wmi, u32 cookie, u32 source);
1981
1982int ath6kl_wmi_get_stats_cmd(struct wmi *wmi);
1983int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index,
1984 enum crypto_type key_type,
1985 u8 key_usage, u8 key_len,
1986 u8 *key_rsc, u8 *key_material,
1987 u8 key_op_ctrl, u8 *mac_addr,
1988 enum wmi_sync_flag sync_flag);
1989int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk);
1990int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index);
1991int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid,
1992 const u8 *pmkid, bool set);
1993int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM);
1994int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi);
1995
1996int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg);
1997int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl);
1998
1999s32 ath6kl_wmi_get_rate(s8 rate_index);
2000
2001int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd);
2002
2003struct bss *ath6kl_wmi_find_ssid_node(struct wmi *wmi, u8 *ssid,
2004 u32 ssid_len, bool is_wpa2,
2005 bool match_ssid);
2006
2007void ath6kl_wmi_node_return(struct wmi *wmi, struct bss *bss);
2008
2009/* AP mode */
2010int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag);
2011
2012int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_version,
2013 bool rx_dot11_hdr, bool defrag_on_host);
2014
2015void *ath6kl_wmi_init(struct ath6kl *devt);
2016void ath6kl_wmi_shutdown(struct wmi *wmi);
2017
2018#endif /* WMI_H */