aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
authorKalle Valo <kvalo@qca.qualcomm.com>2011-07-17 17:22:30 -0400
committerKalle Valo <kvalo@qca.qualcomm.com>2011-08-09 12:45:18 -0400
commitbdcd81707973cf8aa9305337166f8ee842a050d4 (patch)
treedf4ebda7e48afcd1e68d93d3d81a500e9c8c52a6 /drivers/net
parentf749b94679c71a9c74ad9509dbbf00d8f3d620ad (diff)
Add ath6kl cleaned up driver
Last May we started working on cleaning up ath6kl driver which is currently in staging. The work has happened in a separate ath6kl-cleanup tree: http://git.kernel.org/?p=linux/kernel/git/kvalo/ath6kl-cleanup.git;a=summary After over 1100 (!) patches we have now reached a state where I would like to start discussing about pushing the driver to the wireless trees and replacing the staging driver. The driver is now a lot smaller and looks like a proper Linux driver. The size of the driver (measured with simple wc -l) dropped from 49 kLOC to 18 kLOC and the number of the .c and .h files dropped from 107 to 22. Most importantly the number of subdirectories reduced from 26 to zero :) There are two remaining checkpatch warnings in the driver which we decided to omit for now: drivers/net/wireless/ath/ath6kl/debug.c:31: WARNING: printk() should include KERN_ facility level drivers/net/wireless/ath/ath6kl/sdio.c:527: WARNING: msleep < 20ms can sleep for up to 20ms; see Documentation/timers/timers-howto.txt The driver has endian annotations for all the hardware specific structures and there are no sparse errors. Unfortunately I don't have any big endian hardware to test that right now. We have been testing the driver both on x86 and arm platforms. The code is also compiled with sparc and parisc cross compilers. Notable missing features compared to the current staging driver are: o HCI over SDIO support o nl80211 testmode o firmware logging o suspend support Testmode, firmware logging and suspend support will be added soon. HCI over SDIO support will be more difficult as the HCI driver needs to share code with the wifi driver. This is something we need to research more. Also I want to point out the changes I did for signed endian support. As I wasn't able to find any support for signed endian annotations I decided to follow what NTFS has done and added my own. Grep for sle16 and sle32, especially from wmi.h. 
Various people have been working on the cleanup, the hall of fame based on number of patches is: 543 Vasanthakumar Thiagarajan 403 Raja Mani 252 Kalle Valo 16 Vivek Natarajan 12 Suraj Sumangala 3 Joe Perches 2 Jouni Malinen Signed-off-by: Vasanthakumar Thiagarajan <vthiagar@qca.qualcomm.com> Signed-off-by: Raja Mani <rmani@qca.qualcomm.com> Signed-off-by: Vivek Natarajan <nataraja@qca.qualcomm.com> Signed-off-by: Suraj Sumangala <surajs@qca.qualcomm.com> Signed-off-by: Joe Perches <joe@perches.com> Signed-off-by: Jouni Malinen <jouni@qca.qualcomm.com> Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/wireless/ath/Kconfig1
-rw-r--r--drivers/net/wireless/ath/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath6kl/Kconfig17
-rw-r--r--drivers/net/wireless/ath/ath6kl/Makefile35
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.c692
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.h250
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c1538
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.h39
-rw-r--r--drivers/net/wireless/ath/ath6kl/common.h183
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h546
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c150
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.h104
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif-ops.h67
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.h216
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc.c2466
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc.h596
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_hif.c811
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_hif.h113
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c1293
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c1337
-rw-r--r--drivers/net/wireless/ath/ath6kl/node.c238
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c853
-rw-r--r--drivers/net/wireless/ath/ath6kl/target.h331
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c1452
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c2762
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h2024
26 files changed, 18115 insertions, 0 deletions
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index d1b23067619f..073548836413 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -25,5 +25,6 @@ config ATH_DEBUG
25source "drivers/net/wireless/ath/ath5k/Kconfig" 25source "drivers/net/wireless/ath/ath5k/Kconfig"
26source "drivers/net/wireless/ath/ath9k/Kconfig" 26source "drivers/net/wireless/ath/ath9k/Kconfig"
27source "drivers/net/wireless/ath/carl9170/Kconfig" 27source "drivers/net/wireless/ath/carl9170/Kconfig"
28source "drivers/net/wireless/ath/ath6kl/Kconfig"
28 29
29endif 30endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index 0e8f528c81c0..d1214696a35b 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -1,6 +1,7 @@
1obj-$(CONFIG_ATH5K) += ath5k/ 1obj-$(CONFIG_ATH5K) += ath5k/
2obj-$(CONFIG_ATH9K_HW) += ath9k/ 2obj-$(CONFIG_ATH9K_HW) += ath9k/
3obj-$(CONFIG_CARL9170) += carl9170/ 3obj-$(CONFIG_CARL9170) += carl9170/
4obj-$(CONFIG_ATH6KL) += ath6kl/
4 5
5obj-$(CONFIG_ATH_COMMON) += ath.o 6obj-$(CONFIG_ATH_COMMON) += ath.o
6 7
diff --git a/drivers/net/wireless/ath/ath6kl/Kconfig b/drivers/net/wireless/ath/ath6kl/Kconfig
new file mode 100644
index 000000000000..fc9f69c1f945
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/Kconfig
@@ -0,0 +1,17 @@
config ATH6KL
	tristate "Atheros ath6kl support"
	depends on MMC
	depends on CFG80211
	select WIRELESS_EXT
	select WEXT_PRIV
	---help---
	  This module adds support for wireless adapters based on
	  Atheros AR6003 chipset running over SDIO. If you choose to
	  build it as a module, it will be called ath6kl. Please note
	  that AR6002 and AR6001 are not supported by this driver.

config ATH6KL_DEBUG
	bool "Atheros ath6kl debugging"
	depends on ATH6KL
	---help---
	  Enables debug support
diff --git a/drivers/net/wireless/ath/ath6kl/Makefile b/drivers/net/wireless/ath/ath6kl/Makefile
new file mode 100644
index 000000000000..e1bb07ea8e80
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/Makefile
@@ -0,0 +1,35 @@
#------------------------------------------------------------------------------
# Copyright (c) 2004-2010 Atheros Communications Inc.
# All rights reserved.
#
#
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
#
#
# Author(s): ="Atheros"
#------------------------------------------------------------------------------

# Build the ath6kl module from its constituent objects.
obj-$(CONFIG_ATH6KL) := ath6kl.o

ath6kl-y += debug.o htc_hif.o htc.o bmi.o
ath6kl-y += cfg80211.o init.o main.o
ath6kl-y += txrx.o wmi.o node.o sdio.o
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.c b/drivers/net/wireless/ath/ath6kl/bmi.c
new file mode 100644
index 000000000000..84676697d7eb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/bmi.c
@@ -0,0 +1,692 @@
1/*
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "hif-ops.h"
19#include "target.h"
20#include "debug.h"
21
22static int ath6kl_get_bmi_cmd_credits(struct ath6kl *ar)
23{
24 u32 addr;
25 unsigned long timeout;
26 int ret;
27
28 ar->bmi.cmd_credits = 0;
29
30 /* Read the counter register to get the command credits */
31 addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;
32
33 timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
34 while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
35
36 /*
37 * Hit the credit counter with a 4-byte access, the first byte
38 * read will hit the counter and cause a decrement, while the
39 * remaining 3 bytes has no effect. The rationale behind this
40 * is to make all HIF accesses 4-byte aligned.
41 */
42 ret = hif_read_write_sync(ar, addr,
43 (u8 *)&ar->bmi.cmd_credits, 4,
44 HIF_RD_SYNC_BYTE_INC);
45 if (ret) {
46 ath6kl_err("Unable to decrement the command credit count register: %d\n",
47 ret);
48 return ret;
49 }
50
51 /* The counter is only 8 bits.
52 * Ignore anything in the upper 3 bytes
53 */
54 ar->bmi.cmd_credits &= 0xFF;
55 }
56
57 if (!ar->bmi.cmd_credits) {
58 ath6kl_err("bmi communication timeout\n");
59 return -ETIMEDOUT;
60 }
61
62 return 0;
63}
64
65static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar, bool need_timeout)
66{
67 unsigned long timeout;
68 u32 rx_word = 0;
69 int ret = 0;
70
71 timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
72 while ((!need_timeout || time_before(jiffies, timeout)) && !rx_word) {
73 ret = hif_read_write_sync(ar, RX_LOOKAHEAD_VALID_ADDRESS,
74 (u8 *)&rx_word, sizeof(rx_word),
75 HIF_RD_SYNC_BYTE_INC);
76 if (ret) {
77 ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n");
78 return ret;
79 }
80
81 /* all we really want is one bit */
82 rx_word &= (1 << ENDPOINT1);
83 }
84
85 if (!rx_word) {
86 ath6kl_err("bmi_recv_buf FIFO empty\n");
87 return -EINVAL;
88 }
89
90 return ret;
91}
92
93static int ath6kl_bmi_send_buf(struct ath6kl *ar, u8 *buf, u32 len)
94{
95 int ret;
96 u32 addr;
97
98 ret = ath6kl_get_bmi_cmd_credits(ar);
99 if (ret)
100 return ret;
101
102 addr = ar->mbox_info.htc_addr;
103
104 ret = hif_read_write_sync(ar, addr, buf, len,
105 HIF_WR_SYNC_BYTE_INC);
106 if (ret)
107 ath6kl_err("unable to send the bmi data to the device\n");
108
109 return ret;
110}
111
112static int ath6kl_bmi_recv_buf(struct ath6kl *ar,
113 u8 *buf, u32 len, bool want_timeout)
114{
115 int ret;
116 u32 addr;
117
118 /*
119 * During normal bootup, small reads may be required.
120 * Rather than issue an HIF Read and then wait as the Target
121 * adds successive bytes to the FIFO, we wait here until
122 * we know that response data is available.
123 *
124 * This allows us to cleanly timeout on an unexpected
125 * Target failure rather than risk problems at the HIF level.
126 * In particular, this avoids SDIO timeouts and possibly garbage
127 * data on some host controllers. And on an interconnect
128 * such as Compact Flash (as well as some SDIO masters) which
129 * does not provide any indication on data timeout, it avoids
130 * a potential hang or garbage response.
131 *
132 * Synchronization is more difficult for reads larger than the
133 * size of the MBOX FIFO (128B), because the Target is unable
134 * to push the 129th byte of data until AFTER the Host posts an
135 * HIF Read and removes some FIFO data. So for large reads the
136 * Host proceeds to post an HIF Read BEFORE all the data is
137 * actually available to read. Fortunately, large BMI reads do
138 * not occur in practice -- they're supported for debug/development.
139 *
140 * So Host/Target BMI synchronization is divided into these cases:
141 * CASE 1: length < 4
142 * Should not happen
143 *
144 * CASE 2: 4 <= length <= 128
145 * Wait for first 4 bytes to be in FIFO
146 * If CONSERVATIVE_BMI_READ is enabled, also wait for
147 * a BMI command credit, which indicates that the ENTIRE
148 * response is available in the the FIFO
149 *
150 * CASE 3: length > 128
151 * Wait for the first 4 bytes to be in FIFO
152 *
153 * For most uses, a small timeout should be sufficient and we will
154 * usually see a response quickly; but there may be some unusual
155 * (debug) cases of BMI_EXECUTE where we want an larger timeout.
156 * For now, we use an unbounded busy loop while waiting for
157 * BMI_EXECUTE.
158 *
159 * If BMI_EXECUTE ever needs to support longer-latency execution,
160 * especially in production, this code needs to be enhanced to sleep
161 * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
162 * a function of Host processor speed.
163 */
164 if (len >= 4) { /* NB: Currently, always true */
165 ret = ath6kl_bmi_get_rx_lkahd(ar, want_timeout);
166 if (ret)
167 return ret;
168 }
169
170 addr = ar->mbox_info.htc_addr;
171 ret = hif_read_write_sync(ar, addr, buf, len,
172 HIF_RD_SYNC_BYTE_INC);
173 if (ret) {
174 ath6kl_err("Unable to read the bmi data from the device: %d\n",
175 ret);
176 return ret;
177 }
178
179 return 0;
180}
181
182int ath6kl_bmi_done(struct ath6kl *ar)
183{
184 int ret;
185 u32 cid = BMI_DONE;
186
187 if (ar->bmi.done_sent) {
188 ath6kl_dbg(ATH6KL_DBG_BMI, "bmi done skipped\n");
189 return 0;
190 }
191
192 ar->bmi.done_sent = true;
193
194 ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid));
195 if (ret) {
196 ath6kl_err("Unable to send bmi done: %d\n", ret);
197 return ret;
198 }
199
200 ath6kl_bmi_cleanup(ar);
201
202 return 0;
203}
204
205int ath6kl_bmi_get_target_info(struct ath6kl *ar,
206 struct ath6kl_bmi_target_info *targ_info)
207{
208 int ret;
209 u32 cid = BMI_GET_TARGET_INFO;
210
211 if (ar->bmi.done_sent) {
212 ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
213 return -EACCES;
214 }
215
216 ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid));
217 if (ret) {
218 ath6kl_err("Unable to send get target info: %d\n", ret);
219 return ret;
220 }
221
222 ret = ath6kl_bmi_recv_buf(ar, (u8 *)&targ_info->version,
223 sizeof(targ_info->version), true);
224 if (ret) {
225 ath6kl_err("Unable to recv target info: %d\n", ret);
226 return ret;
227 }
228
229 if (le32_to_cpu(targ_info->version) == TARGET_VERSION_SENTINAL) {
230 /* Determine how many bytes are in the Target's targ_info */
231 ret = ath6kl_bmi_recv_buf(ar,
232 (u8 *)&targ_info->byte_count,
233 sizeof(targ_info->byte_count),
234 true);
235 if (ret) {
236 ath6kl_err("unable to read target info byte count: %d\n",
237 ret);
238 return ret;
239 }
240
241 /*
242 * The target's targ_info doesn't match the host's targ_info.
243 * We need to do some backwards compatibility to make this work.
244 */
245 if (le32_to_cpu(targ_info->byte_count) != sizeof(*targ_info)) {
246 WARN_ON(1);
247 return -EINVAL;
248 }
249
250 /* Read the remainder of the targ_info */
251 ret = ath6kl_bmi_recv_buf(ar,
252 ((u8 *)targ_info) +
253 sizeof(targ_info->byte_count),
254 sizeof(*targ_info) -
255 sizeof(targ_info->byte_count),
256 true);
257
258 if (ret) {
259 ath6kl_err("Unable to read target info (%d bytes): %d\n",
260 targ_info->byte_count, ret);
261 return ret;
262 }
263 }
264
265 ath6kl_dbg(ATH6KL_DBG_BMI, "target info (ver: 0x%x type: 0x%x)\n",
266 targ_info->version, targ_info->type);
267
268 return 0;
269}
270
271int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
272{
273 u32 cid = BMI_READ_MEMORY;
274 int ret;
275 u32 offset;
276 u32 len_remain, rx_len;
277 u16 size;
278
279 if (ar->bmi.done_sent) {
280 ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
281 return -EACCES;
282 }
283
284 size = BMI_DATASZ_MAX + sizeof(cid) + sizeof(addr) + sizeof(len);
285 if (size > MAX_BMI_CMDBUF_SZ) {
286 WARN_ON(1);
287 return -EINVAL;
288 }
289 memset(ar->bmi.cmd_buf, 0, size);
290
291 ath6kl_dbg(ATH6KL_DBG_BMI,
292 "bmi read memory: device: addr: 0x%x, len: %d\n",
293 addr, len);
294
295 len_remain = len;
296
297 while (len_remain) {
298 rx_len = (len_remain < BMI_DATASZ_MAX) ?
299 len_remain : BMI_DATASZ_MAX;
300 offset = 0;
301 memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
302 offset += sizeof(cid);
303 memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
304 offset += sizeof(addr);
305 memcpy(&(ar->bmi.cmd_buf[offset]), &rx_len, sizeof(rx_len));
306 offset += sizeof(len);
307
308 ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
309 if (ret) {
310 ath6kl_err("Unable to write to the device: %d\n",
311 ret);
312 return ret;
313 }
314 ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, rx_len, true);
315 if (ret) {
316 ath6kl_err("Unable to read from the device: %d\n",
317 ret);
318 return ret;
319 }
320 memcpy(&buf[len - len_remain], ar->bmi.cmd_buf, rx_len);
321 len_remain -= rx_len; addr += rx_len;
322 }
323
324 return 0;
325}
326
327int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
328{
329 u32 cid = BMI_WRITE_MEMORY;
330 int ret;
331 u32 offset;
332 u32 len_remain, tx_len;
333 const u32 header = sizeof(cid) + sizeof(addr) + sizeof(len);
334 u8 aligned_buf[BMI_DATASZ_MAX];
335 u8 *src;
336
337 if (ar->bmi.done_sent) {
338 ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
339 return -EACCES;
340 }
341
342 if ((BMI_DATASZ_MAX + header) > MAX_BMI_CMDBUF_SZ) {
343 WARN_ON(1);
344 return -EINVAL;
345 }
346
347 memset(ar->bmi.cmd_buf, 0, BMI_DATASZ_MAX + header);
348
349 ath6kl_dbg(ATH6KL_DBG_BMI,
350 "bmi write memory: addr: 0x%x, len: %d\n", addr, len);
351
352 len_remain = len;
353 while (len_remain) {
354 src = &buf[len - len_remain];
355
356 if (len_remain < (BMI_DATASZ_MAX - header)) {
357 if (len_remain & 3) {
358 /* align it with 4 bytes */
359 len_remain = len_remain +
360 (4 - (len_remain & 3));
361 memcpy(aligned_buf, src, len_remain);
362 src = aligned_buf;
363 }
364 tx_len = len_remain;
365 } else {
366 tx_len = (BMI_DATASZ_MAX - header);
367 }
368
369 offset = 0;
370 memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
371 offset += sizeof(cid);
372 memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
373 offset += sizeof(addr);
374 memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len));
375 offset += sizeof(tx_len);
376 memcpy(&(ar->bmi.cmd_buf[offset]), src, tx_len);
377 offset += tx_len;
378
379 ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
380 if (ret) {
381 ath6kl_err("Unable to write to the device: %d\n",
382 ret);
383 return ret;
384 }
385 len_remain -= tx_len; addr += tx_len;
386 }
387
388 return 0;
389}
390
391int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param)
392{
393 u32 cid = BMI_EXECUTE;
394 int ret;
395 u32 offset;
396 u16 size;
397
398 if (ar->bmi.done_sent) {
399 ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
400 return -EACCES;
401 }
402
403 size = sizeof(cid) + sizeof(addr) + sizeof(param);
404 if (size > MAX_BMI_CMDBUF_SZ) {
405 WARN_ON(1);
406 return -EINVAL;
407 }
408 memset(ar->bmi.cmd_buf, 0, size);
409
410 ath6kl_dbg(ATH6KL_DBG_BMI, "bmi execute: addr: 0x%x, param: %d)\n",
411 addr, *param);
412
413 offset = 0;
414 memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
415 offset += sizeof(cid);
416 memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
417 offset += sizeof(addr);
418 memcpy(&(ar->bmi.cmd_buf[offset]), param, sizeof(*param));
419 offset += sizeof(*param);
420
421 ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
422 if (ret) {
423 ath6kl_err("Unable to write to the device: %d\n", ret);
424 return ret;
425 }
426
427 ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param), false);
428 if (ret) {
429 ath6kl_err("Unable to read from the device: %d\n", ret);
430 return ret;
431 }
432
433 memcpy(param, ar->bmi.cmd_buf, sizeof(*param));
434
435 return 0;
436}
437
438int ath6kl_bmi_set_app_start(struct ath6kl *ar, u32 addr)
439{
440 u32 cid = BMI_SET_APP_START;
441 int ret;
442 u32 offset;
443 u16 size;
444
445 if (ar->bmi.done_sent) {
446 ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
447 return -EACCES;
448 }
449
450 size = sizeof(cid) + sizeof(addr);
451 if (size > MAX_BMI_CMDBUF_SZ) {
452 WARN_ON(1);
453 return -EINVAL;
454 }
455 memset(ar->bmi.cmd_buf, 0, size);
456
457 ath6kl_dbg(ATH6KL_DBG_BMI, "bmi set app start: addr: 0x%x\n", addr);
458
459 offset = 0;
460 memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
461 offset += sizeof(cid);
462 memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
463 offset += sizeof(addr);
464
465 ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
466 if (ret) {
467 ath6kl_err("Unable to write to the device: %d\n", ret);
468 return ret;
469 }
470
471 return 0;
472}
473
474int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param)
475{
476 u32 cid = BMI_READ_SOC_REGISTER;
477 int ret;
478 u32 offset;
479 u16 size;
480
481 if (ar->bmi.done_sent) {
482 ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
483 return -EACCES;
484 }
485
486 size = sizeof(cid) + sizeof(addr);
487 if (size > MAX_BMI_CMDBUF_SZ) {
488 WARN_ON(1);
489 return -EINVAL;
490 }
491 memset(ar->bmi.cmd_buf, 0, size);
492
493 ath6kl_dbg(ATH6KL_DBG_BMI, "bmi read SOC reg: addr: 0x%x\n", addr);
494
495 offset = 0;
496 memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
497 offset += sizeof(cid);
498 memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
499 offset += sizeof(addr);
500
501 ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
502 if (ret) {
503 ath6kl_err("Unable to write to the device: %d\n", ret);
504 return ret;
505 }
506
507 ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param), true);
508 if (ret) {
509 ath6kl_err("Unable to read from the device: %d\n", ret);
510 return ret;
511 }
512 memcpy(param, ar->bmi.cmd_buf, sizeof(*param));
513
514 return 0;
515}
516
517int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param)
518{
519 u32 cid = BMI_WRITE_SOC_REGISTER;
520 int ret;
521 u32 offset;
522 u16 size;
523
524 if (ar->bmi.done_sent) {
525 ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
526 return -EACCES;
527 }
528
529 size = sizeof(cid) + sizeof(addr) + sizeof(param);
530 if (size > MAX_BMI_CMDBUF_SZ) {
531 WARN_ON(1);
532 return -EINVAL;
533 }
534 memset(ar->bmi.cmd_buf, 0, size);
535
536 ath6kl_dbg(ATH6KL_DBG_BMI,
537 "bmi write SOC reg: addr: 0x%x, param: %d\n",
538 addr, param);
539
540 offset = 0;
541 memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
542 offset += sizeof(cid);
543 memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
544 offset += sizeof(addr);
545 memcpy(&(ar->bmi.cmd_buf[offset]), &param, sizeof(param));
546 offset += sizeof(param);
547
548 ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
549 if (ret) {
550 ath6kl_err("Unable to write to the device: %d\n", ret);
551 return ret;
552 }
553
554 return 0;
555}
556
557int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len)
558{
559 u32 cid = BMI_LZ_DATA;
560 int ret;
561 u32 offset;
562 u32 len_remain, tx_len;
563 const u32 header = sizeof(cid) + sizeof(len);
564 u16 size;
565
566 if (ar->bmi.done_sent) {
567 ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
568 return -EACCES;
569 }
570
571 size = BMI_DATASZ_MAX + header;
572 if (size > MAX_BMI_CMDBUF_SZ) {
573 WARN_ON(1);
574 return -EINVAL;
575 }
576 memset(ar->bmi.cmd_buf, 0, size);
577
578 ath6kl_dbg(ATH6KL_DBG_BMI, "bmi send LZ data: len: %d)\n",
579 len);
580
581 len_remain = len;
582 while (len_remain) {
583 tx_len = (len_remain < (BMI_DATASZ_MAX - header)) ?
584 len_remain : (BMI_DATASZ_MAX - header);
585
586 offset = 0;
587 memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
588 offset += sizeof(cid);
589 memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len));
590 offset += sizeof(tx_len);
591 memcpy(&(ar->bmi.cmd_buf[offset]), &buf[len - len_remain],
592 tx_len);
593 offset += tx_len;
594
595 ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
596 if (ret) {
597 ath6kl_err("Unable to write to the device: %d\n",
598 ret);
599 return ret;
600 }
601
602 len_remain -= tx_len;
603 }
604
605 return 0;
606}
607
608int ath6kl_bmi_lz_stream_start(struct ath6kl *ar, u32 addr)
609{
610 u32 cid = BMI_LZ_STREAM_START;
611 int ret;
612 u32 offset;
613 u16 size;
614
615 if (ar->bmi.done_sent) {
616 ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
617 return -EACCES;
618 }
619
620 size = sizeof(cid) + sizeof(addr);
621 if (size > MAX_BMI_CMDBUF_SZ) {
622 WARN_ON(1);
623 return -EINVAL;
624 }
625 memset(ar->bmi.cmd_buf, 0, size);
626
627 ath6kl_dbg(ATH6KL_DBG_BMI,
628 "bmi LZ stream start: addr: 0x%x)\n",
629 addr);
630
631 offset = 0;
632 memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
633 offset += sizeof(cid);
634 memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
635 offset += sizeof(addr);
636
637 ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
638 if (ret) {
639 ath6kl_err("Unable to start LZ stream to the device: %d\n",
640 ret);
641 return ret;
642 }
643
644 return 0;
645}
646
647int ath6kl_bmi_fast_download(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
648{
649 int ret;
650 u32 last_word = 0;
651 u32 last_word_offset = len & ~0x3;
652 u32 unaligned_bytes = len & 0x3;
653
654 ret = ath6kl_bmi_lz_stream_start(ar, addr);
655 if (ret)
656 return ret;
657
658 if (unaligned_bytes) {
659 /* copy the last word into a zero padded buffer */
660 memcpy(&last_word, &buf[last_word_offset], unaligned_bytes);
661 }
662
663 ret = ath6kl_bmi_lz_data(ar, buf, last_word_offset);
664 if (ret)
665 return ret;
666
667 if (unaligned_bytes)
668 ret = ath6kl_bmi_lz_data(ar, (u8 *)&last_word, 4);
669
670 if (!ret) {
671 /* Close compressed stream and open a new (fake) one.
672 * This serves mainly to flush Target caches. */
673 ret = ath6kl_bmi_lz_stream_start(ar, 0x00);
674 }
675 return ret;
676}
677
678int ath6kl_bmi_init(struct ath6kl *ar)
679{
680 ar->bmi.cmd_buf = kzalloc(MAX_BMI_CMDBUF_SZ, GFP_ATOMIC);
681
682 if (!ar->bmi.cmd_buf)
683 return -ENOMEM;
684
685 return 0;
686}
687
688void ath6kl_bmi_cleanup(struct ath6kl *ar)
689{
690 kfree(ar->bmi.cmd_buf);
691 ar->bmi.cmd_buf = NULL;
692}
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.h b/drivers/net/wireless/ath/ath6kl/bmi.h
new file mode 100644
index 000000000000..83546d76d979
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/bmi.h
@@ -0,0 +1,250 @@
1/*
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef BMI_H
18#define BMI_H
19
20/*
21 * Bootloader Messaging Interface (BMI)
22 *
23 * BMI is a very simple messaging interface used during initialization
24 * to read memory, write memory, execute code, and to define an
25 * application entry PC.
26 *
27 * It is used to download an application to ATH6KL, to provide
28 * patches to code that is already resident on ATH6KL, and generally
29 * to examine and modify state. The Host has an opportunity to use
30 * BMI only once during bootup. Once the Host issues a BMI_DONE
31 * command, this opportunity ends.
32 *
33 * The Host writes BMI requests to mailbox0, and reads BMI responses
34 * from mailbox0. BMI requests all begin with a command
35 * (see below for specific commands), and are followed by
36 * command-specific data.
37 *
38 * Flow control:
39 * The Host can only issue a command once the Target gives it a
40 * "BMI Command Credit", using ATH6KL Counter #4. As soon as the
41 * Target has completed a command, it issues another BMI Command
42 * Credit (so the Host can issue the next command).
43 *
44 * BMI handles all required Target-side cache flushing.
45 */
46
47#define MAX_BMI_CMDBUF_SZ (BMI_DATASZ_MAX + \
48 (sizeof(u32) * 3 /* cmd + addr + len */))
49
50/* Maximum data size used for BMI transfers */
51#define BMI_DATASZ_MAX 256
52
53/* BMI Commands */
54
55#define BMI_NO_COMMAND 0
56
57#define BMI_DONE 1
58/*
59 * Semantics: Host is done using BMI
60 * Request format:
61 * u32 command (BMI_DONE)
62 * Response format: none
63 */
64
65#define BMI_READ_MEMORY 2
66/*
67 * Semantics: Host reads ATH6KL memory
68 * Request format:
69 * u32 command (BMI_READ_MEMORY)
70 * u32 address
71 * u32 length, at most BMI_DATASZ_MAX
72 * Response format:
73 * u8 data[length]
74 */
75
76#define BMI_WRITE_MEMORY 3
77/*
78 * Semantics: Host writes ATH6KL memory
79 * Request format:
80 * u32 command (BMI_WRITE_MEMORY)
81 * u32 address
82 * u32 length, at most BMI_DATASZ_MAX
83 * u8 data[length]
84 * Response format: none
85 */
86
87#define BMI_EXECUTE 4
88/*
89 * Semantics: Causes ATH6KL to execute code
90 * Request format:
91 * u32 command (BMI_EXECUTE)
92 * u32 address
93 * u32 parameter
94 * Response format:
95 * u32 return value
96 */
97
98#define BMI_SET_APP_START 5
99/*
100 * Semantics: Set Target application starting address
101 * Request format:
102 * u32 command (BMI_SET_APP_START)
103 * u32 address
104 * Response format: none
105 */
106
107#define BMI_READ_SOC_REGISTER 6
108/*
109 * Semantics: Read a 32-bit Target SOC register.
110 * Request format:
111 * u32 command (BMI_READ_REGISTER)
112 * u32 address
113 * Response format:
114 * u32 value
115 */
116
117#define BMI_WRITE_SOC_REGISTER 7
118/*
119 * Semantics: Write a 32-bit Target SOC register.
120 * Request format:
121 * u32 command (BMI_WRITE_REGISTER)
122 * u32 address
123 * u32 value
124 *
125 * Response format: none
126 */
127
128#define BMI_GET_TARGET_ID 8
129#define BMI_GET_TARGET_INFO 8
130/*
131 * Semantics: Fetch the 4-byte Target information
132 * Request format:
133 * u32 command (BMI_GET_TARGET_ID/INFO)
134 * Response format1 (old firmware):
135 * u32 TargetVersionID
136 * Response format2 (newer firmware):
137 * u32 TARGET_VERSION_SENTINAL
138 * struct bmi_target_info;
139 */
140
141#define TARGET_VERSION_SENTINAL 0xffffffff
142#define TARGET_TYPE_AR6003 3
143
144#define BMI_ROMPATCH_INSTALL 9
145/*
146 * Semantics: Install a ROM Patch.
147 * Request format:
148 * u32 command (BMI_ROMPATCH_INSTALL)
149 * u32 Target ROM Address
150 * u32 Target RAM Address or Value (depending on Target Type)
151 * u32 Size, in bytes
152 * u32 Activate? 1-->activate;
153 * 0-->install but do not activate
154 * Response format:
155 * u32 PatchID
156 */
157
158#define BMI_ROMPATCH_UNINSTALL 10
159/*
160 * Semantics: Uninstall a previously-installed ROM Patch,
161 * automatically deactivating, if necessary.
162 * Request format:
163 * u32 command (BMI_ROMPATCH_UNINSTALL)
164 * u32 PatchID
165 *
166 * Response format: none
167 */
168
169#define BMI_ROMPATCH_ACTIVATE 11
170/*
171 * Semantics: Activate a list of previously-installed ROM Patches.
172 * Request format:
173 * u32 command (BMI_ROMPATCH_ACTIVATE)
174 * u32 rompatch_count
175 * u32 PatchID[rompatch_count]
176 *
177 * Response format: none
178 */
179
180#define BMI_ROMPATCH_DEACTIVATE 12
181/*
182 * Semantics: Deactivate a list of active ROM Patches.
183 * Request format:
184 * u32 command (BMI_ROMPATCH_DEACTIVATE)
185 * u32 rompatch_count
186 * u32 PatchID[rompatch_count]
187 *
188 * Response format: none
189 */
190
191
192#define BMI_LZ_STREAM_START 13
193/*
194 * Semantics: Begin an LZ-compressed stream of input
195 * which is to be uncompressed by the Target to an
196 * output buffer at address. The output buffer must
197 * be sufficiently large to hold the uncompressed
198 * output from the compressed input stream. This BMI
199 * command should be followed by a series of 1 or more
200 * BMI_LZ_DATA commands.
201 * u32 command (BMI_LZ_STREAM_START)
202 * u32 address
203 * Note: Not supported on all versions of ROM firmware.
204 */
205
206#define BMI_LZ_DATA 14
207/*
208 * Semantics: Host writes ATH6KL memory with LZ-compressed
209 * data which is uncompressed by the Target. This command
210 * must be preceded by a BMI_LZ_STREAM_START command. A series
211 * of BMI_LZ_DATA commands are considered part of a single
212 * input stream until another BMI_LZ_STREAM_START is issued.
213 * Request format:
214 * u32 command (BMI_LZ_DATA)
215 * u32 length (of compressed data),
216 * at most BMI_DATASZ_MAX
217 * u8 CompressedData[length]
218 * Response format: none
219 * Note: Not supported on all versions of ROM firmware.
220 */
221
222#define BMI_COMMUNICATION_TIMEOUT 1000 /* in msec */
223
224struct ath6kl;
225struct ath6kl_bmi_target_info {
226 __le32 byte_count; /* size of this structure */
227 __le32 version; /* target version id */
228 __le32 type; /* target type */
229} __packed;
230
231int ath6kl_bmi_init(struct ath6kl *ar);
232void ath6kl_bmi_cleanup(struct ath6kl *ar);
233int ath6kl_bmi_done(struct ath6kl *ar);
234int ath6kl_bmi_get_target_info(struct ath6kl *ar,
235 struct ath6kl_bmi_target_info *targ_info);
236int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len);
237int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len);
238int ath6kl_bmi_execute(struct ath6kl *ar,
239 u32 addr, u32 *param);
240int ath6kl_bmi_set_app_start(struct ath6kl *ar,
241 u32 addr);
242int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param);
243int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param);
244int ath6kl_bmi_lz_data(struct ath6kl *ar,
245 u8 *buf, u32 len);
246int ath6kl_bmi_lz_stream_start(struct ath6kl *ar,
247 u32 addr);
248int ath6kl_bmi_fast_download(struct ath6kl *ar,
249 u32 addr, u8 *buf, u32 len);
250#endif
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
new file mode 100644
index 000000000000..4284a41ff775
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -0,0 +1,1538 @@
1/*
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "cfg80211.h"
19#include "debug.h"
20
/* Populate a struct ieee80211_rate entry; _rate is in units of 100 kbps. */
#define RATETAB_ENT(_rate, _rateid, _flags) {	\
	.bitrate	= (_rate),		\
	.flags		= (_flags),		\
	.hw_value	= (_rateid),		\
}

/* Populate a 2.4 GHz struct ieee80211_channel entry. */
#define CHAN2G(_channel, _freq, _flags) {	\
	.band		= IEEE80211_BAND_2GHZ,	\
	.hw_value	= (_channel),		\
	.center_freq	= (_freq),		\
	.flags		= (_flags),		\
	.max_antenna_gain = 0,			\
	.max_power	= 30,			\
}

/* Populate a 5 GHz entry; center frequency derived from channel number. */
#define CHAN5G(_channel, _flags) {		    \
	.band		= IEEE80211_BAND_5GHZ,	    \
	.hw_value	= (_channel),		    \
	.center_freq	= 5000 + (5 * (_channel)),  \
	.flags		= (_flags),		    \
	.max_antenna_gain = 0,			    \
	.max_power	= 30,			    \
}
44
45static struct ieee80211_rate ath6kl_rates[] = {
46 RATETAB_ENT(10, 0x1, 0),
47 RATETAB_ENT(20, 0x2, 0),
48 RATETAB_ENT(55, 0x4, 0),
49 RATETAB_ENT(110, 0x8, 0),
50 RATETAB_ENT(60, 0x10, 0),
51 RATETAB_ENT(90, 0x20, 0),
52 RATETAB_ENT(120, 0x40, 0),
53 RATETAB_ENT(180, 0x80, 0),
54 RATETAB_ENT(240, 0x100, 0),
55 RATETAB_ENT(360, 0x200, 0),
56 RATETAB_ENT(480, 0x400, 0),
57 RATETAB_ENT(540, 0x800, 0),
58};
59
60#define ath6kl_a_rates (ath6kl_rates + 4)
61#define ath6kl_a_rates_size 8
62#define ath6kl_g_rates (ath6kl_rates + 0)
63#define ath6kl_g_rates_size 12
64
65static struct ieee80211_channel ath6kl_2ghz_channels[] = {
66 CHAN2G(1, 2412, 0),
67 CHAN2G(2, 2417, 0),
68 CHAN2G(3, 2422, 0),
69 CHAN2G(4, 2427, 0),
70 CHAN2G(5, 2432, 0),
71 CHAN2G(6, 2437, 0),
72 CHAN2G(7, 2442, 0),
73 CHAN2G(8, 2447, 0),
74 CHAN2G(9, 2452, 0),
75 CHAN2G(10, 2457, 0),
76 CHAN2G(11, 2462, 0),
77 CHAN2G(12, 2467, 0),
78 CHAN2G(13, 2472, 0),
79 CHAN2G(14, 2484, 0),
80};
81
82static struct ieee80211_channel ath6kl_5ghz_a_channels[] = {
83 CHAN5G(34, 0), CHAN5G(36, 0),
84 CHAN5G(38, 0), CHAN5G(40, 0),
85 CHAN5G(42, 0), CHAN5G(44, 0),
86 CHAN5G(46, 0), CHAN5G(48, 0),
87 CHAN5G(52, 0), CHAN5G(56, 0),
88 CHAN5G(60, 0), CHAN5G(64, 0),
89 CHAN5G(100, 0), CHAN5G(104, 0),
90 CHAN5G(108, 0), CHAN5G(112, 0),
91 CHAN5G(116, 0), CHAN5G(120, 0),
92 CHAN5G(124, 0), CHAN5G(128, 0),
93 CHAN5G(132, 0), CHAN5G(136, 0),
94 CHAN5G(140, 0), CHAN5G(149, 0),
95 CHAN5G(153, 0), CHAN5G(157, 0),
96 CHAN5G(161, 0), CHAN5G(165, 0),
97 CHAN5G(184, 0), CHAN5G(188, 0),
98 CHAN5G(192, 0), CHAN5G(196, 0),
99 CHAN5G(200, 0), CHAN5G(204, 0),
100 CHAN5G(208, 0), CHAN5G(212, 0),
101 CHAN5G(216, 0),
102};
103
104static struct ieee80211_supported_band ath6kl_band_2ghz = {
105 .n_channels = ARRAY_SIZE(ath6kl_2ghz_channels),
106 .channels = ath6kl_2ghz_channels,
107 .n_bitrates = ath6kl_g_rates_size,
108 .bitrates = ath6kl_g_rates,
109};
110
111static struct ieee80211_supported_band ath6kl_band_5ghz = {
112 .n_channels = ARRAY_SIZE(ath6kl_5ghz_a_channels),
113 .channels = ath6kl_5ghz_a_channels,
114 .n_bitrates = ath6kl_a_rates_size,
115 .bitrates = ath6kl_a_rates,
116};
117
118static int ath6kl_set_wpa_version(struct ath6kl *ar,
119 enum nl80211_wpa_versions wpa_version)
120{
121 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: %u\n", __func__, wpa_version);
122
123 if (!wpa_version) {
124 ar->auth_mode = NONE_AUTH;
125 } else if (wpa_version & NL80211_WPA_VERSION_2) {
126 ar->auth_mode = WPA2_AUTH;
127 } else if (wpa_version & NL80211_WPA_VERSION_1) {
128 ar->auth_mode = WPA_AUTH;
129 } else {
130 ath6kl_err("%s: %u not supported\n", __func__, wpa_version);
131 return -ENOTSUPP;
132 }
133
134 return 0;
135}
136
137static int ath6kl_set_auth_type(struct ath6kl *ar,
138 enum nl80211_auth_type auth_type)
139{
140
141 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, auth_type);
142
143 switch (auth_type) {
144 case NL80211_AUTHTYPE_OPEN_SYSTEM:
145 ar->dot11_auth_mode = OPEN_AUTH;
146 break;
147 case NL80211_AUTHTYPE_SHARED_KEY:
148 ar->dot11_auth_mode = SHARED_AUTH;
149 break;
150 case NL80211_AUTHTYPE_NETWORK_EAP:
151 ar->dot11_auth_mode = LEAP_AUTH;
152 break;
153
154 case NL80211_AUTHTYPE_AUTOMATIC:
155 ar->dot11_auth_mode = OPEN_AUTH;
156 ar->auto_auth_stage = AUTH_OPEN_IN_PROGRESS;
157 break;
158
159 default:
160 ath6kl_err("%s: 0x%x not spported\n", __func__, auth_type);
161 return -ENOTSUPP;
162 }
163
164 return 0;
165}
166
167static int ath6kl_set_cipher(struct ath6kl *ar, u32 cipher, bool ucast)
168{
169 u8 *ar_cipher = ucast ? &ar->prwise_crypto : &ar->grp_crypto;
170 u8 *ar_cipher_len = ucast ? &ar->prwise_crypto_len : &ar->grp_crpto_len;
171
172 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: cipher 0x%x, ucast %u\n",
173 __func__, cipher, ucast);
174
175 switch (cipher) {
176 case 0:
177 /* our own hack to use value 0 as no crypto used */
178 *ar_cipher = NONE_CRYPT;
179 *ar_cipher_len = 0;
180 break;
181 case WLAN_CIPHER_SUITE_WEP40:
182 *ar_cipher = WEP_CRYPT;
183 *ar_cipher_len = 5;
184 break;
185 case WLAN_CIPHER_SUITE_WEP104:
186 *ar_cipher = WEP_CRYPT;
187 *ar_cipher_len = 13;
188 break;
189 case WLAN_CIPHER_SUITE_TKIP:
190 *ar_cipher = TKIP_CRYPT;
191 *ar_cipher_len = 0;
192 break;
193 case WLAN_CIPHER_SUITE_CCMP:
194 *ar_cipher = AES_CRYPT;
195 *ar_cipher_len = 0;
196 break;
197 default:
198 ath6kl_err("cipher 0x%x not supported\n", cipher);
199 return -ENOTSUPP;
200 }
201
202 return 0;
203}
204
205static void ath6kl_set_key_mgmt(struct ath6kl *ar, u32 key_mgmt)
206{
207 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, key_mgmt);
208
209 if (key_mgmt == WLAN_AKM_SUITE_PSK) {
210 if (ar->auth_mode == WPA_AUTH)
211 ar->auth_mode = WPA_PSK_AUTH;
212 else if (ar->auth_mode == WPA2_AUTH)
213 ar->auth_mode = WPA2_PSK_AUTH;
214 } else if (key_mgmt != WLAN_AKM_SUITE_8021X) {
215 ar->auth_mode = NONE_AUTH;
216 }
217}
218
219static bool ath6kl_cfg80211_ready(struct ath6kl *ar)
220{
221 if (!test_bit(WMI_READY, &ar->flag)) {
222 ath6kl_err("wmi is not ready\n");
223 return false;
224 }
225
226 if (ar->wlan_state == WLAN_DISABLED) {
227 ath6kl_err("wlan disabled\n");
228 return false;
229 }
230
231 return true;
232}
233
234static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
235 struct cfg80211_connect_params *sme)
236{
237 struct ath6kl *ar = ath6kl_priv(dev);
238 int status;
239
240 ar->sme_state = SME_CONNECTING;
241
242 if (!ath6kl_cfg80211_ready(ar))
243 return -EIO;
244
245 if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
246 ath6kl_err("destroy in progress\n");
247 return -EBUSY;
248 }
249
250 if (test_bit(SKIP_SCAN, &ar->flag) &&
251 ((sme->channel && sme->channel->center_freq == 0) ||
252 (sme->bssid && is_zero_ether_addr(sme->bssid)))) {
253 ath6kl_err("SkipScan: channel or bssid invalid\n");
254 return -EINVAL;
255 }
256
257 if (down_interruptible(&ar->sem)) {
258 ath6kl_err("busy, couldn't get access\n");
259 return -ERESTARTSYS;
260 }
261
262 if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
263 ath6kl_err("busy, destroy in progress\n");
264 up(&ar->sem);
265 return -EBUSY;
266 }
267
268 if (ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)]) {
269 /*
270 * sleep until the command queue drains
271 */
272 wait_event_interruptible_timeout(ar->event_wq,
273 ar->tx_pending[ath6kl_wmi_get_control_ep(ar->wmi)] == 0,
274 WMI_TIMEOUT);
275 if (signal_pending(current)) {
276 ath6kl_err("cmd queue drain timeout\n");
277 up(&ar->sem);
278 return -EINTR;
279 }
280 }
281
282 if (test_bit(CONNECTED, &ar->flag) &&
283 ar->ssid_len == sme->ssid_len &&
284 !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) {
285 ar->reconnect_flag = true;
286 status = ath6kl_wmi_reconnect_cmd(ar->wmi, ar->req_bssid,
287 ar->ch_hint);
288
289 up(&ar->sem);
290 if (status) {
291 ath6kl_err("wmi_reconnect_cmd failed\n");
292 return -EIO;
293 }
294 return 0;
295 } else if (ar->ssid_len == sme->ssid_len &&
296 !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) {
297 ath6kl_disconnect(ar);
298 }
299
300 memset(ar->ssid, 0, sizeof(ar->ssid));
301 ar->ssid_len = sme->ssid_len;
302 memcpy(ar->ssid, sme->ssid, sme->ssid_len);
303
304 if (sme->channel)
305 ar->ch_hint = sme->channel->center_freq;
306
307 memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
308 if (sme->bssid && !is_broadcast_ether_addr(sme->bssid))
309 memcpy(ar->req_bssid, sme->bssid, sizeof(ar->req_bssid));
310
311 ath6kl_set_wpa_version(ar, sme->crypto.wpa_versions);
312
313 status = ath6kl_set_auth_type(ar, sme->auth_type);
314 if (status) {
315 up(&ar->sem);
316 return status;
317 }
318
319 if (sme->crypto.n_ciphers_pairwise)
320 ath6kl_set_cipher(ar, sme->crypto.ciphers_pairwise[0], true);
321 else
322 ath6kl_set_cipher(ar, 0, true);
323
324 ath6kl_set_cipher(ar, sme->crypto.cipher_group, false);
325
326 if (sme->crypto.n_akm_suites)
327 ath6kl_set_key_mgmt(ar, sme->crypto.akm_suites[0]);
328
329 if ((sme->key_len) &&
330 (ar->auth_mode == NONE_AUTH) && (ar->prwise_crypto == WEP_CRYPT)) {
331 struct ath6kl_key *key = NULL;
332
333 if (sme->key_idx < WMI_MIN_KEY_INDEX ||
334 sme->key_idx > WMI_MAX_KEY_INDEX) {
335 ath6kl_err("key index %d out of bounds\n",
336 sme->key_idx);
337 up(&ar->sem);
338 return -ENOENT;
339 }
340
341 key = &ar->keys[sme->key_idx];
342 key->key_len = sme->key_len;
343 memcpy(key->key, sme->key, key->key_len);
344 key->cipher = ar->prwise_crypto;
345 ar->def_txkey_index = sme->key_idx;
346
347 ath6kl_wmi_addkey_cmd(ar->wmi, sme->key_idx,
348 ar->prwise_crypto,
349 GROUP_USAGE | TX_USAGE,
350 key->key_len,
351 NULL,
352 key->key, KEY_OP_INIT_VAL, NULL,
353 NO_SYNC_WMIFLAG);
354 }
355
356 if (!ar->usr_bss_filter) {
357 if (ath6kl_wmi_bssfilter_cmd(ar->wmi, ALL_BSS_FILTER, 0) != 0) {
358 ath6kl_err("couldn't set bss filtering\n");
359 up(&ar->sem);
360 return -EIO;
361 }
362 }
363
364 ar->nw_type = ar->next_mode;
365
366 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
367 "%s: connect called with authmode %d dot11 auth %d"
368 " PW crypto %d PW crypto len %d GRP crypto %d"
369 " GRP crypto len %d channel hint %u\n",
370 __func__,
371 ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto,
372 ar->prwise_crypto_len, ar->grp_crypto,
373 ar->grp_crpto_len, ar->ch_hint);
374
375 ar->reconnect_flag = 0;
376 status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type,
377 ar->dot11_auth_mode, ar->auth_mode,
378 ar->prwise_crypto,
379 ar->prwise_crypto_len,
380 ar->grp_crypto, ar->grp_crpto_len,
381 ar->ssid_len, ar->ssid,
382 ar->req_bssid, ar->ch_hint,
383 ar->connect_ctrl_flags);
384
385 up(&ar->sem);
386
387 if (status == -EINVAL) {
388 memset(ar->ssid, 0, sizeof(ar->ssid));
389 ar->ssid_len = 0;
390 ath6kl_err("invalid request\n");
391 return -ENOENT;
392 } else if (status) {
393 ath6kl_err("ath6kl_wmi_connect_cmd failed\n");
394 return -EIO;
395 }
396
397 if ((!(ar->connect_ctrl_flags & CONNECT_DO_WPA_OFFLOAD)) &&
398 ((ar->auth_mode == WPA_PSK_AUTH)
399 || (ar->auth_mode == WPA2_PSK_AUTH))) {
400 mod_timer(&ar->disconnect_timer,
401 jiffies + msecs_to_jiffies(DISCON_TIMER_INTVAL));
402 }
403
404 ar->connect_ctrl_flags &= ~CONNECT_DO_WPA_OFFLOAD;
405 set_bit(CONNECT_PEND, &ar->flag);
406
407 return 0;
408}
409
410void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
411 u8 *bssid, u16 listen_intvl,
412 u16 beacon_intvl,
413 enum network_type nw_type,
414 u8 beacon_ie_len, u8 assoc_req_len,
415 u8 assoc_resp_len, u8 *assoc_info)
416{
417 u16 size = 0;
418 u16 capability = 0;
419 struct cfg80211_bss *bss = NULL;
420 struct ieee80211_mgmt *mgmt = NULL;
421 struct ieee80211_channel *ibss_ch = NULL;
422 s32 signal = 50 * 100;
423 u8 ie_buf_len = 0;
424 unsigned char ie_buf[256];
425 unsigned char *ptr_ie_buf = ie_buf;
426 unsigned char *ieeemgmtbuf = NULL;
427 u8 source_mac[ETH_ALEN];
428 u16 capa_mask;
429 u16 capa_val;
430
431 /* capinfo + listen interval */
432 u8 assoc_req_ie_offset = sizeof(u16) + sizeof(u16);
433
434 /* capinfo + status code + associd */
435 u8 assoc_resp_ie_offset = sizeof(u16) + sizeof(u16) + sizeof(u16);
436
437 u8 *assoc_req_ie = assoc_info + beacon_ie_len + assoc_req_ie_offset;
438 u8 *assoc_resp_ie = assoc_info + beacon_ie_len + assoc_req_len +
439 assoc_resp_ie_offset;
440
441 assoc_req_len -= assoc_req_ie_offset;
442 assoc_resp_len -= assoc_resp_ie_offset;
443
444 ar->auto_auth_stage = AUTH_IDLE;
445
446 if (nw_type & ADHOC_NETWORK) {
447 if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) {
448 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
449 "%s: ath6k not in ibss mode\n", __func__);
450 return;
451 }
452 }
453
454 if (nw_type & INFRA_NETWORK) {
455 if (ar->wdev->iftype != NL80211_IFTYPE_STATION) {
456 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
457 "%s: ath6k not in station mode\n", __func__);
458 return;
459 }
460 }
461
462 if (nw_type & ADHOC_NETWORK) {
463 capa_mask = WLAN_CAPABILITY_IBSS;
464 capa_val = WLAN_CAPABILITY_IBSS;
465 } else {
466 capa_mask = WLAN_CAPABILITY_ESS;
467 capa_val = WLAN_CAPABILITY_ESS;
468 }
469
470 /* Before informing the join/connect event, make sure that
471 * bss entry is present in scan list, if it not present
472 * construct and insert into scan list, otherwise that
473 * event will be dropped on the way by cfg80211, due to
474 * this keys will not be plumbed in case of WEP and
475 * application will not be aware of join/connect status. */
476 bss = cfg80211_get_bss(ar->wdev->wiphy, NULL, bssid,
477 ar->wdev->ssid, ar->wdev->ssid_len,
478 capa_mask, capa_val);
479
480 /*
481 * Earlier we were updating the cfg about bss by making a beacon frame
482 * only if the entry for bss is not there. This can have some issue if
483 * ROAM event is generated and a heavy traffic is ongoing. The ROAM
484 * event is handled through a work queue and by the time it really gets
485 * handled, BSS would have been aged out. So it is better to update the
486 * cfg about BSS irrespective of its entry being present right now or
487 * not.
488 */
489
490 if (nw_type & ADHOC_NETWORK) {
491 /* construct 802.11 mgmt beacon */
492 if (ptr_ie_buf) {
493 *ptr_ie_buf++ = WLAN_EID_SSID;
494 *ptr_ie_buf++ = ar->ssid_len;
495 memcpy(ptr_ie_buf, ar->ssid, ar->ssid_len);
496 ptr_ie_buf += ar->ssid_len;
497
498 *ptr_ie_buf++ = WLAN_EID_IBSS_PARAMS;
499 *ptr_ie_buf++ = 2; /* length */
500 *ptr_ie_buf++ = 0; /* ATIM window */
501 *ptr_ie_buf++ = 0; /* ATIM window */
502
503 /* TODO: update ibss params and include supported rates,
504 * DS param set, extened support rates, wmm. */
505
506 ie_buf_len = ptr_ie_buf - ie_buf;
507 }
508
509 capability |= WLAN_CAPABILITY_IBSS;
510
511 if (ar->prwise_crypto == WEP_CRYPT)
512 capability |= WLAN_CAPABILITY_PRIVACY;
513
514 memcpy(source_mac, ar->net_dev->dev_addr, ETH_ALEN);
515 ptr_ie_buf = ie_buf;
516 } else {
517 capability = *(u16 *) (&assoc_info[beacon_ie_len]);
518 memcpy(source_mac, bssid, ETH_ALEN);
519 ptr_ie_buf = assoc_req_ie;
520 ie_buf_len = assoc_req_len;
521 }
522
523 size = offsetof(struct ieee80211_mgmt, u)
524 + sizeof(mgmt->u.beacon)
525 + ie_buf_len;
526
527 ieeemgmtbuf = kzalloc(size, GFP_ATOMIC);
528 if (!ieeemgmtbuf) {
529 ath6kl_err("ieee mgmt buf alloc error\n");
530 cfg80211_put_bss(bss);
531 return;
532 }
533
534 mgmt = (struct ieee80211_mgmt *)ieeemgmtbuf;
535 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
536 IEEE80211_STYPE_BEACON);
537 memset(mgmt->da, 0xff, ETH_ALEN); /* broadcast addr */
538 memcpy(mgmt->sa, source_mac, ETH_ALEN);
539 memcpy(mgmt->bssid, bssid, ETH_ALEN);
540 mgmt->u.beacon.beacon_int = cpu_to_le16(beacon_intvl);
541 mgmt->u.beacon.capab_info = cpu_to_le16(capability);
542 memcpy(mgmt->u.beacon.variable, ptr_ie_buf, ie_buf_len);
543
544 ibss_ch = ieee80211_get_channel(ar->wdev->wiphy, (int)channel);
545
546 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
547 "%s: inform bss with bssid %pM channel %d beacon_intvl %d capability 0x%x\n",
548 __func__, mgmt->bssid, ibss_ch->hw_value,
549 beacon_intvl, capability);
550
551 bss = cfg80211_inform_bss_frame(ar->wdev->wiphy,
552 ibss_ch, mgmt,
553 size, signal, GFP_KERNEL);
554 kfree(ieeemgmtbuf);
555 cfg80211_put_bss(bss);
556
557 if (nw_type & ADHOC_NETWORK) {
558 cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL);
559 return;
560 }
561
562 if (!test_bit(CONNECTED, &ar->flag)) {
563 /* inform connect result to cfg80211 */
564 ar->sme_state = SME_DISCONNECTED;
565 cfg80211_connect_result(ar->net_dev, bssid,
566 assoc_req_ie, assoc_req_len,
567 assoc_resp_ie, assoc_resp_len,
568 WLAN_STATUS_SUCCESS, GFP_KERNEL);
569 } else {
570 /* inform roam event to cfg80211 */
571 cfg80211_roamed(ar->net_dev, ibss_ch, bssid,
572 assoc_req_ie, assoc_req_len,
573 assoc_resp_ie, assoc_resp_len, GFP_KERNEL);
574 }
575}
576
577static int ath6kl_cfg80211_disconnect(struct wiphy *wiphy,
578 struct net_device *dev, u16 reason_code)
579{
580 struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev);
581
582 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: reason=%u\n", __func__,
583 reason_code);
584
585 if (!ath6kl_cfg80211_ready(ar))
586 return -EIO;
587
588 if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
589 ath6kl_err("busy, destroy in progress\n");
590 return -EBUSY;
591 }
592
593 if (down_interruptible(&ar->sem)) {
594 ath6kl_err("busy, couldn't get access\n");
595 return -ERESTARTSYS;
596 }
597
598 ar->reconnect_flag = 0;
599 ath6kl_disconnect(ar);
600 memset(ar->ssid, 0, sizeof(ar->ssid));
601 ar->ssid_len = 0;
602
603 if (!test_bit(SKIP_SCAN, &ar->flag))
604 memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
605
606 up(&ar->sem);
607
608 return 0;
609}
610
611void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason,
612 u8 *bssid, u8 assoc_resp_len,
613 u8 *assoc_info, u16 proto_reason)
614{
615 struct ath6kl_key *key = NULL;
616 u16 status;
617
618 if (ar->scan_req) {
619 cfg80211_scan_done(ar->scan_req, true);
620 ar->scan_req = NULL;
621 }
622
623 if (ar->nw_type & ADHOC_NETWORK) {
624 if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) {
625 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
626 "%s: ath6k not in ibss mode\n", __func__);
627 return;
628 }
629 memset(bssid, 0, ETH_ALEN);
630 cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL);
631 return;
632 }
633
634 if (ar->nw_type & INFRA_NETWORK) {
635 if (ar->wdev->iftype != NL80211_IFTYPE_STATION) {
636 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
637 "%s: ath6k not in station mode\n", __func__);
638 return;
639 }
640 }
641
642 if (!test_bit(CONNECT_PEND, &ar->flag)) {
643 if (reason != DISCONNECT_CMD)
644 ath6kl_wmi_disconnect_cmd(ar->wmi);
645
646 return;
647 }
648
649 if (reason == NO_NETWORK_AVAIL) {
650 /* connect cmd failed */
651 ath6kl_wmi_disconnect_cmd(ar->wmi);
652 return;
653 }
654
655 if (reason != DISCONNECT_CMD)
656 return;
657
658 if (!ar->auto_auth_stage) {
659 clear_bit(CONNECT_PEND, &ar->flag);
660
661 if (ar->sme_state == SME_CONNECTING) {
662 cfg80211_connect_result(ar->net_dev,
663 bssid, NULL, 0,
664 NULL, 0,
665 WLAN_STATUS_UNSPECIFIED_FAILURE,
666 GFP_KERNEL);
667 } else {
668 cfg80211_disconnected(ar->net_dev, reason,
669 NULL, 0, GFP_KERNEL);
670 }
671
672 ar->sme_state = SME_DISCONNECTED;
673 return;
674 }
675
676 if (ar->dot11_auth_mode != OPEN_AUTH)
677 return;
678
679 /*
680 * If the current auth algorithm is open, try shared and
681 * make autoAuthStage idle. We do not make it leap for now
682 * being.
683 */
684 key = &ar->keys[ar->def_txkey_index];
685 if (down_interruptible(&ar->sem)) {
686 ath6kl_err("busy, couldn't get access\n");
687 return;
688 }
689
690 ar->dot11_auth_mode = SHARED_AUTH;
691 ar->auto_auth_stage = AUTH_IDLE;
692
693 ath6kl_wmi_addkey_cmd(ar->wmi,
694 ar->def_txkey_index,
695 ar->prwise_crypto,
696 GROUP_USAGE | TX_USAGE,
697 key->key_len, NULL,
698 key->key,
699 KEY_OP_INIT_VAL, NULL,
700 NO_SYNC_WMIFLAG);
701
702 status = ath6kl_wmi_connect_cmd(ar->wmi,
703 ar->nw_type,
704 ar->dot11_auth_mode,
705 ar->auth_mode,
706 ar->prwise_crypto,
707 ar->prwise_crypto_len,
708 ar->grp_crypto,
709 ar->grp_crpto_len,
710 ar->ssid_len,
711 ar->ssid,
712 ar->req_bssid,
713 ar->ch_hint,
714 ar->connect_ctrl_flags);
715 up(&ar->sem);
716}
717
718static inline bool is_ch_11a(u16 ch)
719{
720 return (!((ch >= 2412) && (ch <= 2484)));
721}
722
723static void ath6kl_cfg80211_scan_node(void *arg, struct bss *ni)
724{
725 struct wiphy *wiphy = (struct wiphy *)arg;
726 u16 size;
727 unsigned char *ieeemgmtbuf = NULL;
728 struct ieee80211_mgmt *mgmt;
729 struct ieee80211_channel *channel;
730 struct ieee80211_supported_band *band;
731 struct ath6kl_common_ie *cie;
732 s32 signal;
733 int freq;
734
735 cie = &ni->ni_cie;
736
737 if (is_ch_11a(cie->ie_chan))
738 band = wiphy->bands[IEEE80211_BAND_5GHZ]; /* 11a */
739 else if ((cie->ie_erp) || (cie->ie_xrates))
740 band = wiphy->bands[IEEE80211_BAND_2GHZ]; /* 11g */
741 else
742 band = wiphy->bands[IEEE80211_BAND_2GHZ]; /* 11b */
743
744 size = ni->ni_framelen + offsetof(struct ieee80211_mgmt, u);
745 ieeemgmtbuf = kmalloc(size, GFP_ATOMIC);
746 if (!ieeemgmtbuf) {
747 ath6kl_err("ieee mgmt buf alloc error\n");
748 return;
749 }
750
751 /*
752 * TODO: Update target to include 802.11 mac header while sending
753 * bss info. Target removes 802.11 mac header while sending the bss
754 * info to host, cfg80211 needs it, for time being just filling the
755 * da, sa and bssid fields alone.
756 */
757 mgmt = (struct ieee80211_mgmt *)ieeemgmtbuf;
758 memset(mgmt->da, 0xff, ETH_ALEN); /*broadcast addr */
759 memcpy(mgmt->sa, ni->ni_macaddr, ETH_ALEN);
760 memcpy(mgmt->bssid, ni->ni_macaddr, ETH_ALEN);
761 memcpy(ieeemgmtbuf + offsetof(struct ieee80211_mgmt, u),
762 ni->ni_buf, ni->ni_framelen);
763
764 freq = cie->ie_chan;
765 channel = ieee80211_get_channel(wiphy, freq);
766 signal = ni->ni_snr * 100;
767
768 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
769 "%s: bssid %pM ch %d freq %d size %d\n", __func__,
770 mgmt->bssid, channel->hw_value, freq, size);
771 cfg80211_inform_bss_frame(wiphy, channel, mgmt,
772 size, signal, GFP_KERNEL);
773
774 kfree(ieeemgmtbuf);
775}
776
777static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
778 struct cfg80211_scan_request *request)
779{
780 struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
781 int ret = 0;
782 u32 force_fg_scan = 0;
783
784 if (!ath6kl_cfg80211_ready(ar))
785 return -EIO;
786
787 if (!ar->usr_bss_filter) {
788 if (ath6kl_wmi_bssfilter_cmd(ar->wmi,
789 (test_bit(CONNECTED, &ar->flag) ?
790 ALL_BUT_BSS_FILTER :
791 ALL_BSS_FILTER), 0) != 0) {
792 ath6kl_err("couldn't set bss filtering\n");
793 return -EIO;
794 }
795 }
796
797 if (request->n_ssids && request->ssids[0].ssid_len) {
798 u8 i;
799
800 if (request->n_ssids > (MAX_PROBED_SSID_INDEX - 1))
801 request->n_ssids = MAX_PROBED_SSID_INDEX - 1;
802
803 for (i = 0; i < request->n_ssids; i++)
804 ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1,
805 SPECIFIC_SSID_FLAG,
806 request->ssids[i].ssid_len,
807 request->ssids[i].ssid);
808 }
809
810 if (test_bit(CONNECTED, &ar->flag))
811 force_fg_scan = 1;
812
813 if (ath6kl_wmi_startscan_cmd(ar->wmi, WMI_LONG_SCAN, force_fg_scan,
814 false, 0, 0, 0, NULL) != 0) {
815 ath6kl_err("wmi_startscan_cmd failed\n");
816 ret = -EIO;
817 }
818
819 ar->scan_req = request;
820
821 return ret;
822}
823
824void ath6kl_cfg80211_scan_complete_event(struct ath6kl *ar, int status)
825{
826
827 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status %d\n", __func__, status);
828
829 if (ar->scan_req) {
830 /* Translate data to cfg80211 mgmt format */
831 ath6kl_wmi_iterate_nodes(ar->wmi, ath6kl_cfg80211_scan_node,
832 ar->wdev->wiphy);
833
834 cfg80211_scan_done(ar->scan_req, ((status & -ECANCELED)
835 || (status & -EBUSY)) ? true :
836 false);
837
838 if (ar->scan_req->n_ssids && ar->scan_req->ssids[0].ssid_len) {
839 u8 i;
840
841 for (i = 0; i < ar->scan_req->n_ssids; i++) {
842 ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1,
843 DISABLE_SSID_FLAG,
844 0, NULL);
845 }
846 }
847 ar->scan_req = NULL;
848 }
849}
850
851static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
852 u8 key_index, bool pairwise,
853 const u8 *mac_addr,
854 struct key_params *params)
855{
856 struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
857 struct ath6kl_key *key = NULL;
858 u8 key_usage;
859 u8 key_type;
860 int status = 0;
861
862 if (!ath6kl_cfg80211_ready(ar))
863 return -EIO;
864
865 if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
866 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
867 "%s: key index %d out of bounds\n", __func__,
868 key_index);
869 return -ENOENT;
870 }
871
872 key = &ar->keys[key_index];
873 memset(key, 0, sizeof(struct ath6kl_key));
874
875 if (pairwise)
876 key_usage = PAIRWISE_USAGE;
877 else
878 key_usage = GROUP_USAGE;
879
880 if (params) {
881 if (params->key_len > WLAN_MAX_KEY_LEN ||
882 params->seq_len > sizeof(key->seq))
883 return -EINVAL;
884
885 key->key_len = params->key_len;
886 memcpy(key->key, params->key, key->key_len);
887 key->seq_len = params->seq_len;
888 memcpy(key->seq, params->seq, key->seq_len);
889 key->cipher = params->cipher;
890 }
891
892 switch (key->cipher) {
893 case WLAN_CIPHER_SUITE_WEP40:
894 case WLAN_CIPHER_SUITE_WEP104:
895 key_type = WEP_CRYPT;
896 break;
897
898 case WLAN_CIPHER_SUITE_TKIP:
899 key_type = TKIP_CRYPT;
900 break;
901
902 case WLAN_CIPHER_SUITE_CCMP:
903 key_type = AES_CRYPT;
904 break;
905
906 default:
907 return -ENOTSUPP;
908 }
909
910 if (((ar->auth_mode == WPA_PSK_AUTH)
911 || (ar->auth_mode == WPA2_PSK_AUTH))
912 && (key_usage & GROUP_USAGE))
913 del_timer(&ar->disconnect_timer);
914
915 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
916 "%s: index %d, key_len %d, key_type 0x%x, key_usage 0x%x, seq_len %d\n",
917 __func__, key_index, key->key_len, key_type,
918 key_usage, key->seq_len);
919
920 ar->def_txkey_index = key_index;
921 status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index,
922 key_type, key_usage, key->key_len,
923 key->seq, key->key, KEY_OP_INIT_VAL,
924 (u8 *) mac_addr, SYNC_BOTH_WMIFLAG);
925
926 if (status)
927 return -EIO;
928
929 return 0;
930}
931
932static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
933 u8 key_index, bool pairwise,
934 const u8 *mac_addr)
935{
936 struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
937
938 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
939
940 if (!ath6kl_cfg80211_ready(ar))
941 return -EIO;
942
943 if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
944 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
945 "%s: key index %d out of bounds\n", __func__,
946 key_index);
947 return -ENOENT;
948 }
949
950 if (!ar->keys[key_index].key_len) {
951 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
952 "%s: index %d is empty\n", __func__, key_index);
953 return 0;
954 }
955
956 ar->keys[key_index].key_len = 0;
957
958 return ath6kl_wmi_deletekey_cmd(ar->wmi, key_index);
959}
960
961static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
962 u8 key_index, bool pairwise,
963 const u8 *mac_addr, void *cookie,
964 void (*callback) (void *cookie,
965 struct key_params *))
966{
967 struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
968 struct ath6kl_key *key = NULL;
969 struct key_params params;
970
971 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
972
973 if (!ath6kl_cfg80211_ready(ar))
974 return -EIO;
975
976 if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
977 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
978 "%s: key index %d out of bounds\n", __func__,
979 key_index);
980 return -ENOENT;
981 }
982
983 key = &ar->keys[key_index];
984 memset(&params, 0, sizeof(params));
985 params.cipher = key->cipher;
986 params.key_len = key->key_len;
987 params.seq_len = key->seq_len;
988 params.seq = key->seq;
989 params.key = key->key;
990
991 callback(cookie, &params);
992
993 return key->key_len ? 0 : -ENOENT;
994}
995
996static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy,
997 struct net_device *ndev,
998 u8 key_index, bool unicast,
999 bool multicast)
1000{
1001 struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
1002 struct ath6kl_key *key = NULL;
1003 int status = 0;
1004 u8 key_usage;
1005
1006 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
1007
1008 if (!ath6kl_cfg80211_ready(ar))
1009 return -EIO;
1010
1011 if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
1012 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
1013 "%s: key index %d out of bounds\n",
1014 __func__, key_index);
1015 return -ENOENT;
1016 }
1017
1018 if (!ar->keys[key_index].key_len) {
1019 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: invalid key index %d\n",
1020 __func__, key_index);
1021 return -EINVAL;
1022 }
1023
1024 ar->def_txkey_index = key_index;
1025 key = &ar->keys[ar->def_txkey_index];
1026 key_usage = GROUP_USAGE;
1027 if (ar->prwise_crypto == WEP_CRYPT)
1028 key_usage |= TX_USAGE;
1029
1030 status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index,
1031 ar->prwise_crypto, key_usage,
1032 key->key_len, key->seq, key->key,
1033 KEY_OP_INIT_VAL, NULL,
1034 SYNC_BOTH_WMIFLAG);
1035 if (status)
1036 return -EIO;
1037
1038 return 0;
1039}
1040
1041void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid,
1042 bool ismcast)
1043{
1044 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
1045 "%s: keyid %d, ismcast %d\n", __func__, keyid, ismcast);
1046
1047 cfg80211_michael_mic_failure(ar->net_dev, ar->bssid,
1048 (ismcast ? NL80211_KEYTYPE_GROUP :
1049 NL80211_KEYTYPE_PAIRWISE), keyid, NULL,
1050 GFP_KERNEL);
1051}
1052
1053static int ath6kl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
1054{
1055 struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
1056 int ret;
1057
1058 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: changed 0x%x\n", __func__,
1059 changed);
1060
1061 if (!ath6kl_cfg80211_ready(ar))
1062 return -EIO;
1063
1064 if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
1065 ret = ath6kl_wmi_set_rts_cmd(ar->wmi, wiphy->rts_threshold);
1066 if (ret != 0) {
1067 ath6kl_err("ath6kl_wmi_set_rts_cmd failed\n");
1068 return -EIO;
1069 }
1070 }
1071
1072 return 0;
1073}
1074
1075/*
1076 * The type nl80211_tx_power_setting replaces the following
1077 * data type from 2.6.36 onwards
1078*/
1079static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
1080 enum nl80211_tx_power_setting type,
1081 int dbm)
1082{
1083 struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
1084 u8 ath6kl_dbm;
1085
1086 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x, dbm %d\n", __func__,
1087 type, dbm);
1088
1089 if (!ath6kl_cfg80211_ready(ar))
1090 return -EIO;
1091
1092 switch (type) {
1093 case NL80211_TX_POWER_AUTOMATIC:
1094 return 0;
1095 case NL80211_TX_POWER_LIMITED:
1096 ar->tx_pwr = ath6kl_dbm = dbm;
1097 break;
1098 default:
1099 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x not supported\n",
1100 __func__, type);
1101 return -EOPNOTSUPP;
1102 }
1103
1104 ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, ath6kl_dbm);
1105
1106 return 0;
1107}
1108
1109static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm)
1110{
1111 struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
1112
1113 if (!ath6kl_cfg80211_ready(ar))
1114 return -EIO;
1115
1116 if (test_bit(CONNECTED, &ar->flag)) {
1117 ar->tx_pwr = 0;
1118
1119 if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi) != 0) {
1120 ath6kl_err("ath6kl_wmi_get_tx_pwr_cmd failed\n");
1121 return -EIO;
1122 }
1123
1124 wait_event_interruptible_timeout(ar->event_wq, ar->tx_pwr != 0,
1125 5 * HZ);
1126
1127 if (signal_pending(current)) {
1128 ath6kl_err("target did not respond\n");
1129 return -EINTR;
1130 }
1131 }
1132
1133 *dbm = ar->tx_pwr;
1134 return 0;
1135}
1136
1137static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
1138 struct net_device *dev,
1139 bool pmgmt, int timeout)
1140{
1141 struct ath6kl *ar = ath6kl_priv(dev);
1142 struct wmi_power_mode_cmd mode;
1143
1144 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: pmgmt %d, timeout %d\n",
1145 __func__, pmgmt, timeout);
1146
1147 if (!ath6kl_cfg80211_ready(ar))
1148 return -EIO;
1149
1150 if (pmgmt) {
1151 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: max perf\n", __func__);
1152 mode.pwr_mode = REC_POWER;
1153 } else {
1154 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: rec power\n", __func__);
1155 mode.pwr_mode = MAX_PERF_POWER;
1156 }
1157
1158 if (ath6kl_wmi_powermode_cmd(ar->wmi, mode.pwr_mode) != 0) {
1159 ath6kl_err("wmi_powermode_cmd failed\n");
1160 return -EIO;
1161 }
1162
1163 return 0;
1164}
1165
1166static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy,
1167 struct net_device *ndev,
1168 enum nl80211_iftype type, u32 *flags,
1169 struct vif_params *params)
1170{
1171 struct ath6kl *ar = ath6kl_priv(ndev);
1172 struct wireless_dev *wdev = ar->wdev;
1173
1174 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type %u\n", __func__, type);
1175
1176 if (!ath6kl_cfg80211_ready(ar))
1177 return -EIO;
1178
1179 switch (type) {
1180 case NL80211_IFTYPE_STATION:
1181 ar->next_mode = INFRA_NETWORK;
1182 break;
1183 case NL80211_IFTYPE_ADHOC:
1184 ar->next_mode = ADHOC_NETWORK;
1185 break;
1186 default:
1187 ath6kl_err("invalid interface type %u\n", type);
1188 return -EOPNOTSUPP;
1189 }
1190
1191 wdev->iftype = type;
1192
1193 return 0;
1194}
1195
1196static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy,
1197 struct net_device *dev,
1198 struct cfg80211_ibss_params *ibss_param)
1199{
1200 struct ath6kl *ar = ath6kl_priv(dev);
1201 int status;
1202
1203 if (!ath6kl_cfg80211_ready(ar))
1204 return -EIO;
1205
1206 ar->ssid_len = ibss_param->ssid_len;
1207 memcpy(ar->ssid, ibss_param->ssid, ar->ssid_len);
1208
1209 if (ibss_param->channel)
1210 ar->ch_hint = ibss_param->channel->center_freq;
1211
1212 if (ibss_param->channel_fixed) {
1213 /*
1214 * TODO: channel_fixed: The channel should be fixed, do not
1215 * search for IBSSs to join on other channels. Target
1216 * firmware does not support this feature, needs to be
1217 * updated.
1218 */
1219 return -EOPNOTSUPP;
1220 }
1221
1222 memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
1223 if (ibss_param->bssid && !is_broadcast_ether_addr(ibss_param->bssid))
1224 memcpy(ar->req_bssid, ibss_param->bssid, sizeof(ar->req_bssid));
1225
1226 ath6kl_set_wpa_version(ar, 0);
1227
1228 status = ath6kl_set_auth_type(ar, NL80211_AUTHTYPE_OPEN_SYSTEM);
1229 if (status)
1230 return status;
1231
1232 if (ibss_param->privacy) {
1233 ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, true);
1234 ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, false);
1235 } else {
1236 ath6kl_set_cipher(ar, 0, true);
1237 ath6kl_set_cipher(ar, 0, false);
1238 }
1239
1240 ar->nw_type = ar->next_mode;
1241
1242 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
1243 "%s: connect called with authmode %d dot11 auth %d"
1244 " PW crypto %d PW crypto len %d GRP crypto %d"
1245 " GRP crypto len %d channel hint %u\n",
1246 __func__,
1247 ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto,
1248 ar->prwise_crypto_len, ar->grp_crypto,
1249 ar->grp_crpto_len, ar->ch_hint);
1250
1251 status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type,
1252 ar->dot11_auth_mode, ar->auth_mode,
1253 ar->prwise_crypto,
1254 ar->prwise_crypto_len,
1255 ar->grp_crypto, ar->grp_crpto_len,
1256 ar->ssid_len, ar->ssid,
1257 ar->req_bssid, ar->ch_hint,
1258 ar->connect_ctrl_flags);
1259 set_bit(CONNECT_PEND, &ar->flag);
1260
1261 return 0;
1262}
1263
1264static int ath6kl_cfg80211_leave_ibss(struct wiphy *wiphy,
1265 struct net_device *dev)
1266{
1267 struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev);
1268
1269 if (!ath6kl_cfg80211_ready(ar))
1270 return -EIO;
1271
1272 ath6kl_disconnect(ar);
1273 memset(ar->ssid, 0, sizeof(ar->ssid));
1274 ar->ssid_len = 0;
1275
1276 return 0;
1277}
1278
/* cipher suites advertised to cfg80211: WEP-40/104, TKIP and CCMP */
static const u32 cipher_suites[] = {
	WLAN_CIPHER_SUITE_WEP40,
	WLAN_CIPHER_SUITE_WEP104,
	WLAN_CIPHER_SUITE_TKIP,
	WLAN_CIPHER_SUITE_CCMP,
};
1285
1286static bool is_rate_legacy(s32 rate)
1287{
1288 static const s32 legacy[] = { 1000, 2000, 5500, 11000,
1289 6000, 9000, 12000, 18000, 24000,
1290 36000, 48000, 54000
1291 };
1292 u8 i;
1293
1294 for (i = 0; i < ARRAY_SIZE(legacy); i++)
1295 if (rate == legacy[i])
1296 return true;
1297
1298 return false;
1299}
1300
1301static bool is_rate_ht20(s32 rate, u8 *mcs, bool *sgi)
1302{
1303 static const s32 ht20[] = { 6500, 13000, 19500, 26000, 39000,
1304 52000, 58500, 65000, 72200
1305 };
1306 u8 i;
1307
1308 for (i = 0; i < ARRAY_SIZE(ht20); i++) {
1309 if (rate == ht20[i]) {
1310 if (i == ARRAY_SIZE(ht20) - 1)
1311 /* last rate uses sgi */
1312 *sgi = true;
1313 else
1314 *sgi = false;
1315
1316 *mcs = i;
1317 return true;
1318 }
1319 }
1320 return false;
1321}
1322
1323static bool is_rate_ht40(s32 rate, u8 *mcs, bool *sgi)
1324{
1325 static const s32 ht40[] = { 13500, 27000, 40500, 54000,
1326 81000, 108000, 121500, 135000,
1327 150000
1328 };
1329 u8 i;
1330
1331 for (i = 0; i < ARRAY_SIZE(ht40); i++) {
1332 if (rate == ht40[i]) {
1333 if (i == ARRAY_SIZE(ht40) - 1)
1334 /* last rate uses sgi */
1335 *sgi = true;
1336 else
1337 *sgi = false;
1338
1339 *mcs = i;
1340 return true;
1341 }
1342 }
1343
1344 return false;
1345}
1346
/*
 * cfg80211 get_station handler: trigger a fresh statistics fetch from
 * the target, wait for the reply, then fill @sinfo with byte/packet
 * counters, signal level and the current tx bitrate. Only the AP we
 * are associated with (ar->bssid) is known as a station.
 */
static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
			      u8 *mac, struct station_info *sinfo)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	long left;
	bool sgi;
	s32 rate;
	int ret;
	u8 mcs;

	if (memcmp(mac, ar->bssid, ETH_ALEN) != 0)
		return -ENOENT;

	/* serialize the stats request/response round trip */
	if (down_interruptible(&ar->sem))
		return -EBUSY;

	set_bit(STATS_UPDATE_PEND, &ar->flag);

	ret = ath6kl_wmi_get_stats_cmd(ar->wmi);

	if (ret != 0) {
		up(&ar->sem);
		return -EIO;
	}

	/* the stats event handler clears STATS_UPDATE_PEND and wakes us */
	left = wait_event_interruptible_timeout(ar->event_wq,
						!test_bit(STATS_UPDATE_PEND,
							  &ar->flag),
						WMI_TIMEOUT);

	up(&ar->sem);

	if (left == 0)
		return -ETIMEDOUT;
	else if (left < 0)
		return left;

	if (ar->target_stats.rx_byte) {
		sinfo->rx_bytes = ar->target_stats.rx_byte;
		sinfo->filled |= STATION_INFO_RX_BYTES;
		sinfo->rx_packets = ar->target_stats.rx_pkt;
		sinfo->filled |= STATION_INFO_RX_PACKETS;
	}

	if (ar->target_stats.tx_byte) {
		sinfo->tx_bytes = ar->target_stats.tx_byte;
		sinfo->filled |= STATION_INFO_TX_BYTES;
		sinfo->tx_packets = ar->target_stats.tx_pkt;
		sinfo->filled |= STATION_INFO_TX_PACKETS;
	}

	sinfo->signal = ar->target_stats.cs_rssi;
	sinfo->filled |= STATION_INFO_SIGNAL;

	rate = ar->target_stats.tx_ucast_rate;

	if (is_rate_legacy(rate)) {
		/* cfg80211 expects legacy rates in units of 100 kbps */
		sinfo->txrate.legacy = rate / 100;
	} else if (is_rate_ht20(rate, &mcs, &sgi)) {
		if (sgi) {
			/*
			 * The short-GI entry sits one slot past the top
			 * MCS in the rate table, hence mcs - 1.
			 */
			sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
			sinfo->txrate.mcs = mcs - 1;
		} else {
			sinfo->txrate.mcs = mcs;
		}

		sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
	} else if (is_rate_ht40(rate, &mcs, &sgi)) {
		if (sgi) {
			/* same one-past-the-end SGI convention as HT20 */
			sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
			sinfo->txrate.mcs = mcs - 1;
		} else {
			sinfo->txrate.mcs = mcs;
		}

		sinfo->txrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
		sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
	} else {
		/* unknown rate: report the rest but omit the bitrate */
		ath6kl_warn("invalid rate: %d\n", rate);
		return 0;
	}

	sinfo->filled |= STATION_INFO_TX_BITRATE;

	return 0;
}
1433
1434static int ath6kl_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
1435 struct cfg80211_pmksa *pmksa)
1436{
1437 struct ath6kl *ar = ath6kl_priv(netdev);
1438 return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid,
1439 pmksa->pmkid, true);
1440}
1441
1442static int ath6kl_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
1443 struct cfg80211_pmksa *pmksa)
1444{
1445 struct ath6kl *ar = ath6kl_priv(netdev);
1446 return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid,
1447 pmksa->pmkid, false);
1448}
1449
1450static int ath6kl_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
1451{
1452 struct ath6kl *ar = ath6kl_priv(netdev);
1453 if (test_bit(CONNECTED, &ar->flag))
1454 return ath6kl_wmi_setpmkid_cmd(ar->wmi, ar->bssid, NULL, false);
1455 return 0;
1456}
1457
1458static struct cfg80211_ops ath6kl_cfg80211_ops = {
1459 .change_virtual_intf = ath6kl_cfg80211_change_iface,
1460 .scan = ath6kl_cfg80211_scan,
1461 .connect = ath6kl_cfg80211_connect,
1462 .disconnect = ath6kl_cfg80211_disconnect,
1463 .add_key = ath6kl_cfg80211_add_key,
1464 .get_key = ath6kl_cfg80211_get_key,
1465 .del_key = ath6kl_cfg80211_del_key,
1466 .set_default_key = ath6kl_cfg80211_set_default_key,
1467 .set_wiphy_params = ath6kl_cfg80211_set_wiphy_params,
1468 .set_tx_power = ath6kl_cfg80211_set_txpower,
1469 .get_tx_power = ath6kl_cfg80211_get_txpower,
1470 .set_power_mgmt = ath6kl_cfg80211_set_power_mgmt,
1471 .join_ibss = ath6kl_cfg80211_join_ibss,
1472 .leave_ibss = ath6kl_cfg80211_leave_ibss,
1473 .get_station = ath6kl_get_station,
1474 .set_pmksa = ath6kl_set_pmksa,
1475 .del_pmksa = ath6kl_del_pmksa,
1476 .flush_pmksa = ath6kl_flush_pmksa,
1477};
1478
1479struct wireless_dev *ath6kl_cfg80211_init(struct device *dev)
1480{
1481 int ret = 0;
1482 struct wireless_dev *wdev;
1483
1484 wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
1485 if (!wdev) {
1486 ath6kl_err("couldn't allocate wireless device\n");
1487 return NULL;
1488 }
1489
1490 /* create a new wiphy for use with cfg80211 */
1491 wdev->wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl));
1492 if (!wdev->wiphy) {
1493 ath6kl_err("couldn't allocate wiphy device\n");
1494 kfree(wdev);
1495 return NULL;
1496 }
1497
1498 /* set device pointer for wiphy */
1499 set_wiphy_dev(wdev->wiphy, dev);
1500
1501 wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
1502 BIT(NL80211_IFTYPE_ADHOC);
1503 /* max num of ssids that can be probed during scanning */
1504 wdev->wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX;
1505 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
1506 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;
1507 wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
1508
1509 wdev->wiphy->cipher_suites = cipher_suites;
1510 wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
1511
1512 ret = wiphy_register(wdev->wiphy);
1513 if (ret < 0) {
1514 ath6kl_err("couldn't register wiphy device\n");
1515 wiphy_free(wdev->wiphy);
1516 kfree(wdev);
1517 return NULL;
1518 }
1519
1520 return wdev;
1521}
1522
1523void ath6kl_cfg80211_deinit(struct ath6kl *ar)
1524{
1525 struct wireless_dev *wdev = ar->wdev;
1526
1527 if (ar->scan_req) {
1528 cfg80211_scan_done(ar->scan_req, true);
1529 ar->scan_req = NULL;
1530 }
1531
1532 if (!wdev)
1533 return;
1534
1535 wiphy_unregister(wdev->wiphy);
1536 wiphy_free(wdev->wiphy);
1537 kfree(wdev);
1538}
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h
new file mode 100644
index 000000000000..a84adc249c61
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h
@@ -0,0 +1,39 @@
1/*
2 * Copyright (c) 2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
#ifndef ATH6KL_CFG80211_H
#define ATH6KL_CFG80211_H

/* wiphy/wireless_dev setup and teardown, implemented in cfg80211.c */
struct wireless_dev *ath6kl_cfg80211_init(struct device *dev);
void ath6kl_cfg80211_deinit(struct ath6kl *ar);

/* notification helpers that report target events up to cfg80211 */
void ath6kl_cfg80211_scan_complete_event(struct ath6kl *ar, int status);

void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
				   u8 *bssid, u16 listen_intvl,
				   u16 beacon_intvl,
				   enum network_type nw_type,
				   u8 beacon_ie_len, u8 assoc_req_len,
				   u8 assoc_resp_len, u8 *assoc_info);

void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason,
				      u8 *bssid, u8 assoc_resp_len,
				      u8 *assoc_info, u16 proto_reason);

void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid,
				       bool ismcast);

#endif /* ATH6KL_CFG80211_H */
diff --git a/drivers/net/wireless/ath/ath6kl/common.h b/drivers/net/wireless/ath/ath6kl/common.h
new file mode 100644
index 000000000000..0a3a1d80d0a4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/common.h
@@ -0,0 +1,183 @@
1/*
2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef COMMON_H
18#define COMMON_H
19
20#include <linux/netdevice.h>
21
22#define ATH6KL_MAX_IE 256
23
24extern int ath6kl_printk(const char *level, const char *fmt, ...);
25
26#define A_CACHE_LINE_PAD 128
27
28/*
29 * Reflects the version of binary interface exposed by ATH6KL target
30 * firmware. Needs to be incremented by 1 for any change in the firmware
31 * that requires upgrade of the driver on the host side for the change to
32 * work correctly
33 */
34#define ATH6KL_ABI_VERSION 1
35
36#define SIGNAL_QUALITY_METRICS_NUM_MAX 2
37
38enum {
39 SIGNAL_QUALITY_METRICS_SNR = 0,
40 SIGNAL_QUALITY_METRICS_RSSI,
41 SIGNAL_QUALITY_METRICS_ALL,
42};
43
44/*
45 * Data Path
46 */
47
48#define WMI_MAX_TX_DATA_FRAME_LENGTH \
49 (1500 + sizeof(struct wmi_data_hdr) + \
50 sizeof(struct ethhdr) + \
51 sizeof(struct ath6kl_llc_snap_hdr))
52
/* An A-MSDU frame; the maximum A-MSDU length of AR6003 is 3839 bytes */
54#define WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH \
55 (3840 + sizeof(struct wmi_data_hdr) + \
56 sizeof(struct ethhdr) + \
57 sizeof(struct ath6kl_llc_snap_hdr))
58
59#define EPPING_ALIGNMENT_PAD \
60 (((sizeof(struct htc_frame_hdr) + 3) & (~0x3)) \
61 - sizeof(struct htc_frame_hdr))
62
/* 802.2 LLC/SNAP encapsulation header carried in front of the ethertype */
struct ath6kl_llc_snap_hdr {
	u8 dsap;		/* destination service access point */
	u8 ssap;		/* source service access point */
	u8 cntl;		/* control field */
	u8 org_code[3];		/* organizationally unique identifier */
	__be16 eth_type;	/* encapsulated protocol type, big endian */
} __packed;

/* cipher selections; values are distinct bits so they can be OR'd */
enum crypto_type {
	NONE_CRYPT = 0x01,
	WEP_CRYPT = 0x02,
	TKIP_CRYPT = 0x04,
	AES_CRYPT = 0x08,
};

#define ATH6KL_NODE_HASHSIZE 32
/* simple hash is enough for variation of macaddr */
#define ATH6KL_NODE_HASH(addr) \
	(((const u8 *)(addr))[ETH_ALEN - 1] % \
	 ATH6KL_NODE_HASHSIZE)

/*
 * Table of ath6kl_node instances. Each ieee80211com
 * has at least one for holding the scan candidates.
 * When operating as an access point or in ibss mode there
 * is a second table for associated stations or neighbors.
 */
struct ath6kl_node_table {
	void *nt_wmi;		/* back reference */
	spinlock_t nt_nodelock;	/* on node table */
	struct bss *nt_node_first;	/* information of all nodes */
	struct bss *nt_node_last;	/* information of all nodes */
	struct bss *nt_hash[ATH6KL_NODE_HASHSIZE];	/* mac-hash buckets */
	const char *nt_name;	/* for debugging */
	u32 nt_node_age;	/* node aging time */
};

#define WLAN_NODE_INACT_TIMEOUT_MSEC 120000
#define WLAN_NODE_INACT_CNT 4

/*
 * Pointers to the information elements of a captured management frame,
 * one member per IE of interest. NOTE(review): presumably these point
 * into the node's raw frame buffer — confirm against the IE parser.
 */
struct ath6kl_common_ie {
	u16 ie_chan;
	u8 *ie_tstamp;
	u8 *ie_ssid;
	u8 *ie_rates;
	u8 *ie_xrates;		/* extended supported rates */
	u8 *ie_country;
	u8 *ie_wpa;
	u8 *ie_rsn;
	u8 *ie_wmm;
	u8 *ie_ath;		/* Atheros vendor IE */
	u16 ie_capInfo;
	u16 ie_beaconInt;
	u8 *ie_tim;
	u8 *ie_chswitch;
	u8 ie_erp;
	u8 *ie_wsc;
	u8 *ie_htcap;		/* HT capabilities */
	u8 *ie_htop;		/* HT operation */
};

/* one known BSS: address, signal info, list/hash linkage and raw frame */
struct bss {
	u8 ni_macaddr[ETH_ALEN];
	u8 ni_snr;
	s16 ni_rssi;
	struct bss *ni_list_next;	/* all-nodes list linkage */
	struct bss *ni_list_prev;
	struct bss *ni_hash_next;	/* hash bucket linkage */
	struct bss *ni_hash_prev;
	struct ath6kl_common_ie ni_cie;	/* parsed information elements */
	u8 *ni_buf;			/* raw frame body */
	u16 ni_framelen;
	struct ath6kl_node_table *ni_table;	/* owning table */
	u32 ni_refcnt;

	u32 ni_tstamp;
	u32 ni_actcnt;
};
141
/* opaque types owned by the HTC and core layers */
struct htc_endpoint_credit_dist;
struct ath6kl;
enum htc_credit_dist_reason;
struct htc_credit_state_info;

/* BSS node table management */
struct bss *wlan_node_alloc(int wh_size);
void wlan_node_free(struct bss *ni);
void wlan_setup_node(struct ath6kl_node_table *nt, struct bss *ni,
		     const u8 *mac_addr);
struct bss *wlan_find_node(struct ath6kl_node_table *nt,
			   const u8 *mac_addr);
void wlan_node_reclaim(struct ath6kl_node_table *nt, struct bss *ni);
void wlan_free_allnodes(struct ath6kl_node_table *nt);
void wlan_iterate_nodes(struct ath6kl_node_table *nt,
			void (*f) (void *arg, struct bss *),
			void *arg);

void wlan_node_table_init(void *wmip, struct ath6kl_node_table *nt);
void wlan_node_table_cleanup(struct ath6kl_node_table *nt);

void wlan_refresh_inactive_nodes(struct ath6kl_node_table *nt);

struct bss *wlan_find_ssid_node(struct ath6kl_node_table *nt, u8 *ssid,
				u32 ssid_len, bool is_wpa2, bool match_ssid);

void wlan_node_return(struct ath6kl_node_table *nt, struct bss *ni);

/* HTC credit distribution */
int ath6k_setup_credit_dist(void *htc_handle,
			    struct htc_credit_state_info *cred_info);
void ath6k_credit_distribute(struct htc_credit_state_info *cred_inf,
			     struct list_head *epdist_list,
			     enum htc_credit_dist_reason reason);
void ath6k_credit_init(struct htc_credit_state_info *cred_inf,
		       struct list_head *ep_list,
		       int tot_credits);
void ath6k_seek_credits(struct htc_credit_state_info *cred_inf,
			struct htc_endpoint_credit_dist *ep_dist);

/* core driver lifecycle */
struct ath6kl *ath6kl_core_alloc(struct device *sdev);
int ath6kl_core_init(struct ath6kl *ar);
int ath6kl_unavail_ev(struct ath6kl *ar);
struct sk_buff *ath6kl_buf_alloc(int size);
#endif /* COMMON_H */
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
new file mode 100644
index 000000000000..86177f0b98a5
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -0,0 +1,546 @@
1/*
2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef CORE_H
18#define CORE_H
19
20#include <linux/etherdevice.h>
21#include <linux/rtnetlink.h>
22#include <linux/firmware.h>
23#include <linux/sched.h>
24#include <net/cfg80211.h>
25#include "htc.h"
26#include "wmi.h"
27#include "bmi.h"
28
29#define MAX_ATH6KL 1
30#define ATH6KL_MAX_RX_BUFFERS 16
31#define ATH6KL_BUFFER_SIZE 1664
32#define ATH6KL_MAX_AMSDU_RX_BUFFERS 4
33#define ATH6KL_AMSDU_REFILL_THRESHOLD 3
34#define ATH6KL_AMSDU_BUFFER_SIZE (WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH + 128)
35#define MAX_MSDU_SUBFRAME_PAYLOAD_LEN 1508
36#define MIN_MSDU_SUBFRAME_PAYLOAD_LEN 46
37
38#define USER_SAVEDKEYS_STAT_INIT 0
39#define USER_SAVEDKEYS_STAT_RUN 1
40
41#define ATH6KL_TX_TIMEOUT 10
42#define ATH6KL_MAX_ENDPOINTS 4
43#define MAX_NODE_NUM 15
44
45/* MAX_HI_COOKIE_NUM are reserved for high priority traffic */
46#define MAX_DEF_COOKIE_NUM 180
47#define MAX_HI_COOKIE_NUM 18 /* 10% of MAX_COOKIE_NUM */
48#define MAX_COOKIE_NUM (MAX_DEF_COOKIE_NUM + MAX_HI_COOKIE_NUM)
49
50#define MAX_DEFAULT_SEND_QUEUE_DEPTH (MAX_DEF_COOKIE_NUM / WMM_NUM_AC)
51
52#define DISCON_TIMER_INTVAL 10000 /* in msec */
53#define A_DEFAULT_LISTEN_INTERVAL 100
54#define A_MAX_WOW_LISTEN_INTERVAL 1000
55
56/* AR6003 1.0 definitions */
57#define AR6003_REV1_VERSION 0x300002ba
58
59/* AR6003 2.0 definitions */
60#define AR6003_REV2_VERSION 0x30000384
61#define AR6003_REV2_PATCH_DOWNLOAD_ADDRESS 0x57e910
62#define AR6003_REV2_OTP_FILE "ath6k/AR6003/hw2.0/otp.bin.z77"
63#define AR6003_REV2_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athwlan.bin.z77"
64#define AR6003_REV2_PATCH_FILE "ath6k/AR6003/hw2.0/data.patch.bin"
65#define AR6003_REV2_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.bin"
66#define AR6003_REV2_DEFAULT_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.SD31.bin"
67
68/* AR6003 3.0 definitions */
69#define AR6003_REV3_VERSION 0x30000582
70#define AR6003_REV3_OTP_FILE "ath6k/AR6003/hw2.1.1/otp.bin"
71#define AR6003_REV3_FIRMWARE_FILE "ath6k/AR6003/hw2.1.1/athwlan.bin"
72#define AR6003_REV3_PATCH_FILE "ath6k/AR6003/hw2.1.1/data.patch.bin"
73#define AR6003_REV3_BOARD_DATA_FILE "ath6k/AR6003/hw2.1.1/bdata.bin"
74#define AR6003_REV3_DEFAULT_BOARD_DATA_FILE \
75 "ath6k/AR6003/hw2.1.1/bdata.SD31.bin"
76
77/* Per STA data, used in AP mode */
78#define STA_PS_AWAKE BIT(0)
79#define STA_PS_SLEEP BIT(1)
80#define STA_PS_POLLED BIT(2)
81
82/* HTC TX packet tagging definitions */
83#define ATH6KL_CONTROL_PKT_TAG HTC_TX_PACKET_TAG_USER_DEFINED
84#define ATH6KL_DATA_PKT_TAG (ATH6KL_CONTROL_PKT_TAG + 1)
85
86#define AR6003_CUST_DATA_SIZE 16
87
88#define AGGR_WIN_IDX(x, y) ((x) % (y))
89#define AGGR_INCR_IDX(x, y) AGGR_WIN_IDX(((x) + 1), (y))
90#define AGGR_DCRM_IDX(x, y) AGGR_WIN_IDX(((x) - 1), (y))
91#define ATH6KL_MAX_SEQ_NO 0xFFF
92#define ATH6KL_NEXT_SEQ_NO(x) (((x) + 1) & ATH6KL_MAX_SEQ_NO)
93
94#define NUM_OF_TIDS 8
95#define AGGR_SZ_DEFAULT 8
96
97#define AGGR_WIN_SZ_MIN 2
98#define AGGR_WIN_SZ_MAX 8
99
100#define TID_WINDOW_SZ(_x) ((_x) << 1)
101
102#define AGGR_NUM_OF_FREE_NETBUFS 16
103
104#define AGGR_RX_TIMEOUT 400 /* in ms */
105
106#define WMI_TIMEOUT (2 * HZ)
107
108#define MBOX_YIELD_LIMIT 99
109
/* configuration flags */
111/*
 * ATH6KL_CONF_IGNORE_ERP_BARKER: Ignore the Barker preamble in the
 * ERP IE of the beacon when determining short preamble support for
 * the (Re)Assoc request.
115 * ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN: Don't send the power
116 * module state transition failure events which happen during
117 * scan, to the host.
118 */
119#define ATH6KL_CONF_IGNORE_ERP_BARKER BIT(0)
120#define ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN BIT(1)
121#define ATH6KL_CONF_ENABLE_11N BIT(2)
122#define ATH6KL_CONF_ENABLE_TX_BURST BIT(3)
123
/* chip power states */
enum wlan_low_pwr_state {
	WLAN_POWER_STATE_ON,
	WLAN_POWER_STATE_CUT_PWR,
	WLAN_POWER_STATE_DEEP_SLEEP,
	WLAN_POWER_STATE_WOW	/* wake-on-wireless */
};

/* connection state as tracked for the cfg80211 SME */
enum sme_state {
	SME_DISCONNECTED,
	SME_CONNECTING,
	SME_CONNECTED
};

/* administrative state of the wlan interface */
enum ath6kl_wlan_state {
	WLAN_DISABLED,
	WLAN_ENABLED
};
141
/* one slot of the per-TID rx reorder hold queue */
struct skb_hold_q {
	struct sk_buff *skb;
	bool is_amsdu;		/* buffered frame is an A-MSDU */
	u16 seq_no;
};

/* per-TID rx aggregation (reorder) state */
struct rxtid {
	bool aggr;		/* aggregation active for this TID */
	bool progress;
	bool timer_mon;
	u16 win_sz;		/* reorder window size */
	u16 seq_next;		/* next expected sequence number */
	u32 hold_q_sz;
	struct skb_hold_q *hold_q;
	struct sk_buff_head q;
	spinlock_t lock;
};

/* per-TID rx aggregation counters (statistics only) */
struct rxtid_stats {
	u32 num_into_aggr;
	u32 num_dups;
	u32 num_oow;		/* out-of-window frames */
	u32 num_mpdu;
	u32 num_amsdu;
	u32 num_delivered;
	u32 num_timeouts;
	u32 num_hole;
	u32 num_bar;
};

/* top-level rx aggregation context */
struct aggr_info {
	u8 aggr_sz;
	u8 timer_scheduled;
	struct timer_list timer;
	struct net_device *dev;
	struct rxtid rx_tid[NUM_OF_TIDS];
	struct sk_buff_head free_q;
	struct rxtid_stats stat[NUM_OF_TIDS];
};

/* one stored WEP key */
struct ath6kl_wep_key {
	u8 key_index;
	u8 key_len;
	u8 key[64];
};

#define ATH6KL_KEY_SEQ_LEN 8

/* generic key material plus its replay-counter sequence */
struct ath6kl_key {
	u8 key[WLAN_MAX_KEY_LEN];
	u8 key_len;
	u8 seq[ATH6KL_KEY_SEQ_LEN];
	u8 seq_len;
	u32 cipher;
};

/* mac address to endpoint mapping; see node_map[] in struct ath6kl */
struct ath6kl_node_mapping {
	u8 mac_addr[ETH_ALEN];
	u8 ep_id;
	u8 tx_pend;
};

/* per-tx-packet bookkeeping tying an skb to its HTC packet */
struct ath6kl_cookie {
	struct sk_buff *skb;
	u32 map_no;
	struct htc_packet htc_pkt;
	struct ath6kl_cookie *arc_list_next;	/* free-list linkage */
};
210
/* per-associated-station state used in AP mode (see STA_PS_* flags) */
struct ath6kl_sta {
	u16 sta_flags;			/* STA_PS_* bits */
	u8 mac[ETH_ALEN];
	u8 aid;				/* association id */
	u8 keymgmt;
	u8 ucipher;			/* unicast cipher */
	u8 auth;
	u8 wpa_ie[ATH6KL_MAX_IE];
	struct sk_buff_head psq;	/* power-save frame queue */
	spinlock_t psq_lock;
};

/* version numbers reported by the target */
struct ath6kl_version {
	u32 target_ver;
	u32 wlan_ver;
	u32 abi_ver;
};

/* bootloader messaging interface (BMI) state */
struct ath6kl_bmi {
	u32 cmd_credits;
	bool done_sent;		/* BMI "done" command already issued */
	u8 *cmd_buf;
};
234
/* statistics reported by the target firmware */
struct target_stats {
	/* tx counters */
	u64 tx_pkt;
	u64 tx_byte;
	u64 tx_ucast_pkt;
	u64 tx_ucast_byte;
	u64 tx_mcast_pkt;
	u64 tx_mcast_byte;
	u64 tx_bcast_pkt;
	u64 tx_bcast_byte;
	u64 tx_rts_success_cnt;
	u64 tx_pkt_per_ac[4];

	/* tx error/retry counters */
	u64 tx_err;
	u64 tx_fail_cnt;
	u64 tx_retry_cnt;
	u64 tx_mult_retry_cnt;
	u64 tx_rts_fail_cnt;

	/* rx counters */
	u64 rx_pkt;
	u64 rx_byte;
	u64 rx_ucast_pkt;
	u64 rx_ucast_byte;
	u64 rx_mcast_pkt;
	u64 rx_mcast_byte;
	u64 rx_bcast_pkt;
	u64 rx_bcast_byte;
	u64 rx_frgment_pkt;

	/* rx error counters */
	u64 rx_err;
	u64 rx_crc_err;
	u64 rx_key_cache_miss;
	u64 rx_decrypt_err;
	u64 rx_dupl_frame;

	/* TKIP/CCMP integrity counters */
	u64 tkip_local_mic_fail;
	u64 tkip_cnter_measures_invoked;
	u64 tkip_replays;
	u64 tkip_fmt_err;
	u64 ccmp_fmt_err;
	u64 ccmp_replays;

	u64 pwr_save_fail_cnt;

	/* connection / roam event counters */
	u64 cs_bmiss_cnt;
	u64 cs_low_rssi_cnt;
	u64 cs_connect_cnt;
	u64 cs_discon_cnt;

	s32 tx_ucast_rate;	/* last unicast tx rate, in kbps */
	s32 rx_ucast_rate;

	u32 lq_val;

	u32 wow_pkt_dropped;
	u16 wow_evt_discarded;

	s16 noise_floor_calib;
	s16 cs_rssi;		/* reported as the station signal level */
	s16 cs_ave_beacon_rssi;
	u8 cs_ave_beacon_snr;
	u8 cs_last_roam_msec;
	u8 cs_snr;

	u8 wow_host_pkt_wakeups;
	u8 wow_host_evt_wakeups;

	/* ARP offload counters */
	u32 arp_received;
	u32 arp_matched;
	u32 arp_replied;
};
305
/* HTC/GMBOX mailbox addresses and sizes used by the host interface */
struct ath6kl_mbox_info {
	u32 htc_addr;
	u32 htc_ext_addr;	/* extended HTC mailbox address */
	u32 htc_ext_sz;

	u32 block_size;

	u32 gmbox_addr;

	u32 gmbox_sz;
};
317
318/*
319 * 802.11i defines an extended IV for use with non-WEP ciphers.
320 * When the EXTIV bit is set in the key id byte an additional
321 * 4 bytes immediately follow the IV for TKIP. For CCMP the
322 * EXTIV bit is likewise set but the 8 bytes represent the
323 * CCMP header rather than IV+extended-IV.
324 */
325
326#define ATH6KL_KEYBUF_SIZE 16
327#define ATH6KL_MICBUF_SIZE (8+8) /* space for both tx and rx */
328
329#define ATH6KL_KEY_XMIT 0x01
330#define ATH6KL_KEY_RECV 0x02
331#define ATH6KL_KEY_DEFAULT 0x80 /* default xmit key */
332
333/*
334 * WPA/RSN get/set key request. Specify the key/cipher
335 * type and whether the key is to be used for sending and/or
336 * receiving. The key index should be set only when working
337 * with global keys (use IEEE80211_KEYIX_NONE for ``no index'').
338 * Otherwise a unicast/pairwise key is specified by the bssid
 * (on a station) or mac address (on an ap). The key length
340 * must include any MIC key data; otherwise it should be no
341 * more than ATH6KL_KEYBUF_SIZE.
342 */
/* WPA/RSN get/set key request; see the description in the comment above */
struct ath6kl_req_key {
	u8 ik_type;	/* key/cipher type */
	u8 ik_pad;
	u16 ik_keyix;	/* key index */
	u8 ik_keylen;	/* key length in bytes */
	u8 ik_flags;	/* ATH6KL_KEY_XMIT/RECV/DEFAULT */
	u8 ik_macaddr[ETH_ALEN];
	u64 ik_keyrsc;	/* key receive sequence counter */
	u64 ik_keytsc;	/* key transmit sequence counter */
	u8 ik_keydata[ATH6KL_KEYBUF_SIZE + ATH6KL_MICBUF_SIZE];
};
354
/* bit numbers for the 'flag' word in struct ath6kl (test_bit/set_bit) */
#define WMI_ENABLED 0
#define WMI_READY 1
#define CONNECTED 2
#define STATS_UPDATE_PEND 3
#define CONNECT_PEND 4
#define WMM_ENABLED 5
#define NETQ_STOPPED 6
#define WMI_CTRL_EP_FULL 7
#define DTIM_EXPIRED 8
#define DESTROY_IN_PROGRESS 9
#define NETDEV_REGISTERED 10
#define SKIP_SCAN 11
368
/*
 * Top-level per-device context: transport handles (BMI/HTC/WMI), the
 * current connection profile, keys, AP-mode station table, statistics,
 * firmware image buffers and the cfg80211 state.
 */
struct ath6kl {
	struct device *dev;
	struct net_device *net_dev;
	struct ath6kl_bmi bmi;
	const struct ath6kl_hif_ops *hif_ops;
	struct wmi *wmi;
	int tx_pending[ENDPOINT_MAX];
	int total_tx_data_pend;
	struct htc_target *htc_target;
	void *hif_priv;		/* bus-specific private data */
	spinlock_t lock;
	struct semaphore sem;	/* serializes wmi request/response pairs */
	int ssid_len;
	u8 ssid[IEEE80211_MAX_SSID_LEN];
	u8 next_mode;		/* network type to use on the next connect */
	u8 nw_type;		/* current network type (infra/adhoc) */
	u8 dot11_auth_mode;
	u8 auth_mode;
	u8 prwise_crypto;	/* pairwise cipher */
	u8 prwise_crypto_len;
	u8 grp_crypto;		/* group cipher */
	u8 grp_crpto_len;
	u8 def_txkey_index;	/* default WEP tx key index */
	struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
	u8 bssid[ETH_ALEN];	/* bssid of the current association */
	u8 req_bssid[ETH_ALEN];	/* bssid requested by the connect call */
	u16 ch_hint;		/* preferred channel (center freq, MHz) */
	u16 bss_ch;
	u16 listen_intvl_b;
	u16 listen_intvl_t;
	struct ath6kl_version version;
	u32 target_type;
	u8 tx_pwr;		/* last known tx power; 0 = not yet known */
	struct net_device_stats net_stats;
	struct target_stats target_stats;
	enum ath6kl_wlan_state wlan_state;
	struct ath6kl_node_mapping node_map[MAX_NODE_NUM];
	u8 ibss_ps_enable;
	u8 node_num;
	u8 next_ep_id;
	struct ath6kl_cookie *cookie_list;	/* free cookie list head */
	u32 cookie_count;
	enum htc_endpoint_id ac2ep_map[WMM_NUM_AC];
	bool ac_stream_active[WMM_NUM_AC];
	u8 ac_stream_pri_map[WMM_NUM_AC];
	u8 hiac_stream_active_pri;
	u8 ep2ac_map[ENDPOINT_MAX];
	enum htc_endpoint_id ctrl_ep;
	struct htc_credit_state_info credit_state_info;
	u32 connect_ctrl_flags;
	u32 user_key_ctrl;
	u8 usr_bss_filter;
	struct ath6kl_sta sta_list[AP_MAX_NUM_STA];	/* AP mode stations */
	u8 sta_list_index;
	struct ath6kl_req_key ap_mode_bkey;
	struct sk_buff_head mcastpsq;	/* multicast power-save queue */
	spinlock_t mcastpsq_lock;
	u8 intra_bss;
	struct aggr_info *aggr_cntxt;	/* rx aggregation state */
	struct wmi_ap_mode_stat ap_stats;
	u8 ap_country_code[3];
	struct list_head amsdu_rx_buffer_queue;
	struct timer_list disconnect_timer;
	u8 rx_meta_ver;
	struct wireless_dev *wdev;
	struct cfg80211_scan_request *scan_req;	/* in-flight scan, if any */
	struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
	enum sme_state sme_state;
	enum wlan_low_pwr_state wlan_pwr_state;
	struct wmi_scan_params_cmd sc_params;
#define AR_MCAST_FILTER_MAC_ADDR_SIZE 4
	u8 auto_auth_stage;

	u16 conf_flags;		/* ATH6KL_CONF_* bits */
	wait_queue_head_t event_wq;	/* woken by wmi event handlers */
	struct ath6kl_mbox_info mbox_info;

	struct ath6kl_cookie cookie_mem[MAX_COOKIE_NUM];
	int reconnect_flag;
	unsigned long flag;	/* bit flags, see WMI_ENABLED etc. above */

	/* firmware image buffers and their lengths */
	u8 *fw_board;
	size_t fw_board_len;

	u8 *fw_otp;
	size_t fw_otp_len;

	u8 *fw;
	size_t fw_len;

	u8 *fw_patch;
	size_t fw_patch_len;

	struct workqueue_struct *ath6kl_wq;
};
464
/* fetch the ath6kl context embedded in the net_device's wireless_dev */
static inline void *ath6kl_priv(struct net_device *dev)
{
	return wdev_priv(dev->ieee80211_ptr);
}

/* move @credits from the free pool into @ep_dist's assigned budget */
static inline void ath6kl_deposit_credit_to_ep(struct htc_credit_state_info
					       *cred_info,
					       struct htc_endpoint_credit_dist
					       *ep_dist, int credits)
{
	ep_dist->credits += credits;
	ep_dist->cred_assngd += credits;
	cred_info->cur_free_credits -= credits;
}
479
/* device lifecycle and setup */
void ath6kl_destroy(struct net_device *dev, unsigned int unregister);
int ath6kl_configure_target(struct ath6kl *ar);
void ath6kl_detect_error(unsigned long ptr);
void disconnect_timer_handler(unsigned long ptr);
void init_netdev(struct net_device *dev);
void ath6kl_cookie_init(struct ath6kl *ar);
void ath6kl_cookie_cleanup(struct ath6kl *ar);

/* data path */
void ath6kl_rx(struct htc_target *target, struct htc_packet *packet);
void ath6kl_tx_complete(void *context, struct list_head *packet_queue);
enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
					       struct htc_packet *packet);
void ath6kl_stop_txrx(struct ath6kl *ar);
void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar);
int ath6kl_access_datadiag(struct ath6kl *ar, u32 address,
			   u8 *data, u32 length, bool read);
int ath6kl_read_reg_diag(struct ath6kl *ar, u32 *address, u32 *data);
void ath6kl_init_profile_info(struct ath6kl *ar);
void ath6kl_tx_data_cleanup(struct ath6kl *ar);
void ath6kl_stop_endpoint(struct net_device *dev, bool keep_profile,
			  bool get_dbglogs);

struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar);
void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie);
int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev);

/* rx aggregation and buffer refill */
struct aggr_info *aggr_init(struct net_device *dev);
void ath6kl_rx_refill(struct htc_target *target,
		      enum htc_endpoint_id endpoint);
void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count);
struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
					    enum htc_endpoint_id endpoint,
					    int len);
void aggr_module_destroy(struct aggr_info *aggr_info);
void aggr_reset_state(struct aggr_info *aggr_info);

/* AP mode station table lookups */
struct ath6kl_sta *ath6kl_find_sta(struct ath6kl *ar, u8 * node_addr);
struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid);

/* target event handlers */
void ath6kl_ready_event(void *devt, u8 * datap, u32 sw_ver, u32 abi_ver);
int ath6kl_control_tx(void *devt, struct sk_buff *skb,
		      enum htc_endpoint_id eid);
void ath6kl_connect_event(struct ath6kl *ar, u16 channel,
			  u8 *bssid, u16 listen_int,
			  u16 beacon_int, enum network_type net_type,
			  u8 beacon_ie_len, u8 assoc_req_len,
			  u8 assoc_resp_len, u8 *assoc_info);
void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason,
			     u8 *bssid, u8 assoc_resp_len,
			     u8 *assoc_info, u16 prot_reason_status);
void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast);
void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr);
void ath6kl_scan_complete_evt(struct ath6kl *ar, int status);
void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len);
void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active);
enum htc_endpoint_id ath6kl_ac2_endpoint_id(void *devt, u8 ac);

void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid);

void ath6kl_dtimexpiry_event(struct ath6kl *ar);
void ath6kl_disconnect(struct ath6kl *ar);
void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid);
void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no,
			     u8 win_sz);
void ath6kl_wakeup_event(void *dev);
void ath6kl_target_failure(struct ath6kl *ar);

#endif /* CORE_H */
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
new file mode 100644
index 000000000000..316136c8b903
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -0,0 +1,150 @@
1/*
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "debug.h"
19
20int ath6kl_printk(const char *level, const char *fmt, ...)
21{
22 struct va_format vaf;
23 va_list args;
24 int rtn;
25
26 va_start(args, fmt);
27
28 vaf.fmt = fmt;
29 vaf.va = &args;
30
31 rtn = printk("%sath6kl: %pV", level, &vaf);
32
33 va_end(args);
34
35 return rtn;
36}
37
38#ifdef CONFIG_ATH6KL_DEBUG
39void ath6kl_dump_registers(struct ath6kl_device *dev,
40 struct ath6kl_irq_proc_registers *irq_proc_reg,
41 struct ath6kl_irq_enable_reg *irq_enable_reg)
42{
43
44 ath6kl_dbg(ATH6KL_DBG_ANY, ("<------- Register Table -------->\n"));
45
46 if (irq_proc_reg != NULL) {
47 ath6kl_dbg(ATH6KL_DBG_ANY,
48 "Host Int status: 0x%x\n",
49 irq_proc_reg->host_int_status);
50 ath6kl_dbg(ATH6KL_DBG_ANY,
51 "CPU Int status: 0x%x\n",
52 irq_proc_reg->cpu_int_status);
53 ath6kl_dbg(ATH6KL_DBG_ANY,
54 "Error Int status: 0x%x\n",
55 irq_proc_reg->error_int_status);
56 ath6kl_dbg(ATH6KL_DBG_ANY,
57 "Counter Int status: 0x%x\n",
58 irq_proc_reg->counter_int_status);
59 ath6kl_dbg(ATH6KL_DBG_ANY,
60 "Mbox Frame: 0x%x\n",
61 irq_proc_reg->mbox_frame);
62 ath6kl_dbg(ATH6KL_DBG_ANY,
63 "Rx Lookahead Valid: 0x%x\n",
64 irq_proc_reg->rx_lkahd_valid);
65 ath6kl_dbg(ATH6KL_DBG_ANY,
66 "Rx Lookahead 0: 0x%x\n",
67 irq_proc_reg->rx_lkahd[0]);
68 ath6kl_dbg(ATH6KL_DBG_ANY,
69 "Rx Lookahead 1: 0x%x\n",
70 irq_proc_reg->rx_lkahd[1]);
71
72 if (dev->ar->mbox_info.gmbox_addr != 0) {
73 /*
74 * If the target supports GMBOX hardware, dump some
75 * additional state.
76 */
77 ath6kl_dbg(ATH6KL_DBG_ANY,
78 "GMBOX Host Int status 2: 0x%x\n",
79 irq_proc_reg->host_int_status2);
80 ath6kl_dbg(ATH6KL_DBG_ANY,
81 "GMBOX RX Avail: 0x%x\n",
82 irq_proc_reg->gmbox_rx_avail);
83 ath6kl_dbg(ATH6KL_DBG_ANY,
84 "GMBOX lookahead alias 0: 0x%x\n",
85 irq_proc_reg->rx_gmbox_lkahd_alias[0]);
86 ath6kl_dbg(ATH6KL_DBG_ANY,
87 "GMBOX lookahead alias 1: 0x%x\n",
88 irq_proc_reg->rx_gmbox_lkahd_alias[1]);
89 }
90
91 }
92
93 if (irq_enable_reg != NULL) {
94 ath6kl_dbg(ATH6KL_DBG_ANY,
95 "Int status Enable: 0x%x\n",
96 irq_enable_reg->int_status_en);
97 ath6kl_dbg(ATH6KL_DBG_ANY, "Counter Int status Enable: 0x%x\n",
98 irq_enable_reg->cntr_int_status_en);
99 }
100 ath6kl_dbg(ATH6KL_DBG_ANY, "<------------------------------->\n");
101}
102
/*
 * Dump one endpoint's credit-distribution record at ATH6KL_DBG_ANY
 * level: identity (endpoint id + service id), flags, and every credit
 * counter, plus the current depth of the endpoint's TX queue reached
 * through the htc_rsvd back-pointer.
 */
static void dump_cred_dist(struct htc_endpoint_credit_dist *ep_dist)
{
    ath6kl_dbg(ATH6KL_DBG_ANY,
               "--- endpoint: %d  svc_id: 0x%X ---\n",
               ep_dist->endpoint, ep_dist->svc_id);
    ath6kl_dbg(ATH6KL_DBG_ANY, " dist_flags     : 0x%X\n",
               ep_dist->dist_flags);
    ath6kl_dbg(ATH6KL_DBG_ANY, " cred_norm      : %d\n",
               ep_dist->cred_norm);
    ath6kl_dbg(ATH6KL_DBG_ANY, " cred_min       : %d\n",
               ep_dist->cred_min);
    ath6kl_dbg(ATH6KL_DBG_ANY, " credits        : %d\n",
               ep_dist->credits);
    ath6kl_dbg(ATH6KL_DBG_ANY, " cred_assngd    : %d\n",
               ep_dist->cred_assngd);
    ath6kl_dbg(ATH6KL_DBG_ANY, " seek_cred      : %d\n",
               ep_dist->seek_cred);
    ath6kl_dbg(ATH6KL_DBG_ANY, " cred_sz        : %d\n",
               ep_dist->cred_sz);
    ath6kl_dbg(ATH6KL_DBG_ANY, " cred_per_msg   : %d\n",
               ep_dist->cred_per_msg);
    ath6kl_dbg(ATH6KL_DBG_ANY, " cred_to_dist   : %d\n",
               ep_dist->cred_to_dist);
    /* htc_rsvd holds the owning htc_endpoint; cast back to read its txq */
    ath6kl_dbg(ATH6KL_DBG_ANY, " txq_depth      : %d\n",
               get_queue_depth(&((struct htc_endpoint *)
                                 ep_dist->htc_rsvd)->txq));
    ath6kl_dbg(ATH6KL_DBG_ANY,
               "----------------------------------\n");
}
132
/*
 * Walk the target's credit-distribution list and dump every endpoint's
 * record, followed by the distribution context totals.  No-op unless
 * the ATH6KL_DBG_TRC debug class is enabled.
 */
void dump_cred_dist_stats(struct htc_target *target)
{
    struct htc_endpoint_credit_dist *ep_list;

    if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_TRC))
        return;

    list_for_each_entry(ep_list, &target->cred_dist_list, list)
        dump_cred_dist(ep_list);

    /* NOTE(review): the second argument is a literal NULL, so "dist:%p"
     * always prints a null pointer - confirm whether &target->cred_dist_list
     * was intended, as at the other "ctxt:%p dist:%p" call sites. */
    ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:%p dist:%p\n",
               target->cred_dist_cntxt, NULL);
    ath6kl_dbg(ATH6KL_DBG_TRC, "credit distribution, total : %d, free : %d\n",
               target->cred_dist_cntxt->total_avail_credits,
               target->cred_dist_cntxt->cur_free_credits);
}
149
150#endif
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
new file mode 100644
index 000000000000..2e6058856a6a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -0,0 +1,104 @@
1/*
2 * Copyright (c) 2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
#ifndef DEBUG_H
#define DEBUG_H

#include "htc_hif.h"

/*
 * Debug log classes.  Each bit selects one message category; the
 * runtime module parameter 'debug_mask' (declared below) controls
 * which categories are emitted.
 */
enum ATH6K_DEBUG_MASK {
    ATH6KL_DBG_WLAN_CONNECT = BIT(0),     /* wlan connect */
    ATH6KL_DBG_WLAN_SCAN    = BIT(1),     /* wlan scan */
    ATH6KL_DBG_WLAN_TX      = BIT(2),     /* wlan tx */
    ATH6KL_DBG_WLAN_RX      = BIT(3),     /* wlan rx */
    ATH6KL_DBG_BMI          = BIT(4),     /* bmi tracing */
    ATH6KL_DBG_HTC_SEND     = BIT(5),     /* htc send */
    ATH6KL_DBG_HTC_RECV     = BIT(6),     /* htc recv */
    ATH6KL_DBG_IRQ          = BIT(7),     /* interrupt processing */
    ATH6KL_DBG_PM           = BIT(8),     /* power management */
    ATH6KL_DBG_WLAN_NODE    = BIT(9),     /* general wlan node tracing */
    ATH6KL_DBG_WMI          = BIT(10),    /* wmi tracing */
    ATH6KL_DBG_TRC          = BIT(11),    /* generic func tracing */
    ATH6KL_DBG_SCATTER      = BIT(12),    /* hif scatter tracing */
    ATH6KL_DBG_WLAN_CFG     = BIT(13),    /* cfg80211 i/f file tracing */
    ATH6KL_DBG_RAW_BYTES    = BIT(14),    /* dump tx/rx and wmi frames */
    ATH6KL_DBG_ANY          = 0xffffffff  /* enable all logs */
};

extern unsigned int debug_mask;
/* Common log entry point; 'level' is a KERN_* printk level string. */
extern int ath6kl_printk(const char *level, const char *fmt, ...)
        __attribute__ ((format (printf, 2, 3)));

/* Unconditional log helpers, one per severity. */
#define ath6kl_info(fmt, ...)                           \
        ath6kl_printk(KERN_INFO, fmt, ##__VA_ARGS__)
#define ath6kl_err(fmt, ...)                            \
        ath6kl_printk(KERN_ERR, fmt, ##__VA_ARGS__)
#define ath6kl_warn(fmt, ...)                           \
        ath6kl_printk(KERN_WARNING, fmt, ##__VA_ARGS__)

/* True when any bit of 'mask' is enabled in the runtime debug_mask. */
#define AR_DBG_LVL_CHECK(mask)  (debug_mask & mask)

#ifdef CONFIG_ATH6KL_DEBUG
/* Debug log, emitted only when the category is enabled; evaluates to
 * printk's return value (0 when filtered out). */
#define ath6kl_dbg(mask, fmt, ...)                                      \
        ({                                                              \
         int rtn;                                                       \
         if (debug_mask & mask)                                         \
                rtn = ath6kl_printk(KERN_DEBUG, fmt, ##__VA_ARGS__);    \
         else                                                           \
                rtn = 0;                                                \
                                                                        \
         rtn;                                                           \
         })

/* Log 'msg' then hex-dump 'len' bytes of 'buf', gated on 'mask'. */
static inline void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask,
                                   const char *msg, const void *buf,
                                   size_t len)
{
        if (debug_mask & mask) {
                ath6kl_dbg(mask, "%s\n", msg);
                print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
        }
}

void ath6kl_dump_registers(struct ath6kl_device *dev,
                           struct ath6kl_irq_proc_registers *irq_proc_reg,
                           struct ath6kl_irq_enable_reg *irq_en_reg);
void dump_cred_dist_stats(struct htc_target *target);
#else
/* Debugging disabled: all helpers compile away to empty stubs. */
static inline int ath6kl_dbg(enum ATH6K_DEBUG_MASK dbg_mask,
                             const char *fmt, ...)
{
        return 0;
}

static inline void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask,
                                   const char *msg, const void *buf,
                                   size_t len)
{
}

static inline void ath6kl_dump_registers(struct ath6kl_device *dev,
                struct ath6kl_irq_proc_registers *irq_proc_reg,
                struct ath6kl_irq_enable_reg *irq_en_reg)
{

}
static inline void dump_cred_dist_stats(struct htc_target *target)
{
}
#endif

#endif
diff --git a/drivers/net/wireless/ath/ath6kl/hif-ops.h b/drivers/net/wireless/ath/ath6kl/hif-ops.h
new file mode 100644
index 000000000000..ad4966917e84
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/hif-ops.h
@@ -0,0 +1,67 @@
1/*
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef HIF_OPS_H
18#define HIF_OPS_H
19
20#include "hif.h"
21
22static inline int hif_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
23 u32 len, u32 request)
24{
25 return ar->hif_ops->read_write_sync(ar, addr, buf, len, request);
26}
27
28static inline int hif_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
29 u32 length, u32 request,
30 struct htc_packet *packet)
31{
32 return ar->hif_ops->write_async(ar, address, buffer, length,
33 request, packet);
34}
35static inline void ath6kl_hif_irq_enable(struct ath6kl *ar)
36{
37 return ar->hif_ops->irq_enable(ar);
38}
39
40static inline void ath6kl_hif_irq_disable(struct ath6kl *ar)
41{
42 return ar->hif_ops->irq_disable(ar);
43}
44
45static inline struct hif_scatter_req *hif_scatter_req_get(struct ath6kl *ar)
46{
47 return ar->hif_ops->scatter_req_get(ar);
48}
49
50static inline void hif_scatter_req_add(struct ath6kl *ar,
51 struct hif_scatter_req *s_req)
52{
53 return ar->hif_ops->scatter_req_add(ar, s_req);
54}
55
56static inline int ath6kl_hif_enable_scatter(struct ath6kl *ar,
57 struct hif_dev_scat_sup_info *info)
58{
59 return ar->hif_ops->enable_scatter(ar, info);
60}
61
62static inline void ath6kl_hif_cleanup_scatter(struct ath6kl *ar)
63{
64 return ar->hif_ops->cleanup_scatter(ar);
65}
66
67#endif
diff --git a/drivers/net/wireless/ath/ath6kl/hif.h b/drivers/net/wireless/ath/ath6kl/hif.h
new file mode 100644
index 000000000000..7d39c1769fe4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/hif.h
@@ -0,0 +1,216 @@
1/*
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef HIF_H
18#define HIF_H
19
20#include "common.h"
21#include "core.h"
22
23#include <linux/scatterlist.h>
24
25#define BUS_REQUEST_MAX_NUM 64
26#define HIF_MBOX_BLOCK_SIZE 128
27#define HIF_MBOX0_BLOCK_SIZE 1
28
29#define HIF_DMA_BUFFER_SIZE (32 * 1024)
30#define CMD53_FIXED_ADDRESS 1
31#define CMD53_INCR_ADDRESS 2
32
33#define MAX_SCATTER_REQUESTS 4
34#define MAX_SCATTER_ENTRIES_PER_REQ 16
35#define MAX_SCATTER_REQ_TRANSFER_SIZE (32 * 1024)
36
37#define MANUFACTURER_ID_AR6003_BASE 0x300
38 /* SDIO manufacturer ID and Codes */
39#define MANUFACTURER_ID_ATH6KL_BASE_MASK 0xFF00
40#define MANUFACTURER_CODE 0x271 /* Atheros */
41
42/* Mailbox address in SDIO address space */
43#define HIF_MBOX_BASE_ADDR 0x800
44#define HIF_MBOX_WIDTH 0x800
45
46#define HIF_MBOX_END_ADDR (HTC_MAILBOX_NUM_MAX * HIF_MBOX_WIDTH - 1)
47
48/* version 1 of the chip has only a 12K extended mbox range */
49#define HIF_MBOX0_EXT_BASE_ADDR 0x4000
50#define HIF_MBOX0_EXT_WIDTH (12*1024)
51
52/* GMBOX addresses */
53#define HIF_GMBOX_BASE_ADDR 0x7000
54#define HIF_GMBOX_WIDTH 0x4000
55
56/* interrupt mode register */
57#define CCCR_SDIO_IRQ_MODE_REG 0xF0
58
59/* mode to enable special 4-bit interrupt assertion without clock */
60#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ (1 << 0)
61
/*
 * One queued SDIO bus transaction.  Either a plain buffer read/write
 * (address/buffer/length/request) or, when scat_req is set, a scatter
 * request; 'status' carries the completion result back to the caller.
 */
struct bus_request {
    struct list_head list;        /* linkage on the bus driver's queues */

    /* request data */
    u32 address;

    u8 *buffer;
    u32 length;
    u32 request;                  /* HIF_* direction/mode/address flags */
    struct htc_packet *packet;    /* set for async HTC packet writes */
    int status;

    /* this is a scatter request */
    struct hif_scatter_req *scat_req;
};
77
78/* direction of transfer (read/write) */
79#define HIF_READ 0x00000001
80#define HIF_WRITE 0x00000002
81#define HIF_DIR_MASK (HIF_READ | HIF_WRITE)
82
/*
 * emode - This indicates whether the command is to be executed in a
 *         blocking or non-blocking fashion (HIF_SYNCHRONOUS/
 *         HIF_ASYNCHRONOUS). The read/write data paths in HTC have been
 *         implemented using the asynchronous mode allowing the bus
 *         driver to indicate the completion of operation through the
 *         registered callback routine. The requirement primarily comes
 *         from the contexts these operations get called from (a driver's
 *         transmit context or the ISR context in case of receive).
 *         Support for both of these modes is essential.
 */
94#define HIF_SYNCHRONOUS 0x00000010
95#define HIF_ASYNCHRONOUS 0x00000020
96#define HIF_EMODE_MASK (HIF_SYNCHRONOUS | HIF_ASYNCHRONOUS)
97
98/*
99 * dmode - An interface may support different kinds of commands based on
100 * the tradeoff between the amount of data it can carry and the
101 * setup time. Byte and Block modes are supported (HIF_BYTE_BASIS/
102 * HIF_BLOCK_BASIS). In case of latter, the data is rounded off
103 * to the nearest block size by padding. The size of the block is
104 * configurable at compile time using the HIF_BLOCK_SIZE and is
105 * negotiated with the target during initialization after the
106 * ATH6KL interrupts are enabled.
107 */
108#define HIF_BYTE_BASIS 0x00000040
109#define HIF_BLOCK_BASIS 0x00000080
110#define HIF_DMODE_MASK (HIF_BYTE_BASIS | HIF_BLOCK_BASIS)
111
/*
 * amode - This indicates if the address has to be incremented on ATH6KL
 *         after every read/write operation (HIF_FIXED_ADDRESS/
 *         HIF_INCREMENTAL_ADDRESS).
 */
117#define HIF_FIXED_ADDRESS 0x00000100
118#define HIF_INCREMENTAL_ADDRESS 0x00000200
119#define HIF_AMODE_MASK (HIF_FIXED_ADDRESS | HIF_INCREMENTAL_ADDRESS)
120
121#define HIF_WR_ASYNC_BYTE_INC \
122 (HIF_WRITE | HIF_ASYNCHRONOUS | \
123 HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
124
125#define HIF_WR_ASYNC_BLOCK_INC \
126 (HIF_WRITE | HIF_ASYNCHRONOUS | \
127 HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS)
128
129#define HIF_WR_SYNC_BYTE_FIX \
130 (HIF_WRITE | HIF_SYNCHRONOUS | \
131 HIF_BYTE_BASIS | HIF_FIXED_ADDRESS)
132
133#define HIF_WR_SYNC_BYTE_INC \
134 (HIF_WRITE | HIF_SYNCHRONOUS | \
135 HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
136
137#define HIF_WR_SYNC_BLOCK_INC \
138 (HIF_WRITE | HIF_SYNCHRONOUS | \
139 HIF_BLOCK_BASIS | HIF_INCREMENTAL_ADDRESS)
140
141#define HIF_RD_SYNC_BYTE_INC \
142 (HIF_READ | HIF_SYNCHRONOUS | \
143 HIF_BYTE_BASIS | HIF_INCREMENTAL_ADDRESS)
144
145#define HIF_RD_SYNC_BYTE_FIX \
146 (HIF_READ | HIF_SYNCHRONOUS | \
147 HIF_BYTE_BASIS | HIF_FIXED_ADDRESS)
148
149#define HIF_RD_ASYNC_BLOCK_FIX \
150 (HIF_READ | HIF_ASYNCHRONOUS | \
151 HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS)
152
153#define HIF_RD_SYNC_BLOCK_FIX \
154 (HIF_READ | HIF_SYNCHRONOUS | \
155 HIF_BLOCK_BASIS | HIF_FIXED_ADDRESS)
156
/* One entry of a scatter request: a buffer plus the HTC packet it
 * belongs to (so completion can be reported per packet). */
struct hif_scatter_item {
    u8 *buf;
    int len;
    struct htc_packet *packet;
};

/*
 * A bundled read/write covering several HTC packets in one bus
 * transaction.  Allocated with extra room so scat_list[] can hold
 * up to the negotiated number of entries (declared as [1] here).
 */
struct hif_scatter_req {
    struct list_head list;
    /* address for the read/write operation */
    u32 addr;

    /* request flags */
    u32 req;

    /* total length of entire transfer */
    u32 len;

    u32 flags;
    void (*complete) (struct hif_scatter_req *);   /* async completion */
    int status;
    struct htc_endpoint *ep;
    int scat_entries;                /* number of valid scat_list entries */

    struct hif_scatter_req_priv *req_priv;

    /* bounce buffer for upper layers to copy to/from */
    u8 *virt_dma_buf;

    struct hif_scatter_item scat_list[1];
};

/* Capabilities the bus driver reports for scatter transfers. */
struct hif_dev_scat_sup_info {
    int (*rw_scat_func) (struct ath6kl *ar, struct hif_scatter_req *);
    int max_scat_entries;            /* max entries per request */
    int max_xfer_szper_scatreq;      /* max total bytes per request */
};

/* Bus-driver private part of a scatter request (SDIO sg list). */
struct hif_scatter_req_priv {
    struct bus_request *busrequest;
    struct scatterlist sgentries[MAX_SCATTER_ENTRIES_PER_REQ];
};

/* Vtable the bus driver (e.g. SDIO) registers with the core; see the
 * inline dispatch wrappers in hif-ops.h. */
struct ath6kl_hif_ops {
    int (*read_write_sync)(struct ath6kl *ar, u32 addr, u8 *buf,
                           u32 len, u32 request);
    int (*write_async)(struct ath6kl *ar, u32 address, u8 *buffer,
                       u32 length, u32 request, struct htc_packet *packet);

    void (*irq_enable)(struct ath6kl *ar);
    void (*irq_disable)(struct ath6kl *ar);

    struct hif_scatter_req *(*scatter_req_get)(struct ath6kl *ar);
    void (*scatter_req_add)(struct ath6kl *ar,
                            struct hif_scatter_req *s_req);
    int (*enable_scatter)(struct ath6kl *ar,
                          struct hif_dev_scat_sup_info *info);
    void (*cleanup_scatter)(struct ath6kl *ar);
};
215
216#endif
diff --git a/drivers/net/wireless/ath/ath6kl/htc.c b/drivers/net/wireless/ath/ath6kl/htc.c
new file mode 100644
index 000000000000..95c47bbd1d78
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/htc.c
@@ -0,0 +1,2466 @@
1/*
2 * Copyright (c) 2007-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "htc_hif.h"
19#include "debug.h"
20#include "hif-ops.h"
21#include <asm/unaligned.h>
22
23#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))
24
/*
 * Build the HTC frame header for an outgoing packet in the headroom
 * reserved directly before the payload: packet->buf is moved back by
 * HTC_HDR_LENGTH and the header fields are filled in place.
 */
static void htc_prep_send_pkt(struct htc_packet *packet, u8 flags, int ctrl0,
                              int ctrl1)
{
    struct htc_frame_hdr *hdr;

    packet->buf -= HTC_HDR_LENGTH;
    hdr =  (struct htc_frame_hdr *)packet->buf;

    /* Endianess? - payload length is stored with host byte order here;
     * NOTE(review): the target presumably expects little endian, confirm
     * against the htc_frame_hdr endian annotations. */
    put_unaligned((u16)packet->act_len, &hdr->payld_len);
    hdr->flags = flags;
    hdr->eid = packet->endpoint;
    hdr->ctrl[0] = ctrl0;
    hdr->ctrl[1] = ctrl1;
}
40
/* Return a control TX buffer to the free pool (htc_lock protects it). */
static void htc_reclaim_txctrl_buf(struct htc_target *target,
                                   struct htc_packet *pkt)
{
    spin_lock_bh(&target->htc_lock);
    list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
    spin_unlock_bh(&target->htc_lock);
}
48
49static struct htc_packet *htc_get_control_buf(struct htc_target *target,
50 bool tx)
51{
52 struct htc_packet *packet = NULL;
53 struct list_head *buf_list;
54
55 buf_list = tx ? &target->free_ctrl_txbuf : &target->free_ctrl_rxbuf;
56
57 spin_lock_bh(&target->htc_lock);
58
59 if (list_empty(buf_list)) {
60 spin_unlock_bh(&target->htc_lock);
61 return NULL;
62 }
63
64 packet = list_first_entry(buf_list, struct htc_packet, list);
65 list_del(&packet->list);
66 spin_unlock_bh(&target->htc_lock);
67
68 if (tx)
69 packet->buf = packet->buf_start + HTC_HDR_LENGTH;
70
71 return packet;
72}
73
/*
 * Per-packet send-completion bookkeeping: clear the completion callback,
 * restore packet->buf past the HTC header, and - only when the send
 * failed - log the error, reclaim the credits this packet had consumed
 * and re-run credit distribution under tx_lock.
 */
static void htc_tx_comp_update(struct htc_target *target,
                               struct htc_endpoint *endpoint,
                               struct htc_packet *packet)
{
    packet->completion = NULL;
    /* undo the header prepend done by htc_prep_send_pkt() */
    packet->buf += HTC_HDR_LENGTH;

    if (!packet->status)
        return;

    ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
               packet->status, packet->endpoint, packet->act_len,
               packet->info.tx.cred_used);

    /* on failure to submit, reclaim credits for this packet */
    spin_lock_bh(&target->tx_lock);
    endpoint->cred_dist.cred_to_dist +=
        packet->info.tx.cred_used;
    endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);

    ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
               target->cred_dist_cntxt, &target->cred_dist_list);

    ath6k_credit_distribute(target->cred_dist_cntxt,
                            &target->cred_dist_list,
                            HTC_CREDIT_DIST_SEND_COMPLETE);

    spin_unlock_bh(&target->tx_lock);
}
103
/* Hand a (possibly empty) list of completed TX packets up to the
 * driver layer for final disposal; no-op on an empty list. */
static void htc_tx_complete(struct htc_endpoint *endpoint,
                            struct list_head *txq)
{
    if (list_empty(txq))
        return;

    ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
               "send complete ep %d, (%d pkts)\n",
               endpoint->eid, get_queue_depth(txq));

    ath6kl_tx_complete(endpoint->target->dev->ar, txq);
}
116
117static void htc_tx_comp_handler(struct htc_target *target,
118 struct htc_packet *packet)
119{
120 struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
121 struct list_head container;
122
123 htc_tx_comp_update(target, endpoint, packet);
124 INIT_LIST_HEAD(&container);
125 list_add_tail(&packet->list, &container);
126 /* do completion */
127 htc_tx_complete(endpoint, &container);
128}
129
130static void htc_async_tx_scat_complete(struct hif_scatter_req *scat_req)
131{
132 struct htc_endpoint *endpoint = scat_req->ep;
133 struct htc_target *target = endpoint->target;
134 struct htc_packet *packet;
135 struct list_head tx_compq;
136 int i;
137
138 INIT_LIST_HEAD(&tx_compq);
139
140 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
141 "htc_async_tx_scat_complete total len: %d entries: %d\n",
142 scat_req->len, scat_req->scat_entries);
143
144 if (scat_req->status)
145 ath6kl_err("send scatter req failed: %d\n", scat_req->status);
146
147 /* walk through the scatter list and process */
148 for (i = 0; i < scat_req->scat_entries; i++) {
149 packet = scat_req->scat_list[i].packet;
150 if (!packet) {
151 WARN_ON(1);
152 return;
153 }
154
155 packet->status = scat_req->status;
156 htc_tx_comp_update(target, endpoint, packet);
157 list_add_tail(&packet->list, &tx_compq);
158 }
159
160 /* free scatter request */
161 hif_scatter_req_add(target->dev->ar, scat_req);
162
163 /* complete all packets */
164 htc_tx_complete(endpoint, &tx_compq);
165}
166
/*
 * Push one prepared packet (HTC header already prepended) to the target
 * mailbox.  Runs synchronously when the packet has no completion
 * callback, otherwise asynchronously.  On the sync path the status and
 * buffer-pointer restore happen here; on the async path both are done
 * later by the completion handler.  Returns the bus-layer status.
 */
static int htc_issue_send(struct htc_target *target, struct htc_packet *packet)
{
    int status;
    bool sync = false;
    u32 padded_len, send_len;

    if (!packet->completion)
        sync = true;

    send_len = packet->act_len + HTC_HDR_LENGTH;

    ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s: transmit len : %d (%s)\n",
               __func__, send_len, sync ? "sync" : "async");

    /* pad up to the mailbox block size */
    padded_len = CALC_TXRX_PADDED_LEN(target->dev, send_len);

    ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
               "DevSendPacket, padded len: %d mbox:0x%X (mode:%s)\n",
               padded_len,
               target->dev->ar->mbox_info.htc_addr,
               sync ? "sync" : "async");

    if (sync) {
        status = hif_read_write_sync(target->dev->ar,
                                     target->dev->ar->mbox_info.htc_addr,
                                     packet->buf, padded_len,
                                     HIF_WR_SYNC_BLOCK_INC);

        packet->status = status;
        /* undo the header prepend done by htc_prep_send_pkt() */
        packet->buf += HTC_HDR_LENGTH;
    } else
        status = hif_write_async(target->dev->ar,
                                 target->dev->ar->mbox_info.htc_addr,
                                 packet->buf, padded_len,
                                 HIF_WR_ASYNC_BLOCK_INC, packet);

    return status;
}
205
/*
 * Reserve the credits needed to send 'len' bytes on endpoint 'ep'.
 * Computes the credit count into *req_cred, seeking more credits from
 * the distribution context when the endpoint is short.  On success the
 * credits are deducted; when the endpoint then drops below its
 * per-message threshold, HTC_FLAGS_NEED_CREDIT_UPDATE is OR'ed into
 * *flags so the target is asked for more.  Returns 0 on success,
 * -EINVAL when credits cannot be obtained (caller leaves the packet
 * queued).  Caller must hold tx_lock.
 */
static int htc_check_credits(struct htc_target *target,
                             struct htc_endpoint *ep, u8 *flags,
                             enum htc_endpoint_id eid, unsigned int len,
                             int *req_cred)
{

    *req_cred = (len > target->tgt_cred_sz) ?
        DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;

    ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "creds required:%d got:%d\n",
               *req_cred, ep->cred_dist.credits);

    if (ep->cred_dist.credits < *req_cred) {
        /* the control endpoint never seeks credits */
        if (eid == ENDPOINT_0)
            return -EINVAL;

        /* Seek more credits */
        ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;

        ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
                   target->cred_dist_cntxt, &ep->cred_dist);

        ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);

        ep->cred_dist.seek_cred = 0;

        if (ep->cred_dist.credits < *req_cred) {
            ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                       "not enough credits for ep %d - leaving packet in queue\n",
                       eid);
            return -EINVAL;
        }
    }

    ep->cred_dist.credits -= *req_cred;
    /* 'cred_cosumd' is the (misspelled) consumed-credit counter field */
    ep->ep_st.cred_cosumd += *req_cred;

    /* When we are getting low on credits, ask for more */
    if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
        ep->cred_dist.seek_cred =
            ep->cred_dist.cred_per_msg - ep->cred_dist.credits;

        ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
                   target->cred_dist_cntxt, &ep->cred_dist);

        ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);

        /* see if we were successful in getting more */
        if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
            /* tell the target we need credits ASAP! */
            *flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
            ep->ep_st.cred_low_indicate += 1;
            ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "host needs credits\n");
        }
    }

    return 0;
}
264
265static void htc_tx_pkts_get(struct htc_target *target,
266 struct htc_endpoint *endpoint,
267 struct list_head *queue)
268{
269 int req_cred;
270 u8 flags;
271 struct htc_packet *packet;
272 unsigned int len;
273
274 while (true) {
275
276 flags = 0;
277
278 if (list_empty(&endpoint->txq))
279 break;
280 packet = list_first_entry(&endpoint->txq, struct htc_packet,
281 list);
282
283 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
284 "got head pkt:0x%p , queue depth: %d\n",
285 packet, get_queue_depth(&endpoint->txq));
286
287 len = CALC_TXRX_PADDED_LEN(target->dev,
288 packet->act_len + HTC_HDR_LENGTH);
289
290 if (htc_check_credits(target, endpoint, &flags,
291 packet->endpoint, len, &req_cred))
292 break;
293
294 /* now we can fully move onto caller's queue */
295 packet = list_first_entry(&endpoint->txq, struct htc_packet,
296 list);
297 list_move_tail(&packet->list, queue);
298
299 /* save the number of credits this packet consumed */
300 packet->info.tx.cred_used = req_cred;
301
302 /* all TX packets are handled asynchronously */
303 packet->completion = htc_tx_comp_handler;
304 packet->context = target;
305 endpoint->ep_st.tx_issued += 1;
306
307 /* save send flags */
308 packet->info.tx.flags = flags;
309 packet->info.tx.seqno = endpoint->seqno;
310 endpoint->seqno++;
311 }
312}
313
314/* See if the padded tx length falls on a credit boundary */
315static int htc_get_credit_padding(unsigned int cred_sz, int *len,
316 struct htc_endpoint *ep)
317{
318 int rem_cred, cred_pad;
319
320 rem_cred = *len % cred_sz;
321
322 /* No padding needed */
323 if (!rem_cred)
324 return 0;
325
326 if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN))
327 return -1;
328
329 /*
330 * The transfer consumes a "partial" credit, this
331 * packet cannot be bundled unless we add
332 * additional "dummy" padding (max 255 bytes) to
333 * consume the entire credit.
334 */
335 cred_pad = *len < cred_sz ? (cred_sz - *len) : rem_cred;
336
337 if ((cred_pad > 0) && (cred_pad <= 255))
338 *len += cred_pad;
339 else
340 /* The amount of padding is too large, send as non-bundled */
341 return -1;
342
343 return cred_pad;
344}
345
/*
 * Fill a scatter request with up to 'n_scat' packets pulled from
 * 'queue'.  Each packet gets its HTC header prepended with the bundle
 * flag and any credit padding.  Stops early when a packet needs
 * disallowed padding or would exceed the max bundle transfer size.
 * On failure - or when fewer than HTC_MIN_HTC_MSGS_TO_BUNDLE packets
 * were gathered - everything is rolled back onto 'queue' (headers
 * stripped again) and -EINVAL is returned; otherwise returns 0.
 */
static int htc_setup_send_scat_list(struct htc_target *target,
                                    struct htc_endpoint *endpoint,
                                    struct hif_scatter_req *scat_req,
                                    int n_scat,
                                    struct list_head *queue)
{
    struct htc_packet *packet;
    int i, len, rem_scat, cred_pad;
    int status = 0;

    /* bytes still available in this bundle */
    rem_scat = target->dev->max_tx_bndl_sz;

    for (i = 0; i < n_scat; i++) {
        scat_req->scat_list[i].packet = NULL;

        if (list_empty(queue))
            break;

        packet = list_first_entry(queue, struct htc_packet, list);
        len = CALC_TXRX_PADDED_LEN(target->dev,
                                   packet->act_len + HTC_HDR_LENGTH);

        cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
                                          &len, endpoint);
        if (cred_pad < 0) {
            status = -EINVAL;
            break;
        }

        if (rem_scat < len) {
            /* exceeds what we can transfer */
            status = -ENOSPC;
            break;
        }

        rem_scat -= len;
        /* now remove it from the queue */
        packet = list_first_entry(queue, struct htc_packet, list);
        list_del(&packet->list);

        scat_req->scat_list[i].packet = packet;
        /* prepare packet and flag message as part of a send bundle */
        htc_prep_send_pkt(packet,
                          packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE,
                          cred_pad, packet->info.tx.seqno);
        scat_req->scat_list[i].buf = packet->buf;
        scat_req->scat_list[i].len = len;

        scat_req->len += len;
        scat_req->scat_entries++;
        ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
                   "%d, adding pkt : 0x%p len:%d (remaining space:%d)\n",
                   i, packet, len, rem_scat);
    }

    /* Roll back scatter setup in case of any failure */
    if (status || (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
        /* walk backwards so packets return to 'queue' in original order */
        for (i = scat_req->scat_entries - 1; i >= 0; i--) {
            packet = scat_req->scat_list[i].packet;
            if (packet) {
                /* strip the header prepended above */
                packet->buf += HTC_HDR_LENGTH;
                list_add(&packet->list, queue);
            }
        }
        return -EINVAL;
    }

    return 0;
}
415
416/*
417 * htc_issue_send_bundle: drain a queue and send as bundles
418 * this function may return without fully draining the queue
419 * when
420 *
421 * 1. scatter resources are exhausted
422 * 2. a message that will consume a partial credit will stop the
423 * bundling process early
424 * 3. we drop below the minimum number of messages for a bundle
425 */
426static void htc_issue_send_bundle(struct htc_endpoint *endpoint,
427 struct list_head *queue,
428 int *sent_bundle, int *n_bundle_pkts)
429{
430 struct htc_target *target = endpoint->target;
431 struct hif_scatter_req *scat_req = NULL;
432 struct hif_dev_scat_sup_info hif_info;
433 int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;
434
435 hif_info = target->dev->hif_scat_info;
436
437 while (true) {
438 n_scat = get_queue_depth(queue);
439 n_scat = min(n_scat, target->msg_per_bndl_max);
440
441 if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE)
442 /* not enough to bundle */
443 break;
444
445 scat_req = hif_scatter_req_get(target->dev->ar);
446
447 if (!scat_req) {
448 /* no scatter resources */
449 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
450 "no more scatter resources\n");
451 break;
452 }
453
454 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "pkts to scatter: %d\n",
455 n_scat);
456
457 scat_req->len = 0;
458 scat_req->scat_entries = 0;
459
460 if (htc_setup_send_scat_list(target, endpoint, scat_req,
461 n_scat, queue)) {
462 hif_scatter_req_add(target->dev->ar, scat_req);
463 break;
464 }
465
466 /* send path is always asynchronous */
467 scat_req->complete = htc_async_tx_scat_complete;
468 scat_req->ep = endpoint;
469 n_sent_bundle++;
470 tot_pkts_bundle += scat_req->scat_entries;
471
472 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
473 "send scatter total bytes: %d , entries: %d\n",
474 scat_req->len, scat_req->scat_entries);
475 ath6kldev_submit_scat_req(target->dev, scat_req, false);
476 }
477
478 *sent_bundle = n_sent_bundle;
479 *n_bundle_pkts = tot_pkts_bundle;
480 ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_issue_send_bundle (sent:%d)\n",
481 n_sent_bundle);
482
483 return;
484}
485
/*
 * Drain the endpoint's TX queue and hand packets to the HIF layer,
 * bundling when enabled.  Re-entrant calls are coalesced via
 * tx_proc_cnt: only the first caller drains; later callers bail out
 * and let the active drainer pick up their packets.
 * Called and returns with tx_lock NOT held; the lock is taken and
 * dropped internally around the actual sends.
 */
static void htc_tx_from_ep_txq(struct htc_target *target,
			       struct htc_endpoint *endpoint)
{
	struct list_head txq;
	struct htc_packet *packet;
	int bundle_sent;
	int n_pkts_bundle;

	spin_lock_bh(&target->tx_lock);

	/* busy-guard: another context is already draining this endpoint */
	endpoint->tx_proc_cnt++;
	if (endpoint->tx_proc_cnt > 1) {
		endpoint->tx_proc_cnt--;
		spin_unlock_bh(&target->tx_lock);
		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_try_send (busy)\n");
		return;
	}

	/*
	 * drain the endpoint TX queue for transmission as long
	 * as we have enough credits.
	 */
	INIT_LIST_HEAD(&txq);

	while (true) {

		if (list_empty(&endpoint->txq))
			break;

		/* pull as many packets as current credits allow */
		htc_tx_pkts_get(target, endpoint, &txq);

		if (list_empty(&txq))
			break;

		/* issue sends without holding the lock */
		spin_unlock_bh(&target->tx_lock);

		bundle_sent = 0;
		n_pkts_bundle = 0;

		while (true) {
			/* try to send a bundle on each pass */
			if ((target->tx_bndl_enable) &&
			    (get_queue_depth(&txq) >=
			    HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
				int temp1 = 0, temp2 = 0;

				htc_issue_send_bundle(endpoint, &txq,
						      &temp1, &temp2);
				bundle_sent += temp1;
				n_pkts_bundle += temp2;
			}

			if (list_empty(&txq))
				break;

			/* leftovers that could not be bundled go out singly */
			packet = list_first_entry(&txq, struct htc_packet,
						  list);
			list_del(&packet->list);

			htc_prep_send_pkt(packet, packet->info.tx.flags,
					  0, packet->info.tx.seqno);
			htc_issue_send(target, packet);
		}

		spin_lock_bh(&target->tx_lock);

		endpoint->ep_st.tx_bundles += bundle_sent;
		endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;
	}

	/* release the busy-guard under the lock */
	endpoint->tx_proc_cnt = 0;
	spin_unlock_bh(&target->tx_lock);
}
559
560static bool htc_try_send(struct htc_target *target,
561 struct htc_endpoint *endpoint,
562 struct htc_packet *tx_pkt)
563{
564 struct htc_ep_callbacks ep_cb;
565 int txq_depth;
566 bool overflow = false;
567
568 ep_cb = endpoint->ep_cb;
569
570 spin_lock_bh(&target->tx_lock);
571 txq_depth = get_queue_depth(&endpoint->txq);
572 spin_unlock_bh(&target->tx_lock);
573
574 if (txq_depth >= endpoint->max_txq_depth)
575 overflow = true;
576
577 if (overflow)
578 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
579 "ep %d, tx queue will overflow :%d , tx depth:%d, max:%d\n",
580 endpoint->eid, overflow, txq_depth,
581 endpoint->max_txq_depth);
582
583 if (overflow && ep_cb.tx_full) {
584 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
585 "indicating overflowed tx packet: 0x%p\n", tx_pkt);
586
587 if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
588 HTC_SEND_FULL_DROP) {
589 endpoint->ep_st.tx_dropped += 1;
590 return false;
591 }
592 }
593
594 spin_lock_bh(&target->tx_lock);
595 list_add_tail(&tx_pkt->list, &endpoint->txq);
596 spin_unlock_bh(&target->tx_lock);
597
598 htc_tx_from_ep_txq(target, endpoint);
599
600 return true;
601}
602
/*
 * Walk all endpoints in credit-distribution (priority) order and kick
 * transmission on any that still have packets queued — used after
 * credits have been returned, to restart stalled queues.
 */
static void htc_chk_ep_txq(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	struct htc_endpoint_credit_dist *cred_dist;

	/*
	 * Run through the credit distribution list to see if there are
	 * packets queued. NOTE: no locks need to be taken since the
	 * distribution list is not dynamic (cannot be re-ordered) and we
	 * are not modifying any state.
	 */
	list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
		endpoint = (struct htc_endpoint *)cred_dist->htc_rsvd;

		spin_lock_bh(&target->tx_lock);
		if (!list_empty(&endpoint->txq)) {
			ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
				   "ep %d has %d credits and %d packets in tx queue\n",
				   cred_dist->endpoint,
				   endpoint->cred_dist.credits,
				   get_queue_depth(&endpoint->txq));
			/* drop the lock: the tx path takes it itself */
			spin_unlock_bh(&target->tx_lock);
			/*
			 * Try to start the stalled queue, this list is
			 * ordered by priority. If there are credits
			 * available the highest priority queue will get a
			 * chance to reclaim credits from lower priority
			 * ones.
			 */
			htc_tx_from_ep_txq(target, endpoint);
			spin_lock_bh(&target->tx_lock);
		}
		spin_unlock_bh(&target->tx_lock);
	}
}
638
639static int htc_setup_tx_complete(struct htc_target *target)
640{
641 struct htc_packet *send_pkt = NULL;
642 int status;
643
644 send_pkt = htc_get_control_buf(target, true);
645
646 if (!send_pkt)
647 return -ENOMEM;
648
649 if (target->htc_tgt_ver >= HTC_VERSION_2P1) {
650 struct htc_setup_comp_ext_msg *setup_comp_ext;
651 u32 flags = 0;
652
653 setup_comp_ext =
654 (struct htc_setup_comp_ext_msg *)send_pkt->buf;
655 memset(setup_comp_ext, 0, sizeof(*setup_comp_ext));
656 setup_comp_ext->msg_id =
657 cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);
658
659 if (target->msg_per_bndl_max > 0) {
660 /* Indicate HTC bundling to the target */
661 flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN;
662 setup_comp_ext->msg_per_rxbndl =
663 target->msg_per_bndl_max;
664 }
665
666 memcpy(&setup_comp_ext->flags, &flags,
667 sizeof(setup_comp_ext->flags));
668 set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
669 sizeof(struct htc_setup_comp_ext_msg),
670 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
671
672 } else {
673 struct htc_setup_comp_msg *setup_comp;
674 setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf;
675 memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
676 setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
677 set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
678 sizeof(struct htc_setup_comp_msg),
679 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
680 }
681
682 /* we want synchronous operation */
683 send_pkt->completion = NULL;
684 htc_prep_send_pkt(send_pkt, 0, 0, 0);
685 status = htc_issue_send(target, send_pkt);
686
687 if (send_pkt != NULL)
688 htc_reclaim_txctrl_buf(target, send_pkt);
689
690 return status;
691}
692
693void htc_set_credit_dist(struct htc_target *target,
694 struct htc_credit_state_info *cred_dist_cntxt,
695 u16 srvc_pri_order[], int list_len)
696{
697 struct htc_endpoint *endpoint;
698 int i, ep;
699
700 target->cred_dist_cntxt = cred_dist_cntxt;
701
702 list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
703 &target->cred_dist_list);
704
705 for (i = 0; i < list_len; i++) {
706 for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
707 endpoint = &target->endpoint[ep];
708 if (endpoint->svc_id == srvc_pri_order[i]) {
709 list_add_tail(&endpoint->cred_dist.list,
710 &target->cred_dist_list);
711 break;
712 }
713 }
714 if (ep >= ENDPOINT_MAX) {
715 WARN_ON(1);
716 return;
717 }
718 }
719}
720
721int htc_tx(struct htc_target *target, struct htc_packet *packet)
722{
723 struct htc_endpoint *endpoint;
724 struct list_head queue;
725
726 ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
727 "htc_tx: ep id: %d, buf: 0x%p, len: %d\n",
728 packet->endpoint, packet->buf, packet->act_len);
729
730 if (packet->endpoint >= ENDPOINT_MAX) {
731 WARN_ON(1);
732 return -EINVAL;
733 }
734
735 endpoint = &target->endpoint[packet->endpoint];
736
737 if (!htc_try_send(target, endpoint, packet)) {
738 packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
739 -ECANCELED : -ENOSPC;
740 INIT_LIST_HEAD(&queue);
741 list_add(&packet->list, &queue);
742 htc_tx_complete(endpoint, &queue);
743 }
744
745 return 0;
746}
747
748/* flush endpoint TX queue */
749void htc_flush_txep(struct htc_target *target,
750 enum htc_endpoint_id eid, u16 tag)
751{
752 struct htc_packet *packet, *tmp_pkt;
753 struct list_head discard_q, container;
754 struct htc_endpoint *endpoint = &target->endpoint[eid];
755
756 if (!endpoint->svc_id) {
757 WARN_ON(1);
758 return;
759 }
760
761 /* initialize the discard queue */
762 INIT_LIST_HEAD(&discard_q);
763
764 spin_lock_bh(&target->tx_lock);
765
766 list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) {
767 if ((tag == HTC_TX_PACKET_TAG_ALL) ||
768 (tag == packet->info.tx.tag))
769 list_move_tail(&packet->list, &discard_q);
770 }
771
772 spin_unlock_bh(&target->tx_lock);
773
774 list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
775 packet->status = -ECANCELED;
776 list_del(&packet->list);
777 ath6kl_dbg(ATH6KL_DBG_TRC,
778 "flushing tx pkt:0x%p, len:%d, ep:%d tag:0x%X\n",
779 packet, packet->act_len,
780 packet->endpoint, packet->info.tx.tag);
781
782 INIT_LIST_HEAD(&container);
783 list_add_tail(&packet->list, &container);
784 htc_tx_complete(endpoint, &container);
785 }
786
787}
788
789static void htc_flush_txep_all(struct htc_target *target)
790{
791 struct htc_endpoint *endpoint;
792 int i;
793
794 dump_cred_dist_stats(target);
795
796 for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
797 endpoint = &target->endpoint[i];
798 if (endpoint->svc_id == 0)
799 /* not in use.. */
800 continue;
801 htc_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
802 }
803}
804
/*
 * Mark an endpoint active or inactive for credit distribution.  When
 * the state actually changes, credits are redistributed under tx_lock;
 * on deactivation, stalled queues are kicked afterwards so freed
 * credits get used.
 */
void htc_indicate_activity_change(struct htc_target *target,
				  enum htc_endpoint_id eid, bool active)
{
	struct htc_endpoint *endpoint = &target->endpoint[eid];
	bool dist = false;

	if (endpoint->svc_id == 0) {
		/* endpoint not connected */
		WARN_ON(1);
		return;
	}

	spin_lock_bh(&target->tx_lock);

	/* only redistribute when the active flag actually toggles */
	if (active) {
		if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) {
			endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE;
			dist = true;
		}
	} else {
		if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) {
			endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE;
			dist = true;
		}
	}

	if (dist) {
		/* give the distribution function a fresh queue depth */
		endpoint->cred_dist.txq_depth =
			get_queue_depth(&endpoint->txq);

		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
			   target->cred_dist_cntxt, &target->cred_dist_list);

		ath6k_credit_distribute(target->cred_dist_cntxt,
					&target->cred_dist_list,
					HTC_CREDIT_DIST_ACTIVITY_CHANGE);
	}

	spin_unlock_bh(&target->tx_lock);

	/* after deactivation, restart any queues the freed credits unblock */
	if (dist && !active)
		htc_chk_ep_txq(target);
}
847
848/* HTC Rx */
849
850static inline void htc_update_rx_stats(struct htc_endpoint *endpoint,
851 int n_look_ahds)
852{
853 endpoint->ep_st.rx_pkts++;
854 if (n_look_ahds == 1)
855 endpoint->ep_st.rx_lkahds++;
856 else if (n_look_ahds > 1)
857 endpoint->ep_st.rx_bundle_lkahd++;
858}
859
860static inline bool htc_valid_rx_frame_len(struct htc_target *target,
861 enum htc_endpoint_id eid, int len)
862{
863 return (eid == target->dev->ar->ctrl_ep) ?
864 len <= ATH6KL_BUFFER_SIZE : len <= ATH6KL_AMSDU_BUFFER_SIZE;
865}
866
867static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
868{
869 struct list_head queue;
870
871 INIT_LIST_HEAD(&queue);
872 list_add_tail(&packet->list, &queue);
873 return htc_add_rxbuf_multiple(target, &queue);
874}
875
876static void htc_reclaim_rxbuf(struct htc_target *target,
877 struct htc_packet *packet,
878 struct htc_endpoint *ep)
879{
880 if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) {
881 htc_rxpkt_reset(packet);
882 packet->status = -ECANCELED;
883 ep->ep_cb.rx(ep->target, packet);
884 } else {
885 htc_rxpkt_reset(packet);
886 htc_add_rxbuf((void *)(target), packet);
887 }
888}
889
/* Return a control-endpoint rx packet to the target's free pool. */
static void reclaim_rx_ctrl_buf(struct htc_target *target,
				struct htc_packet *packet)
{
	spin_lock_bh(&target->htc_lock);
	list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
	spin_unlock_bh(&target->htc_lock);
}
897
898static int dev_rx_pkt(struct htc_target *target, struct htc_packet *packet,
899 u32 rx_len)
900{
901 struct ath6kl_device *dev = target->dev;
902 u32 padded_len;
903 int status;
904
905 padded_len = CALC_TXRX_PADDED_LEN(dev, rx_len);
906
907 if (padded_len > packet->buf_len) {
908 ath6kl_err("not enough receive space for packet - padlen:%d recvlen:%d bufferlen:%d\n",
909 padded_len, rx_len, packet->buf_len);
910 return -ENOMEM;
911 }
912
913 ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
914 "dev_rx_pkt (0x%p : hdr:0x%X) padded len: %d mbox:0x%X (mode:%s)\n",
915 packet, packet->info.rx.exp_hdr,
916 padded_len, dev->ar->mbox_info.htc_addr, "sync");
917
918 status = hif_read_write_sync(dev->ar,
919 dev->ar->mbox_info.htc_addr,
920 packet->buf, padded_len,
921 HIF_RD_SYNC_BLOCK_FIX);
922
923 packet->status = status;
924
925 return status;
926}
927
928/*
929 * optimization for recv packets, we can indicate a
930 * "hint" that there are more single-packets to fetch
931 * on this endpoint.
932 */
933static void set_rxpkt_indication_flag(u32 lk_ahd,
934 struct htc_endpoint *endpoint,
935 struct htc_packet *packet)
936{
937 struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;
938
939 if (htc_hdr->eid == packet->endpoint) {
940 if (!list_empty(&endpoint->rx_bufq))
941 packet->info.rx.indicat_flags |=
942 HTC_RX_FLAGS_INDICATE_MORE_PKTS;
943 }
944}
945
946static void chk_rx_water_mark(struct htc_endpoint *endpoint)
947{
948 struct htc_ep_callbacks ep_cb = endpoint->ep_cb;
949
950 if (ep_cb.rx_refill_thresh > 0) {
951 spin_lock_bh(&endpoint->target->rx_lock);
952 if (get_queue_depth(&endpoint->rx_bufq)
953 < ep_cb.rx_refill_thresh) {
954 spin_unlock_bh(&endpoint->target->rx_lock);
955 ep_cb.rx_refill(endpoint->target, endpoint->eid);
956 return;
957 }
958 spin_unlock_bh(&endpoint->target->rx_lock);
959 }
960}
961
/*
 * Allocate and queue rx buffers for n_msg incoming messages described
 * by the look-ahead lk_ahds.  Buffers come either from the endpoint's
 * rx_allocthresh callback (for large messages) or its rx_bufq pool.
 *
 * This function is called with rx_lock held; the lock is dropped and
 * reacquired around the client allocation/refill callbacks.
 */
static int htc_setup_rxpkts(struct htc_target *target, struct htc_endpoint *ep,
			    u32 *lk_ahds, struct list_head *queue, int n_msg)
{
	struct htc_packet *packet;
	/* FIXME: type of lk_ahds can't be right */
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds;
	struct htc_ep_callbacks ep_cb;
	int status = 0, j, full_len;
	bool no_recycle;

	/* padded transfer length derived from the first look-ahead */
	full_len = CALC_TXRX_PADDED_LEN(target->dev,
					le16_to_cpu(htc_hdr->payld_len) +
					sizeof(*htc_hdr));

	if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
		ath6kl_warn("Rx buffer requested with invalid length\n");
		return -EINVAL;
	}

	ep_cb = ep->ep_cb;
	for (j = 0; j < n_msg; j++) {

		/*
		 * Reset flag, any packets allocated using the
		 * rx_alloc() API cannot be recycled on
		 * cleanup,they must be explicitly returned.
		 */
		no_recycle = false;

		if (ep_cb.rx_allocthresh &&
		    (full_len > ep_cb.rx_alloc_thresh)) {
			/* large message: ask the endpoint owner to allocate */
			ep->ep_st.rx_alloc_thresh_hit += 1;
			ep->ep_st.rxalloc_thresh_byte +=
				le16_to_cpu(htc_hdr->payld_len);

			/* drop the lock around the client callback */
			spin_unlock_bh(&target->rx_lock);
			no_recycle = true;

			packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
						      full_len);
			spin_lock_bh(&target->rx_lock);
		} else {
			/* refill handler is being used */
			if (list_empty(&ep->rx_bufq)) {
				if (ep_cb.rx_refill) {
					spin_unlock_bh(&target->rx_lock);
					ep_cb.rx_refill(ep->target, ep->eid);
					spin_lock_bh(&target->rx_lock);
				}
			}

			/* take a buffer from the endpoint's pool, if any */
			if (list_empty(&ep->rx_bufq))
				packet = NULL;
			else {
				packet = list_first_entry(&ep->rx_bufq,
						struct htc_packet, list);
				list_del(&packet->list);
			}
		}

		if (!packet) {
			/* out of buffers: flag the stall for later restart */
			target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
			target->ep_waiting = ep->eid;
			return -ENOSPC;
		}

		/* clear flags */
		packet->info.rx.rx_flags = 0;
		packet->info.rx.indicat_flags = 0;
		packet->status = 0;

		if (no_recycle)
			/*
			 * flag that these packets cannot be
			 * recycled, they have to be returned to
			 * the user
			 */
			packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;

		/* Caller needs to free this upon any failure */
		list_add_tail(&packet->list, queue);

		if (target->htc_flags & HTC_OP_STATE_STOPPING) {
			status = -ECANCELED;
			break;
		}

		if (j) {
			/*
			 * Only the first message's header is known from
			 * the look-ahead; later ones get refreshed after
			 * the bundle is fetched.
			 */
			packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
			packet->info.rx.exp_hdr = 0xFFFFFFFF;
		} else
			/* set expected look ahead */
			packet->info.rx.exp_hdr = *lk_ahds;

		packet->act_len = le16_to_cpu(htc_hdr->payld_len) +
				  HTC_HDR_LENGTH;
	}

	return status;
}
1063
/*
 * Validate each look-ahead and build the list of rx packets to fetch,
 * expanding bundled look-aheads into multiple packets.  On failure,
 * every packet already queued is reclaimed.  An -ENOSPC from
 * htc_setup_rxpkts is converted to success so that a partial fetch can
 * proceed with whatever buffers were obtained.
 */
static int alloc_and_prep_rxpkts(struct htc_target *target,
				 u32 lk_ahds[], int msg,
				 struct htc_endpoint *endpoint,
				 struct list_head *queue)
{
	int status = 0;
	struct htc_packet *packet, *tmp_pkt;
	struct htc_frame_hdr *htc_hdr;
	int i, n_msg;

	spin_lock_bh(&target->rx_lock);

	for (i = 0; i < msg; i++) {

		htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];

		/* sanity-check the endpoint advertised by the look-ahead */
		if (htc_hdr->eid >= ENDPOINT_MAX) {
			ath6kl_err("invalid ep in look-ahead: %d\n",
				   htc_hdr->eid);
			status = -ENOMEM;
			break;
		}

		if (htc_hdr->eid != endpoint->eid) {
			ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
				   htc_hdr->eid, endpoint->eid, i);
			status = -ENOMEM;
			break;
		}

		if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
			ath6kl_err("payload len %d exceeds max htc : %d !\n",
				   htc_hdr->payld_len,
				   (u32) HTC_MAX_PAYLOAD_LENGTH);
			status = -ENOMEM;
			break;
		}

		if (endpoint->svc_id == 0) {
			ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
			status = -ENOMEM;
			break;
		}

		if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) {
			/*
			 * HTC header indicates that every packet to follow
			 * has the same padded length so that it can be
			 * optimally fetched as a full bundle.
			 */
			n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
				HTC_FLG_RX_BNDL_CNT_S;

			/* the count doesn't include the starter frame */
			n_msg++;
			if (n_msg > target->msg_per_bndl_max) {
				status = -ENOMEM;
				break;
			}

			endpoint->ep_st.rx_bundle_from_hdr += 1;
			ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
				   "htc hdr indicates :%d msg can be fetched as a bundle\n",
				   n_msg);
		} else
			/* HTC header only indicates 1 message to fetch */
			n_msg = 1;

		/* Setup packet buffers for each message */
		status = htc_setup_rxpkts(target, endpoint, &lk_ahds[i], queue,
					  n_msg);

		/*
		 * This is due to unavailabilty of buffers to rx entire data.
		 * Return no error so that free buffers from queue can be used
		 * to receive partial data.
		 */
		if (status == -ENOSPC) {
			spin_unlock_bh(&target->rx_lock);
			return 0;
		}

		if (status)
			break;
	}

	spin_unlock_bh(&target->rx_lock);

	/* unwind: recycle everything queued before the failure */
	if (status) {
		list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
			list_del(&packet->list);
			htc_reclaim_rxbuf(target, packet,
					  &target->endpoint[packet->endpoint]);
		}
	}

	return status;
}
1162
/*
 * Completion handler for packets received on the control endpoint
 * (ENDPOINT_0).  No unsolicited messages are expected there, so any
 * packet carrying a payload is logged before the buffer is recycled.
 */
static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
{
	if (packets->endpoint != ENDPOINT_0) {
		WARN_ON(1);
		return;
	}

	if (packets->status == -ECANCELED) {
		/* shutdown path: just return the buffer to the free pool */
		reclaim_rx_ctrl_buf(context, packets);
		return;
	}

	if (packets->act_len > 0) {
		/*
		 * NOTE(review): %zu expects size_t — confirm that
		 * act_len + HTC_HDR_LENGTH really has that type here.
		 */
		ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
			   packets->act_len + HTC_HDR_LENGTH);

		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
				"Unexpected ENDPOINT 0 Message",
				packets->buf - HTC_HDR_LENGTH,
				packets->act_len + HTC_HDR_LENGTH);
	}

	htc_reclaim_rxbuf(context, packets, &context->endpoint[0]);
}
1187
/*
 * Process a credit report received from the target: account the
 * returned credits per endpoint, redistribute them if needed, and
 * kick any stalled TX queues afterwards.  from_ep identifies which
 * endpoint the report arrived on (for statistics only).
 */
static void htc_proc_cred_rpt(struct htc_target *target,
			      struct htc_credit_report *rpt,
			      int n_entries,
			      enum htc_endpoint_id from_ep)
{
	struct htc_endpoint *endpoint;
	int tot_credits = 0, i;
	bool dist = false;

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
		   "htc_proc_cred_rpt, credit report entries:%d\n", n_entries);

	spin_lock_bh(&target->tx_lock);

	for (i = 0; i < n_entries; i++, rpt++) {
		if (rpt->eid >= ENDPOINT_MAX) {
			WARN_ON(1);
			spin_unlock_bh(&target->tx_lock);
			return;
		}

		endpoint = &target->endpoint[rpt->eid];

		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, " ep %d got %d credits\n",
			   rpt->eid, rpt->credits);

		endpoint->ep_st.tx_cred_rpt += 1;
		endpoint->ep_st.cred_retnd += rpt->credits;

		/* account where the report came from, for statistics */
		if (from_ep == rpt->eid) {
			/*
			 * This credit report arrived on the same endpoint
			 * indicating it arrived in an RX packet.
			 */
			endpoint->ep_st.cred_from_rx += rpt->credits;
			endpoint->ep_st.cred_rpt_from_rx += 1;
		} else if (from_ep == ENDPOINT_0) {
			/* credit arrived on endpoint 0 as a NULL message */
			endpoint->ep_st.cred_from_ep0 += rpt->credits;
			endpoint->ep_st.cred_rpt_ep0 += 1;
		} else {
			endpoint->ep_st.cred_from_other += rpt->credits;
			endpoint->ep_st.cred_rpt_from_other += 1;
		}

		if (ENDPOINT_0 == rpt->eid)
			/* always give endpoint 0 credits back */
			endpoint->cred_dist.credits += rpt->credits;
		else {
			/* other endpoints go through the distributor */
			endpoint->cred_dist.cred_to_dist += rpt->credits;
			dist = true;
		}

		/*
		 * Refresh tx depth for distribution function that will
		 * recover these credits NOTE: this is only valid when
		 * there are credits to recover!
		 */
		endpoint->cred_dist.txq_depth =
			get_queue_depth(&endpoint->txq);

		tot_credits += rpt->credits;
	}

	ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
		   "report indicated %d credits to distribute\n",
		   tot_credits);

	if (dist) {
		/*
		 * This was a credit return based on a completed send
		 * operations note, this is done with the lock held
		 */
		ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
			   target->cred_dist_cntxt, &target->cred_dist_list);

		ath6k_credit_distribute(target->cred_dist_cntxt,
					&target->cred_dist_list,
					HTC_CREDIT_DIST_SEND_COMPLETE);
	}

	spin_unlock_bh(&target->tx_lock);

	/* restart any queues that were stalled waiting for credits */
	if (tot_credits)
		htc_chk_ep_txq(target);
}
1274
/*
 * Handle one record from an HTC frame trailer: credit reports are
 * processed immediately; look-ahead records (single or bundled) are
 * copied into next_lk_ahds with the count returned via n_lk_ahds.
 * Unknown record IDs are logged and skipped.  Returns 0 or -EINVAL on
 * a malformed record.
 */
static int htc_parse_trailer(struct htc_target *target,
			     struct htc_record_hdr *record,
			     u8 *record_buf, u32 *next_lk_ahds,
			     enum htc_endpoint_id endpoint,
			     int *n_lk_ahds)
{
	struct htc_bundle_lkahd_rpt *bundle_lkahd_rpt;
	struct htc_lookahead_report *lk_ahd;
	int len;

	switch (record->rec_id) {
	case HTC_RECORD_CREDITS:
		len = record->len / sizeof(struct htc_credit_report);
		if (!len) {
			WARN_ON(1);
			return -EINVAL;
		}

		htc_proc_cred_rpt(target,
				  (struct htc_credit_report *) record_buf,
				  len, endpoint);
		break;
	case HTC_RECORD_LOOKAHEAD:
		len = record->len / sizeof(*lk_ahd);
		if (!len) {
			WARN_ON(1);
			return -EINVAL;
		}

		lk_ahd = (struct htc_lookahead_report *) record_buf;
		/* pre/post valid bytes must be one's complements */
		if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF))
		    && next_lk_ahds) {

			ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
				   "lk_ahd report found (pre valid:0x%X, post valid:0x%X)\n",
				   lk_ahd->pre_valid, lk_ahd->post_valid);

			/* look ahead bytes are valid, copy them over */
			memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);

			ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Next Look Ahead",
					next_lk_ahds, 4);

			*n_lk_ahds = 1;
		}
		break;
	case HTC_RECORD_LOOKAHEAD_BUNDLE:
		len = record->len / sizeof(*bundle_lkahd_rpt);
		if (!len || (len > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
			WARN_ON(1);
			return -EINVAL;
		}

		if (next_lk_ahds) {
			int i;

			bundle_lkahd_rpt =
				(struct htc_bundle_lkahd_rpt *) record_buf;

			ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Bundle lk_ahd",
					record_buf, record->len);

			/* copy one look-ahead per bundled message */
			for (i = 0; i < len; i++) {
				memcpy((u8 *)&next_lk_ahds[i],
				       bundle_lkahd_rpt->lk_ahd, 4);
				bundle_lkahd_rpt++;
			}

			*n_lk_ahds = i;
		}
		break;
	default:
		ath6kl_err("unhandled record: id:%d len:%d\n",
			   record->rec_id, record->len);
		break;
	}

	return 0;

}
1355
1356static int htc_proc_trailer(struct htc_target *target,
1357 u8 *buf, int len, u32 *next_lk_ahds,
1358 int *n_lk_ahds, enum htc_endpoint_id endpoint)
1359{
1360 struct htc_record_hdr *record;
1361 int orig_len;
1362 int status;
1363 u8 *record_buf;
1364 u8 *orig_buf;
1365
1366 ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "+htc_proc_trailer (len:%d)\n", len);
1367
1368 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Recv Trailer", buf, len);
1369
1370 orig_buf = buf;
1371 orig_len = len;
1372 status = 0;
1373
1374 while (len > 0) {
1375
1376 if (len < sizeof(struct htc_record_hdr)) {
1377 status = -ENOMEM;
1378 break;
1379 }
1380 /* these are byte aligned structs */
1381 record = (struct htc_record_hdr *) buf;
1382 len -= sizeof(struct htc_record_hdr);
1383 buf += sizeof(struct htc_record_hdr);
1384
1385 if (record->len > len) {
1386 ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n",
1387 record->len, record->rec_id, len);
1388 status = -ENOMEM;
1389 break;
1390 }
1391 record_buf = buf;
1392
1393 status = htc_parse_trailer(target, record, record_buf,
1394 next_lk_ahds, endpoint, n_lk_ahds);
1395
1396 if (status)
1397 break;
1398
1399 /* advance buffer past this record for next time around */
1400 buf += record->len;
1401 len -= record->len;
1402 }
1403
1404 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD Recv Trailer",
1405 orig_buf, orig_len);
1406
1407 return status;
1408}
1409
/*
 * Validate and strip the HTC header of a received packet: refresh the
 * header for bundled packets, check it against the expected look-ahead,
 * process any trailer records, then advance packet->buf/act_len past
 * the header.  Returns 0 on success or a negative error code.
 */
static int htc_proc_rxhdr(struct htc_target *target,
			  struct htc_packet *packet,
			  u32 *next_lkahds, int *n_lkahds)
{
	int status = 0;
	u16 payload_len;
	u32 lk_ahd;
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf;

	if (n_lkahds != NULL)
		*n_lkahds = 0;

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "HTC Recv PKT", packet->buf,
			packet->act_len);

	/*
	 * NOTE: we cannot assume the alignment of buf, so we use the safe
	 * macros to retrieve 16 bit fields.
	 */
	payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));

	/* the first 4 bytes of the frame double as its look-ahead */
	memcpy((u8 *)&lk_ahd, packet->buf, sizeof(lk_ahd));

	if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) {
		/*
		 * Refresh the expected header and the actual length as it
		 * was unknown when this packet was grabbed as part of the
		 * bundle.
		 */
		packet->info.rx.exp_hdr = lk_ahd;
		packet->act_len = payload_len + HTC_HDR_LENGTH;

		/* validate the actual header that was refreshed */
		if (packet->act_len > packet->buf_len) {
			ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
				   payload_len, lk_ahd);
			/*
			 * Limit this to max buffer just to print out some
			 * of the buffer.
			 */
			packet->act_len = min(packet->act_len, packet->buf_len);
			status = -ENOMEM;
			goto fail_rx;
		}

		if (packet->endpoint != htc_hdr->eid) {
			ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
				   htc_hdr->eid, packet->endpoint);
			status = -ENOMEM;
			goto fail_rx;
		}
	}

	/* the received header must match what we expected from look-ahead */
	if (lk_ahd != packet->info.rx.exp_hdr) {
		ath6kl_err("htc_proc_rxhdr, lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
			   packet, packet->info.rx.rx_flags);
		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Expected Message lk_ahd",
				&packet->info.rx.exp_hdr, 4);
		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Current Frame Header",
				(u8 *)&lk_ahd, sizeof(lk_ahd));
		status = -ENOMEM;
		goto fail_rx;
	}

	if (htc_hdr->flags & HTC_FLG_RX_TRAILER) {
		/* ctrl[0] holds the trailer length; bound-check it */
		if (htc_hdr->ctrl[0] < sizeof(struct htc_record_hdr) ||
		    htc_hdr->ctrl[0] > payload_len) {
			ath6kl_err("htc_proc_rxhdr, invalid hdr (payload len should be :%d, CB[0] is:%d)\n",
				   payload_len, htc_hdr->ctrl[0]);
			status = -ENOMEM;
			goto fail_rx;
		}

		if (packet->info.rx.rx_flags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
			next_lkahds = NULL;
			n_lkahds = NULL;
		}

		/* the trailer sits at the very end of the payload */
		status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH
					  + payload_len - htc_hdr->ctrl[0],
					  htc_hdr->ctrl[0], next_lkahds,
					  n_lkahds, packet->endpoint);

		if (status)
			goto fail_rx;

		packet->act_len -= htc_hdr->ctrl[0];
	}

	/* strip the HTC header; buf/act_len now describe the payload only */
	packet->buf += HTC_HDR_LENGTH;
	packet->act_len -= HTC_HDR_LENGTH;

fail_rx:
	if (status)
		ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD HTC Recv PKT",
				packet->buf,
				packet->act_len < 256 ? packet->act_len : 256);
	else {
		if (packet->act_len > 0)
			ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
					"HTC - Application Msg",
					packet->buf, packet->act_len);
	}

	return status;
}
1516
/*
 * Deliver a received packet to the endpoint owner's rx callback.
 * NOTE(review): keep the debug print before the callback — the
 * callback presumably takes ownership of the packet.
 */
static void do_rx_completion(struct htc_endpoint *endpoint,
			     struct htc_packet *packet)
{
	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
		   "htc calling ep %d recv callback on packet 0x%p\n",
		   endpoint->eid, packet);
	endpoint->ep_cb.rx(endpoint->target, packet);
}
1525
/*
 * Fetch a group of rx packets from the mailbox in one scatter-gather
 * request.  Packets that fit are moved from rxq to sync_compq; any
 * that do not fit (or exceed the bundle limit) stay on rxq and force a
 * partial bundle, in which case all look-aheads must be ignored.
 * *n_pkt_fetched reports how many packets were transferred.
 */
static int htc_issue_rxpkt_bundle(struct htc_target *target,
				  struct list_head *rxq,
				  struct list_head *sync_compq,
				  int *n_pkt_fetched, bool part_bundle)
{
	struct hif_scatter_req *scat_req;
	struct htc_packet *packet;
	int rem_space = target->dev->max_rx_bndl_sz;
	int n_scat_pkt, status = 0, i, len;

	n_scat_pkt = get_queue_depth(rxq);
	n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max);

	if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
		/*
		 * We were forced to split this bundle receive operation
		 * all packets in this partial bundle must have their
		 * lookaheads ignored.
		 */
		part_bundle = true;

		/*
		 * This would only happen if the target ignored our max
		 * bundle limit.
		 */
		ath6kl_warn("htc_issue_rxpkt_bundle : partial bundle detected num:%d , %d\n",
			    get_queue_depth(rxq), n_scat_pkt);
	}

	len = 0;

	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
		   "htc_issue_rxpkt_bundle (numpackets: %d , actual : %d)\n",
		   get_queue_depth(rxq), n_scat_pkt);

	scat_req = hif_scatter_req_get(target->dev->ar);

	if (scat_req == NULL)
		goto fail_rx_pkt;

	scat_req->flags = 0;

	if (part_bundle)
		scat_req->flags |=
			HTC_SCAT_REQ_FLG_PART_BNDL;

	/* build the scatter list, one HTC packet per entry */
	for (i = 0; i < n_scat_pkt; i++) {
		int pad_len;

		packet = list_first_entry(rxq, struct htc_packet, list);
		list_del(&packet->list);

		pad_len = CALC_TXRX_PADDED_LEN(target->dev,
					       packet->act_len);

		if ((rem_space - pad_len) < 0) {
			/* no room left: put it back and stop */
			list_add(&packet->list, rxq);
			break;
		}

		rem_space -= pad_len;

		if (part_bundle || (i < (n_scat_pkt - 1)))
			/*
			 * Packet 0..n-1 cannot be checked for look-aheads
			 * since we are fetching a bundle the last packet
			 * however can have it's lookahead used
			 */
			packet->info.rx.rx_flags |=
				HTC_RX_PKT_IGNORE_LOOKAHEAD;

		/* NOTE: 1 HTC packet per scatter entry */
		scat_req->scat_list[i].buf = packet->buf;
		scat_req->scat_list[i].len = pad_len;

		packet->info.rx.rx_flags |= HTC_RX_PKT_PART_OF_BUNDLE;

		list_add_tail(&packet->list, sync_compq);

		WARN_ON(!scat_req->scat_list[i].len);
		len += scat_req->scat_list[i].len;
	}

	scat_req->len = len;
	scat_req->scat_entries = i;

	/* synchronous fetch */
	status = ath6kldev_submit_scat_req(target->dev, scat_req, true);

	if (!status)
		*n_pkt_fetched = i;

	/* free scatter request */
	hif_scatter_req_add(target->dev->ar, scat_req);

fail_rx_pkt:

	return status;
}
1624
/*
 * Post-process every fetched rx packet: strip/validate its HTC header
 * (which may refresh lk_ahds/n_lk_ahd from trailer records), set the
 * "more packets" indication flag, update statistics and hand the
 * packet to the endpoint's rx callback.  Stops at the first header
 * error, leaving later packets on comp_pktq for the caller to clean up.
 */
static int htc_proc_fetched_rxpkts(struct htc_target *target,
				   struct list_head *comp_pktq, u32 lk_ahds[],
				   int *n_lk_ahd)
{
	struct htc_packet *packet, *tmp_pkt;
	struct htc_endpoint *ep;
	int status = 0;

	list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
		list_del(&packet->list);
		ep = &target->endpoint[packet->endpoint];

		/* process header for each of the recv packet */
		status = htc_proc_rxhdr(target, packet, lk_ahds, n_lk_ahd);
		if (status)
			return status;

		if (list_empty(comp_pktq)) {
			/*
			 * Last packet's more packet flag is set
			 * based on the lookahead.
			 */
			if (*n_lk_ahd > 0)
				set_rxpkt_indication_flag(lk_ahds[0],
							  ep, packet);
		} else
			/*
			 * Packets in a bundle automatically have
			 * this flag set.
			 */
			packet->info.rx.indicat_flags |=
				HTC_RX_FLAGS_INDICATE_MORE_PKTS;

		htc_update_rx_stats(ep, *n_lk_ahd);

		if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE)
			ep->ep_st.rx_bundl += 1;

		do_rx_completion(ep, packet);
	}

	return status;
}
1668
1669static int htc_fetch_rxpkts(struct htc_target *target,
1670 struct list_head *rx_pktq,
1671 struct list_head *comp_pktq)
1672{
1673 int fetched_pkts;
1674 bool part_bundle = false;
1675 int status = 0;
1676
1677 /* now go fetch the list of HTC packets */
1678 while (!list_empty(rx_pktq)) {
1679 fetched_pkts = 0;
1680
1681 if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
1682 /*
1683 * There are enough packets to attempt a
1684 * bundle transfer and recv bundling is
1685 * allowed.
1686 */
1687 status = htc_issue_rxpkt_bundle(target, rx_pktq,
1688 comp_pktq,
1689 &fetched_pkts,
1690 part_bundle);
1691 if (status)
1692 return status;
1693
1694 if (!list_empty(rx_pktq))
1695 part_bundle = true;
1696 }
1697
1698 if (!fetched_pkts) {
1699 struct htc_packet *packet;
1700
1701 packet = list_first_entry(rx_pktq, struct htc_packet,
1702 list);
1703
1704 list_del(&packet->list);
1705
1706 /* fully synchronous */
1707 packet->completion = NULL;
1708
1709 if (!list_empty(rx_pktq))
1710 /*
1711 * look_aheads in all packet
1712 * except the last one in the
1713 * bundle must be ignored
1714 */
1715 packet->info.rx.rx_flags |=
1716 HTC_RX_PKT_IGNORE_LOOKAHEAD;
1717
1718 /* go fetch the packet */
1719 status = dev_rx_pkt(target, packet, packet->act_len);
1720 if (status)
1721 return status;
1722
1723 list_add_tail(&packet->list, comp_pktq);
1724 }
1725 }
1726
1727 return status;
1728}
1729
1730static int htc_rxmsg_pending_handler(struct htc_target *target,
1731 u32 msg_look_ahead[],
1732 int *num_pkts)
1733{
1734 struct htc_packet *packets, *tmp_pkt;
1735 struct htc_endpoint *endpoint;
1736 struct list_head rx_pktq, comp_pktq;
1737 int status = 0;
1738 u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
1739 int num_look_ahead = 1;
1740 enum htc_endpoint_id id;
1741 int n_fetched = 0;
1742
1743 *num_pkts = 0;
1744
1745 /*
1746 * On first entry copy the look_aheads into our temp array for
1747 * processing
1748 */
1749 memcpy(look_aheads, msg_look_ahead, sizeof(look_aheads));
1750
1751 while (true) {
1752
1753 /*
1754 * First lookahead sets the expected endpoint IDs for all
1755 * packets in a bundle.
1756 */
1757 id = ((struct htc_frame_hdr *)&look_aheads[0])->eid;
1758 endpoint = &target->endpoint[id];
1759
1760 if (id >= ENDPOINT_MAX) {
1761 ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n",
1762 id);
1763 status = -ENOMEM;
1764 break;
1765 }
1766
1767 INIT_LIST_HEAD(&rx_pktq);
1768 INIT_LIST_HEAD(&comp_pktq);
1769
1770 /*
1771 * Try to allocate as many HTC RX packets indicated by the
1772 * look_aheads.
1773 */
1774 status = alloc_and_prep_rxpkts(target, look_aheads,
1775 num_look_ahead, endpoint,
1776 &rx_pktq);
1777 if (status)
1778 break;
1779
1780 if (get_queue_depth(&rx_pktq) >= 2)
1781 /*
1782 * A recv bundle was detected, force IRQ status
1783 * re-check again
1784 */
1785 target->dev->chk_irq_status_cnt = 1;
1786
1787 n_fetched += get_queue_depth(&rx_pktq);
1788
1789 num_look_ahead = 0;
1790
1791 status = htc_fetch_rxpkts(target, &rx_pktq, &comp_pktq);
1792
1793 if (!status)
1794 chk_rx_water_mark(endpoint);
1795
1796 /* Process fetched packets */
1797 status = htc_proc_fetched_rxpkts(target, &comp_pktq,
1798 look_aheads, &num_look_ahead);
1799
1800 if (!num_look_ahead || status)
1801 break;
1802
1803 /*
1804 * For SYNCH processing, if we get here, we are running
1805 * through the loop again due to a detected lookahead. Set
1806 * flag that we should re-check IRQ status registers again
1807 * before leaving IRQ processing, this can net better
1808 * performance in high throughput situations.
1809 */
1810 target->dev->chk_irq_status_cnt = 1;
1811 }
1812
1813 if (status) {
1814 ath6kl_err("failed to get pending recv messages: %d\n",
1815 status);
1816 /*
1817 * Cleanup any packets we allocated but didn't use to
1818 * actually fetch any packets.
1819 */
1820 list_for_each_entry_safe(packets, tmp_pkt, &rx_pktq, list) {
1821 list_del(&packets->list);
1822 htc_reclaim_rxbuf(target, packets,
1823 &target->endpoint[packets->endpoint]);
1824 }
1825
1826 /* cleanup any packets in sync completion queue */
1827 list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
1828 list_del(&packets->list);
1829 htc_reclaim_rxbuf(target, packets,
1830 &target->endpoint[packets->endpoint]);
1831 }
1832
1833 if (target->htc_flags & HTC_OP_STATE_STOPPING) {
1834 ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
1835 ath6kldev_rx_control(target->dev, false);
1836 }
1837 }
1838
1839 /*
1840 * Before leaving, check to see if host ran out of buffers and
1841 * needs to stop the receiver.
1842 */
1843 if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
1844 ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
1845 ath6kldev_rx_control(target->dev, false);
1846 }
1847 *num_pkts = n_fetched;
1848
1849 return status;
1850}
1851
1852/*
1853 * Synchronously wait for a control message from the target,
1854 * This function is used at initialization time ONLY. At init messages
1855 * on ENDPOINT 0 are expected.
1856 */
static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
{
	struct htc_packet *packet = NULL;
	struct htc_frame_hdr *htc_hdr;
	u32 look_ahead;

	/* poll (blocking, with timeout) until the target posts a lookahead */
	if (ath6kldev_poll_mboxmsg_rx(target->dev, &look_ahead,
				      HTC_TARGET_RESPONSE_TIMEOUT))
		return NULL;

	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
		   "htc_wait_for_ctrl_msg: look_ahead : 0x%X\n", look_ahead);

	/* the lookahead word is the first 4 bytes of the HTC frame header */
	htc_hdr = (struct htc_frame_hdr *)&look_ahead;

	/* at init time only control-endpoint traffic is expected */
	if (htc_hdr->eid != ENDPOINT_0)
		return NULL;

	packet = htc_get_control_buf(target, false);

	if (!packet)
		return NULL;

	packet->info.rx.rx_flags = 0;
	packet->info.rx.exp_hdr = look_ahead;
	/* total transfer length = payload plus the HTC header itself */
	packet->act_len = le16_to_cpu(htc_hdr->payld_len) + HTC_HDR_LENGTH;

	/* reject messages larger than the control buffer */
	if (packet->act_len > packet->buf_len)
		goto fail_ctrl_rx;

	/* we want synchronous operation */
	packet->completion = NULL;

	/* get the message from the device, this will block */
	if (dev_rx_pkt(target, packet, packet->act_len))
		goto fail_ctrl_rx;

	/* process receive header */
	packet->status = htc_proc_rxhdr(target, packet, NULL, NULL);

	if (packet->status) {
		ath6kl_err("htc_wait_for_ctrl_msg, htc_proc_rxhdr failed (status = %d)\n",
			   packet->status);
		goto fail_ctrl_rx;
	}

	return packet;

fail_ctrl_rx:
	/* on any failure, recycle the control buffer back to the free pool */
	if (packet != NULL) {
		htc_rxpkt_reset(packet);
		reclaim_rx_ctrl_buf(target, packet);
	}

	return NULL;
}
1913
/*
 * Queue a list of caller-supplied receive buffers onto one endpoint's
 * rx_bufq. All packets in pkt_queue are assumed to belong to the endpoint
 * of the first packet. If the receiver was blocked waiting for buffers on
 * that endpoint, it is unblocked.
 *
 * Returns -ENOMEM for an empty queue, 0 otherwise.
 * NOTE(review): an out-of-range endpoint also returns 0 (a silent no-op);
 * confirm callers rely on that before changing it to an error.
 */
int htc_add_rxbuf_multiple(struct htc_target *target,
			   struct list_head *pkt_queue)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *first_pkt;
	bool rx_unblock = false;
	int status = 0, depth;

	if (list_empty(pkt_queue))
		return -ENOMEM;

	first_pkt = list_first_entry(pkt_queue, struct htc_packet, list);

	if (first_pkt->endpoint >= ENDPOINT_MAX)
		return status;

	depth = get_queue_depth(pkt_queue);

	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
		   "htc_add_rxbuf_multiple: ep id: %d, cnt:%d, len: %d\n",
		   first_pkt->endpoint, depth, first_pkt->buf_len);

	endpoint = &target->endpoint[first_pkt->endpoint];

	if (target->htc_flags & HTC_OP_STATE_STOPPING) {
		struct htc_packet *packet, *tmp_pkt;

		/* walk through queue and mark each one canceled */
		list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
			packet->status = -ECANCELED;
			list_del(&packet->list);
			do_rx_completion(endpoint, packet);
		}

		return status;
	}

	spin_lock_bh(&target->rx_lock);

	list_splice_tail_init(pkt_queue, &endpoint->rx_bufq);

	/* check if we are blocked waiting for a new buffer */
	if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
		if (target->ep_waiting == first_pkt->endpoint) {
			ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
				   "receiver was blocked on ep:%d, unblocking.\n",
				   target->ep_waiting);
			target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
			target->ep_waiting = ENDPOINT_MAX;
			rx_unblock = true;
		}
	}

	spin_unlock_bh(&target->rx_lock);

	/* re-enable reception only if we are not shutting down */
	if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
		/* TODO : implement a buffer threshold count? */
		ath6kldev_rx_control(target->dev, true);

	return status;
}
1975
/*
 * Free every queued (not yet used) receive buffer on every active
 * endpoint. Called on shutdown after interrupts are masked.
 *
 * The rx_lock is dropped around the free so dev_kfree_skb() is not called
 * under a spinlock; the _safe iterator's saved next pointer makes removal
 * of the current entry safe, but relies on no concurrent queue writers
 * while unlocked (true once rx is stopped).
 */
void htc_flush_rx_buf(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *packet, *tmp_pkt;
	int i;

	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
		endpoint = &target->endpoint[i];
		if (!endpoint->svc_id)
			/* not in use.. */
			continue;

		spin_lock_bh(&target->rx_lock);
		list_for_each_entry_safe(packet, tmp_pkt,
					 &endpoint->rx_bufq, list) {
			list_del(&packet->list);
			spin_unlock_bh(&target->rx_lock);
			ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
				   "flushing rx pkt:0x%p, len:%d, ep:%d\n",
				   packet, packet->buf_len,
				   packet->endpoint);
			dev_kfree_skb(packet->pkt_cntxt);
			spin_lock_bh(&target->rx_lock);
		}
		spin_unlock_bh(&target->rx_lock);
	}
}
2003
/*
 * Connect an HTC service: send a connect-service message on endpoint 0,
 * synchronously wait for the target's response, then configure the
 * endpoint the target assigned.
 *
 * conn_req describes the service and callbacks; on success conn_resp is
 * filled with the assigned endpoint and max message size. The pseudo
 * control service (HTC_CTRL_RSVD_SVC) is handled locally without any
 * message exchange. Returns 0 on success, negative error otherwise
 * (this driver uses -ENOMEM as its generic failure code here).
 */
int htc_conn_service(struct htc_target *target,
		     struct htc_service_connect_req *conn_req,
		     struct htc_service_connect_resp *conn_resp)
{
	struct htc_packet *rx_pkt = NULL;
	struct htc_packet *tx_pkt = NULL;
	struct htc_conn_service_resp *resp_msg;
	struct htc_conn_service_msg *conn_msg;
	struct htc_endpoint *endpoint;
	enum htc_endpoint_id assigned_ep = ENDPOINT_MAX;
	unsigned int max_msg_sz = 0;
	int status = 0;

	ath6kl_dbg(ATH6KL_DBG_TRC,
		   "htc_conn_service, target:0x%p service id:0x%X\n",
		   target, conn_req->svc_id);

	if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
		/* special case for pseudo control service */
		assigned_ep = ENDPOINT_0;
		max_msg_sz = HTC_MAX_CTRL_MSG_LEN;
	} else {
		/* allocate a packet to send to the target */
		tx_pkt = htc_get_control_buf(target, true);

		if (!tx_pkt)
			return -ENOMEM;

		conn_msg = (struct htc_conn_service_msg *)tx_pkt->buf;
		memset(conn_msg, 0, sizeof(*conn_msg));
		conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
		conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
		conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags);

		/* svc_meta_len is 0 after the memset: no service metadata */
		set_htc_pkt_info(tx_pkt, NULL, (u8 *) conn_msg,
				 sizeof(*conn_msg) + conn_msg->svc_meta_len,
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

		/* we want synchronous operation */
		tx_pkt->completion = NULL;
		htc_prep_send_pkt(tx_pkt, 0, 0, 0);
		status = htc_issue_send(target, tx_pkt);

		if (status)
			goto fail_tx;

		/* wait for response */
		rx_pkt = htc_wait_for_ctrl_msg(target);

		if (!rx_pkt) {
			status = -ENOMEM;
			goto fail_tx;
		}

		resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf;

		/* validate message id and minimum length */
		if ((le16_to_cpu(resp_msg->msg_id) != HTC_MSG_CONN_SVC_RESP_ID)
		    || (rx_pkt->act_len < sizeof(*resp_msg))) {
			status = -ENOMEM;
			goto fail_tx;
		}

		conn_resp->resp_code = resp_msg->status;
		/* check response status */
		if (resp_msg->status != HTC_SERVICE_SUCCESS) {
			ath6kl_err("target failed service 0x%X connect request (status:%d)\n",
				   resp_msg->svc_id, resp_msg->status);
			status = -ENOMEM;
			goto fail_tx;
		}

		assigned_ep = (enum htc_endpoint_id)resp_msg->eid;
		max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz);
	}

	/* sanity-check what the target (or the local path) handed back */
	if (assigned_ep >= ENDPOINT_MAX || !max_msg_sz) {
		status = -ENOMEM;
		goto fail_tx;
	}

	endpoint = &target->endpoint[assigned_ep];
	endpoint->eid = assigned_ep;
	/* a non-zero svc_id means the endpoint is already in use */
	if (endpoint->svc_id) {
		status = -ENOMEM;
		goto fail_tx;
	}

	/* return assigned endpoint to caller */
	conn_resp->endpoint = assigned_ep;
	conn_resp->len_max = max_msg_sz;

	/* setup the endpoint */

	/* this marks the endpoint in use */
	endpoint->svc_id = conn_req->svc_id;

	endpoint->max_txq_depth = conn_req->max_txq_depth;
	endpoint->len_max = max_msg_sz;
	endpoint->ep_cb = conn_req->ep_cb;
	endpoint->cred_dist.svc_id = conn_req->svc_id;
	endpoint->cred_dist.htc_rsvd = endpoint;
	endpoint->cred_dist.endpoint = assigned_ep;
	endpoint->cred_dist.cred_sz = target->tgt_cred_sz;

	if (conn_req->max_rxmsg_sz) {
		/*
		 * Override cred_per_msg calculation, this optimizes
		 * the credit-low indications since the host will actually
		 * issue smaller messages in the Send path.
		 */
		if (conn_req->max_rxmsg_sz > max_msg_sz) {
			status = -ENOMEM;
			goto fail_tx;
		}
		endpoint->cred_dist.cred_per_msg =
			conn_req->max_rxmsg_sz / target->tgt_cred_sz;
	} else
		endpoint->cred_dist.cred_per_msg =
			max_msg_sz / target->tgt_cred_sz;

	/* every message costs at least one credit */
	if (!endpoint->cred_dist.cred_per_msg)
		endpoint->cred_dist.cred_per_msg = 1;

	/* save local connection flags */
	endpoint->conn_flags = conn_req->flags;

fail_tx:
	/* control buffers are recycled on both success and failure paths */
	if (tx_pkt)
		htc_reclaim_txctrl_buf(target, tx_pkt);

	if (rx_pkt) {
		htc_rxpkt_reset(rx_pkt);
		reclaim_rx_ctrl_buf(target, rx_pkt);
	}

	return status;
}
2141
2142static void reset_ep_state(struct htc_target *target)
2143{
2144 struct htc_endpoint *endpoint;
2145 int i;
2146
2147 for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
2148 endpoint = &target->endpoint[i];
2149 memset(&endpoint->cred_dist, 0, sizeof(endpoint->cred_dist));
2150 endpoint->svc_id = 0;
2151 endpoint->len_max = 0;
2152 endpoint->max_txq_depth = 0;
2153 memset(&endpoint->ep_st, 0,
2154 sizeof(endpoint->ep_st));
2155 INIT_LIST_HEAD(&endpoint->rx_bufq);
2156 INIT_LIST_HEAD(&endpoint->txq);
2157 endpoint->target = target;
2158 }
2159
2160 /* reset distribution list */
2161 INIT_LIST_HEAD(&target->cred_dist_list);
2162}
2163
2164int htc_get_rxbuf_num(struct htc_target *target, enum htc_endpoint_id endpoint)
2165{
2166 int num;
2167
2168 spin_lock_bh(&target->rx_lock);
2169 num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
2170 spin_unlock_bh(&target->rx_lock);
2171 return num;
2172}
2173
/*
 * Negotiate message bundling parameters between what the target offered,
 * what HTC can handle, and what the HIF scatter-gather layer supports.
 * Disables tx bundling when the credit size is not block aligned.
 */
static void htc_setup_msg_bndl(struct htc_target *target)
{
	struct hif_dev_scat_sup_info *scat_info = &target->dev->hif_scat_info;

	/* limit what HTC can handle */
	target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE,
				       target->msg_per_bndl_max);

	if (ath6kldev_setup_msg_bndl(target->dev, target->msg_per_bndl_max)) {
		/* device layer refused bundling; disable it entirely */
		target->msg_per_bndl_max = 0;
		return;
	}

	/* limit the bundle to what the device layer can handle */
	target->msg_per_bndl_max = min(scat_info->max_scat_entries,
				       target->msg_per_bndl_max);

	ath6kl_dbg(ATH6KL_DBG_TRC,
		   "htc bundling allowed. max msg per htc bundle: %d\n",
		   target->msg_per_bndl_max);

	/* Max rx bundle size is limited by the max tx bundle size */
	target->dev->max_rx_bndl_sz = scat_info->max_xfer_szper_scatreq;
	/* Max tx bundle size is limited by the extended mbox address range */
	target->dev->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
					  scat_info->max_xfer_szper_scatreq);

	ath6kl_dbg(ATH6KL_DBG_ANY, "max recv: %d max send: %d\n",
		   target->dev->max_rx_bndl_sz, target->dev->max_tx_bndl_sz);

	if (target->dev->max_tx_bndl_sz)
		target->tx_bndl_enable = true;

	if (target->dev->max_rx_bndl_sz)
		target->rx_bndl_enable = true;

	if ((target->tgt_cred_sz % target->dev->block_sz) != 0) {
		ath6kl_warn("credit size: %d is not block aligned! Disabling send bundling\n",
			    target->tgt_cred_sz);

		/*
		 * Disallow send bundling since the credit size is
		 * not aligned to a block size the I/O block
		 * padding will spill into the next credit buffer
		 * which is fatal.
		 */
		target->tx_bndl_enable = false;
	}
}
2223
/*
 * Wait (synchronously) for the target's HTC ready message, record the
 * advertised credit count/size and protocol version, configure bundling
 * when offered, and connect the pseudo control service on endpoint 0.
 *
 * Returns 0 on success; -ENOMEM on any malformed or missing ready message
 * (this driver's generic failure code) or the connect-service error.
 */
int htc_wait_target(struct htc_target *target)
{
	struct htc_packet *packet = NULL;
	struct htc_ready_ext_msg *rdy_msg;
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp resp;
	int status;

	/* we should be getting 1 control message that the target is ready */
	packet = htc_wait_for_ctrl_msg(target);

	if (!packet)
		return -ENOMEM;

	/* we controlled the buffer creation so it's properly aligned */
	rdy_msg = (struct htc_ready_ext_msg *)packet->buf;

	/* validate the message id and the legacy (2.0) portion's length */
	if ((le16_to_cpu(rdy_msg->ver2_0_info.msg_id) != HTC_MSG_READY_ID) ||
	    (packet->act_len < sizeof(struct htc_ready_msg))) {
		status = -ENOMEM;
		goto fail_wait_target;
	}

	/* a target advertising zero credits or zero credit size is broken */
	if (!rdy_msg->ver2_0_info.cred_cnt || !rdy_msg->ver2_0_info.cred_sz) {
		status = -ENOMEM;
		goto fail_wait_target;
	}

	target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
	target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);

	ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
		   "target ready: credits: %d credit size: %d\n",
		   target->tgt_creds, target->tgt_cred_sz);

	/* check if this is an extended ready message */
	if (packet->act_len >= sizeof(struct htc_ready_ext_msg)) {
		/* this is an extended message */
		target->htc_tgt_ver = rdy_msg->htc_ver;
		target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl;
	} else {
		/* legacy */
		target->htc_tgt_ver = HTC_VERSION_2P0;
		target->msg_per_bndl_max = 0;
	}

	ath6kl_dbg(ATH6KL_DBG_TRC, "using htc protocol version : %s (%d)\n",
		   (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
		   target->htc_tgt_ver);

	if (target->msg_per_bndl_max > 0)
		htc_setup_msg_bndl(target);

	/* setup our pseudo HTC control endpoint connection */
	memset(&connect, 0, sizeof(connect));
	memset(&resp, 0, sizeof(resp));
	connect.ep_cb.rx = htc_ctrl_rx;
	connect.ep_cb.rx_refill = NULL;
	connect.ep_cb.tx_full = NULL;
	connect.max_txq_depth = NUM_CONTROL_BUFFERS;
	connect.svc_id = HTC_CTRL_RSVD_SVC;

	/* connect fake service */
	status = htc_conn_service((void *)target, &connect, &resp);

	if (status)
		ath6kl_hif_cleanup_scatter(target->dev->ar);

fail_wait_target:
	/* always recycle the ready-message control buffer */
	if (packet) {
		htc_rxpkt_reset(packet);
		reclaim_rx_ctrl_buf(target, packet);
	}

	return status;
}
2300
2301/*
2302 * Start HTC, enable interrupts and let the target know
2303 * host has finished setup.
2304 */
2305int htc_start(struct htc_target *target)
2306{
2307 struct htc_packet *packet;
2308 int status;
2309
2310 /* Disable interrupts at the chip level */
2311 ath6kldev_disable_intrs(target->dev);
2312
2313 target->htc_flags = 0;
2314 target->rx_st_flags = 0;
2315
2316 /* Push control receive buffers into htc control endpoint */
2317 while ((packet = htc_get_control_buf(target, false)) != NULL) {
2318 status = htc_add_rxbuf(target, packet);
2319 if (status)
2320 return status;
2321 }
2322
2323 /* NOTE: the first entry in the distribution list is ENDPOINT_0 */
2324 ath6k_credit_init(target->cred_dist_cntxt, &target->cred_dist_list,
2325 target->tgt_creds);
2326
2327 dump_cred_dist_stats(target);
2328
2329 /* Indicate to the target of the setup completion */
2330 status = htc_setup_tx_complete(target);
2331
2332 if (status)
2333 return status;
2334
2335 /* unmask interrupts */
2336 status = ath6kldev_unmask_intrs(target->dev);
2337
2338 if (status)
2339 htc_stop(target);
2340
2341 return status;
2342}
2343
2344/* htc_stop: stop interrupt reception, and flush all queued buffers */
2345void htc_stop(struct htc_target *target)
2346{
2347 spin_lock_bh(&target->htc_lock);
2348 target->htc_flags |= HTC_OP_STATE_STOPPING;
2349 spin_unlock_bh(&target->htc_lock);
2350
2351 /*
2352 * Masking interrupts is a synchronous operation, when this
2353 * function returns all pending HIF I/O has completed, we can
2354 * safely flush the queues.
2355 */
2356 ath6kldev_mask_intrs(target->dev);
2357
2358 htc_flush_txep_all(target);
2359
2360 htc_flush_rx_buf(target);
2361
2362 reset_ep_state(target);
2363}
2364
2365void *htc_create(struct ath6kl *ar)
2366{
2367 struct htc_target *target = NULL;
2368 struct htc_packet *packet;
2369 int status = 0, i = 0;
2370 u32 block_size, ctrl_bufsz;
2371
2372 target = kzalloc(sizeof(*target), GFP_KERNEL);
2373 if (!target) {
2374 ath6kl_err("unable to allocate memory\n");
2375 return NULL;
2376 }
2377
2378 target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
2379 if (!target->dev) {
2380 ath6kl_err("unable to allocate memory\n");
2381 status = -ENOMEM;
2382 goto fail_create_htc;
2383 }
2384
2385 spin_lock_init(&target->htc_lock);
2386 spin_lock_init(&target->rx_lock);
2387 spin_lock_init(&target->tx_lock);
2388
2389 INIT_LIST_HEAD(&target->free_ctrl_txbuf);
2390 INIT_LIST_HEAD(&target->free_ctrl_rxbuf);
2391 INIT_LIST_HEAD(&target->cred_dist_list);
2392
2393 target->dev->ar = ar;
2394 target->dev->htc_cnxt = target;
2395 target->dev->msg_pending = htc_rxmsg_pending_handler;
2396 target->ep_waiting = ENDPOINT_MAX;
2397
2398 reset_ep_state(target);
2399
2400 status = ath6kldev_setup(target->dev);
2401
2402 if (status)
2403 goto fail_create_htc;
2404
2405 block_size = ar->mbox_info.block_size;
2406
2407 ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
2408 (block_size + HTC_HDR_LENGTH) :
2409 (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);
2410
2411 for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
2412 packet = kzalloc(sizeof(*packet), GFP_KERNEL);
2413 if (!packet)
2414 break;
2415
2416 packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
2417 if (!packet->buf_start) {
2418 kfree(packet);
2419 break;
2420 }
2421
2422 packet->buf_len = ctrl_bufsz;
2423 if (i < NUM_CONTROL_RX_BUFFERS) {
2424 packet->act_len = 0;
2425 packet->buf = packet->buf_start;
2426 packet->endpoint = ENDPOINT_0;
2427 list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
2428 } else
2429 list_add_tail(&packet->list, &target->free_ctrl_txbuf);
2430 }
2431
2432fail_create_htc:
2433 if (i != NUM_CONTROL_BUFFERS || status) {
2434 if (target) {
2435 htc_cleanup(target);
2436 target = NULL;
2437 }
2438 }
2439
2440 return target;
2441}
2442
2443/* cleanup the HTC instance */
2444void htc_cleanup(struct htc_target *target)
2445{
2446 struct htc_packet *packet, *tmp_packet;
2447
2448 ath6kl_hif_cleanup_scatter(target->dev->ar);
2449
2450 list_for_each_entry_safe(packet, tmp_packet,
2451 &target->free_ctrl_txbuf, list) {
2452 list_del(&packet->list);
2453 kfree(packet->buf_start);
2454 kfree(packet);
2455 }
2456
2457 list_for_each_entry_safe(packet, tmp_packet,
2458 &target->free_ctrl_rxbuf, list) {
2459 list_del(&packet->list);
2460 kfree(packet->buf_start);
2461 kfree(packet);
2462 }
2463
2464 kfree(target->dev);
2465 kfree(target);
2466}
diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h
new file mode 100644
index 000000000000..16fa7a84a231
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/htc.h
@@ -0,0 +1,596 @@
1/*
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef HTC_H
18#define HTC_H
19
20#include "common.h"
21
22/* frame header flags */
23
24/* send direction */
25#define HTC_FLAGS_NEED_CREDIT_UPDATE (1 << 0)
26#define HTC_FLAGS_SEND_BUNDLE (1 << 1)
27
28/* receive direction */
29#define HTC_FLG_RX_UNUSED (1 << 0)
30#define HTC_FLG_RX_TRAILER (1 << 1)
/* Bundle count mask and shift */
32#define HTC_FLG_RX_BNDL_CNT (0xF0)
33#define HTC_FLG_RX_BNDL_CNT_S 4
34
35#define HTC_HDR_LENGTH (sizeof(struct htc_frame_hdr))
36#define HTC_MAX_PAYLOAD_LENGTH (4096 - sizeof(struct htc_frame_hdr))
37
38/* HTC control message IDs */
39
40#define HTC_MSG_READY_ID 1
41#define HTC_MSG_CONN_SVC_ID 2
42#define HTC_MSG_CONN_SVC_RESP_ID 3
43#define HTC_MSG_SETUP_COMPLETE_ID 4
44#define HTC_MSG_SETUP_COMPLETE_EX_ID 5
45
46#define HTC_MAX_CTRL_MSG_LEN 256
47
48#define HTC_VERSION_2P0 0x00
49#define HTC_VERSION_2P1 0x01
50
51#define HTC_SERVICE_META_DATA_MAX_LENGTH 128
52
53#define HTC_CONN_FLGS_THRESH_LVL_QUAT 0x0
54#define HTC_CONN_FLGS_THRESH_LVL_HALF 0x1
55#define HTC_CONN_FLGS_THRESH_LVL_THREE_QUAT 0x2
56#define HTC_CONN_FLGS_REDUCE_CRED_DRIB 0x4
57#define HTC_CONN_FLGS_THRESH_MASK 0x3
58
59/* connect response status codes */
60#define HTC_SERVICE_SUCCESS 0
61#define HTC_SERVICE_NOT_FOUND 1
62#define HTC_SERVICE_FAILED 2
63
64/* no resources (i.e. no more endpoints) */
65#define HTC_SERVICE_NO_RESOURCES 3
66
67/* specific service is not allowing any more endpoints */
68#define HTC_SERVICE_NO_MORE_EP 4
69
70/* report record IDs */
71#define HTC_RECORD_NULL 0
72#define HTC_RECORD_CREDITS 1
73#define HTC_RECORD_LOOKAHEAD 2
74#define HTC_RECORD_LOOKAHEAD_BUNDLE 3
75
76#define HTC_SETUP_COMP_FLG_RX_BNDL_EN (1 << 0)
77
78#define MAKE_SERVICE_ID(group, index) \
79 (int)(((int)group << 8) | (int)(index))
80
81/* NOTE: service ID of 0x0000 is reserved and should never be used */
82#define HTC_CTRL_RSVD_SVC MAKE_SERVICE_ID(RSVD_SERVICE_GROUP, 1)
83#define WMI_CONTROL_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 0)
84#define WMI_DATA_BE_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 1)
85#define WMI_DATA_BK_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 2)
86#define WMI_DATA_VI_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 3)
87#define WMI_DATA_VO_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 4)
88#define WMI_MAX_SERVICES 5
89
90/* reserved and used to flush ALL packets */
91#define HTC_TX_PACKET_TAG_ALL 0
92#define HTC_SERVICE_TX_PACKET_TAG 1
93#define HTC_TX_PACKET_TAG_USER_DEFINED (HTC_SERVICE_TX_PACKET_TAG + 9)
94
95/* more packets on this endpoint are being fetched */
96#define HTC_RX_FLAGS_INDICATE_MORE_PKTS (1 << 0)
97
98/* TODO.. for BMI */
99#define ENDPOINT1 0
100/* TODO -remove me, but we have to fix BMI first */
101#define HTC_MAILBOX_NUM_MAX 4
102
103/* enable send bundle padding for this endpoint */
104#define HTC_FLGS_TX_BNDL_PAD_EN (1 << 0)
105#define HTC_EP_ACTIVE ((u32) (1u << 31))
106
107/* HTC operational parameters */
108#define HTC_TARGET_RESPONSE_TIMEOUT 2000 /* in ms */
109#define HTC_TARGET_DEBUG_INTR_MASK 0x01
110#define HTC_TARGET_CREDIT_INTR_MASK 0xF0
111
112#define HTC_HOST_MAX_MSG_PER_BUNDLE 8
113#define HTC_MIN_HTC_MSGS_TO_BUNDLE 2
114
115/* packet flags */
116
117#define HTC_RX_PKT_IGNORE_LOOKAHEAD (1 << 0)
118#define HTC_RX_PKT_REFRESH_HDR (1 << 1)
119#define HTC_RX_PKT_PART_OF_BUNDLE (1 << 2)
120#define HTC_RX_PKT_NO_RECYCLE (1 << 3)
121
122/* scatter request flags */
123
124#define HTC_SCAT_REQ_FLG_PART_BNDL (1 << 0)
125
126#define NUM_CONTROL_BUFFERS 8
127#define NUM_CONTROL_TX_BUFFERS 2
128#define NUM_CONTROL_RX_BUFFERS (NUM_CONTROL_BUFFERS - NUM_CONTROL_TX_BUFFERS)
129
130#define HTC_RECV_WAIT_BUFFERS (1 << 0)
131#define HTC_OP_STATE_STOPPING (1 << 0)
132
133/*
134 * The frame header length and message formats defined herein were selected
135 * to accommodate optimal alignment for target processing. This reduces
136 * code size and improves performance. Any changes to the header length may
137 * alter the alignment and cause exceptions on the target. When adding to
 * the message structures, ensure that fields are properly aligned.
139 */
140
141/* HTC frame header
142 *
143 * NOTE: do not remove or re-arrange the fields, these are minimally
144 * required to take advantage of 4-byte lookaheads in some hardware
145 * implementations.
146 */
struct htc_frame_hdr {
	/* destination/source endpoint ID */
	u8 eid;

	/* HTC_FLAGS_* (tx) / HTC_FLG_RX_* (rx) */
	u8 flags;

	/* length of data (including trailer) that follows the header */
	__le16 payld_len;

	/* end of 4-byte lookahead */

	/* per-direction control bytes (e.g. tx credit seqno, rx trailer len) */
	u8 ctrl[2];
} __packed;
158
159/* HTC ready message */
/* HTC ready message (first message from the target; all fields LE) */
struct htc_ready_msg {
	/* HTC_MSG_READY_ID */
	__le16 msg_id;

	/* number of tx credits the target grants the host */
	__le16 cred_cnt;

	/* size in bytes of a single credit */
	__le16 cred_sz;

	/* maximum endpoint ID supported by the target */
	u8 max_ep;
	u8 pad;
} __packed;

/* extended HTC ready message (>= HTC 2.1 targets append these fields) */
struct htc_ready_ext_msg {
	struct htc_ready_msg ver2_0_info;

	/* HTC protocol version implemented by the target */
	u8 htc_ver;

	/* maximum messages the target will accept in one bundle */
	u8 msg_per_htc_bndl;
} __packed;
174
175/* connect service */
/* connect service request (host -> target, on endpoint 0) */
struct htc_conn_service_msg {
	/* HTC_MSG_CONN_SVC_ID */
	__le16 msg_id;

	/* service to connect (e.g. WMI_CONTROL_SVC) */
	__le16 svc_id;

	/* HTC_CONN_FLGS_* */
	__le16 conn_flags;

	/* length of optional metadata following this header */
	u8 svc_meta_len;
	u8 pad;
} __packed;

/* connect service response (target -> host) */
struct htc_conn_service_resp {
	/* HTC_MSG_CONN_SVC_RESP_ID */
	__le16 msg_id;

	/* echoes the requested service */
	__le16 svc_id;

	/* HTC_SERVICE_SUCCESS or an HTC_SERVICE_* error code */
	u8 status;

	/* endpoint ID assigned by the target for this service */
	u8 eid;

	/* largest message the target accepts on this endpoint */
	__le16 max_msg_sz;

	/* length of optional metadata following this header */
	u8 svc_meta_len;
	u8 pad;
} __packed;
194
/* legacy setup-complete message (HTC_MSG_SETUP_COMPLETE_ID) */
struct htc_setup_comp_msg {
	__le16 msg_id;
} __packed;

/* extended setup completion message (HTC_MSG_SETUP_COMPLETE_EX_ID) */
struct htc_setup_comp_ext_msg {
	__le16 msg_id;

	/* HTC_SETUP_COMP_FLG_* (e.g. enable rx bundling) */
	__le32 flags;

	/* maximum messages the host accepts per rx bundle */
	u8 msg_per_rxbndl;
	u8 Rsvd[3];
} __packed;
206
/* header for each record in an rx trailer (HTC_RECORD_* in rec_id) */
struct htc_record_hdr {
	u8 rec_id;

	/* record payload length, not counting this header */
	u8 len;
} __packed;

/* HTC_RECORD_CREDITS trailer record: credits returned for an endpoint */
struct htc_credit_report {
	u8 eid;
	u8 credits;
} __packed;

/*
 * NOTE: The lk_ahd array is guarded by a pre_valid
 * and Post Valid guard bytes. The pre_valid bytes must
 * equal the inverse of the post_valid byte.
 */
struct htc_lookahead_report {
	u8 pre_valid;
	u8 lk_ahd[4];
	u8 post_valid;
} __packed;

/* HTC_RECORD_LOOKAHEAD_BUNDLE: one lookahead per bundled packet */
struct htc_bundle_lkahd_rpt {
	u8 lk_ahd[4];
} __packed;
231
232/* Current service IDs */
233
/* service group IDs (high byte of a service ID, see MAKE_SERVICE_ID) */
enum htc_service_grp_ids {
	RSVD_SERVICE_GROUP = 0,
	WMI_SERVICE_GROUP = 1,

	HTC_TEST_GROUP = 254,
	HTC_SERVICE_GROUP_LAST = 255
};

/* ------ endpoint IDS ------ */

enum htc_endpoint_id {
	ENDPOINT_UNUSED = -1,

	/* endpoint 0 is reserved for HTC control messages */
	ENDPOINT_0 = 0,
	ENDPOINT_1 = 1,
	ENDPOINT_2 = 2,
	ENDPOINT_3,
	ENDPOINT_4,
	ENDPOINT_5,
	ENDPOINT_6,
	ENDPOINT_7,
	ENDPOINT_8,

	/* number of endpoints / sentinel for "no endpoint" */
	ENDPOINT_MAX,
};
257
/* per-packet transmit state (host-side only, not on the wire) */
struct htc_tx_packet_info {
	/* caller-supplied tag, used for selective flush */
	u16 tag;

	/* credits consumed by this packet */
	int cred_used;

	/* HTC frame header flags used at send time */
	u8 flags;

	/* send sequence number on the endpoint */
	int seqno;
};

/* per-packet receive state (host-side only, not on the wire) */
struct htc_rx_packet_info {
	/* expected lookahead (first word of the HTC header) */
	u32 exp_hdr;

	/* HTC_RX_PKT_* flags */
	u32 rx_flags;

	/* HTC_RX_FLAGS_* indication flags */
	u32 indicat_flags;
};
270
struct htc_target;

/* wrapper around endpoint-specific packets */
struct htc_packet {
	struct list_head list;

	/* caller's per packet specific context */
	void *pkt_cntxt;

	/*
	 * the true buffer start , the caller can store the real
	 * buffer start here. In receive callbacks, the HTC layer
	 * sets buf to the start of the payload past the header.
	 * This field allows the caller to reset buf when it recycles
	 * receive packets back to HTC.
	 */
	u8 *buf_start;

	/*
	 * Pointer to the start of the buffer. In the transmit
	 * direction this points to the start of the payload. In the
	 * receive direction, however, the buffer when queued up
	 * points to the start of the HTC header but when returned
	 * to the caller points to the start of the payload
	 */
	u8 *buf;

	/* total usable length of the buffer at buf */
	u32 buf_len;

	/* actual length of payload */
	u32 act_len;

	/* endpoint that this packet was sent/recv'd from */
	enum htc_endpoint_id endpoint;

	/* completion status */

	int status;

	/* direction-specific state; which member is valid depends on use */
	union {
		struct htc_tx_packet_info tx;
		struct htc_rx_packet_info rx;
	} info;

	/* called when the send/recv completes (NULL = synchronous) */
	void (*completion) (struct htc_target *, struct htc_packet *);
	struct htc_target *context;
};
316
/* return value of the tx_full callback: what to do with the packet when
 * an endpoint's tx queue is full */
enum htc_send_full_action {
	HTC_SEND_FULL_KEEP = 0,		/* queue the packet anyway */
	HTC_SEND_FULL_DROP = 1,		/* drop the packet */
};
321
/* callbacks a service registers with its endpoint at connect time */
struct htc_ep_callbacks {
	/* a packet was received on this endpoint */
	void (*rx) (struct htc_target *, struct htc_packet *);
	/* rx buffer count fell below rx_refill_thresh; refill buffers */
	void (*rx_refill) (struct htc_target *, enum htc_endpoint_id endpoint);
	/* tx queue is full; decide whether to keep or drop the packet */
	enum htc_send_full_action (*tx_full) (struct htc_target *,
					      struct htc_packet *);
	/* allocate an rx packet when message length >= rx_alloc_thresh */
	struct htc_packet *(*rx_allocthresh) (struct htc_target *,
					      enum htc_endpoint_id, int);
	int rx_alloc_thresh;
	int rx_refill_thresh;
};
332
/* service connection information (input to htc_conn_service) */
struct htc_service_connect_req {
	u16 svc_id;			/* service to connect to */
	u16 conn_flags;			/* connection flags for the target */
	struct htc_ep_callbacks ep_cb;	/* endpoint callbacks */
	int max_txq_depth;		/* max queued tx packets */
	u32 flags;
	unsigned int max_rxmsg_sz;	/* max receive message size, bytes */
};
342
/* service connection response information (output of htc_conn_service) */
struct htc_service_connect_resp {
	u8 buf_len;
	u8 act_len;
	enum htc_endpoint_id endpoint;	/* endpoint assigned to the service */
	unsigned int len_max;		/* max message length on this endpoint */
	u8 resp_code;			/* connect response code from target */
};
351
/* endpoint credit distribution structure */
struct htc_endpoint_credit_dist {
	struct list_head list;

	/* Service ID (set by HTC) */
	u16 svc_id;

	/* endpoint for this credit distribution structure (set by HTC) */
	enum htc_endpoint_id endpoint;

	u32 dist_flags;

	/*
	 * credits for normal operation, anything above this
	 * indicates the endpoint is over-subscribed.
	 */
	int cred_norm;

	/* floor for credit distribution */
	int cred_min;

	/* credits currently assigned to this endpoint */
	int cred_assngd;

	/* current credits available */
	int credits;

	/*
	 * pending credits to distribute on this endpoint, this
	 * is set by HTC when credit reports arrive. The credit
	 * distribution functions sets this to zero when it distributes
	 * the credits.
	 */
	int cred_to_dist;

	/*
	 * the number of credits that the current pending TX packet needs
	 * to transmit. This is set by HTC when endpoint needs credits in
	 * order to transmit.
	 */
	int seek_cred;

	/* size in bytes of each credit */
	int cred_sz;

	/* credits required for a maximum sized messages */
	int cred_per_msg;

	/* reserved for HTC use */
	void *htc_rsvd;

	/*
	 * current depth of TX queue, i.e. messages waiting for credits
	 * This field is valid only when HTC_CREDIT_DIST_ACTIVITY_CHANGE
	 * or HTC_CREDIT_DIST_SEND_COMPLETE is indicated on an endpoint
	 * that has non-zero credits to recover.
	 */
	int txq_depth;
};
410
/*
 * credit distribution code that is passed into the distribution function,
 * there are mandatory and optional codes that must be handled
 */
enum htc_credit_dist_reason {
	HTC_CREDIT_DIST_SEND_COMPLETE = 0,	/* a send completed */
	HTC_CREDIT_DIST_ACTIVITY_CHANGE = 1,	/* endpoint activity changed */
	HTC_CREDIT_DIST_SEEK_CREDITS,		/* endpoint needs credits */
};
420
/* global credit accounting state shared by the distribution functions */
struct htc_credit_state_info {
	int total_avail_credits;	/* total credits the target offers */
	int cur_free_credits;		/* credits not yet assigned */
	/* lowest-priority endpoint in the priority-ordered dist list */
	struct list_head lowestpri_ep_dist;
};
426
/* endpoint statistics */
struct htc_endpoint_stats {
	/*
	 * number of times the host set the credit-low flag in a send
	 * message on this endpoint
	 */
	u32 cred_low_indicate;

	u32 tx_issued;
	u32 tx_pkt_bundled;
	u32 tx_bundles;
	u32 tx_dropped;

	/* running count of total credit reports received for this endpoint */
	u32 tx_cred_rpt;

	/* credit reports received from this endpoint's RX packets */
	u32 cred_rpt_from_rx;

	/* credit reports received from RX packets of other endpoints */
	u32 cred_rpt_from_other;

	/* credit reports received from endpoint 0 RX packets */
	u32 cred_rpt_ep0;

	/* count of credits received via Rx packets on this endpoint */
	u32 cred_from_rx;

	/* count of credits received via another endpoint */
	u32 cred_from_other;

	/* count of credits received via endpoint 0 */
	u32 cred_from_ep0;

	/* count of consumed credits */
	u32 cred_cosumd;

	/* count of credits returned */
	u32 cred_retnd;

	u32 rx_pkts;

	/* count of lookahead records found in Rx msg */
	u32 rx_lkahds;

	/* count of recv packets received in a bundle */
	u32 rx_bundl;

	/* count of number of bundled lookaheads */
	u32 rx_bundle_lkahd;

	/* count of the number of bundle indications from the HTC header */
	u32 rx_bundle_from_hdr;

	/* the number of times the recv allocation threshold was hit */
	u32 rx_alloc_thresh_hit;

	/* total number of bytes */
	u32 rxalloc_thresh_byte;
};
487
/* per-endpoint state kept by the HTC layer */
struct htc_endpoint {
	enum htc_endpoint_id eid;
	u16 svc_id;			/* service connected on this endpoint */
	struct list_head txq;		/* packets waiting to be sent */
	struct list_head rx_bufq;	/* receive buffers available to HTC */
	struct htc_endpoint_credit_dist cred_dist;
	struct htc_ep_callbacks ep_cb;	/* callbacks registered at connect */
	int max_txq_depth;
	int len_max;			/* max message length on this endpoint */
	int tx_proc_cnt;
	int rx_proc_cnt;
	struct htc_target *target;	/* back-pointer to owning target */
	u8 seqno;			/* tx sequence number */
	u32 conn_flags;
	struct htc_endpoint_stats ep_st;
};
504
/* a control packet paired with its backing buffer */
struct htc_control_buffer {
	struct htc_packet packet;
	u8 *buf;
};
509
510struct ath6kl_device;
511
/* our HTC target state */
struct htc_target {
	struct htc_endpoint endpoint[ENDPOINT_MAX];
	/* priority-ordered list of endpoint credit dist structs */
	struct list_head cred_dist_list;
	struct list_head free_ctrl_txbuf;	/* free control tx buffers */
	struct list_head free_ctrl_rxbuf;	/* free control rx buffers */
	struct htc_credit_state_info *cred_dist_cntxt;
	int tgt_creds;				/* credits granted by target */
	unsigned int tgt_cred_sz;		/* size of one credit, bytes */
	/* NOTE(review): lock protection boundaries are not visible in this
	 * header — presumably htc_lock guards credit/control-buffer state,
	 * rx_lock/tx_lock the per-direction queues; confirm in htc.c */
	spinlock_t htc_lock;
	spinlock_t rx_lock;
	spinlock_t tx_lock;
	struct ath6kl_device *dev;
	u32 htc_flags;
	u32 rx_st_flags;
	enum htc_endpoint_id ep_waiting;
	u8 htc_tgt_ver;

	/* max messages per bundle for HTC */
	int msg_per_bndl_max;

	bool tx_bndl_enable;
	int rx_bndl_enable;
};
536
537void *htc_create(struct ath6kl *ar);
538void htc_set_credit_dist(struct htc_target *target,
539 struct htc_credit_state_info *cred_info,
540 u16 svc_pri_order[], int len);
541int htc_wait_target(struct htc_target *target);
542int htc_start(struct htc_target *target);
543int htc_conn_service(struct htc_target *target,
544 struct htc_service_connect_req *req,
545 struct htc_service_connect_resp *resp);
546int htc_tx(struct htc_target *target, struct htc_packet *packet);
547void htc_stop(struct htc_target *target);
548void htc_cleanup(struct htc_target *target);
549void htc_flush_txep(struct htc_target *target,
550 enum htc_endpoint_id endpoint, u16 tag);
551void htc_flush_rx_buf(struct htc_target *target);
552void htc_indicate_activity_change(struct htc_target *target,
553 enum htc_endpoint_id endpoint, bool active);
554int htc_get_rxbuf_num(struct htc_target *target, enum htc_endpoint_id endpoint);
555int htc_add_rxbuf_multiple(struct htc_target *target, struct list_head *pktq);
556
557static inline void set_htc_pkt_info(struct htc_packet *packet, void *context,
558 u8 *buf, unsigned int len,
559 enum htc_endpoint_id eid, u16 tag)
560{
561 packet->pkt_cntxt = context;
562 packet->buf = buf;
563 packet->act_len = len;
564 packet->endpoint = eid;
565 packet->info.tx.tag = tag;
566}
567
568static inline void htc_rxpkt_reset(struct htc_packet *packet)
569{
570 packet->buf = packet->buf_start;
571 packet->act_len = 0;
572}
573
574static inline void set_htc_rxpkt_info(struct htc_packet *packet, void *context,
575 u8 *buf, unsigned long len,
576 enum htc_endpoint_id eid)
577{
578 packet->pkt_cntxt = context;
579 packet->buf = buf;
580 packet->buf_start = buf;
581 packet->buf_len = len;
582 packet->endpoint = eid;
583}
584
585static inline int get_queue_depth(struct list_head *queue)
586{
587 struct list_head *tmp_list;
588 int depth = 0;
589
590 list_for_each(tmp_list, queue)
591 depth++;
592
593 return depth;
594}
595
596#endif
diff --git a/drivers/net/wireless/ath/ath6kl/htc_hif.c b/drivers/net/wireless/ath/ath6kl/htc_hif.c
new file mode 100644
index 000000000000..1bcaaec579c5
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/htc_hif.c
@@ -0,0 +1,811 @@
1/*
2 * Copyright (c) 2007-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "target.h"
19#include "hif-ops.h"
20#include "htc_hif.h"
21#include "debug.h"
22
23#define MAILBOX_FOR_BLOCK_SIZE 1
24
25#define ATH6KL_TIME_QUANTUM 10 /* in ms */
26
/* Return a register I/O packet to the device's free list (dev->reg_io),
 * taking dev->lock; safe to call from bottom-half context. */
static void ath6kl_add_io_pkt(struct ath6kl_device *dev,
			      struct htc_packet *packet)
{
	spin_lock_bh(&dev->lock);
	list_add_tail(&packet->list, &dev->reg_io);
	spin_unlock_bh(&dev->lock);
}
34
/* Take a register I/O packet off the device's free list under dev->lock.
 * Returns NULL if no packets are available. */
static struct htc_packet *ath6kl_get_io_pkt(struct ath6kl_device *dev)
{
	struct htc_packet *packet = NULL;

	spin_lock_bh(&dev->lock);
	if (!list_empty(&dev->reg_io)) {
		packet = list_first_entry(&dev->reg_io,
					  struct htc_packet, list);
		list_del(&packet->list);
	}
	spin_unlock_bh(&dev->lock);

	return packet;
}
49
50static int ath6kldev_cp_scat_dma_buf(struct hif_scatter_req *req, bool from_dma)
51{
52 u8 *buf;
53 int i;
54
55 buf = req->virt_dma_buf;
56
57 for (i = 0; i < req->scat_entries; i++) {
58
59 if (from_dma)
60 memcpy(req->scat_list[i].buf, buf,
61 req->scat_list[i].len);
62 else
63 memcpy(buf, req->scat_list[i].buf,
64 req->scat_list[i].len);
65
66 buf += req->scat_list[i].len;
67 }
68
69 return 0;
70}
71
72int ath6kldev_rw_comp_handler(void *context, int status)
73{
74 struct htc_packet *packet = context;
75
76 ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
77 "ath6kldev_rw_comp_handler (pkt:0x%p , status: %d\n",
78 packet, status);
79
80 packet->status = status;
81 packet->completion(packet->context, packet);
82
83 return 0;
84}
85
/* Handle a target debug (assertion) interrupt: notify the upper layer via
 * ath6kl_target_failure() and ack the interrupt by reading the decrementing
 * counter register. Returns the status of the register read. */
static int ath6kldev_proc_dbg_intr(struct ath6kl_device *dev)
{
	u32 dummy;
	int status;

	ath6kl_err("target debug interrupt\n");

	ath6kl_target_failure(dev->ar);

	/*
	 * read counter to clear the interrupt, the debug error interrupt is
	 * counter 0.
	 */
	status = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS,
				     (u8 *)&dummy, 4, HIF_RD_SYNC_BYTE_INC);
	if (status)
		WARN_ON(1);

	return status;
}
106
/*
 * Mailbox recv message polling: busy-poll the IRQ status registers until
 * the HTC mailbox has a message with a valid lookahead, or the timeout
 * (in ms) expires. On success *lk_ahd holds the lookahead value; on
 * timeout returns -ETIME. Note this uses mdelay(), so it busy-waits in
 * ATH6KL_TIME_QUANTUM steps — intended for boot-time use only.
 */
int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
			      int timeout)
{
	struct ath6kl_irq_proc_registers *rg;
	int status = 0, i;
	u8 htc_mbox = 1 << HTC_MAILBOX;

	for (i = timeout / ATH6KL_TIME_QUANTUM; i > 0; i--) {
		/* this is the standard HIF way, load the reg table */
		status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS,
					     (u8 *) &dev->irq_proc_reg,
					     sizeof(dev->irq_proc_reg),
					     HIF_RD_SYNC_BYTE_INC);

		if (status) {
			ath6kl_err("failed to read reg table\n");
			return status;
		}

		/* check for MBOX data and valid lookahead */
		if (dev->irq_proc_reg.host_int_status & htc_mbox) {
			if (dev->irq_proc_reg.rx_lkahd_valid &
			    htc_mbox) {
				/*
				 * Mailbox has a message and the look ahead
				 * is valid.
				 */
				rg = &dev->irq_proc_reg;
				*lk_ahd =
					le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]);
				break;
			}
		}

		/* delay a little */
		mdelay(ATH6KL_TIME_QUANTUM);
		ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "retry mbox poll : %d\n", i);
	}

	/* i == 0 means the loop ran to completion without a valid lookahead */
	if (i == 0) {
		ath6kl_err("timeout waiting for recv message\n");
		status = -ETIME;
		/* check if the target asserted */
		if (dev->irq_proc_reg.counter_int_status &
		    ATH6KL_TARGET_DEBUG_INTR_MASK)
			/*
			 * Target failure handler will be called in case of
			 * an assert.
			 */
			ath6kldev_proc_dbg_intr(dev);
	}

	return status;
}
162
/*
 * Disable packet reception (used in case the host runs out of buffers)
 * using the interrupt enable registers through the host I/F.
 * Updates the shadow copy under dev->lock, then writes a snapshot to the
 * target outside the lock so the sync I/O does not sleep with it held.
 */
int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx)
{
	struct ath6kl_irq_enable_reg regs;
	int status = 0;

	/* take the lock to protect interrupt enable shadows */
	spin_lock_bh(&dev->lock);

	if (enable_rx)
		dev->irq_en_reg.int_status_en |=
			SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);
	else
		dev->irq_en_reg.int_status_en &=
			~SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);

	/* snapshot the shadow registers for the write below */
	memcpy(&regs, &dev->irq_en_reg, sizeof(regs));

	spin_unlock_bh(&dev->lock);

	status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
				     &regs.int_status_en,
				     sizeof(struct ath6kl_irq_enable_reg),
				     HIF_WR_SYNC_BYTE_INC);

	return status;
}
193
194static void ath6kldev_rw_async_handler(struct htc_target *target,
195 struct htc_packet *packet)
196{
197 struct ath6kl_device *dev = target->dev;
198 struct hif_scatter_req *req = packet->pkt_cntxt;
199
200 req->status = packet->status;
201
202 ath6kl_add_io_pkt(dev, packet);
203
204 req->complete(req);
205}
206
207static int ath6kldev_rw_scatter(struct ath6kl *ar, struct hif_scatter_req *req)
208{
209 struct ath6kl_device *dev = ar->htc_target->dev;
210 struct htc_packet *packet = NULL;
211 int status = 0;
212 u32 request = req->req;
213 u8 *virt_dma_buf;
214
215 if (!req->len)
216 return 0;
217
218 if (request & HIF_ASYNCHRONOUS) {
219 /* use an I/O packet to carry this request */
220 packet = ath6kl_get_io_pkt(dev);
221 if (!packet) {
222 status = -ENOMEM;
223 goto out;
224 }
225
226 packet->pkt_cntxt = req;
227 packet->completion = ath6kldev_rw_async_handler;
228 packet->context = ar->htc_target;
229 }
230
231 virt_dma_buf = req->virt_dma_buf;
232
233 if (request & HIF_ASYNCHRONOUS)
234 status = hif_write_async(dev->ar, req->addr, virt_dma_buf,
235 req->len, request, packet);
236 else
237 status = hif_read_write_sync(dev->ar, req->addr, virt_dma_buf,
238 req->len, request);
239
240out:
241 if (status)
242 if (request & HIF_ASYNCHRONOUS) {
243 if (packet != NULL)
244 ath6kl_add_io_pkt(dev, packet);
245 req->status = status;
246 req->complete(req);
247 status = 0;
248 }
249
250 return status;
251}
252
/*
 * Submit a scatter request to the HIF layer. Reads are synchronous from
 * the main mailbox address; writes are asynchronous and use the extended
 * mailbox address when the transfer exceeds HIF_MBOX_WIDTH. In virtual
 * scatter mode the scatter entries are bounced through a contiguous DMA
 * buffer (copied in before a write, copied out after a read).
 */
int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
			      struct hif_scatter_req *scat_req, bool read)
{
	int status = 0;

	if (read) {
		scat_req->req = HIF_RD_SYNC_BLOCK_FIX;
		scat_req->addr = dev->ar->mbox_info.htc_addr;
	} else {
		scat_req->req = HIF_WR_ASYNC_BLOCK_INC;

		scat_req->addr =
			(scat_req->len > HIF_MBOX_WIDTH) ?
			dev->ar->mbox_info.htc_ext_addr :
			dev->ar->mbox_info.htc_addr;
	}

	ath6kl_dbg((ATH6KL_DBG_HTC_RECV | ATH6KL_DBG_HTC_SEND),
		   "ath6kldev_submit_scat_req, entries: %d, total len: %d mbox:0x%X (mode: %s : %s)\n",
		   scat_req->scat_entries, scat_req->len,
		   scat_req->addr, !read ? "async" : "sync",
		   (read) ? "rd" : "wr");

	/* for virtual-scatter writes, gather the entries into the bounce
	 * buffer before handing off the request */
	if (!read && dev->virt_scat)
		status = ath6kldev_cp_scat_dma_buf(scat_req, false);

	if (status) {
		if (!read) {
			/* async write: report the failure via the
			 * completion and return success to the caller */
			scat_req->status = status;
			scat_req->complete(scat_req);
			return 0;
		}
		return status;
	}

	status = dev->hif_scat_info.rw_scat_func(dev->ar, scat_req);

	if (read) {
		/* in sync mode, we can touch the scatter request */
		scat_req->status = status;
		if (!status && dev->virt_scat)
			scat_req->status =
				ath6kldev_cp_scat_dma_buf(scat_req, true);
	}

	return status;
}
300
/*
 * function to set up virtual scatter support if HIF
 * layer has not implemented the interface.
 * Preallocates ATH6KL_SCATTER_REQS scatter requests, each with a
 * cache-aligned bounce buffer, and registers them with the HIF layer.
 * On any allocation failure all previously added requests are torn down
 * via ath6kl_hif_cleanup_scatter() and -ENOMEM is returned.
 */
static int ath6kldev_setup_virt_scat_sup(struct ath6kl_device *dev)
{
	struct hif_scatter_req *scat_req;
	int buf_sz, scat_req_sz, scat_list_sz;
	int i, status = 0;
	u8 *virt_dma_buf;

	/* over-allocate so the buffer can be aligned to a cache line */
	buf_sz = 2 * L1_CACHE_BYTES + ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;

	/* hif_scatter_req already contains one scat_list entry */
	scat_list_sz = (ATH6KL_SCATTER_ENTRIES_PER_REQ - 1) *
		       sizeof(struct hif_scatter_item);
	scat_req_sz = sizeof(*scat_req) + scat_list_sz;

	for (i = 0; i < ATH6KL_SCATTER_REQS; i++) {
		scat_req = kzalloc(scat_req_sz, GFP_KERNEL);

		if (!scat_req) {
			status = -ENOMEM;
			break;
		}

		virt_dma_buf = kzalloc(buf_sz, GFP_KERNEL);
		if (!virt_dma_buf) {
			kfree(scat_req);
			status = -ENOMEM;
			break;
		}

		/*
		 * NOTE(review): only the cache-aligned address is stored;
		 * the original kzalloc() pointer is lost here, so the
		 * free path must not kfree(virt_dma_buf) directly —
		 * confirm in ath6kl_hif_cleanup_scatter() (not visible
		 * in this file).
		 */
		scat_req->virt_dma_buf =
			(u8 *)L1_CACHE_ALIGN((unsigned long)virt_dma_buf);

		/* we emulate a DMA bounce interface */
		hif_scatter_req_add(dev->ar, scat_req);
	}

	if (status)
		ath6kl_hif_cleanup_scatter(dev->ar);
	else {
		dev->hif_scat_info.rw_scat_func = ath6kldev_rw_scatter;
		dev->hif_scat_info.max_scat_entries =
			ATH6KL_SCATTER_ENTRIES_PER_REQ;
		dev->hif_scat_info.max_xfer_szper_scatreq =
			ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER;
		dev->virt_scat = true;
	}

	return status;
}
353
/*
 * Enable message bundling: ask the HIF layer for native scatter support
 * and fall back to the virtual (bounce-buffer) scatter mechanism if it
 * has none. The resulting limits are reported via dev->hif_scat_info.
 *
 * NOTE(review): the max_msg_per_trans parameter is not used anywhere in
 * this function — presumably the HIF layer dictates its own limits;
 * confirm with callers whether it can be dropped.
 */
int ath6kldev_setup_msg_bndl(struct ath6kl_device *dev, int max_msg_per_trans)
{
	int status;

	status = ath6kl_hif_enable_scatter(dev->ar, &dev->hif_scat_info);

	if (status) {
		ath6kl_warn("hif does not support scatter requests (%d)\n",
			    status);

		/* we can try to use a virtual DMA scatter mechanism */
		status = ath6kldev_setup_virt_scat_sup(dev);
	}

	if (!status)
		ath6kl_dbg(ATH6KL_DBG_ANY, "max scatter items:%d: maxlen:%d\n",
			   dev->hif_scat_info.max_scat_entries,
			   dev->hif_scat_info.max_xfer_szper_scatreq);

	return status;
}
375
376static int ath6kldev_proc_counter_intr(struct ath6kl_device *dev)
377{
378 u8 counter_int_status;
379
380 ath6kl_dbg(ATH6KL_DBG_IRQ, "counter interrupt\n");
381
382 counter_int_status = dev->irq_proc_reg.counter_int_status &
383 dev->irq_en_reg.cntr_int_status_en;
384
385 ath6kl_dbg(ATH6KL_DBG_IRQ,
386 "valid interrupt source(s) in COUNTER_INT_STATUS: 0x%x\n",
387 counter_int_status);
388
389 /*
390 * NOTE: other modules like GMBOX may use the counter interrupt for
391 * credit flow control on other counters, we only need to check for
392 * the debug assertion counter interrupt.
393 */
394 if (counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK)
395 return ath6kldev_proc_dbg_intr(dev);
396
397 return 0;
398}
399
/*
 * Handle an error interrupt: log the cause(s), clear the shadow copy and
 * write-1-to-clear the ERROR_INT_STATUS register. The 4-byte fixed write
 * keeps the access 4-byte aligned (see ath6kldev_proc_cpu_intr).
 * Returns -EIO if no error source is set (spurious call).
 */
static int ath6kldev_proc_err_intr(struct ath6kl_device *dev)
{
	int status;
	u8 error_int_status;
	u8 reg_buf[4];

	ath6kl_dbg(ATH6KL_DBG_IRQ, "error interrupt\n");

	/* only the low 4 bits carry error sources */
	error_int_status = dev->irq_proc_reg.error_int_status & 0x0F;
	if (!error_int_status) {
		WARN_ON(1);
		return -EIO;
	}

	ath6kl_dbg(ATH6KL_DBG_IRQ,
		   "valid interrupt source(s) in ERROR_INT_STATUS: 0x%x\n",
		   error_int_status);

	if (MS(ERROR_INT_STATUS_WAKEUP, error_int_status))
		ath6kl_dbg(ATH6KL_DBG_IRQ, "error : wakeup\n");

	if (MS(ERROR_INT_STATUS_RX_UNDERFLOW, error_int_status))
		ath6kl_err("rx underflow\n");

	if (MS(ERROR_INT_STATUS_TX_OVERFLOW, error_int_status))
		ath6kl_err("tx overflow\n");

	/* Clear the interrupt */
	dev->irq_proc_reg.error_int_status &= ~error_int_status;

	/* set W1C value to clear the interrupt, this hits the register first */
	reg_buf[0] = error_int_status;
	reg_buf[1] = 0;
	reg_buf[2] = 0;
	reg_buf[3] = 0;

	status = hif_read_write_sync(dev->ar, ERROR_INT_STATUS_ADDRESS,
				     reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);

	if (status)
		WARN_ON(1);

	return status;
}
444
445static int ath6kldev_proc_cpu_intr(struct ath6kl_device *dev)
446{
447 int status;
448 u8 cpu_int_status;
449 u8 reg_buf[4];
450
451 ath6kl_dbg(ATH6KL_DBG_IRQ, "cpu interrupt\n");
452
453 cpu_int_status = dev->irq_proc_reg.cpu_int_status &
454 dev->irq_en_reg.cpu_int_status_en;
455 if (!cpu_int_status) {
456 WARN_ON(1);
457 return -EIO;
458 }
459
460 ath6kl_dbg(ATH6KL_DBG_IRQ,
461 "valid interrupt source(s) in CPU_INT_STATUS: 0x%x\n",
462 cpu_int_status);
463
464 /* Clear the interrupt */
465 dev->irq_proc_reg.cpu_int_status &= ~cpu_int_status;
466
467 /*
468 * Set up the register transfer buffer to hit the register 4 times ,
469 * this is done to make the access 4-byte aligned to mitigate issues
470 * with host bus interconnects that restrict bus transfer lengths to
471 * be a multiple of 4-bytes.
472 */
473
474 /* set W1C value to clear the interrupt, this hits the register first */
475 reg_buf[0] = cpu_int_status;
476 /* the remaining are set to zero which have no-effect */
477 reg_buf[1] = 0;
478 reg_buf[2] = 0;
479 reg_buf[3] = 0;
480
481 status = hif_read_write_sync(dev->ar, CPU_INT_STATUS_ADDRESS,
482 reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);
483
484 if (status)
485 WARN_ON(1);
486
487 return status;
488}
489
/*
 * Process pending interrupts synchronously. Reads the IRQ status register
 * block, dispatches mailbox, CPU, error and counter interrupts, and sets
 * *done when no further processing is needed. Returns 0 or the first I/O
 * error encountered.
 */
static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
{
	struct ath6kl_irq_proc_registers *rg;
	int status = 0;
	u8 host_int_status = 0;
	u32 lk_ahd = 0;
	u8 htc_mbox = 1 << HTC_MAILBOX;

	ath6kl_dbg(ATH6KL_DBG_IRQ, "proc_pending_irqs: (dev: 0x%p)\n", dev);

	/*
	 * NOTE: HIF implementation guarantees that the context of this
	 * call allows us to perform SYNCHRONOUS I/O, that is we can block,
	 * sleep or call any API that can block or switch thread/task
	 * contexts. This is a fully schedulable context.
	 */

	/*
	 * Process pending intr only when int_status_en is clear, it may
	 * result in unnecessary bus transaction otherwise. Target may be
	 * unresponsive at the time.
	 */
	if (dev->irq_en_reg.int_status_en) {
		/*
		 * Read the first 28 bytes of the HTC register table. This
		 * will yield us the value of different int status
		 * registers and the lookahead registers.
		 *
		 * length = sizeof(int_status) + sizeof(cpu_int_status)
		 *	    + sizeof(error_int_status) +
		 *	    sizeof(counter_int_status) +
		 *	    sizeof(mbox_frame) + sizeof(rx_lkahd_valid)
		 *	    + sizeof(hole) + sizeof(rx_lkahd) +
		 *	    sizeof(int_status_en) +
		 *	    sizeof(cpu_int_status_en) +
		 *	    sizeof(err_int_status_en) +
		 *	    sizeof(cntr_int_status_en);
		 */
		status = hif_read_write_sync(dev->ar, HOST_INT_STATUS_ADDRESS,
					     (u8 *) &dev->irq_proc_reg,
					     sizeof(dev->irq_proc_reg),
					     HIF_RD_SYNC_BYTE_INC);
		if (status)
			goto out;

		if (AR_DBG_LVL_CHECK(ATH6KL_DBG_IRQ))
			ath6kl_dump_registers(dev, &dev->irq_proc_reg,
					      &dev->irq_en_reg);

		/* Update only those registers that are enabled */
		host_int_status = dev->irq_proc_reg.host_int_status &
				  dev->irq_en_reg.int_status_en;

		/* Look at mbox status */
		if (host_int_status & htc_mbox) {
			/*
			 * Mask out pending mbox value, we use the lookahead
			 * as the real flag for mbox processing.
			 */
			host_int_status &= ~htc_mbox;
			if (dev->irq_proc_reg.rx_lkahd_valid &
			    htc_mbox) {
				rg = &dev->irq_proc_reg;
				lk_ahd = le32_to_cpu(rg->rx_lkahd[HTC_MAILBOX]);
				if (!lk_ahd)
					ath6kl_err("lookAhead is zero!\n");
			}
		}
	}

	/* nothing pending at all: tell the caller to stop re-checking */
	if (!host_int_status && !lk_ahd) {
		*done = true;
		goto out;
	}

	if (lk_ahd) {
		int fetched = 0;

		ath6kl_dbg(ATH6KL_DBG_IRQ,
			   "pending mailbox msg, lk_ahd: 0x%X\n", lk_ahd);
		/*
		 * Mailbox Interrupt, the HTC layer may issue async
		 * requests to empty the mailbox. When emptying the recv
		 * mailbox we use the async handler above called from the
		 * completion routine of the callers read request. This can
		 * improve performance by reducing context switching when
		 * we rapidly pull packets.
		 */
		status = dev->msg_pending(dev->htc_cnxt, &lk_ahd, &fetched);
		if (status)
			goto out;

		if (!fetched)
			/*
			 * HTC could not pull any messages out due to lack
			 * of resources.
			 */
			dev->chk_irq_status_cnt = 0;
	}

	/* now handle the rest of them */
	ath6kl_dbg(ATH6KL_DBG_IRQ,
		   "valid interrupt source(s) for other interrupts: 0x%x\n",
		   host_int_status);

	if (MS(HOST_INT_STATUS_CPU, host_int_status)) {
		/* CPU Interrupt */
		status = ath6kldev_proc_cpu_intr(dev);
		if (status)
			goto out;
	}

	if (MS(HOST_INT_STATUS_ERROR, host_int_status)) {
		/* Error Interrupt */
		status = ath6kldev_proc_err_intr(dev);
		if (status)
			goto out;
	}

	if (MS(HOST_INT_STATUS_COUNTER, host_int_status))
		/* Counter Interrupt */
		status = ath6kldev_proc_counter_intr(dev);

out:
	/*
	 * An optimization to bypass reading the IRQ status registers
	 * unecessarily which can re-wake the target, if upper layers
	 * determine that we are in a low-throughput mode, we can rely on
	 * taking another interrupt rather than re-checking the status
	 * registers which can re-wake the target.
	 *
	 * NOTE : for host interfaces that makes use of detecting pending
	 * mbox messages at hif can not use this optimization due to
	 * possible side effects, SPI requires the host to drain all
	 * messages from the mailbox before exiting the ISR routine.
	 */

	ath6kl_dbg(ATH6KL_DBG_IRQ,
		   "bypassing irq status re-check, forcing done\n");

	*done = true;

	ath6kl_dbg(ATH6KL_DBG_IRQ,
		   "proc_pending_irqs: (done:%d, status=%d\n", *done, status);

	return status;
}
638
639/* interrupt handler, kicks off all interrupt processing */
640int ath6kldev_intr_bh_handler(struct ath6kl *ar)
641{
642 struct ath6kl_device *dev = ar->htc_target->dev;
643 int status = 0;
644 bool done = false;
645
646 /*
647 * Reset counter used to flag a re-scan of IRQ status registers on
648 * the target.
649 */
650 dev->chk_irq_status_cnt = 0;
651
652 /*
653 * IRQ processing is synchronous, interrupt status registers can be
654 * re-read.
655 */
656 while (!done) {
657 status = proc_pending_irqs(dev, &done);
658 if (status)
659 break;
660 }
661
662 return status;
663}
664
/*
 * Program the target's interrupt enable registers: mailbox data, error
 * and counter (debug assert) interrupts on; target CPU sources off.
 * The shadow copy is updated under dev->lock and a snapshot is written
 * to the target outside the lock.
 */
static int ath6kldev_enable_intrs(struct ath6kl_device *dev)
{
	struct ath6kl_irq_enable_reg regs;
	int status;

	spin_lock_bh(&dev->lock);

	/* Enable all but ATH6KL CPU interrupts */
	dev->irq_en_reg.int_status_en =
		SM(INT_STATUS_ENABLE_ERROR, 0x01) |
		SM(INT_STATUS_ENABLE_CPU, 0x01) |
		SM(INT_STATUS_ENABLE_COUNTER, 0x01);

	/*
	 * NOTE: There are some cases where HIF can do detection of
	 * pending mbox messages which is disabled now.
	 */
	dev->irq_en_reg.int_status_en |= SM(INT_STATUS_ENABLE_MBOX_DATA, 0x01);

	/* Set up the CPU Interrupt status Register */
	dev->irq_en_reg.cpu_int_status_en = 0;

	/* Set up the Error Interrupt status Register */
	dev->irq_en_reg.err_int_status_en =
		SM(ERROR_STATUS_ENABLE_RX_UNDERFLOW, 0x01) |
		SM(ERROR_STATUS_ENABLE_TX_OVERFLOW, 0x1);

	/*
	 * Enable Counter interrupt status register to get fatal errors for
	 * debugging.
	 */
	dev->irq_en_reg.cntr_int_status_en = SM(COUNTER_INT_STATUS_ENABLE_BIT,
						ATH6KL_TARGET_DEBUG_INTR_MASK);
	/* snapshot for the unlocked register write below */
	memcpy(&regs, &dev->irq_en_reg, sizeof(regs));

	spin_unlock_bh(&dev->lock);

	status = hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
				     &regs.int_status_en, sizeof(regs),
				     HIF_WR_SYNC_BYTE_INC);

	if (status)
		ath6kl_err("failed to update interrupt ctl reg err: %d\n",
			   status);

	return status;
}
712
/*
 * Zero all interrupt enable shadow registers under dev->lock and write
 * the cleared set to the target, disabling every interrupt source.
 */
int ath6kldev_disable_intrs(struct ath6kl_device *dev)
{
	struct ath6kl_irq_enable_reg regs;

	spin_lock_bh(&dev->lock);
	/* Disable all interrupts */
	dev->irq_en_reg.int_status_en = 0;
	dev->irq_en_reg.cpu_int_status_en = 0;
	dev->irq_en_reg.err_int_status_en = 0;
	dev->irq_en_reg.cntr_int_status_en = 0;
	memcpy(&regs, &dev->irq_en_reg, sizeof(regs));
	spin_unlock_bh(&dev->lock);

	return hif_read_write_sync(dev->ar, INT_STATUS_ENABLE_ADDRESS,
				   &regs.int_status_en, sizeof(regs),
				   HIF_WR_SYNC_BYTE_INC);
}
730
731/* enable device interrupts */
732int ath6kldev_unmask_intrs(struct ath6kl_device *dev)
733{
734 int status = 0;
735
736 /*
737 * Make sure interrupt are disabled before unmasking at the HIF
738 * layer. The rationale here is that between device insertion
739 * (where we clear the interrupts the first time) and when HTC
740 * is finally ready to handle interrupts, other software can perform
741 * target "soft" resets. The ATH6KL interrupt enables reset back to an
742 * "enabled" state when this happens.
743 */
744 ath6kldev_disable_intrs(dev);
745
746 /* unmask the host controller interrupts */
747 ath6kl_hif_irq_enable(dev->ar);
748 status = ath6kldev_enable_intrs(dev);
749
750 return status;
751}
752
/* disable all device interrupts */
int ath6kldev_mask_intrs(struct ath6kl_device *dev)
{
	/*
	 * Mask the interrupt at the HIF layer to avoid any stray interrupt
	 * taken while we zero out our shadow registers in
	 * ath6kldev_disable_intrs().
	 */
	ath6kl_hif_irq_disable(dev->ar);

	return ath6kldev_disable_intrs(dev);
}
765
766int ath6kldev_setup(struct ath6kl_device *dev)
767{
768 int status = 0;
769 int i;
770 struct htc_packet *packet;
771
772 /* initialize our free list of IO packets */
773 INIT_LIST_HEAD(&dev->reg_io);
774 spin_lock_init(&dev->lock);
775
776 /* carve up register I/O packets (these are for ASYNC register I/O ) */
777 for (i = 0; i < ATH6KL_MAX_REG_IO_BUFFERS; i++) {
778 packet = &dev->reg_io_buf[i].packet;
779 set_htc_rxpkt_info(packet, dev, dev->reg_io_buf[i].buf,
780 ATH6KL_REG_IO_BUFFER_SIZE, 0);
781 ath6kl_add_io_pkt(dev, packet);
782 }
783
784 /*
785 * NOTE: we actually get the block size of a mailbox other than 0,
786 * for SDIO the block size on mailbox 0 is artificially set to 1.
787 * So we use the block size that is set for the other 3 mailboxes.
788 */
789 dev->block_sz = dev->ar->mbox_info.block_size;
790
791 /* must be a power of 2 */
792 if ((dev->block_sz & (dev->block_sz - 1)) != 0) {
793 WARN_ON(1);
794 goto fail_setup;
795 }
796
797 /* assemble mask, used for padding to a block */
798 dev->block_mask = dev->block_sz - 1;
799
800 ath6kl_dbg(ATH6KL_DBG_TRC, "block size: %d, mbox addr:0x%X\n",
801 dev->block_sz, dev->ar->mbox_info.htc_addr);
802
803 ath6kl_dbg(ATH6KL_DBG_TRC,
804 "hif interrupt processing is sync only\n");
805
806 status = ath6kldev_disable_intrs(dev);
807
808fail_setup:
809 return status;
810
811}
diff --git a/drivers/net/wireless/ath/ath6kl/htc_hif.h b/drivers/net/wireless/ath/ath6kl/htc_hif.h
new file mode 100644
index 000000000000..d770d4ec612e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/htc_hif.h
@@ -0,0 +1,113 @@
1/*
2 * Copyright (c) 2007-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
#ifndef HTC_HIF_H
#define HTC_HIF_H

#include "htc.h"
#include "hif.h"

/* number of mailboxes exposed by the target interface */
#define ATH6KL_MAILBOXES	4

/* HTC runs over mailbox 0 */
#define HTC_MAILBOX	0

#define ATH6KL_TARGET_DEBUG_INTR_MASK	0x01

/* non-mailbox interrupt sources that are kept enabled alongside rx */
#define OTHER_INTS_ENABLED	(INT_STATUS_ENABLE_ERROR_MASK |	\
				 INT_STATUS_ENABLE_CPU_MASK |   \
				 INT_STATUS_ENABLE_COUNTER_MASK)

/* sizing of the async register I/O pool (see ath6kldev_setup()) */
#define ATH6KL_REG_IO_BUFFER_SIZE		32
#define ATH6KL_MAX_REG_IO_BUFFERS		8

/* scatter-gather request limits for the HIF layer */
#define ATH6KL_SCATTER_ENTRIES_PER_REQ		16
#define ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER	(16 * 1024)
#define ATH6KL_SCATTER_REQS			4

/* pad used to separate fields into distinct cache lines */
#ifndef A_CACHE_LINE_PAD
#define A_CACHE_LINE_PAD			128
#endif
#define ATH6KL_MIN_SCATTER_ENTRIES_PER_REQ	2
#define ATH6KL_MIN_TRANSFER_SIZE_PER_SCATTER	(4 * 1024)
45
/*
 * Host-side shadow of the target's interrupt-status register block.
 * Field order/sizes mirror the hardware register layout, hence __packed
 * and the explicit little-endian lookahead words.
 */
struct ath6kl_irq_proc_registers {
	u8 host_int_status;
	u8 cpu_int_status;
	u8 error_int_status;
	u8 counter_int_status;
	u8 mbox_frame;
	u8 rx_lkahd_valid;
	u8 host_int_status2;
	u8 gmbox_rx_avail;
	__le32 rx_lkahd[2];		/* rx lookahead words, one per lookahead slot */
	__le32 rx_gmbox_lkahd_alias[2];	/* gmbox lookahead alias registers */
} __packed;
58
/*
 * Host-side shadow of the target's interrupt-enable registers.
 * __packed: written to the hardware as a contiguous block.
 */
struct ath6kl_irq_enable_reg {
	u8 int_status_en;	/* main interrupt status enable */
	u8 cpu_int_status_en;	/* CPU interrupt enable */
	u8 err_int_status_en;	/* error interrupt enable */
	u8 cntr_int_status_en;	/* counter interrupt enable */
} __packed;
65
/* buffers for ASYNC I/O */
struct ath6kl_async_reg_io_buffer {
	struct htc_packet packet;	/* HTC packet wrapping this buffer */
	u8 pad1[A_CACHE_LINE_PAD];
	/* cache-line safe with pads around */
	u8 buf[ATH6KL_REG_IO_BUFFER_SIZE];
	u8 pad2[A_CACHE_LINE_PAD];
};
74
/*
 * Per-device state for the HTC/HIF glue layer. The A_CACHE_LINE_PAD
 * members keep the register shadows and the lock in separate cache
 * lines (presumably to avoid false sharing with DMA/IRQ paths — see
 * the pool layout in struct ath6kl_async_reg_io_buffer).
 */
struct ath6kl_device {
	spinlock_t lock;	/* protects the reg_io free list */
	u8 pad1[A_CACHE_LINE_PAD];
	struct ath6kl_irq_proc_registers irq_proc_reg;	/* irq status shadow */
	u8 pad2[A_CACHE_LINE_PAD];
	struct ath6kl_irq_enable_reg irq_en_reg;	/* irq enable shadow */
	u8 pad3[A_CACHE_LINE_PAD];
	u32 block_sz;	/* mailbox block size (power of 2, see ath6kldev_setup) */
	u32 block_mask;	/* block_sz - 1, used for padding to a block */
	struct htc_target *htc_cnxt;	/* back-pointer to the HTC target */
	struct list_head reg_io;	/* free list of async register I/O packets */
	struct ath6kl_async_reg_io_buffer reg_io_buf[ATH6KL_MAX_REG_IO_BUFFERS];
	/* callback invoked to fetch pending mailbox messages */
	int (*msg_pending) (struct htc_target *target, u32 lk_ahds[],
			    int *npkts_fetched);
	struct hif_dev_scat_sup_info hif_scat_info;	/* scatter capabilities */
	bool virt_scat;		/* true when scatter is emulated in software */
	int max_rx_bndl_sz;	/* max rx bundle size in bytes */
	int max_tx_bndl_sz;	/* max tx bundle size in bytes */
	int chk_irq_status_cnt;
	struct ath6kl *ar;	/* owning core device */
};
96
int ath6kldev_setup(struct ath6kl_device *dev);
int ath6kldev_unmask_intrs(struct ath6kl_device *dev);
int ath6kldev_mask_intrs(struct ath6kl_device *dev);
int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev,
			      u32 *lk_ahd, int timeout);
int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx);
int ath6kldev_disable_intrs(struct ath6kl_device *dev);

int ath6kldev_rw_comp_handler(void *context, int status);
int ath6kldev_intr_bh_handler(struct ath6kl *ar);

/* Scatter Function and Definitions */
int ath6kldev_setup_msg_bndl(struct ath6kl_device *dev, int max_msg_per_xfer);
int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
			      struct hif_scatter_req *scat_req, bool read);

#endif /* HTC_HIF_H */
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
new file mode 100644
index 000000000000..fe61871e9874
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -0,0 +1,1293 @@
1
2/*
3 * Copyright (c) 2011 Atheros Communications Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18#include <linux/mmc/sdio_func.h>
19#include "core.h"
20#include "cfg80211.h"
21#include "target.h"
22#include "debug.h"
23#include "hif-ops.h"
24
/* bitmask selecting which debug categories are logged (e.g. ATH6KL_DBG_TRC) */
unsigned int debug_mask;

module_param(debug_mask, uint, 0644);
28
/*
 * Include definitions here that can be used to tune the WLAN module
 * behavior. Different customers can tune the behavior as per their needs,
 * here.
 */

/*
 * This configuration item enable/disable keepalive support.
 * Keepalive support: In the absence of any data traffic to AP, null
 * frames will be sent to the AP at periodic interval, to keep the association
 * active. This configuration item defines the periodic interval.
 * Use value of zero to disable keepalive support
 * Default: 60 seconds
 */
#define WLAN_CONFIG_KEEP_ALIVE_INTERVAL 60

/*
 * This configuration item sets the value of disconnect timeout
 * Firmware delays sending the disconnect event to the host for this
 * timeout after it gets disconnected from the current AP.
 * If the firmware successfully roams within the disconnect timeout
 * it sends a new connect event
 */
#define WLAN_CONFIG_DISCONNECT_TIMEOUT 10

/* GPIO pin used for the AR600x debug UART tx line */
#define CONFIG_AR600x_DEBUG_UART_TX_PIN 8

/* which firmware load address ath6kl_get_load_address() should resolve */
enum addr_type {
	DATASET_PATCH_ADDR,
	APP_LOAD_ADDR,
	APP_START_OVERRIDE_ADDR,
};

/* offset reserved at the head of allocated data buffers */
#define ATH6KL_DATA_OFFSET	64
63struct sk_buff *ath6kl_buf_alloc(int size)
64{
65 struct sk_buff *skb;
66 u16 reserved;
67
68 /* Add chacheline space at front and back of buffer */
69 reserved = (2 * L1_CACHE_BYTES) + ATH6KL_DATA_OFFSET +
70 sizeof(struct htc_packet);
71 skb = dev_alloc_skb(size + reserved);
72
73 if (skb)
74 skb_reserve(skb, reserved - L1_CACHE_BYTES);
75 return skb;
76}
77
78void ath6kl_init_profile_info(struct ath6kl *ar)
79{
80 ar->ssid_len = 0;
81 memset(ar->ssid, 0, sizeof(ar->ssid));
82
83 ar->dot11_auth_mode = OPEN_AUTH;
84 ar->auth_mode = NONE_AUTH;
85 ar->prwise_crypto = NONE_CRYPT;
86 ar->prwise_crypto_len = 0;
87 ar->grp_crypto = NONE_CRYPT;
88 ar->grp_crpto_len = 0;
89 memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list));
90 memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
91 memset(ar->bssid, 0, sizeof(ar->bssid));
92 ar->bss_ch = 0;
93 ar->nw_type = ar->next_mode = INFRA_NETWORK;
94}
95
96static u8 ath6kl_get_fw_iftype(struct ath6kl *ar)
97{
98 switch (ar->nw_type) {
99 case INFRA_NETWORK:
100 return HI_OPTION_FW_MODE_BSS_STA;
101 case ADHOC_NETWORK:
102 return HI_OPTION_FW_MODE_IBSS;
103 case AP_NETWORK:
104 return HI_OPTION_FW_MODE_AP;
105 default:
106 ath6kl_err("Unsupported interface type :%d\n", ar->nw_type);
107 return 0xff;
108 }
109}
110
111static inline u32 ath6kl_get_hi_item_addr(struct ath6kl *ar,
112 u32 item_offset)
113{
114 u32 addr = 0;
115
116 if (ar->target_type == TARGET_TYPE_AR6003)
117 addr = ATH6KL_HI_START_ADDR + item_offset;
118
119 return addr;
120}
121
/*
 * Publish the host application area (currently just the WMI protocol
 * version) into the target's host-interest region via the diagnostic
 * window. Returns 0 on success, -EIO on any diag access failure.
 */
static int ath6kl_set_host_app_area(struct ath6kl *ar)
{
	u32 address, data;
	struct host_app_area host_app_area;

	/* Fetch the address of the host_app_area_s
	 * instance in the host interest area */
	address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_app_host_interest));
	address = TARG_VTOP(address);

	/* the host-interest slot holds a pointer; read it first */
	if (ath6kl_read_reg_diag(ar, &address, &data))
		return -EIO;

	address = TARG_VTOP(data);
	host_app_area.wmi_protocol_ver = WMI_PROTOCOL_VERSION;
	if (ath6kl_access_datadiag(ar, address,
				   (u8 *)&host_app_area,
				   sizeof(struct host_app_area), false))
		return -EIO;

	return 0;
}
144
/* record the bidirectional mapping between a WMM access class and
 * its HTC endpoint */
static inline void set_ac2_ep_map(struct ath6kl *ar,
				  u8 ac,
				  enum htc_endpoint_id ep)
{
	ar->ac2ep_map[ac] = ep;
	ar->ep2ac_map[ep] = ac;
}
152
153/* connect to a service */
154static int ath6kl_connectservice(struct ath6kl *ar,
155 struct htc_service_connect_req *con_req,
156 char *desc)
157{
158 int status;
159 struct htc_service_connect_resp response;
160
161 memset(&response, 0, sizeof(response));
162
163 status = htc_conn_service(ar->htc_target, con_req, &response);
164 if (status) {
165 ath6kl_err("failed to connect to %s service status:%d\n",
166 desc, status);
167 return status;
168 }
169
170 switch (con_req->svc_id) {
171 case WMI_CONTROL_SVC:
172 if (test_bit(WMI_ENABLED, &ar->flag))
173 ath6kl_wmi_set_control_ep(ar->wmi, response.endpoint);
174 ar->ctrl_ep = response.endpoint;
175 break;
176 case WMI_DATA_BE_SVC:
177 set_ac2_ep_map(ar, WMM_AC_BE, response.endpoint);
178 break;
179 case WMI_DATA_BK_SVC:
180 set_ac2_ep_map(ar, WMM_AC_BK, response.endpoint);
181 break;
182 case WMI_DATA_VI_SVC:
183 set_ac2_ep_map(ar, WMM_AC_VI, response.endpoint);
184 break;
185 case WMI_DATA_VO_SVC:
186 set_ac2_ep_map(ar, WMM_AC_VO, response.endpoint);
187 break;
188 default:
189 ath6kl_err("service id is not mapped %d\n", con_req->svc_id);
190 return -EINVAL;
191 }
192
193 return 0;
194}
195
/*
 * Connect all WMI service endpoints (control plus the four WMM data
 * classes) over HTC. The connect request struct is reused across calls,
 * so fields set for earlier services intentionally carry over to later
 * ones. Returns 0 on success, -EIO on the first failed connect.
 */
static int ath6kl_init_service_ep(struct ath6kl *ar)
{
	struct htc_service_connect_req connect;

	memset(&connect, 0, sizeof(connect));

	/* these fields are the same for all service endpoints */
	connect.ep_cb.rx = ath6kl_rx;
	connect.ep_cb.rx_refill = ath6kl_rx_refill;
	connect.ep_cb.tx_full = ath6kl_tx_queue_full;

	/*
	 * Set the max queue depth so that our ath6kl_tx_queue_full handler
	 * gets called.
	 */
	connect.max_txq_depth = MAX_DEFAULT_SEND_QUEUE_DEPTH;
	connect.ep_cb.rx_refill_thresh = ATH6KL_MAX_RX_BUFFERS / 4;
	if (!connect.ep_cb.rx_refill_thresh)
		connect.ep_cb.rx_refill_thresh++;

	/* connect to control service */
	connect.svc_id = WMI_CONTROL_SVC;
	if (ath6kl_connectservice(ar, &connect, "WMI CONTROL"))
		return -EIO;

	/* data services below additionally enable tx bundle padding */
	connect.flags |= HTC_FLGS_TX_BNDL_PAD_EN;

	/*
	 * Limit the HTC message size on the send path, although we can
	 * receive A-MSDU frames of 4K, we will only send ethernet-sized
	 * (802.3) frames on the send path.
	 */
	connect.max_rxmsg_sz = WMI_MAX_TX_DATA_FRAME_LENGTH;

	/*
	 * To reduce the amount of committed memory for larger A_MSDU
	 * frames, use the recv-alloc threshold mechanism for larger
	 * packets.
	 */
	connect.ep_cb.rx_alloc_thresh = ATH6KL_BUFFER_SIZE;
	connect.ep_cb.rx_allocthresh = ath6kl_alloc_amsdu_rxbuf;

	/*
	 * For the remaining data services set the connection flag to
	 * reduce dribbling, if configured to do so.
	 */
	connect.conn_flags |= HTC_CONN_FLGS_REDUCE_CRED_DRIB;
	connect.conn_flags &= ~HTC_CONN_FLGS_THRESH_MASK;
	connect.conn_flags |= HTC_CONN_FLGS_THRESH_LVL_HALF;

	connect.svc_id = WMI_DATA_BE_SVC;

	if (ath6kl_connectservice(ar, &connect, "WMI DATA BE"))
		return -EIO;

	/* connect to back-ground map this to WMI LOW_PRI */
	connect.svc_id = WMI_DATA_BK_SVC;
	if (ath6kl_connectservice(ar, &connect, "WMI DATA BK"))
		return -EIO;

	/* connect to Video service, map this to to HI PRI */
	connect.svc_id = WMI_DATA_VI_SVC;
	if (ath6kl_connectservice(ar, &connect, "WMI DATA VI"))
		return -EIO;

	/*
	 * Connect to VO service, this is currently not mapped to a WMI
	 * priority stream due to historical reasons. WMI originally
	 * defined 3 priorities over 3 mailboxes We can change this when
	 * WMI is reworked so that priorities are not dependent on
	 * mailboxes.
	 */
	connect.svc_id = WMI_DATA_VO_SVC;
	if (ath6kl_connectservice(ar, &connect, "WMI DATA VO"))
		return -EIO;

	return 0;
}
274
/*
 * Initialize driver control state to defaults: connection profile,
 * scan parameters, AP-mode station list and power-save queues.
 * Called once from ath6kl_core_alloc() before the device is started.
 */
static void ath6kl_init_control_info(struct ath6kl *ar)
{
	u8 ctr;

	clear_bit(WMI_ENABLED, &ar->flag);
	ath6kl_init_profile_info(ar);
	ar->def_txkey_index = 0;
	memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list));
	ar->ch_hint = 0;
	ar->listen_intvl_t = A_DEFAULT_LISTEN_INTERVAL;
	ar->listen_intvl_b = 0;
	ar->tx_pwr = 0;
	clear_bit(SKIP_SCAN, &ar->flag);
	set_bit(WMM_ENABLED, &ar->flag);
	ar->intra_bss = 1;
	memset(&ar->sc_params, 0, sizeof(ar->sc_params));
	ar->sc_params.short_scan_ratio = WMI_SHORTSCANRATIO_DEFAULT;
	ar->sc_params.scan_ctrl_flags = DEFAULT_SCAN_CTRL_FLAGS;

	memset((u8 *)ar->sta_list, 0,
	       AP_MAX_NUM_STA * sizeof(struct ath6kl_sta));

	spin_lock_init(&ar->mcastpsq_lock);

	/* Init the PS queues */
	for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
		spin_lock_init(&ar->sta_list[ctr].psq_lock);
		skb_queue_head_init(&ar->sta_list[ctr].psq);
	}

	skb_queue_head_init(&ar->mcastpsq);

	memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3);
}
309
310/*
311 * Set HTC/Mbox operational parameters, this can only be called when the
312 * target is in the BMI phase.
313 */
314static int ath6kl_set_htc_params(struct ath6kl *ar, u32 mbox_isr_yield_val,
315 u8 htc_ctrl_buf)
316{
317 int status;
318 u32 blk_size;
319
320 blk_size = ar->mbox_info.block_size;
321
322 if (htc_ctrl_buf)
323 blk_size |= ((u32)htc_ctrl_buf) << 16;
324
325 /* set the host interest area for the block size */
326 status = ath6kl_bmi_write(ar,
327 ath6kl_get_hi_item_addr(ar,
328 HI_ITEM(hi_mbox_io_block_sz)),
329 (u8 *)&blk_size,
330 4);
331 if (status) {
332 ath6kl_err("bmi_write_memory for IO block size failed\n");
333 goto out;
334 }
335
336 ath6kl_dbg(ATH6KL_DBG_TRC, "block size set: %d (target addr:0x%X)\n",
337 blk_size,
338 ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_mbox_io_block_sz)));
339
340 if (mbox_isr_yield_val) {
341 /* set the host interest area for the mbox ISR yield limit */
342 status = ath6kl_bmi_write(ar,
343 ath6kl_get_hi_item_addr(ar,
344 HI_ITEM(hi_mbox_isr_yield_limit)),
345 (u8 *)&mbox_isr_yield_val,
346 4);
347 if (status) {
348 ath6kl_err("bmi_write_memory for yield limit failed\n");
349 goto out;
350 }
351 }
352
353out:
354 return status;
355}
356
#define REG_DUMP_COUNT_AR6003   60
#define REGISTER_DUMP_LEN_MAX   60

/*
 * Fetch and log the target's register dump after a firmware assert.
 * The target leaves a pointer to the dump in the hi_failure_state
 * host-interest slot; everything is read through the diagnostic window.
 * AR6003-only; silently returns for other target types.
 */
static void ath6kl_dump_target_assert_info(struct ath6kl *ar)
{
	u32 address;
	u32 regdump_loc = 0;
	int status;
	u32 regdump_val[REGISTER_DUMP_LEN_MAX];
	u32 i;

	if (ar->target_type != TARGET_TYPE_AR6003)
		return;

	/* the reg dump pointer is copied to the host interest area */
	address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state));
	address = TARG_VTOP(address);

	/* read RAM location through diagnostic window */
	status = ath6kl_read_reg_diag(ar, &address, &regdump_loc);

	if (status || !regdump_loc) {
		ath6kl_err("failed to get ptr to register dump area\n");
		return;
	}

	ath6kl_dbg(ATH6KL_DBG_TRC, "location of register dump data: 0x%X\n",
		   regdump_loc);

	regdump_loc = TARG_VTOP(regdump_loc);

	/* fetch register dump data */
	status = ath6kl_access_datadiag(ar,
					regdump_loc,
					(u8 *)&regdump_val[0],
					REG_DUMP_COUNT_AR6003 * (sizeof(u32)),
					true);

	if (status) {
		ath6kl_err("failed to get register dump\n");
		return;
	}
	ath6kl_dbg(ATH6KL_DBG_TRC, "Register Dump:\n");

	for (i = 0; i < REG_DUMP_COUNT_AR6003; i++)
		ath6kl_dbg(ATH6KL_DBG_TRC, " %d :  0x%8.8X\n",
			   i, regdump_val[i]);

}
406
/* handle a target (firmware) assert: log it and dump the register state */
void ath6kl_target_failure(struct ath6kl *ar)
{
	ath6kl_err("target asserted\n");

	/* try dumping target assertion information (if any) */
	ath6kl_dump_target_assert_info(ar);

}
415
416static int ath6kl_target_config_wlan_params(struct ath6kl *ar)
417{
418 int status = 0;
419
420 /*
421 * Configure the device for rx dot11 header rules. "0,0" are the
422 * default values. Required if checksum offload is needed. Set
423 * RxMetaVersion to 2.
424 */
425 if (ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi,
426 ar->rx_meta_ver, 0, 0)) {
427 ath6kl_err("unable to set the rx frame format\n");
428 status = -EIO;
429 }
430
431 if (ar->conf_flags & ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN)
432 if ((ath6kl_wmi_pmparams_cmd(ar->wmi, 0, 1, 0, 0, 1,
433 IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN)) != 0) {
434 ath6kl_err("unable to set power save fail event policy\n");
435 status = -EIO;
436 }
437
438 if (!(ar->conf_flags & ATH6KL_CONF_IGNORE_ERP_BARKER))
439 if ((ath6kl_wmi_set_lpreamble_cmd(ar->wmi, 0,
440 WMI_DONOT_IGNORE_BARKER_IN_ERP)) != 0) {
441 ath6kl_err("unable to set barker preamble policy\n");
442 status = -EIO;
443 }
444
445 if (ath6kl_wmi_set_keepalive_cmd(ar->wmi,
446 WLAN_CONFIG_KEEP_ALIVE_INTERVAL)) {
447 ath6kl_err("unable to set keep alive interval\n");
448 status = -EIO;
449 }
450
451 if (ath6kl_wmi_disctimeout_cmd(ar->wmi,
452 WLAN_CONFIG_DISCONNECT_TIMEOUT)) {
453 ath6kl_err("unable to set disconnect timeout\n");
454 status = -EIO;
455 }
456
457 if (!(ar->conf_flags & ATH6KL_CONF_ENABLE_TX_BURST))
458 if (ath6kl_wmi_set_wmm_txop(ar->wmi, WMI_TXOP_DISABLED)) {
459 ath6kl_err("unable to set txop bursting\n");
460 status = -EIO;
461 }
462
463 return status;
464}
465
/*
 * Configure the target during the BMI phase: HTC protocol version,
 * firmware operating mode (STA/IBSS/AP), extended board data address
 * and HTC block size. Must run before BMI is finished.
 *
 * Returns 0 on success, -EINVAL for an unsupported interface type,
 * -EIO on any BMI access failure.
 */
int ath6kl_configure_target(struct ath6kl *ar)
{
	u32 param, ram_reserved_size;
	u8 fw_iftype;

	fw_iftype = ath6kl_get_fw_iftype(ar);
	if (fw_iftype == 0xff)
		return -EINVAL;

	/* Tell target which HTC version it is used*/
	param = HTC_PROTOCOL_VERSION;
	if (ath6kl_bmi_write(ar,
			     ath6kl_get_hi_item_addr(ar,
			     HI_ITEM(hi_app_host_interest)),
			     (u8 *)&param, 4) != 0) {
		ath6kl_err("bmi_write_memory for htc version failed\n");
		return -EIO;
	}

	/* set the firmware mode to STA/IBSS/AP */
	param = 0;

	/* read-modify-write: preserve the other hi_option_flag bits */
	if (ath6kl_bmi_read(ar,
			    ath6kl_get_hi_item_addr(ar,
			    HI_ITEM(hi_option_flag)),
			    (u8 *)&param, 4) != 0) {
		ath6kl_err("bmi_read_memory for setting fwmode failed\n");
		return -EIO;
	}

	param |= (1 << HI_OPTION_NUM_DEV_SHIFT);
	param |= (fw_iftype << HI_OPTION_FW_MODE_SHIFT);
	param |= (0 << HI_OPTION_MAC_ADDR_METHOD_SHIFT);
	param |= (0 << HI_OPTION_FW_BRIDGE_SHIFT);

	if (ath6kl_bmi_write(ar,
			     ath6kl_get_hi_item_addr(ar,
			     HI_ITEM(hi_option_flag)),
			     (u8 *)&param,
			     4) != 0) {
		ath6kl_err("bmi_write_memory for setting fwmode failed\n");
		return -EIO;
	}

	ath6kl_dbg(ATH6KL_DBG_TRC, "firmware mode set\n");

	/*
	 * Hardcode the address use for the extended board data
	 * Ideally this should be pre-allocate by the OS at boot time
	 * But since it is a new feature and board data is loaded
	 * at init time, we have to workaround this from host.
	 * It is difficult to patch the firmware boot code,
	 * but possible in theory.
	 */

	if (ar->target_type == TARGET_TYPE_AR6003) {
		if (ar->version.target_ver == AR6003_REV2_VERSION) {
			param = AR6003_REV2_BOARD_EXT_DATA_ADDRESS;
			ram_reserved_size =  AR6003_REV2_RAM_RESERVE_SIZE;
		} else {
			param = AR6003_REV3_BOARD_EXT_DATA_ADDRESS;
			ram_reserved_size =  AR6003_REV3_RAM_RESERVE_SIZE;
		}

		if (ath6kl_bmi_write(ar,
				     ath6kl_get_hi_item_addr(ar,
				     HI_ITEM(hi_board_ext_data)),
				     (u8 *)&param, 4) != 0) {
			ath6kl_err("bmi_write_memory for hi_board_ext_data failed\n");
			return -EIO;
		}
		if (ath6kl_bmi_write(ar,
				     ath6kl_get_hi_item_addr(ar,
				     HI_ITEM(hi_end_ram_reserve_sz)),
				     (u8 *)&ram_reserved_size, 4) != 0) {
			ath6kl_err("bmi_write_memory for hi_end_ram_reserve_sz failed\n");
			return -EIO;
		}
	}

	/* set the block size for the target */
	if (ath6kl_set_htc_params(ar, MBOX_YIELD_LIMIT, 0))
		/* use default number of control buffers */
		return -EIO;

	return 0;
}
553
/*
 * Allocate and initialize the core driver context: registers with
 * cfg80211, allocates the net_device, and sets up locks, queues and
 * timers. Returns the new context, or NULL on failure (cfg80211 or
 * netdev allocation).
 */
struct ath6kl *ath6kl_core_alloc(struct device *sdev)
{
	struct net_device *dev;
	struct ath6kl *ar;
	struct wireless_dev *wdev;

	wdev = ath6kl_cfg80211_init(sdev);
	if (!wdev) {
		ath6kl_err("ath6kl_cfg80211_init failed\n");
		return NULL;
	}

	/* the ath6kl context lives in the wiphy's private area */
	ar = wdev_priv(wdev);
	ar->dev = sdev;
	ar->wdev = wdev;
	wdev->iftype = NL80211_IFTYPE_STATION;

	/* no private netdev area needed: state is in the wiphy priv */
	dev = alloc_netdev(0, "wlan%d", ether_setup);
	if (!dev) {
		ath6kl_err("no memory for network device instance\n");
		ath6kl_cfg80211_deinit(ar);
		return NULL;
	}

	dev->ieee80211_ptr = wdev;
	SET_NETDEV_DEV(dev, wiphy_dev(wdev->wiphy));
	wdev->netdev = dev;
	ar->sme_state = SME_DISCONNECTED;
	ar->auto_auth_stage = AUTH_IDLE;

	init_netdev(dev);

	ar->net_dev = dev;
	ar->wlan_state = WLAN_ENABLED;

	ar->wlan_pwr_state = WLAN_POWER_STATE_ON;

	spin_lock_init(&ar->lock);

	ath6kl_init_control_info(ar);
	init_waitqueue_head(&ar->event_wq);
	sema_init(&ar->sem, 1);
	clear_bit(DESTROY_IN_PROGRESS, &ar->flag);

	INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue);

	setup_timer(&ar->disconnect_timer, disconnect_timer_handler,
		    (unsigned long) dev);

	return ar;
}
605
/* device-unavailable event handler: tear the driver instance down */
int ath6kl_unavail_ev(struct ath6kl *ar)
{
	ath6kl_destroy(ar->net_dev, 1);

	return 0;
}
612
613/* firmware upload */
614static u32 ath6kl_get_load_address(u32 target_ver, enum addr_type type)
615{
616 WARN_ON(target_ver != AR6003_REV2_VERSION &&
617 target_ver != AR6003_REV3_VERSION);
618
619 switch (type) {
620 case DATASET_PATCH_ADDR:
621 return (target_ver == AR6003_REV2_VERSION) ?
622 AR6003_REV2_DATASET_PATCH_ADDRESS :
623 AR6003_REV3_DATASET_PATCH_ADDRESS;
624 case APP_LOAD_ADDR:
625 return (target_ver == AR6003_REV2_VERSION) ?
626 AR6003_REV2_APP_LOAD_ADDRESS :
627 0x1234;
628 case APP_START_OVERRIDE_ADDR:
629 return (target_ver == AR6003_REV2_VERSION) ?
630 AR6003_REV2_APP_START_OVERRIDE :
631 AR6003_REV3_APP_START_OVERRIDE;
632 default:
633 return 0;
634 }
635}
636
637static int ath6kl_get_fw(struct ath6kl *ar, const char *filename,
638 u8 **fw, size_t *fw_len)
639{
640 const struct firmware *fw_entry;
641 int ret;
642
643 ret = request_firmware(&fw_entry, filename, ar->dev);
644 if (ret)
645 return ret;
646
647 *fw_len = fw_entry->size;
648 *fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
649
650 if (*fw == NULL)
651 ret = -ENOMEM;
652
653 release_firmware(fw_entry);
654
655 return ret;
656}
657
658static int ath6kl_fetch_board_file(struct ath6kl *ar)
659{
660 const char *filename;
661 int ret;
662
663 switch (ar->version.target_ver) {
664 case AR6003_REV2_VERSION:
665 filename = AR6003_REV2_BOARD_DATA_FILE;
666 break;
667 default:
668 filename = AR6003_REV3_BOARD_DATA_FILE;
669 break;
670 }
671
672 ret = ath6kl_get_fw(ar, filename, &ar->fw_board,
673 &ar->fw_board_len);
674 if (ret == 0) {
675 /* managed to get proper board file */
676 return 0;
677 }
678
679 /* there was no proper board file, try to use default instead */
680 ath6kl_warn("Failed to get board file %s (%d), trying to find default board file.\n",
681 filename, ret);
682
683 switch (ar->version.target_ver) {
684 case AR6003_REV2_VERSION:
685 filename = AR6003_REV2_DEFAULT_BOARD_DATA_FILE;
686 break;
687 default:
688 filename = AR6003_REV3_DEFAULT_BOARD_DATA_FILE;
689 break;
690 }
691
692 ret = ath6kl_get_fw(ar, filename, &ar->fw_board,
693 &ar->fw_board_len);
694 if (ret) {
695 ath6kl_err("Failed to get default board file %s: %d\n",
696 filename, ret);
697 return ret;
698 }
699
700 ath6kl_warn("WARNING! No proper board file was not found, instead using a default board file.\n");
701 ath6kl_warn("Most likely your hardware won't work as specified. Install correct board file!\n");
702
703 return 0;
704}
705
706
/*
 * Write the board data (and, when present, the extended board data)
 * into target RAM via BMI. The destination addresses are read from the
 * target's host-interest area. Returns 0 on success or a negative
 * error code.
 */
static int ath6kl_upload_board_file(struct ath6kl *ar)
{
	u32 board_address, board_ext_address, param;
	int ret;

	/* fetch the board file lazily on first use */
	if (ar->fw_board == NULL) {
		ret = ath6kl_fetch_board_file(ar);
		if (ret)
			return ret;
	}

	/* Determine where in Target RAM to write Board Data */
	ath6kl_bmi_read(ar,
			ath6kl_get_hi_item_addr(ar,
			HI_ITEM(hi_board_data)),
			(u8 *) &board_address, 4);
	ath6kl_dbg(ATH6KL_DBG_TRC, "board data download addr: 0x%x\n",
		   board_address);

	/* determine where in target ram to write extended board data */
	ath6kl_bmi_read(ar,
			ath6kl_get_hi_item_addr(ar,
			HI_ITEM(hi_board_ext_data)),
			(u8 *) &board_ext_address, 4);

	ath6kl_dbg(ATH6KL_DBG_TRC, "board file download addr: 0x%x\n",
		   board_ext_address);

	if (board_ext_address == 0) {
		ath6kl_err("Failed to get board file target address.\n");
		return -EINVAL;
	}

	/* extended data is only present when the file is exactly
	 * base + extended size */
	if (ar->fw_board_len == (AR6003_BOARD_DATA_SZ +
				 AR6003_BOARD_EXT_DATA_SZ)) {
		/* write extended board data */
		ret = ath6kl_bmi_write(ar, board_ext_address,
				       ar->fw_board + AR6003_BOARD_DATA_SZ,
				       AR6003_BOARD_EXT_DATA_SZ);

		if (ret) {
			ath6kl_err("Failed to write extended board data: %d\n",
				   ret);
			return ret;
		}

		/* record that extended board data is initialized */
		param = (AR6003_BOARD_EXT_DATA_SZ << 16) | 1;
		ath6kl_bmi_write(ar,
				 ath6kl_get_hi_item_addr(ar,
				 HI_ITEM(hi_board_ext_data_config)),
				 (unsigned char *) &param, 4);
	}

	if (ar->fw_board_len < AR6003_BOARD_DATA_SZ) {
		ath6kl_err("Too small board file: %zu\n", ar->fw_board_len);
		ret = -EINVAL;
		return ret;
	}

	ret = ath6kl_bmi_write(ar, board_address, ar->fw_board,
			       AR6003_BOARD_DATA_SZ);

	if (ret) {
		ath6kl_err("Board file bmi write failed: %d\n", ret);
		return ret;
	}

	/* record the fact that Board Data IS initialized */
	param = 1;
	ath6kl_bmi_write(ar,
			 ath6kl_get_hi_item_addr(ar,
			 HI_ITEM(hi_board_data_initialized)),
			 (u8 *)&param, 4);

	return ret;
}
784
/*
 * Upload the OTP (one-time programmable) image to the target's
 * application load address and execute it. The file is fetched and
 * cached in ar->fw_otp on first use. Returns 0 on success.
 */
static int ath6kl_upload_otp(struct ath6kl *ar)
{
	const char *filename;
	u32 address, param;
	int ret;

	switch (ar->version.target_ver) {
	case AR6003_REV2_VERSION:
		filename = AR6003_REV2_OTP_FILE;
		break;
	default:
		filename = AR6003_REV3_OTP_FILE;
		break;
	}

	if (ar->fw_otp == NULL) {
		ret = ath6kl_get_fw(ar, filename, &ar->fw_otp,
				    &ar->fw_otp_len);
		if (ret) {
			ath6kl_err("Failed to get OTP file %s: %d\n",
				   filename, ret);
			return ret;
		}
	}

	address = ath6kl_get_load_address(ar->version.target_ver,
					  APP_LOAD_ADDR);

	ret = ath6kl_bmi_fast_download(ar, address, ar->fw_otp,
				       ar->fw_otp_len);
	if (ret) {
		ath6kl_err("Failed to upload OTP file: %d\n", ret);
		return ret;
	}

	/* execute the OTP code */
	/* NOTE: the return value of ath6kl_bmi_execute is ignored here;
	 * ret is still 0 from the download above */
	param = 0;
	address = ath6kl_get_load_address(ar->version.target_ver,
					  APP_START_OVERRIDE_ADDR);
	ath6kl_bmi_execute(ar, address, &param);

	return ret;
}
828
829static int ath6kl_upload_firmware(struct ath6kl *ar)
830{
831 const char *filename;
832 u32 address;
833 int ret;
834
835 switch (ar->version.target_ver) {
836 case AR6003_REV2_VERSION:
837 filename = AR6003_REV2_FIRMWARE_FILE;
838 break;
839 default:
840 filename = AR6003_REV3_FIRMWARE_FILE;
841 break;
842 }
843
844 if (ar->fw == NULL) {
845 ret = ath6kl_get_fw(ar, filename, &ar->fw, &ar->fw_len);
846 if (ret) {
847 ath6kl_err("Failed to get firmware file %s: %d\n",
848 filename, ret);
849 return ret;
850 }
851 }
852
853 address = ath6kl_get_load_address(ar->version.target_ver,
854 APP_LOAD_ADDR);
855
856 ret = ath6kl_bmi_fast_download(ar, address, ar->fw, ar->fw_len);
857
858 if (ret) {
859 ath6kl_err("Failed to write firmware: %d\n", ret);
860 return ret;
861 }
862
863 /* Set starting address for firmware */
864 address = ath6kl_get_load_address(ar->version.target_ver,
865 APP_START_OVERRIDE_ADDR);
866 ath6kl_bmi_set_app_start(ar, address);
867
868 return ret;
869}
870
/*
 * Upload the dataset patch to the target and publish its address in
 * the hi_dset_list_head host-interest slot. The patch file is fetched
 * and cached in ar->fw_patch on first use. Returns 0 on success.
 */
static int ath6kl_upload_patch(struct ath6kl *ar)
{
	const char *filename;
	u32 address, param;
	int ret;

	switch (ar->version.target_ver) {
	case AR6003_REV2_VERSION:
		filename = AR6003_REV2_PATCH_FILE;
		break;
	default:
		filename = AR6003_REV3_PATCH_FILE;
		break;
	}

	if (ar->fw_patch == NULL) {
		ret = ath6kl_get_fw(ar, filename, &ar->fw_patch,
				    &ar->fw_patch_len);
		if (ret) {
			ath6kl_err("Failed to get patch file %s: %d\n",
				   filename, ret);
			return ret;
		}
	}

	address = ath6kl_get_load_address(ar->version.target_ver,
					  DATASET_PATCH_ADDR);

	ret = ath6kl_bmi_write(ar, address, ar->fw_patch, ar->fw_patch_len);
	if (ret) {
		ath6kl_err("Failed to write patch file: %d\n", ret);
		return ret;
	}

	/* tell the target where the patch list starts; the bmi_write
	 * return value is intentionally not checked here */
	param = address;
	ath6kl_bmi_write(ar,
			 ath6kl_get_hi_item_addr(ar,
			 HI_ITEM(hi_dset_list_head)),
			 (unsigned char *) &param, 4);

	return 0;
}
913
/*
 * Full firmware bring-up sequence over BMI: temporarily disable system
 * sleep, program clocks/PLL, apply the AR6003 rev2 SDIO CRC
 * workaround, upload board data, OTP, firmware and patch, then restore
 * the sleep state and configure the debug UART pin. The exact register
 * write order is hardware-mandated; do not reorder. AR6003-only.
 */
static int ath6kl_init_upload(struct ath6kl *ar)
{
	u32 param, options, sleep, address;
	int status = 0;

	if (ar->target_type != TARGET_TYPE_AR6003)
		return -EINVAL;

	/* temporarily disable system sleep */
	address = MBOX_BASE_ADDRESS + LOCAL_SCRATCH_ADDRESS;
	status = ath6kl_bmi_reg_read(ar, address, &param);
	if (status)
		return status;

	/* remember the original scratch value for restore at the end */
	options = param;

	param |= ATH6KL_OPTION_SLEEP_DISABLE;
	status = ath6kl_bmi_reg_write(ar, address, param);
	if (status)
		return status;

	address = RTC_BASE_ADDRESS + SYSTEM_SLEEP_ADDRESS;
	status = ath6kl_bmi_reg_read(ar, address, &param);
	if (status)
		return status;

	/* remember the original sleep register for restore at the end */
	sleep = param;

	param |= SM(SYSTEM_SLEEP_DISABLE, 1);
	status = ath6kl_bmi_reg_write(ar, address, param);
	if (status)
		return status;

	ath6kl_dbg(ATH6KL_DBG_TRC, "old options: %d, old sleep: %d\n",
		   options, sleep);

	/* program analog PLL register */
	status = ath6kl_bmi_reg_write(ar, ATH6KL_ANALOG_PLL_REGISTER,
				      0xF9104001);
	if (status)
		return status;

	/* Run at 80/88MHz by default */
	param = SM(CPU_CLOCK_STANDARD, 1);

	address = RTC_BASE_ADDRESS + CPU_CLOCK_ADDRESS;
	status = ath6kl_bmi_reg_write(ar, address, param);
	if (status)
		return status;

	param = 0;
	address = RTC_BASE_ADDRESS + LPO_CAL_ADDRESS;
	param = SM(LPO_CAL_ENABLE, 1);
	status = ath6kl_bmi_reg_write(ar, address, param);
	if (status)
		return status;

	/* WAR to avoid SDIO CRC err */
	if (ar->version.target_ver == AR6003_REV2_VERSION) {
		ath6kl_err("temporary war to avoid sdio crc error\n");

		/* 0x20: GPIO pin configuration value for the workaround */
		param = 0x20;

		address = GPIO_BASE_ADDRESS + GPIO_PIN10_ADDRESS;
		status = ath6kl_bmi_reg_write(ar, address, param);
		if (status)
			return status;

		address = GPIO_BASE_ADDRESS + GPIO_PIN11_ADDRESS;
		status = ath6kl_bmi_reg_write(ar, address, param);
		if (status)
			return status;

		address = GPIO_BASE_ADDRESS + GPIO_PIN12_ADDRESS;
		status = ath6kl_bmi_reg_write(ar, address, param);
		if (status)
			return status;

		address = GPIO_BASE_ADDRESS + GPIO_PIN13_ADDRESS;
		status = ath6kl_bmi_reg_write(ar, address, param);
		if (status)
			return status;
	}

	/* write EEPROM data to Target RAM */
	status = ath6kl_upload_board_file(ar);
	if (status)
		return status;

	/* transfer One time Programmable data */
	status = ath6kl_upload_otp(ar);
	if (status)
		return status;

	/* Download Target firmware */
	status = ath6kl_upload_firmware(ar);
	if (status)
		return status;

	status = ath6kl_upload_patch(ar);
	if (status)
		return status;

	/* Restore system sleep */
	address = RTC_BASE_ADDRESS + SYSTEM_SLEEP_ADDRESS;
	status = ath6kl_bmi_reg_write(ar, address, sleep);
	if (status)
		return status;

	address = MBOX_BASE_ADDRESS + LOCAL_SCRATCH_ADDRESS;
	param = options | 0x20;
	status = ath6kl_bmi_reg_write(ar, address, param);
	if (status)
		return status;

	/* Configure GPIO AR6003 UART */
	param = CONFIG_AR600x_DEBUG_UART_TX_PIN;
	status = ath6kl_bmi_write(ar,
				  ath6kl_get_hi_item_addr(ar,
				  HI_ITEM(hi_dbg_uart_txpin)),
				  (u8 *)&param, 4);

	return status;
}
1038
/*
 * Bring the target from the BMI phase to a WMI-ready operational state:
 * finish BMI, initialize WMI, wait for the HTC target, connect service
 * endpoints, hand rx buffers to the endpoints, set up credit distribution,
 * start HTC and finally wait for the firmware's WMI_READY event before
 * applying the WLAN configuration.  Returns 0 or -EIO; every failure path
 * unwinds exactly what was set up before it.
 */
1039static int ath6kl_init(struct net_device *dev)
1040{
1041 struct ath6kl *ar = ath6kl_priv(dev);
1042 int status = 0;
1043 s32 timeleft;
1044
1045 if (!ar)
1046 return -EIO;
1047
1048 /* Do we need to finish the BMI phase */
1049 if (ath6kl_bmi_done(ar)) {
1050 status = -EIO;
1051 goto ath6kl_init_done;
1052 }
1053
1054 /* Indicate that WMI is enabled (although not ready yet) */
1055 set_bit(WMI_ENABLED, &ar->flag);
1056 ar->wmi = ath6kl_wmi_init((void *) ar);
1057 if (!ar->wmi) {
1058 ath6kl_err("failed to initialize wmi\n");
1059 status = -EIO;
1060 goto ath6kl_init_done;
1061 }
1062
1063 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi);
1064
1065 /*
1066 * The reason we have to wait for the target here is that the
1067 * driver layer has to init BMI in order to set the host block
1068 * size.
1069 */
1070 if (htc_wait_target(ar->htc_target)) {
1071 status = -EIO;
1072 goto err_wmi_cleanup;
1073 }
1074
1075 if (ath6kl_init_service_ep(ar)) {
1076 status = -EIO;
1077 goto err_cleanup_scatter;
1078 }
1079
1080 /* setup access class priority mappings */
1081 ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest */
1082 ar->ac_stream_pri_map[WMM_AC_BE] = 1;
1083 ar->ac_stream_pri_map[WMM_AC_VI] = 2;
1084 ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */
1085
1086 /* give our connected endpoints some buffers */
1087 ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep);
1088 ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]);
1089
1090 /* allocate some buffers that handle larger AMSDU frames */
1091 ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS);
1092
1093 /* setup credit distribution */
1094 ath6k_setup_credit_dist(ar->htc_target, &ar->credit_state_info);
1095
1096 ath6kl_cookie_init(ar);
1097
1098 /* start HTC */
1099 status = htc_start(ar->htc_target);
1100
1101 if (status) {
1102 ath6kl_cookie_cleanup(ar);
1103 goto err_rxbuf_cleanup;
1104 }
1105
1106 /* Wait for Wmi event to be ready */
1107 timeleft = wait_event_interruptible_timeout(ar->event_wq,
1108 test_bit(WMI_READY,
1109 &ar->flag),
1110 WMI_TIMEOUT);
1111
/* NOTE(review): the ABI check below reads ar->version.abi_ver before the
 * wait result itself is examined; if the wait timed out, abi_ver may still
 * hold a stale value -- confirm against ath6kl_ready_event() ordering. */
1112 if (ar->version.abi_ver != ATH6KL_ABI_VERSION) {
1113 ath6kl_err("abi version mismatch: host(0x%x), target(0x%x)\n",
1114 ATH6KL_ABI_VERSION, ar->version.abi_ver);
1115 status = -EIO;
1116 goto err_htc_stop;
1117 }
1118
1119 if (!timeleft || signal_pending(current)) {
1120 ath6kl_err("wmi is not ready or wait was interrupted\n");
1121 status = -EIO;
1122 goto err_htc_stop;
1123 }
1124
1125 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: wmi is ready\n", __func__);
1126
1127 /* communicate the wmi protocol version to the target */
1128 if ((ath6kl_set_host_app_area(ar)) != 0)
1129 ath6kl_err("unable to set the host app area\n");
1130
1131 ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER |
1132 ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST;
1133
1134 status = ath6kl_target_config_wlan_params(ar);
1135 if (!status)
1136 goto ath6kl_init_done;
1137
/* Error unwind: reverse order of the setup sequence above. */
1138err_htc_stop:
1139 htc_stop(ar->htc_target);
1140err_rxbuf_cleanup:
1141 htc_flush_rx_buf(ar->htc_target);
1142 ath6kl_cleanup_amsdu_rxbufs(ar);
1143err_cleanup_scatter:
1144 ath6kl_hif_cleanup_scatter(ar);
1145err_wmi_cleanup:
1146 ath6kl_wmi_shutdown(ar->wmi);
1147 clear_bit(WMI_ENABLED, &ar->flag);
1148 ar->wmi = NULL;
1149
1150ath6kl_init_done:
1151 return status;
1152}
1153
/*
 * Top-level probe-time initialization: create the driver workqueue, run the
 * BMI handshake to fetch target info, configure and boot the target, create
 * the HTC layer and aggregation state, run ath6kl_init() and finally
 * register the net_device.
 *
 * Returns 0 on success or a negative errno; on failure everything acquired
 * so far is released through the goto unwind chain below.
 */
1154int ath6kl_core_init(struct ath6kl *ar)
1155{
1156 int ret = 0;
1157 struct ath6kl_bmi_target_info targ_info;
1158
1159 ar->ath6kl_wq = create_singlethread_workqueue("ath6kl");
1160 if (!ar->ath6kl_wq)
1161 return -ENOMEM;
1162
1163 ret = ath6kl_bmi_init(ar);
1164 if (ret)
1165 goto err_wq;
1166
1167 ret = ath6kl_bmi_get_target_info(ar, &targ_info);
1168 if (ret)
1169 goto err_bmi_cleanup;
1170
/* Target info fields arrive little-endian from the firmware. */
1171 ar->version.target_ver = le32_to_cpu(targ_info.version);
1172 ar->target_type = le32_to_cpu(targ_info.type);
1173 ar->wdev->wiphy->hw_version = le32_to_cpu(targ_info.version);
1174
1175 ret = ath6kl_configure_target(ar);
1176 if (ret)
1177 goto err_bmi_cleanup;
1178
1179 ar->htc_target = htc_create(ar);
1180
1181 if (!ar->htc_target) {
1182 ret = -ENOMEM;
1183 goto err_bmi_cleanup;
1184 }
1185
1186 ar->aggr_cntxt = aggr_init(ar->net_dev);
1187 if (!ar->aggr_cntxt) {
1188 ath6kl_err("failed to initialize aggr\n");
1189 ret = -ENOMEM;
1190 goto err_htc_cleanup;
1191 }
1192
1193 ret = ath6kl_init_upload(ar);
1194 if (ret)
1195 goto err_htc_cleanup;
1196
1197 ret = ath6kl_init(ar->net_dev);
1198 if (ret)
1199 goto err_htc_cleanup;
1200
1201 /* This runs the init function if registered */
1202 ret = register_netdev(ar->net_dev);
1203 if (ret) {
1204 ath6kl_err("register_netdev failed\n");
/* ath6kl_destroy() releases the workqueue, HTC and BMI state itself, so
 * this path returns directly instead of falling into the labels below. */
1205 ath6kl_destroy(ar->net_dev, 0);
1206 return ret;
1207 }
1208
1209 set_bit(NETDEV_REGISTERED, &ar->flag);
1210
1211 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n",
1212 __func__, ar->net_dev->name, ar->net_dev, ar);
1213
1214 return ret;
1215
1216err_htc_cleanup:
1217 htc_cleanup(ar->htc_target);
1218err_bmi_cleanup:
1219 ath6kl_bmi_cleanup(ar);
1220err_wq:
1221 destroy_workqueue(ar->ath6kl_wq);
1222 return ret;
1223}
1224
1225void ath6kl_stop_txrx(struct ath6kl *ar)
1226{
1227 struct net_device *ndev = ar->net_dev;
1228
1229 if (!ndev)
1230 return;
1231
1232 set_bit(DESTROY_IN_PROGRESS, &ar->flag);
1233
1234 if (down_interruptible(&ar->sem)) {
1235 ath6kl_err("down_interruptible failed\n");
1236 return;
1237 }
1238
1239 if (ar->wlan_pwr_state != WLAN_POWER_STATE_CUT_PWR)
1240 ath6kl_stop_endpoint(ndev, false, true);
1241
1242 ar->wlan_state = WLAN_DISABLED;
1243}
1244
1245/*
1246 * We need to differentiate between the surprise and planned removal of the
1247 * device because of the following consideration:
1248 *
1249 * - In case of surprise removal, the hcd already frees up the pending
1250 * requests for the device and hence there is no need to unregister the
1251 * function driver in order to get these requests. For planned removal, the function
1252 * driver has to explicitly unregister itself to have the hcd return all the
1253 * pending requests before the data structures for the devices are freed up.
1254 * Note that as per the current implementation, the function driver will
1255 * end up releasing all the devices since there is no API to selectively
1256 * release a particular device.
1257 *
1258 * - Certain commands issued to the target can be skipped for surprise
1259 * removal since they will anyway not go through.
1260 */
/*
 * Tear down a device instance: release the workqueue, HTC, aggregation,
 * cookie and rx-buffer state, optionally unregister the net_device, then
 * free it and deinitialize the cfg80211 state.
 *
 * @dev: net_device for this instance
 * @unregister: non-zero to also unregister the netdev (skipped on probe
 *              failure paths where it was never registered)
 */
1261void ath6kl_destroy(struct net_device *dev, unsigned int unregister)
1262{
1263 struct ath6kl *ar;
1264
1265 if (!dev || !ath6kl_priv(dev)) {
1266 ath6kl_err("failed to get device structure\n");
1267 return;
1268 }
1269
1270 ar = ath6kl_priv(dev);
1271
1272 destroy_workqueue(ar->ath6kl_wq);
1273
1274 if (ar->htc_target)
1275 htc_cleanup(ar->htc_target);
1276
1277 aggr_module_destroy(ar->aggr_cntxt);
1278
1279 ath6kl_cookie_cleanup(ar);
1280
1281 ath6kl_cleanup_amsdu_rxbufs(ar);
1282
1283 ath6kl_bmi_cleanup(ar);
1284
1285 if (unregister && test_bit(NETDEV_REGISTERED, &ar->flag)) {
1286 unregister_netdev(dev);
1287 clear_bit(NETDEV_REGISTERED, &ar->flag);
1288 }
1289
/* NOTE(review): ar is still used after free_netdev(); this is only safe if
 * ar is not allocated inside the netdev private area (e.g. it lives in the
 * wiphy) -- confirm against ath6kl_priv()'s definition. */
1290 free_netdev(dev);
1291
1292 ath6kl_cfg80211_deinit(ar);
1293}
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
new file mode 100644
index 000000000000..f325a23dfff0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -0,0 +1,1337 @@
1/*
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "hif-ops.h"
19#include "cfg80211.h"
20#include "target.h"
21#include "debug.h"
22
23struct ath6kl_sta *ath6kl_find_sta(struct ath6kl *ar, u8 *node_addr)
24{
25 struct ath6kl_sta *conn = NULL;
26 u8 i, max_conn;
27
28 max_conn = (ar->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0;
29
30 for (i = 0; i < max_conn; i++) {
31 if (memcmp(node_addr, ar->sta_list[i].mac, ETH_ALEN) == 0) {
32 conn = &ar->sta_list[i];
33 break;
34 }
35 }
36
37 return conn;
38}
39
40struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid)
41{
42 struct ath6kl_sta *conn = NULL;
43 u8 ctr;
44
45 for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
46 if (ar->sta_list[ctr].aid == aid) {
47 conn = &ar->sta_list[ctr];
48 break;
49 }
50 }
51 return conn;
52}
53
54static void ath6kl_add_new_sta(struct ath6kl *ar, u8 *mac, u16 aid, u8 *wpaie,
55 u8 ielen, u8 keymgmt, u8 ucipher, u8 auth)
56{
57 struct ath6kl_sta *sta;
58 u8 free_slot;
59
60 free_slot = aid - 1;
61
62 sta = &ar->sta_list[free_slot];
63 memcpy(sta->mac, mac, ETH_ALEN);
64 memcpy(sta->wpa_ie, wpaie, ielen);
65 sta->aid = aid;
66 sta->keymgmt = keymgmt;
67 sta->ucipher = ucipher;
68 sta->auth = auth;
69
70 ar->sta_list_index = ar->sta_list_index | (1 << free_slot);
71 ar->ap_stats.sta[free_slot].aid = cpu_to_le32(aid);
72}
73
74static void ath6kl_sta_cleanup(struct ath6kl *ar, u8 i)
75{
76 struct ath6kl_sta *sta = &ar->sta_list[i];
77
78 /* empty the queued pkts in the PS queue if any */
79 spin_lock_bh(&sta->psq_lock);
80 skb_queue_purge(&sta->psq);
81 spin_unlock_bh(&sta->psq_lock);
82
83 memset(&ar->ap_stats.sta[sta->aid - 1], 0,
84 sizeof(struct wmi_per_sta_stat));
85 memset(sta->mac, 0, ETH_ALEN);
86 memset(sta->wpa_ie, 0, ATH6KL_MAX_IE);
87 sta->aid = 0;
88 sta->sta_flags = 0;
89
90 ar->sta_list_index = ar->sta_list_index & ~(1 << i);
91
92}
93
94static u8 ath6kl_remove_sta(struct ath6kl *ar, u8 *mac, u16 reason)
95{
96 u8 i, removed = 0;
97
98 if (is_zero_ether_addr(mac))
99 return removed;
100
101 if (is_broadcast_ether_addr(mac)) {
102 ath6kl_dbg(ATH6KL_DBG_TRC, "deleting all station\n");
103
104 for (i = 0; i < AP_MAX_NUM_STA; i++) {
105 if (!is_zero_ether_addr(ar->sta_list[i].mac)) {
106 ath6kl_sta_cleanup(ar, i);
107 removed = 1;
108 }
109 }
110 } else {
111 for (i = 0; i < AP_MAX_NUM_STA; i++) {
112 if (memcmp(ar->sta_list[i].mac, mac, ETH_ALEN) == 0) {
113 ath6kl_dbg(ATH6KL_DBG_TRC,
114 "deleting station %pM aid=%d reason=%d\n",
115 mac, ar->sta_list[i].aid, reason);
116 ath6kl_sta_cleanup(ar, i);
117 removed = 1;
118 break;
119 }
120 }
121 }
122
123 return removed;
124}
125
126enum htc_endpoint_id ath6kl_ac2_endpoint_id(void *devt, u8 ac)
127{
128 struct ath6kl *ar = devt;
129 return ar->ac2ep_map[ac];
130}
131
132struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar)
133{
134 struct ath6kl_cookie *cookie;
135
136 cookie = ar->cookie_list;
137 if (cookie != NULL) {
138 ar->cookie_list = cookie->arc_list_next;
139 ar->cookie_count--;
140 }
141
142 return cookie;
143}
144
145void ath6kl_cookie_init(struct ath6kl *ar)
146{
147 u32 i;
148
149 ar->cookie_list = NULL;
150 ar->cookie_count = 0;
151
152 memset(ar->cookie_mem, 0, sizeof(ar->cookie_mem));
153
154 for (i = 0; i < MAX_COOKIE_NUM; i++)
155 ath6kl_free_cookie(ar, &ar->cookie_mem[i]);
156}
157
158void ath6kl_cookie_cleanup(struct ath6kl *ar)
159{
160 ar->cookie_list = NULL;
161 ar->cookie_count = 0;
162}
163
164void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie)
165{
166 /* Insert first */
167
168 if (!ar || !cookie)
169 return;
170
171 cookie->arc_list_next = ar->cookie_list;
172 ar->cookie_list = cookie;
173 ar->cookie_count++;
174}
175
176/* set the window address register (using 4-byte register access ). */
177static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr)
178{
179 int status;
180 u8 addr_val[4];
181 s32 i;
182
183 /*
184 * Write bytes 1,2,3 of the register to set the upper address bytes,
185 * the LSB is written last to initiate the access cycle
186 */
187
188 for (i = 1; i <= 3; i++) {
189 /*
190 * Fill the buffer with the address byte value we want to
191 * hit 4 times.
192 */
193 memset(addr_val, ((u8 *)&addr)[i], 4);
194
195 /*
196 * Hit each byte of the register address with a 4-byte
197 * write operation to the same address, this is a harmless
198 * operation.
199 */
200 status = hif_read_write_sync(ar, reg_addr + i, addr_val,
201 4, HIF_WR_SYNC_BYTE_FIX);
202 if (status)
203 break;
204 }
205
206 if (status) {
207 ath6kl_err("failed to write initial bytes of 0x%x to window reg: 0x%X\n",
208 addr, reg_addr);
209 return status;
210 }
211
212 /*
213 * Write the address register again, this time write the whole
214 * 4-byte value. The effect here is that the LSB write causes the
215 * cycle to start, the extra 3 byte write to bytes 1,2,3 has no
216 * effect since we are writing the same values again
217 */
218 status = hif_read_write_sync(ar, reg_addr, (u8 *)(&addr),
219 4, HIF_WR_SYNC_BYTE_INC);
220
221 if (status) {
222 ath6kl_err("failed to write 0x%x to window reg: 0x%X\n",
223 addr, reg_addr);
224 return status;
225 }
226
227 return 0;
228}
229
230/*
231 * Read from the ATH6KL through its diagnostic window. No cooperation from
232 * the Target is required for this.
233 */
234int ath6kl_read_reg_diag(struct ath6kl *ar, u32 *address, u32 *data)
235{
236 int status;
237
238 /* set window register to start read cycle */
239 status = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS,
240 *address);
241
242 if (status)
243 return status;
244
245 /* read the data */
246 status = hif_read_write_sync(ar, WINDOW_DATA_ADDRESS, (u8 *)data,
247 sizeof(u32), HIF_RD_SYNC_BYTE_INC);
248 if (status) {
249 ath6kl_err("failed to read from window data addr\n");
250 return status;
251 }
252
253 return status;
254}
255
256
257/*
258 * Write to the ATH6KL through its diagnostic window. No cooperation from
259 * the Target is required for this.
260 */
261static int ath6kl_write_reg_diag(struct ath6kl *ar, u32 *address, u32 *data)
262{
263 int status;
264
265 /* set write data */
266 status = hif_read_write_sync(ar, WINDOW_DATA_ADDRESS, (u8 *)data,
267 sizeof(u32), HIF_WR_SYNC_BYTE_INC);
268 if (status) {
269 ath6kl_err("failed to write 0x%x to window data addr\n", *data);
270 return status;
271 }
272
273 /* set window register, which starts the write cycle */
274 return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS,
275 *address);
276}
277
278int ath6kl_access_datadiag(struct ath6kl *ar, u32 address,
279 u8 *data, u32 length, bool read)
280{
281 u32 count;
282 int status = 0;
283
284 for (count = 0; count < length; count += 4, address += 4) {
285 if (read) {
286 status = ath6kl_read_reg_diag(ar, &address,
287 (u32 *) &data[count]);
288 if (status)
289 break;
290 } else {
291 status = ath6kl_write_reg_diag(ar, &address,
292 (u32 *) &data[count]);
293 if (status)
294 break;
295 }
296 }
297
298 return status;
299}
300
301static void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
302 bool wait_fot_compltn, bool cold_reset)
303{
304 int status = 0;
305 u32 address;
306 u32 data;
307
308 if (target_type != TARGET_TYPE_AR6003)
309 return;
310
311 data = cold_reset ? RESET_CONTROL_COLD_RST : RESET_CONTROL_MBOX_RST;
312
313 address = RTC_BASE_ADDRESS;
314 status = ath6kl_write_reg_diag(ar, &address, &data);
315
316 if (status)
317 ath6kl_err("failed to reset target\n");
318}
319
/*
 * Stop the data path and shut WMI/HTC down.  If WMI reached the READY
 * state, a disconnect is issued (and simulated towards the stack if one
 * was pending); otherwise WMI is torn down directly.  Finally HTC is
 * stopped and the target reset.
 *
 * @keep_profile: when false the stored connection profile is cleared.
 * NOTE(review): get_dbglogs is currently unused in this function.
 */
320void ath6kl_stop_endpoint(struct net_device *dev, bool keep_profile,
321 bool get_dbglogs)
322{
323 struct ath6kl *ar = ath6kl_priv(dev);
324 static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
325 bool discon_issued;
326
327 netif_stop_queue(dev);
328
329 /* disable the target and the interrupts associated with it */
330 if (test_bit(WMI_READY, &ar->flag)) {
331 discon_issued = (test_bit(CONNECTED, &ar->flag) ||
332 test_bit(CONNECT_PEND, &ar->flag));
333 ath6kl_disconnect(ar);
334 if (!keep_profile)
335 ath6kl_init_profile_info(ar);
336
337 del_timer(&ar->disconnect_timer);
338
339 clear_bit(WMI_READY, &ar->flag);
340 ath6kl_wmi_shutdown(ar->wmi);
341 clear_bit(WMI_ENABLED, &ar->flag);
342 ar->wmi = NULL;
343
344 /*
345 * After wmi_shutdown all WMI events will be dropped. We
346 * need to cleanup the buffers allocated in AP mode and
347 * give disconnect notification to stack, which usually
348 * happens in the disconnect_event. Simulate the disconnect
349 * event by calling the function directly. Sometimes
350 * disconnect_event will be received when the debug logs
351 * are collected.
352 */
353 if (discon_issued)
354 ath6kl_disconnect_event(ar, DISCONNECT_CMD,
355 (ar->nw_type & AP_NETWORK) ?
356 bcast_mac : ar->bssid,
357 0, NULL, 0);
358
359 ar->user_key_ctrl = 0;
360
361 } else {
362 ath6kl_dbg(ATH6KL_DBG_TRC,
363 "%s: wmi is not ready 0x%p 0x%p\n",
364 __func__, ar, ar->wmi);
365
366 /* Shut down WMI if we have started it */
367 if (test_bit(WMI_ENABLED, &ar->flag)) {
368 ath6kl_dbg(ATH6KL_DBG_TRC,
369 "%s: shut down wmi\n", __func__);
370 ath6kl_wmi_shutdown(ar->wmi);
371 clear_bit(WMI_ENABLED, &ar->flag);
372 ar->wmi = NULL;
373 }
374 }
375
376 if (ar->htc_target) {
377 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: shut down htc\n", __func__);
378 htc_stop(ar->htc_target);
379 }
380
381 /*
382 * Try to reset the device if we can. The driver may have been
383 * configured NOT to reset the target during a debug session.
384 */
385 ath6kl_dbg(ATH6KL_DBG_TRC,
386 "attempting to reset target on instance destroy\n");
387 ath6kl_reset_device(ar, ar->target_type, true, true);
388}
389
390static void ath6kl_install_static_wep_keys(struct ath6kl *ar)
391{
392 u8 index;
393 u8 keyusage;
394
395 for (index = WMI_MIN_KEY_INDEX; index <= WMI_MAX_KEY_INDEX; index++) {
396 if (ar->wep_key_list[index].key_len) {
397 keyusage = GROUP_USAGE;
398 if (index == ar->def_txkey_index)
399 keyusage |= TX_USAGE;
400
401 ath6kl_wmi_addkey_cmd(ar->wmi,
402 index,
403 WEP_CRYPT,
404 keyusage,
405 ar->wep_key_list[index].key_len,
406 NULL,
407 ar->wep_key_list[index].key,
408 KEY_OP_INIT_VAL, NULL,
409 NO_SYNC_WMIFLAG);
410 }
411 }
412}
413
/*
 * Handle a WMI connect event while operating in AP mode.
 *
 * If the reported BSSID is our own MAC address the event announces that
 * the AP interface itself came up: re-plumb the group/WEP keys for the
 * configured auth mode and mark the instance CONNECTED.  Otherwise the
 * event describes a newly associated station, which is added to the
 * station table and announced to user space via cfg80211_new_sta().
 *
 * NOTE(review): in the station branch the WMI fields are overloaded --
 * 'channel' is passed as the station's aid, and keymgmt/ucipher/auth are
 * unpacked from listen_int/beacon_int when calling ath6kl_add_new_sta();
 * confirm this mapping against the firmware event layout in wmi.h.
 */
414static void ath6kl_connect_ap_mode(struct ath6kl *ar, u16 channel, u8 *bssid,
415 u16 listen_int, u16 beacon_int,
416 u8 assoc_resp_len, u8 *assoc_info)
417{
418 struct net_device *dev = ar->net_dev;
419 struct station_info sinfo;
420 struct ath6kl_req_key *ik;
421 enum crypto_type keyType = NONE_CRYPT;
422
423 if (memcmp(dev->dev_addr, bssid, ETH_ALEN) == 0) {
424 ik = &ar->ap_mode_bkey;
425
426 switch (ar->auth_mode) {
427 case NONE_AUTH:
428 if (ar->prwise_crypto == WEP_CRYPT)
429 ath6kl_install_static_wep_keys(ar);
430 break;
431 case WPA_PSK_AUTH:
432 case WPA2_PSK_AUTH:
433 case (WPA_PSK_AUTH|WPA2_PSK_AUTH):
434 switch (ik->ik_type) {
435 case ATH6KL_CIPHER_TKIP:
436 keyType = TKIP_CRYPT;
437 break;
438 case ATH6KL_CIPHER_AES_CCM:
439 keyType = AES_CRYPT;
440 break;
441 default:
442 goto skip_key;
443 }
444 ath6kl_wmi_addkey_cmd(ar->wmi, ik->ik_keyix, keyType,
445 GROUP_USAGE, ik->ik_keylen,
446 (u8 *)&ik->ik_keyrsc,
447 ik->ik_keydata,
448 KEY_OP_INIT_VAL, ik->ik_macaddr,
449 SYNC_BOTH_WMIFLAG);
450 break;
451 }
452skip_key:
453 set_bit(CONNECTED, &ar->flag);
454 return;
455 }
456
457 ath6kl_dbg(ATH6KL_DBG_TRC, "new station %pM aid=%d\n",
458 bssid, channel);
459
460 ath6kl_add_new_sta(ar, bssid, channel, assoc_info, assoc_resp_len,
461 listen_int & 0xFF, beacon_int,
462 (listen_int >> 8) & 0xFF);
463
464 /* send event to application */
465 memset(&sinfo, 0, sizeof(sinfo));
466
467 /* TODO: sinfo.generation */
468 /* TODO: need to deliver (Re)AssocReq IEs somehow.. change in
469 * cfg80211 needed, e.g., by adding those into sinfo
470 */
471 cfg80211_new_sta(ar->net_dev, bssid, &sinfo, GFP_KERNEL);
472
473 netif_wake_queue(ar->net_dev);
474
475 return;
476}
477
478/* Functions for Tx credit handling */
/*
 * Seed the credit pool with @tot_credits and give each endpoint its
 * starting allocation: the control service always gets its minimum and is
 * permanently active; BK/BE data endpoints are pre-funded when enough
 * credits exist.  A second pass sets each data endpoint's "normal" level
 * to roughly 3/4 of the remaining free pool (rounded to whole messages).
 */
479void ath6k_credit_init(struct htc_credit_state_info *cred_info,
480 struct list_head *ep_list,
481 int tot_credits)
482{
483 struct htc_endpoint_credit_dist *cur_ep_dist;
484 int count;
485
486 cred_info->cur_free_credits = tot_credits;
487 cred_info->total_avail_credits = tot_credits;
488
489 list_for_each_entry(cur_ep_dist, ep_list, list) {
490 if (cur_ep_dist->endpoint == ENDPOINT_0)
491 continue;
492
493 cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;
494
495 if (tot_credits > 4)
496 if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
497 (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
498 ath6kl_deposit_credit_to_ep(cred_info,
499 cur_ep_dist,
500 cur_ep_dist->cred_min);
501 cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
502 }
503
504 if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
505 ath6kl_deposit_credit_to_ep(cred_info, cur_ep_dist,
506 cur_ep_dist->cred_min);
507 /*
508 * Control service is always marked active, it
509 * never goes inactive EVER.
510 */
511 cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
512 } else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC)
513 /* this is the lowest priority data endpoint */
/* NOTE(review): this assigns the list_head by value (a copy of the node),
 * which ath6k_seek_credits() then iterates from -- verify this is the
 * intended anchor rather than a pointer to the node. */
514 cred_info->lowestpri_ep_dist = cur_ep_dist->list;
515
516 /*
517 * Streams have to be created (explicit | implicit) for all
518 * kinds of traffic. BE endpoints are also inactive in the
519 * beginning. When BE traffic starts it creates implicit
520 * streams that redistributes credits.
521 *
522 * Note: all other endpoints have minimums set but are
523 * initially given NO credits. credits will be distributed
524 * as traffic activity demands
525 */
526 }
527
528 WARN_ON(cred_info->cur_free_credits <= 0);
529
530 list_for_each_entry(cur_ep_dist, ep_list, list) {
531 if (cur_ep_dist->endpoint == ENDPOINT_0)
532 continue;
533
534 if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
535 cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
536 else {
537 /*
538 * For the remaining data endpoints, we assume that
539 * each cred_per_msg are the same. We use a simple
540 * calculation here, we take the remaining credits
541 * and determine how many max messages this can
542 * cover and then set each endpoint's normal value
543 * equal to 3/4 this amount.
544 */
545 count = (cred_info->cur_free_credits /
546 cur_ep_dist->cred_per_msg)
547 * cur_ep_dist->cred_per_msg;
548 count = (count * 3) >> 2;
549 count = max(count, cur_ep_dist->cred_per_msg);
550 cur_ep_dist->cred_norm = count;
551
552 }
553 }
554}
555
556/* initialize and setup credit distribution */
557int ath6k_setup_credit_dist(void *htc_handle,
558 struct htc_credit_state_info *cred_info)
559{
560 u16 servicepriority[5];
561
562 memset(cred_info, 0, sizeof(struct htc_credit_state_info));
563
564 servicepriority[0] = WMI_CONTROL_SVC; /* highest */
565 servicepriority[1] = WMI_DATA_VO_SVC;
566 servicepriority[2] = WMI_DATA_VI_SVC;
567 servicepriority[3] = WMI_DATA_BE_SVC;
568 servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
569
570 /* set priority list */
571 htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5);
572
573 return 0;
574}
575
576/* reduce an ep's credits back to a set limit */
577static void ath6k_reduce_credits(struct htc_credit_state_info *cred_info,
578 struct htc_endpoint_credit_dist *ep_dist,
579 int limit)
580{
581 int credits;
582
583 ep_dist->cred_assngd = limit;
584
585 if (ep_dist->credits <= limit)
586 return;
587
588 credits = ep_dist->credits - limit;
589 ep_dist->credits -= credits;
590 cred_info->cur_free_credits += credits;
591}
592
593static void ath6k_credit_update(struct htc_credit_state_info *cred_info,
594 struct list_head *epdist_list)
595{
596 struct htc_endpoint_credit_dist *cur_dist_list;
597
598 list_for_each_entry(cur_dist_list, epdist_list, list) {
599 if (cur_dist_list->endpoint == ENDPOINT_0)
600 continue;
601
602 if (cur_dist_list->cred_to_dist > 0) {
603 cur_dist_list->credits +=
604 cur_dist_list->cred_to_dist;
605 cur_dist_list->cred_to_dist = 0;
606 if (cur_dist_list->credits >
607 cur_dist_list->cred_assngd)
608 ath6k_reduce_credits(cred_info,
609 cur_dist_list,
610 cur_dist_list->cred_assngd);
611
612 if (cur_dist_list->credits >
613 cur_dist_list->cred_norm)
614 ath6k_reduce_credits(cred_info, cur_dist_list,
615 cur_dist_list->cred_norm);
616
617 if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) {
618 if (cur_dist_list->txq_depth == 0)
619 ath6k_reduce_credits(cred_info,
620 cur_dist_list, 0);
621 }
622 }
623 }
624}
625
626/*
627 * HTC has an endpoint that needs credits, ep_dist is the endpoint in
628 * question.
629 */
/*
 * Strategy: control traffic never seeks; VI/VO stop once at their normal
 * level.  Everyone else first drains the free pool and then walks the
 * priority list from the lowest-priority end, trimming lower-priority
 * endpoints down (never below their minimum) until the request is met.
 * Whatever was gathered is deposited before seek_cred is reset.
 */
630void ath6k_seek_credits(struct htc_credit_state_info *cred_info,
631 struct htc_endpoint_credit_dist *ep_dist)
632{
633 struct htc_endpoint_credit_dist *curdist_list;
634 int credits = 0;
635 int need;
636
637 if (ep_dist->svc_id == WMI_CONTROL_SVC)
638 goto out;
639
640 if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
641 (ep_dist->svc_id == WMI_DATA_VO_SVC))
642 if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
643 goto out;
644
645 /*
646 * For all other services, we follow a simple algorithm of:
647 *
648 * 1. checking the free pool for credits
649 * 2. checking lower priority endpoints for credits to take
650 */
651
652 credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
653
654 if (credits >= ep_dist->seek_cred)
655 goto out;
656
657 /*
658 * We don't have enough in the free pool, try taking away from
659 * lower priority services The rule for taking away credits:
660 *
661 * 1. Only take from lower priority endpoints
662 * 2. Only take what is allocated above the minimum (never
663 * starve an endpoint completely)
664 * 3. Only take what you need.
665 */
666
/* NOTE(review): iteration anchors on cred_info->lowestpri_ep_dist, which
 * ath6k_credit_init() stored as a by-value copy of the BK endpoint's
 * list_head -- verify the anchor is what the walk intends. */
667 list_for_each_entry_reverse(curdist_list,
668 &cred_info->lowestpri_ep_dist,
669 list) {
670 if (curdist_list == ep_dist)
671 break;
672
673 need = ep_dist->seek_cred - cred_info->cur_free_credits;
674
675 if ((curdist_list->cred_assngd - need) >=
676 curdist_list->cred_min) {
677 /*
678 * The current one has been allocated more than
679 * it's minimum and it has enough credits assigned
680 * above it's minimum to fulfill our need try to
681 * take away just enough to fulfill our need.
682 */
683 ath6k_reduce_credits(cred_info, curdist_list,
684 curdist_list->cred_assngd - need);
685
686 if (cred_info->cur_free_credits >=
687 ep_dist->seek_cred)
688 break;
689 }
690
691 if (curdist_list->endpoint == ENDPOINT_0)
692 break;
693 }
694
695 credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
696
697out:
698 /* did we find some credits? */
699 if (credits)
700 ath6kl_deposit_credit_to_ep(cred_info, ep_dist, credits);
701
702 ep_dist->seek_cred = 0;
703}
704
705/* redistribute credits based on activity change */
706static void ath6k_redistribute_credits(struct htc_credit_state_info *info,
707 struct list_head *ep_dist_list)
708{
709 struct htc_endpoint_credit_dist *curdist_list;
710
711 list_for_each_entry(curdist_list, ep_dist_list, list) {
712 if (curdist_list->endpoint == ENDPOINT_0)
713 continue;
714
715 if ((curdist_list->svc_id == WMI_DATA_BK_SVC) ||
716 (curdist_list->svc_id == WMI_DATA_BE_SVC))
717 curdist_list->dist_flags |= HTC_EP_ACTIVE;
718
719 if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
720 !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
721 if (curdist_list->txq_depth == 0)
722 ath6k_reduce_credits(info,
723 curdist_list, 0);
724 else
725 ath6k_reduce_credits(info,
726 curdist_list,
727 curdist_list->cred_min);
728 }
729 }
730}
731
732/*
733 *
734 * This function is invoked whenever endpoints require credit
735 * distributions. A lock is held while this function is invoked, this
736 * function shall NOT block. The ep_dist_list is a list of distribution
737 * structures in prioritized order as defined by the call to the
738 * htc_set_credit_dist() api.
739 */
740void ath6k_credit_distribute(struct htc_credit_state_info *cred_info,
741 struct list_head *ep_dist_list,
742 enum htc_credit_dist_reason reason)
743{
744 switch (reason) {
745 case HTC_CREDIT_DIST_SEND_COMPLETE:
746 ath6k_credit_update(cred_info, ep_dist_list);
747 break;
748 case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
749 ath6k_redistribute_credits(cred_info, ep_dist_list);
750 break;
751 default:
752 break;
753 }
754
755 WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
756 WARN_ON(cred_info->cur_free_credits < 0);
757}
758
/* Timer callback fired when a connect attempt takes too long: wipe the
 * stored profile and issue a disconnect. */
void disconnect_timer_handler(unsigned long ptr)
{
	struct ath6kl *ar = ath6kl_priv((struct net_device *) ptr);

	ath6kl_init_profile_info(ar);
	ath6kl_disconnect(ar);
}
767
768void ath6kl_disconnect(struct ath6kl *ar)
769{
770 if (test_bit(CONNECTED, &ar->flag) ||
771 test_bit(CONNECT_PEND, &ar->flag)) {
772 ath6kl_wmi_disconnect_cmd(ar->wmi);
773 /*
774 * Disconnect command is issued, clear the connect pending
775 * flag. The connected flag will be cleared in
776 * disconnect event notification.
777 */
778 clear_bit(CONNECT_PEND, &ar->flag);
779 }
780}
781
782/* WMI Event handlers */
783
784static const char *get_hw_id_string(u32 id)
785{
786 switch (id) {
787 case AR6003_REV1_VERSION:
788 return "1.0";
789 case AR6003_REV2_VERSION:
790 return "2.0";
791 case AR6003_REV3_VERSION:
792 return "2.1.1";
793 default:
794 return "unknown";
795 }
796}
797
798void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver)
799{
800 struct ath6kl *ar = devt;
801 struct net_device *dev = ar->net_dev;
802
803 memcpy(dev->dev_addr, datap, ETH_ALEN);
804 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: mac addr = %pM\n",
805 __func__, dev->dev_addr);
806
807 ar->version.wlan_ver = sw_ver;
808 ar->version.abi_ver = abi_ver;
809
810 snprintf(ar->wdev->wiphy->fw_version,
811 sizeof(ar->wdev->wiphy->fw_version),
812 "%u.%u.%u.%u",
813 (ar->version.wlan_ver & 0xf0000000) >> 28,
814 (ar->version.wlan_ver & 0x0f000000) >> 24,
815 (ar->version.wlan_ver & 0x00ff0000) >> 16,
816 (ar->version.wlan_ver & 0x0000ffff));
817
818 /* indicate to the waiting thread that the ready event was received */
819 set_bit(WMI_READY, &ar->flag);
820 wake_up(&ar->event_wq);
821
822 ath6kl_info("hw %s fw %s\n",
823 get_hw_id_string(ar->wdev->wiphy->hw_version),
824 ar->wdev->wiphy->fw_version);
825}
826
827void ath6kl_scan_complete_evt(struct ath6kl *ar, int status)
828{
829 ath6kl_cfg80211_scan_complete_event(ar, status);
830
831 if (!ar->usr_bss_filter)
832 ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
833
834 ath6kl_dbg(ATH6KL_DBG_WLAN_SCAN, "scan complete: %d\n", status);
835}
836
/*
 * WMI connect event dispatcher.  AP mode is handed off to
 * ath6kl_connect_ap_mode(); otherwise the event is forwarded to cfg80211,
 * the BSSID/channel recorded, the data path woken and the connect/link
 * state flipped under ar->lock.  Ad-hoc power save state is reset and the
 * default BSS filter restored unless the user installed one.
 */
837void ath6kl_connect_event(struct ath6kl *ar, u16 channel, u8 *bssid,
838 u16 listen_int, u16 beacon_int,
839 enum network_type net_type, u8 beacon_ie_len,
840 u8 assoc_req_len, u8 assoc_resp_len,
841 u8 *assoc_info)
842{
843 unsigned long flags;
844
845 if (ar->nw_type == AP_NETWORK) {
846 ath6kl_connect_ap_mode(ar, channel, bssid, listen_int,
847 beacon_int, assoc_resp_len,
848 assoc_info);
849 return;
850 }
851
852 ath6kl_cfg80211_connect_event(ar, channel, bssid,
853 listen_int, beacon_int,
854 net_type, beacon_ie_len,
855 assoc_req_len, assoc_resp_len,
856 assoc_info);
857
858 memcpy(ar->bssid, bssid, sizeof(ar->bssid));
859 ar->bss_ch = channel;
860
861 if ((ar->nw_type == INFRA_NETWORK))
862 ath6kl_wmi_listeninterval_cmd(ar->wmi, ar->listen_intvl_t,
863 ar->listen_intvl_b);
864
865 netif_wake_queue(ar->net_dev);
866
867 /* Update connect & link status atomically */
868 spin_lock_irqsave(&ar->lock, flags);
869 set_bit(CONNECTED, &ar->flag);
870 clear_bit(CONNECT_PEND, &ar->flag);
871 netif_carrier_on(ar->net_dev);
872 spin_unlock_irqrestore(&ar->lock, flags);
873
/* Reset rx aggregation and ad-hoc PS bookkeeping for the new link. */
874 aggr_reset_state(ar->aggr_cntxt);
875 ar->reconnect_flag = 0;
876
877 if ((ar->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) {
878 memset(ar->node_map, 0, sizeof(ar->node_map));
879 ar->node_num = 0;
880 ar->next_ep_id = ENDPOINT_2;
881 }
882
883 if (!ar->usr_bss_filter)
884 ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
885}
886
/*
 * Handle a TKIP MIC-failure event from the firmware.
 *
 * In AP mode @keyid actually carries the AID of the offending station
 * (shifted left by 2); the station's MAC is looked up and reported to
 * hostapd via cfg80211.  In STA mode the event is forwarded unchanged
 * to the cfg80211 handler.
 */
void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast)
{
	struct ath6kl_sta *sta;
	u8 tsc[6];
	/*
	 * For AP case, keyid will have aid of STA which sent pkt with
	 * MIC error. Use this aid to get MAC & send it to hostapd.
	 */
	if (ar->nw_type == AP_NETWORK) {
		sta = ath6kl_find_sta_by_aid(ar, (keyid >> 2));
		if (!sta)
			return;

		ath6kl_dbg(ATH6KL_DBG_TRC,
			   "ap tkip mic error received from aid=%d\n", keyid);

		memset(tsc, 0, sizeof(tsc)); /* FIX: get correct TSC */
		cfg80211_michael_mic_failure(ar->net_dev, sta->mac,
					     NL80211_KEYTYPE_PAIRWISE, keyid,
					     tsc, GFP_KERNEL);
	} else
		ath6kl_cfg80211_tkip_micerr_event(ar, keyid, ismcast);

}
911
/*
 * Parse a WMI target-statistics payload and fold it into the driver's
 * cumulative counters (ar->target_stats).
 *
 * Counters are accumulated (+=) across events; rates, RSSI/SNR, noise
 * floor and link quality are snapshots and are simply overwritten.
 * Endian conversion happens here: unsigned counters are little-endian,
 * signed values use the driver's signed little-endian helpers
 * (a_sle16_to_cpu / a_sle32_to_cpu).  Payloads shorter than
 * sizeof(struct wmi_target_stats) are silently ignored.
 */
static void ath6kl_update_target_stats(struct ath6kl *ar, u8 *ptr, u32 len)
{
	struct wmi_target_stats *tgt_stats =
		(struct wmi_target_stats *) ptr;
	struct target_stats *stats = &ar->target_stats;
	struct tkip_ccmp_stats *ccmp_stats;
	struct bss *conn_bss = NULL;
	struct cserv_stats *c_stats;
	u8 ac;

	if (len < sizeof(*tgt_stats))
		return;

	/* update the RSSI of the connected bss */
	if (test_bit(CONNECTED, &ar->flag)) {
		conn_bss = ath6kl_wmi_find_node(ar->wmi, ar->bssid);
		if (conn_bss) {
			c_stats = &tgt_stats->cserv_stats;
			conn_bss->ni_rssi =
				a_sle16_to_cpu(c_stats->cs_ave_beacon_rssi);
			conn_bss->ni_snr =
				tgt_stats->cserv_stats.cs_ave_beacon_snr;
			/* drop the reference taken by find_node */
			ath6kl_wmi_node_return(ar->wmi, conn_bss);
		}
	}

	ath6kl_dbg(ATH6KL_DBG_TRC, "updating target stats\n");

	/* transmit-path counters */
	stats->tx_pkt += le32_to_cpu(tgt_stats->stats.tx.pkt);
	stats->tx_byte += le32_to_cpu(tgt_stats->stats.tx.byte);
	stats->tx_ucast_pkt += le32_to_cpu(tgt_stats->stats.tx.ucast_pkt);
	stats->tx_ucast_byte += le32_to_cpu(tgt_stats->stats.tx.ucast_byte);
	stats->tx_mcast_pkt += le32_to_cpu(tgt_stats->stats.tx.mcast_pkt);
	stats->tx_mcast_byte += le32_to_cpu(tgt_stats->stats.tx.mcast_byte);
	stats->tx_bcast_pkt += le32_to_cpu(tgt_stats->stats.tx.bcast_pkt);
	stats->tx_bcast_byte += le32_to_cpu(tgt_stats->stats.tx.bcast_byte);
	stats->tx_rts_success_cnt +=
		le32_to_cpu(tgt_stats->stats.tx.rts_success_cnt);

	/* per-WMM-access-class tx packet counts */
	for (ac = 0; ac < WMM_NUM_AC; ac++)
		stats->tx_pkt_per_ac[ac] +=
			le32_to_cpu(tgt_stats->stats.tx.pkt_per_ac[ac]);

	stats->tx_err += le32_to_cpu(tgt_stats->stats.tx.err);
	stats->tx_fail_cnt += le32_to_cpu(tgt_stats->stats.tx.fail_cnt);
	stats->tx_retry_cnt += le32_to_cpu(tgt_stats->stats.tx.retry_cnt);
	stats->tx_mult_retry_cnt +=
		le32_to_cpu(tgt_stats->stats.tx.mult_retry_cnt);
	stats->tx_rts_fail_cnt +=
		le32_to_cpu(tgt_stats->stats.tx.rts_fail_cnt);
	/* snapshot, not accumulated: current unicast tx rate */
	stats->tx_ucast_rate =
	    ath6kl_wmi_get_rate(a_sle32_to_cpu(tgt_stats->stats.tx.ucast_rate));

	/* receive-path counters */
	stats->rx_pkt += le32_to_cpu(tgt_stats->stats.rx.pkt);
	stats->rx_byte += le32_to_cpu(tgt_stats->stats.rx.byte);
	stats->rx_ucast_pkt += le32_to_cpu(tgt_stats->stats.rx.ucast_pkt);
	stats->rx_ucast_byte += le32_to_cpu(tgt_stats->stats.rx.ucast_byte);
	stats->rx_mcast_pkt += le32_to_cpu(tgt_stats->stats.rx.mcast_pkt);
	stats->rx_mcast_byte += le32_to_cpu(tgt_stats->stats.rx.mcast_byte);
	stats->rx_bcast_pkt += le32_to_cpu(tgt_stats->stats.rx.bcast_pkt);
	stats->rx_bcast_byte += le32_to_cpu(tgt_stats->stats.rx.bcast_byte);
	stats->rx_frgment_pkt += le32_to_cpu(tgt_stats->stats.rx.frgment_pkt);
	stats->rx_err += le32_to_cpu(tgt_stats->stats.rx.err);
	stats->rx_crc_err += le32_to_cpu(tgt_stats->stats.rx.crc_err);
	stats->rx_key_cache_miss +=
		le32_to_cpu(tgt_stats->stats.rx.key_cache_miss);
	stats->rx_decrypt_err += le32_to_cpu(tgt_stats->stats.rx.decrypt_err);
	stats->rx_dupl_frame += le32_to_cpu(tgt_stats->stats.rx.dupl_frame);
	/* snapshot, not accumulated: current unicast rx rate */
	stats->rx_ucast_rate =
	    ath6kl_wmi_get_rate(a_sle32_to_cpu(tgt_stats->stats.rx.ucast_rate));

	/* TKIP/CCMP crypto error counters */
	ccmp_stats = &tgt_stats->stats.tkip_ccmp_stats;

	stats->tkip_local_mic_fail +=
		le32_to_cpu(ccmp_stats->tkip_local_mic_fail);
	stats->tkip_cnter_measures_invoked +=
		le32_to_cpu(ccmp_stats->tkip_cnter_measures_invoked);
	stats->tkip_fmt_err += le32_to_cpu(ccmp_stats->tkip_fmt_err);

	stats->ccmp_fmt_err += le32_to_cpu(ccmp_stats->ccmp_fmt_err);
	stats->ccmp_replays += le32_to_cpu(ccmp_stats->ccmp_replays);

	/* power-management and connection-service stats */
	stats->pwr_save_fail_cnt +=
		le32_to_cpu(tgt_stats->pm_stats.pwr_save_failure_cnt);
	stats->noise_floor_calib =
		a_sle32_to_cpu(tgt_stats->noise_floor_calib);

	stats->cs_bmiss_cnt +=
		le32_to_cpu(tgt_stats->cserv_stats.cs_bmiss_cnt);
	stats->cs_low_rssi_cnt +=
		le32_to_cpu(tgt_stats->cserv_stats.cs_low_rssi_cnt);
	stats->cs_connect_cnt +=
		le16_to_cpu(tgt_stats->cserv_stats.cs_connect_cnt);
	stats->cs_discon_cnt +=
		le16_to_cpu(tgt_stats->cserv_stats.cs_discon_cnt);

	stats->cs_ave_beacon_rssi =
		a_sle16_to_cpu(tgt_stats->cserv_stats.cs_ave_beacon_rssi);

	stats->cs_last_roam_msec =
		tgt_stats->cserv_stats.cs_last_roam_msec;
	stats->cs_snr = tgt_stats->cserv_stats.cs_snr;
	stats->cs_rssi = a_sle16_to_cpu(tgt_stats->cserv_stats.cs_rssi);

	stats->lq_val = le32_to_cpu(tgt_stats->lq_val);

	/* wake-on-wireless stats */
	stats->wow_pkt_dropped +=
		le32_to_cpu(tgt_stats->wow_stats.wow_pkt_dropped);
	stats->wow_host_pkt_wakeups +=
		tgt_stats->wow_stats.wow_host_pkt_wakeups;
	stats->wow_host_evt_wakeups +=
		tgt_stats->wow_stats.wow_host_evt_wakeups;
	stats->wow_evt_discarded +=
		le16_to_cpu(tgt_stats->wow_stats.wow_evt_discarded);

	/* wake any thread waiting (in a stats ioctl/debugfs read) for
	 * this update */
	if (test_bit(STATS_UPDATE_PEND, &ar->flag)) {
		clear_bit(STATS_UPDATE_PEND, &ar->flag);
		wake_up(&ar->event_wq);
	}
}
1032
/* Accumulate val into *var, both kept in little-endian representation. */
static void ath6kl_add_le32(__le32 *var, __le32 val)
{
	*var = cpu_to_le32(le32_to_cpu(*var) + le32_to_cpu(val));
}
1037
/*
 * Dispatch a WMI statistics event.
 *
 * In AP mode the payload is a wmi_ap_mode_stat with per-station
 * counters, accumulated (still little-endian) into ar->ap_stats.
 * Otherwise the payload is regular target stats and is handed to
 * ath6kl_update_target_stats().  Undersized AP payloads are ignored.
 */
void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len)
{
	struct wmi_ap_mode_stat *p = (struct wmi_ap_mode_stat *) ptr;
	struct wmi_ap_mode_stat *ap = &ar->ap_stats;
	struct wmi_per_sta_stat *st_ap, *st_p;
	u8 ac;

	if (ar->nw_type == AP_NETWORK) {
		if (len < sizeof(*p))
			return;

		/* accumulate counters for every station slot */
		for (ac = 0; ac < AP_MAX_NUM_STA; ac++) {
			st_ap = &ap->sta[ac];
			st_p = &p->sta[ac];

			ath6kl_add_le32(&st_ap->tx_bytes, st_p->tx_bytes);
			ath6kl_add_le32(&st_ap->tx_pkts, st_p->tx_pkts);
			ath6kl_add_le32(&st_ap->tx_error, st_p->tx_error);
			ath6kl_add_le32(&st_ap->tx_discard, st_p->tx_discard);
			ath6kl_add_le32(&st_ap->rx_bytes, st_p->rx_bytes);
			ath6kl_add_le32(&st_ap->rx_pkts, st_p->rx_pkts);
			ath6kl_add_le32(&st_ap->rx_error, st_p->rx_error);
			ath6kl_add_le32(&st_ap->rx_discard, st_p->rx_discard);
		}

	} else {
		ath6kl_update_target_stats(ar, ptr, len);
	}
}
1067
1068void ath6kl_wakeup_event(void *dev)
1069{
1070 struct ath6kl *ar = (struct ath6kl *) dev;
1071
1072 wake_up(&ar->event_wq);
1073}
1074
1075void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr)
1076{
1077 struct ath6kl *ar = (struct ath6kl *) devt;
1078
1079 ar->tx_pwr = tx_pwr;
1080 wake_up(&ar->event_wq);
1081}
1082
1083void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid)
1084{
1085 struct ath6kl_sta *conn;
1086 struct sk_buff *skb;
1087 bool psq_empty = false;
1088
1089 conn = ath6kl_find_sta_by_aid(ar, aid);
1090
1091 if (!conn)
1092 return;
1093 /*
1094 * Send out a packet queued on ps queue. When the ps queue
1095 * becomes empty update the PVB for this station.
1096 */
1097 spin_lock_bh(&conn->psq_lock);
1098 psq_empty = skb_queue_empty(&conn->psq);
1099 spin_unlock_bh(&conn->psq_lock);
1100
1101 if (psq_empty)
1102 /* TODO: Send out a NULL data frame */
1103 return;
1104
1105 spin_lock_bh(&conn->psq_lock);
1106 skb = skb_dequeue(&conn->psq);
1107 spin_unlock_bh(&conn->psq_lock);
1108
1109 conn->sta_flags |= STA_PS_POLLED;
1110 ath6kl_data_tx(skb, ar->net_dev);
1111 conn->sta_flags &= ~STA_PS_POLLED;
1112
1113 spin_lock_bh(&conn->psq_lock);
1114 psq_empty = skb_queue_empty(&conn->psq);
1115 spin_unlock_bh(&conn->psq_lock);
1116
1117 if (psq_empty)
1118 ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0);
1119}
1120
/*
 * Handle a DTIM expiry event (AP mode): flush all queued multicast
 * power-save frames to the air, then clear the multicast bit in the
 * TIM IE's BitMapCtl field.
 */
void ath6kl_dtimexpiry_event(struct ath6kl *ar)
{
	bool mcastq_empty = false;
	struct sk_buff *skb;

	/*
	 * If there are no associated STAs, ignore the DTIM expiry event.
	 * There can be potential race conditions where the last associated
	 * STA may disconnect & before the host could clear the 'Indicate
	 * DTIM' request to the firmware, the firmware would have just
	 * indicated a DTIM expiry event. The race is between 'clear DTIM
	 * expiry cmd' going from the host to the firmware & the DTIM
	 * expiry event happening from the firmware to the host.
	 */
	if (!ar->sta_list_index)
		return;

	spin_lock_bh(&ar->mcastpsq_lock);
	mcastq_empty = skb_queue_empty(&ar->mcastpsq);
	spin_unlock_bh(&ar->mcastpsq_lock);

	if (mcastq_empty)
		return;

	/* set the STA flag to dtim_expired for the frame to go out */
	set_bit(DTIM_EXPIRED, &ar->flag);

	/* drop the lock around each tx; ath6kl_data_tx may sleep/relock */
	spin_lock_bh(&ar->mcastpsq_lock);
	while ((skb = skb_dequeue(&ar->mcastpsq)) != NULL) {
		spin_unlock_bh(&ar->mcastpsq_lock);

		ath6kl_data_tx(skb, ar->net_dev);

		spin_lock_bh(&ar->mcastpsq_lock);
	}
	spin_unlock_bh(&ar->mcastpsq_lock);

	clear_bit(DTIM_EXPIRED, &ar->flag);

	/* clear the LSB of the BitMapCtl field of the TIM IE */
	ath6kl_wmi_set_pvb_cmd(ar->wmi, MCAST_AID, 0);
}
1163
/*
 * Handle a WMI disconnect event.
 *
 * AP mode: remove the station from the local table, flush the multicast
 * PS queue when the last station leaves, and notify userspace.
 *
 * STA mode: notify cfg80211, reset aggregation, decide whether the
 * target will keep retrying the connection (and keep CONNECT_PEND set
 * accordingly), prune stale scan nodes on NO_NETWORK_AVAIL, and finally
 * mark the link down and clean up pending tx.
 */
void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid,
			     u8 assoc_resp_len, u8 *assoc_info,
			     u16 prot_reason_status)
{
	struct bss *wmi_ssid_node = NULL;
	unsigned long flags;

	if (ar->nw_type == AP_NETWORK) {
		if (!ath6kl_remove_sta(ar, bssid, prot_reason_status))
			return;

		/* if no more associated STAs, empty the mcast PS q */
		if (ar->sta_list_index == 0) {
			spin_lock_bh(&ar->mcastpsq_lock);
			skb_queue_purge(&ar->mcastpsq);
			spin_unlock_bh(&ar->mcastpsq_lock);

			/* clear the LSB of the TIM IE's BitMapCtl field */
			if (test_bit(WMI_READY, &ar->flag))
				ath6kl_wmi_set_pvb_cmd(ar->wmi, MCAST_AID, 0);
		}

		if (!is_broadcast_ether_addr(bssid)) {
			/* send event to application */
			cfg80211_del_sta(ar->net_dev, bssid, GFP_KERNEL);
		}

		clear_bit(CONNECTED, &ar->flag);
		return;
	}

	ath6kl_cfg80211_disconnect_event(ar, reason, bssid,
					 assoc_resp_len, assoc_info,
					 prot_reason_status);

	aggr_reset_state(ar->aggr_cntxt);

	del_timer(&ar->disconnect_timer);

	ath6kl_dbg(ATH6KL_DBG_WLAN_CONNECT,
		   "disconnect reason is %d\n", reason);

	/*
	 * If the event is due to disconnect cmd from the host, only then
	 * the target would stop trying to connect. Under any other
	 * condition, target would keep trying to connect.
	 */
	if (reason == DISCONNECT_CMD) {
		if (!ar->usr_bss_filter && test_bit(WMI_READY, &ar->flag))
			ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
	} else {
		set_bit(CONNECT_PEND, &ar->flag);
		/*
		 * Keep reporting "connected" while the target is still
		 * retrying an association that failed with these specific
		 * protocol statuses.
		 */
		if (((reason == ASSOC_FAILED) &&
		     (prot_reason_status == 0x11)) ||
		    ((reason == ASSOC_FAILED) && (prot_reason_status == 0x0)
		     && (ar->reconnect_flag == 1))) {
			set_bit(CONNECTED, &ar->flag);
			return;
		}
	}

	if ((reason == NO_NETWORK_AVAIL) && test_bit(WMI_READY, &ar->flag)) {
		ath6kl_wmi_node_free(ar->wmi, bssid);

		/*
		 * In case any other same SSID nodes are present remove it,
		 * since those nodes also not available now.
		 */
		do {
			/*
			 * Find the nodes based on SSID and remove it
			 *
			 * Note: This case will not work out for
			 * Hidden-SSID
			 */
			wmi_ssid_node = ath6kl_wmi_find_ssid_node(ar->wmi,
								  ar->ssid,
								  ar->ssid_len,
								  false,
								  true);

			if (wmi_ssid_node)
				ath6kl_wmi_node_free(ar->wmi,
						     wmi_ssid_node->ni_macaddr);

		} while (wmi_ssid_node);
	}

	/* update connect & link status atomically */
	spin_lock_irqsave(&ar->lock, flags);
	clear_bit(CONNECTED, &ar->flag);
	netif_carrier_off(ar->net_dev);
	spin_unlock_irqrestore(&ar->lock, flags);

	if ((reason != CSERV_DISCONNECT) || (ar->reconnect_flag != 1))
		ar->reconnect_flag = 0;

	if (reason != CSERV_DISCONNECT)
		ar->user_key_ctrl = 0;

	netif_stop_queue(ar->net_dev);
	memset(ar->bssid, 0, sizeof(ar->bssid));
	ar->bss_ch = 0;

	ath6kl_tx_data_cleanup(ar);
}
1270
1271static int ath6kl_open(struct net_device *dev)
1272{
1273 struct ath6kl *ar = ath6kl_priv(dev);
1274 unsigned long flags;
1275
1276 spin_lock_irqsave(&ar->lock, flags);
1277
1278 ar->wlan_state = WLAN_ENABLED;
1279
1280 if (test_bit(CONNECTED, &ar->flag)) {
1281 netif_carrier_on(dev);
1282 netif_wake_queue(dev);
1283 } else
1284 netif_carrier_off(dev);
1285
1286 spin_unlock_irqrestore(&ar->lock, flags);
1287
1288 return 0;
1289}
1290
/*
 * netdev ndo_stop: stop traffic, disconnect from the BSS, disable
 * scanning in the firmware and abort any cfg80211 scan in flight.
 * Returns -EIO if the scan-parameters command cannot be sent.
 */
static int ath6kl_close(struct net_device *dev)
{
	struct ath6kl *ar = ath6kl_priv(dev);

	netif_stop_queue(dev);

	ath6kl_disconnect(ar);

	if (test_bit(WMI_READY, &ar->flag)) {
		/* 0xFFFF foreground scan period effectively disables
		 * firmware-initiated scans while the interface is down */
		if (ath6kl_wmi_scanparams_cmd(ar->wmi, 0xFFFF, 0, 0, 0, 0, 0, 0,
					      0, 0, 0))
			return -EIO;

		ar->wlan_state = WLAN_DISABLED;
	}

	ath6kl_cfg80211_scan_complete_event(ar, -ECANCELED);

	return 0;
}
1311
/* netdev ndo_get_stats: return the stats embedded in the driver context. */
static struct net_device_stats *ath6kl_get_stats(struct net_device *dev)
{
	struct ath6kl *ar = ath6kl_priv(dev);

	return &ar->net_stats;
}
1318
1319static struct net_device_ops ath6kl_netdev_ops = {
1320 .ndo_open = ath6kl_open,
1321 .ndo_stop = ath6kl_close,
1322 .ndo_start_xmit = ath6kl_data_tx,
1323 .ndo_get_stats = ath6kl_get_stats,
1324};
1325
1326void init_netdev(struct net_device *dev)
1327{
1328 dev->netdev_ops = &ath6kl_netdev_ops;
1329 dev->watchdog_timeo = ATH6KL_TX_TIMEOUT;
1330
1331 dev->needed_headroom = ETH_HLEN;
1332 dev->needed_headroom += sizeof(struct ath6kl_llc_snap_hdr) +
1333 sizeof(struct wmi_data_hdr) + HTC_HDR_LENGTH
1334 + WMI_MAX_TX_META_SZ;
1335
1336 return;
1337}
diff --git a/drivers/net/wireless/ath/ath6kl/node.c b/drivers/net/wireless/ath/ath6kl/node.c
new file mode 100644
index 000000000000..b0f9ba2e463c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/node.c
@@ -0,0 +1,238 @@
1/*
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "htc.h"
18#include "wmi.h"
19#include "debug.h"
20
21struct bss *wlan_node_alloc(int wh_size)
22{
23 struct bss *ni;
24
25 ni = kzalloc(sizeof(struct bss), GFP_ATOMIC);
26
27 if ((ni != NULL) && wh_size) {
28 ni->ni_buf = kmalloc(wh_size, GFP_ATOMIC);
29 if (ni->ni_buf == NULL) {
30 kfree(ni);
31 return NULL;
32 }
33 }
34
35 return ni;
36}
37
/* Free a bss node and its attached frame buffer (ni_buf may be NULL). */
void wlan_node_free(struct bss *ni)
{
	kfree(ni->ni_buf);
	kfree(ni);
}
43
/*
 * Initialise a freshly allocated node with @mac_addr and insert it,
 * under the table lock, at the tail of the linear node list and at the
 * head of its MAC-hash bucket.  The node starts with one reference.
 */
void wlan_setup_node(struct ath6kl_node_table *nt, struct bss *ni,
		     const u8 *mac_addr)
{
	int hash;

	memcpy(ni->ni_macaddr, mac_addr, ETH_ALEN);
	hash = ATH6KL_NODE_HASH(mac_addr);
	ni->ni_refcnt = 1;

	/* record creation time and inactivity budget for aging */
	ni->ni_tstamp = jiffies_to_msecs(jiffies);
	ni->ni_actcnt = WLAN_NODE_INACT_CNT;

	spin_lock_bh(&nt->nt_nodelock);

	/* insert at the end of the node list */
	ni->ni_list_next = NULL;
	ni->ni_list_prev = nt->nt_node_last;
	if (nt->nt_node_last != NULL)
		nt->nt_node_last->ni_list_next = ni;

	nt->nt_node_last = ni;
	if (nt->nt_node_first == NULL)
		nt->nt_node_first = ni;

	/* insert into the hash list */
	ni->ni_hash_next = nt->nt_hash[hash];
	if (ni->ni_hash_next != NULL)
		nt->nt_hash[hash]->ni_hash_prev = ni;

	ni->ni_hash_prev = NULL;
	nt->nt_hash[hash] = ni;

	spin_unlock_bh(&nt->nt_nodelock);
}
78
79struct bss *wlan_find_node(struct ath6kl_node_table *nt,
80 const u8 *mac_addr)
81{
82 struct bss *ni, *found_ni = NULL;
83 int hash;
84
85 spin_lock_bh(&nt->nt_nodelock);
86
87 hash = ATH6KL_NODE_HASH(mac_addr);
88 for (ni = nt->nt_hash[hash]; ni; ni = ni->ni_hash_next) {
89 if (memcmp(ni->ni_macaddr, mac_addr, ETH_ALEN) == 0) {
90 ni->ni_refcnt++;
91 found_ni = ni;
92 break;
93 }
94 }
95
96 spin_unlock_bh(&nt->nt_nodelock);
97
98 return found_ni;
99}
100
/*
 * Unlink @ni from both the linear node list and its hash bucket, then
 * free it.  The whole operation runs under the table lock; note that
 * the node is freed unconditionally, regardless of its reference count.
 */
void wlan_node_reclaim(struct ath6kl_node_table *nt, struct bss *ni)
{
	int hash;

	spin_lock_bh(&nt->nt_nodelock);

	if (ni->ni_list_prev == NULL)
		/* fix list head */
		nt->nt_node_first = ni->ni_list_next;
	else
		ni->ni_list_prev->ni_list_next = ni->ni_list_next;

	if (ni->ni_list_next == NULL)
		/* fix list tail */
		nt->nt_node_last = ni->ni_list_prev;
	else
		ni->ni_list_next->ni_list_prev = ni->ni_list_prev;

	if (ni->ni_hash_prev == NULL) {
		/* first in list so fix the list head */
		hash = ATH6KL_NODE_HASH(ni->ni_macaddr);
		nt->nt_hash[hash] = ni->ni_hash_next;
	} else {
		ni->ni_hash_prev->ni_hash_next = ni->ni_hash_next;
	}

	if (ni->ni_hash_next != NULL)
		ni->ni_hash_next->ni_hash_prev = ni->ni_hash_prev;

	wlan_node_free(ni);

	spin_unlock_bh(&nt->nt_nodelock);
}
134
135static void wlan_node_dec_free(struct bss *ni)
136{
137 if ((ni->ni_refcnt--) == 1)
138 wlan_node_free(ni);
139}
140
141void wlan_free_allnodes(struct ath6kl_node_table *nt)
142{
143 struct bss *ni;
144
145 while ((ni = nt->nt_node_first) != NULL)
146 wlan_node_reclaim(nt, ni);
147}
148
/*
 * Invoke @f(arg, node) for every node in the table, under the table
 * lock.  A reference is held around each callback so the node cannot
 * be freed while @f runs; @f must not sleep or take the table lock.
 */
void wlan_iterate_nodes(struct ath6kl_node_table *nt,
			void (*f) (void *arg, struct bss *), void *arg)
{
	struct bss *ni;

	spin_lock_bh(&nt->nt_nodelock);
	for (ni = nt->nt_node_first; ni; ni = ni->ni_list_next) {
		ni->ni_refcnt++;
		(*f) (arg, ni);
		wlan_node_dec_free(ni);
	}
	spin_unlock_bh(&nt->nt_nodelock);
}
162
163void wlan_node_table_init(void *wmi, struct ath6kl_node_table *nt)
164{
165 ath6kl_dbg(ATH6KL_DBG_WLAN_NODE, "node table = 0x%lx\n",
166 (unsigned long)nt);
167
168 memset(nt, 0, sizeof(struct ath6kl_node_table));
169
170 spin_lock_init(&nt->nt_nodelock);
171
172 nt->nt_wmi = wmi;
173 nt->nt_node_age = WLAN_NODE_INACT_TIMEOUT_MSEC;
174}
175
176void wlan_refresh_inactive_nodes(struct ath6kl_node_table *nt)
177{
178 struct bss *bss;
179 u8 my_bssid[ETH_ALEN];
180 u32 now;
181
182 ath6kl_wmi_get_current_bssid(nt->nt_wmi, my_bssid);
183
184 now = jiffies_to_msecs(jiffies);
185 bss = nt->nt_node_first;
186 while (bss != NULL) {
187 /* refresh all nodes except the current bss */
188 if (memcmp(my_bssid, bss->ni_macaddr, sizeof(my_bssid)) != 0) {
189 if (((now - bss->ni_tstamp) > nt->nt_node_age)
190 || --bss->ni_actcnt == 0) {
191 wlan_node_reclaim(nt, bss);
192 }
193 }
194 bss = bss->ni_list_next;
195 }
196}
197
/* Tear down a node table by reclaiming every remaining node. */
void wlan_node_table_cleanup(struct ath6kl_node_table *nt)
{
	wlan_free_allnodes(nt);
}
202
/*
 * Find a node whose SSID IE matches @ssid.  With @match_ssid the SSID
 * alone decides; otherwise the node must also carry an RSN IE
 * (@is_wpa2) or a WPA IE (!@is_wpa2).  On a hit the node's reference
 * count is bumped; release with wlan_node_return().
 *
 * NOTE(review): only the first ssid_len bytes of the IE payload are
 * compared and ie_ssid[1] (the IE length) is not checked against
 * ssid_len, so a shorter @ssid can match a longer network name —
 * presumably acceptable for the callers here, but verify.
 */
struct bss *wlan_find_ssid_node(struct ath6kl_node_table *nt, u8 * ssid,
				u32 ssid_len, bool is_wpa2, bool match_ssid)
{
	struct bss *ni, *found_ni = NULL;
	u8 *ie_ssid;

	spin_lock_bh(&nt->nt_nodelock);

	for (ni = nt->nt_node_first; ni; ni = ni->ni_list_next) {

		/* ie_ssid[0] = element id, [1] = length, [2..] = payload */
		ie_ssid = ni->ni_cie.ie_ssid;

		if ((ie_ssid[1] <= IEEE80211_MAX_SSID_LEN) &&
		    (memcmp(ssid, &ie_ssid[2], ssid_len) == 0)) {

			if (match_ssid ||
			    (is_wpa2 && ni->ni_cie.ie_rsn != NULL) ||
			    (!is_wpa2 && ni->ni_cie.ie_wpa != NULL)) {
				ni->ni_refcnt++;
				found_ni = ni;
				break;
			}
		}
	}

	spin_unlock_bh(&nt->nt_nodelock);

	return found_ni;
}
232
/* Release a reference obtained from wlan_find_node()/wlan_find_ssid_node(). */
void wlan_node_return(struct ath6kl_node_table *nt, struct bss *ni)
{
	spin_lock_bh(&nt->nt_nodelock);
	wlan_node_dec_free(ni);
	spin_unlock_bh(&nt->nt_nodelock);
}
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
new file mode 100644
index 000000000000..b38732aaf41a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -0,0 +1,853 @@
1/*
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/mmc/card.h>
18#include <linux/mmc/mmc.h>
19#include <linux/mmc/host.h>
20#include <linux/mmc/sdio_func.h>
21#include <linux/mmc/sdio_ids.h>
22#include <linux/mmc/sdio.h>
23#include <linux/mmc/sd.h>
24#include "htc_hif.h"
25#include "hif-ops.h"
26#include "target.h"
27#include "debug.h"
28
/* Per-device SDIO HIF state, stored as ar->hif_priv. */
struct ath6kl_sdio {
	/* underlying SDIO function used for all bus traffic */
	struct sdio_func *func;

	/* protects bus_req_freeq */
	spinlock_t lock;

	/* free list */
	struct list_head bus_req_freeq;

	/* available bus requests */
	struct bus_request bus_req[BUS_REQUEST_MAX_NUM];

	struct ath6kl *ar;
	/* bounce buffer for callers whose buffers are not DMA-safe */
	u8 *dma_buffer;

	/* scatter request list head */
	struct list_head scat_req;

	/* protects scat_req */
	spinlock_t scat_lock;
	bool is_disabled;
	atomic_t irq_handling;
	const struct sdio_device_id *id;
	/* worker that flushes wr_asyncq to the bus */
	struct work_struct wr_async_work;
	/* queued asynchronous write requests, protected by wr_async_lock */
	struct list_head wr_asyncq;
	spinlock_t wr_async_lock;
};
54
/* Field values for the SDIO CMD53 (IO_RW_EXTENDED) argument word */
#define CMD53_ARG_READ 0
#define CMD53_ARG_WRITE 1
#define CMD53_ARG_BLOCK_BASIS 1
#define CMD53_ARG_FIXED_ADDRESS 0
#define CMD53_ARG_INCR_ADDRESS 1
60
/* Fetch the SDIO-HIF private data attached to the core driver context. */
static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar)
{
	return ar->hif_priv;
}
65
/*
 * Decide whether a caller-supplied buffer must be bounced through the
 * pre-allocated DMA buffer: most host controllers assume the buffer is
 * word-aligned and DMA'able and will bug-check otherwise (e.g. buffers
 * on the stack — virt_addr_valid() fails for stack memory).
 */
static inline bool buf_needs_bounce(u8 *buf)
{
	return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf);
}
76
/* Fill in the fixed mailbox address/size layout used by the HTC layer. */
static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar)
{
	struct ath6kl_mbox_info *mbox_info = &ar->mbox_info;

	/* EP1 has an extended range */
	mbox_info->htc_addr = HIF_MBOX_BASE_ADDR;
	mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR;
	mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH;
	mbox_info->block_size = HIF_MBOX_BLOCK_SIZE;
	mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR;
	mbox_info->gmbox_sz = HIF_GMBOX_WIDTH;
}
89
90static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func,
91 u8 mode, u8 opcode, u32 addr,
92 u16 blksz)
93{
94 *arg = (((rw & 1) << 31) |
95 ((func & 0x7) << 28) |
96 ((mode & 1) << 27) |
97 ((opcode & 1) << 26) |
98 ((addr & 0x1FFFF) << 9) |
99 (blksz & 0x1FF));
100}
101
102static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
103 unsigned int address,
104 unsigned char val)
105{
106 const u8 func = 0;
107
108 *arg = ((write & 1) << 31) |
109 ((func & 0x7) << 28) |
110 ((raw & 1) << 27) |
111 (1 << 26) |
112 ((address & 0x1FFFF) << 9) |
113 (1 << 8) |
114 (val & 0xFF);
115}
116
/*
 * Write a single byte to a function-0 register via CMD52 and wait
 * synchronously for completion.  Returns 0 or an MMC-layer error code.
 */
static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
					   unsigned int address,
					   unsigned char byte)
{
	struct mmc_command io_cmd;

	memset(&io_cmd, 0, sizeof(io_cmd));
	ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
	io_cmd.opcode = SD_IO_RW_DIRECT;
	io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &io_cmd, 0);
}
130
131static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
132{
133 struct bus_request *bus_req;
134 unsigned long flag;
135
136 spin_lock_irqsave(&ar_sdio->lock, flag);
137
138 if (list_empty(&ar_sdio->bus_req_freeq)) {
139 spin_unlock_irqrestore(&ar_sdio->lock, flag);
140 return NULL;
141 }
142
143 bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
144 struct bus_request, list);
145 list_del(&bus_req->list);
146
147 spin_unlock_irqrestore(&ar_sdio->lock, flag);
148 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: bus request 0x%p\n", __func__, bus_req);
149
150 return bus_req;
151}
152
/* Return a bus request to the free pool. */
static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
				     struct bus_request *bus_req)
{
	unsigned long flag;

	ath6kl_dbg(ATH6KL_DBG_TRC, "%s: bus request 0x%p\n", __func__, bus_req);

	spin_lock_irqsave(&ar_sdio->lock, flag);
	list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
	spin_unlock_irqrestore(&ar_sdio->lock, flag);
}
164
/*
 * Translate a HIF scatter request into an mmc_data descriptor: set
 * block size/count and transfer direction, then build the SG table
 * from the request's buffer list.  scat_req->len is expected to be a
 * multiple of HIF_MBOX_BLOCK_SIZE.
 */
static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
					struct hif_scatter_req_priv *s_req_priv,
					struct mmc_data *data)
{
	struct scatterlist *sg;
	int i;

	data->blksz = HIF_MBOX_BLOCK_SIZE;
	data->blocks = scat_req->len / HIF_MBOX_BLOCK_SIZE;

	ath6kl_dbg(ATH6KL_DBG_SCATTER,
		   "hif-scatter: (%s) addr: 0x%X, (block len: %d, block count: %d) , (tot:%d,sg:%d)\n",
		   (scat_req->req & HIF_WRITE) ? "WR" : "RD", scat_req->addr,
		   data->blksz, data->blocks, scat_req->len,
		   scat_req->scat_entries);

	data->flags = (scat_req->req & HIF_WRITE) ? MMC_DATA_WRITE :
						    MMC_DATA_READ;

	/* fill SG entries */
	sg = s_req_priv->sgentries;
	sg_init_table(sg, scat_req->scat_entries);

	/* assemble SG list */
	for (i = 0; i < scat_req->scat_entries; i++, sg++) {
		if ((unsigned long)scat_req->scat_list[i].buf & 0x3)
			/*
			 * Some scatter engines can handle unaligned
			 * buffers, print this as informational only.
			 */
			ath6kl_dbg(ATH6KL_DBG_SCATTER,
				   "(%s) scatter buffer is unaligned 0x%p\n",
				   scat_req->req & HIF_WRITE ? "WR" : "RD",
				   scat_req->scat_list[i].buf);

		ath6kl_dbg(ATH6KL_DBG_SCATTER, "%d: addr:0x%p, len:%d\n",
			   i, scat_req->scat_list[i].buf,
			   scat_req->scat_list[i].len);

		sg_set_buf(sg, scat_req->scat_list[i].buf,
			   scat_req->scat_list[i].len);
	}

	/* set scatter-gather table for request */
	data->sg = s_req_priv->sgentries;
	data->sg_len = scat_req->scat_entries;
}
212
/*
 * Issue one scatter request to the card as a single CMD53 transfer and
 * wait for it to finish.  The completion status is recorded in the
 * scatter request; for asynchronous requests the completion callback
 * is invoked before returning.
 *
 * NOTE(review): the error message says "write" even when the failed
 * request was a read — presumably cosmetic, but worth confirming.
 */
static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
			       struct bus_request *req)
{
	struct mmc_request mmc_req;
	struct mmc_command cmd;
	struct mmc_data data;
	struct hif_scatter_req *scat_req;
	u8 opcode, rw;
	int status;

	scat_req = req->scat_req;

	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&cmd, 0, sizeof(struct mmc_command));
	memset(&data, 0, sizeof(struct mmc_data));

	ath6kl_sdio_setup_scat_data(scat_req, scat_req->req_priv, &data);

	opcode = (scat_req->req & HIF_FIXED_ADDRESS) ?
		  CMD53_ARG_FIXED_ADDRESS : CMD53_ARG_INCR_ADDRESS;

	rw = (scat_req->req & HIF_WRITE) ? CMD53_ARG_WRITE : CMD53_ARG_READ;

	/* Fixup the address so that the last byte will fall on MBOX EOM */
	if (scat_req->req & HIF_WRITE) {
		if (scat_req->addr == HIF_MBOX_BASE_ADDR)
			scat_req->addr += HIF_MBOX_WIDTH - scat_req->len;
		else
			/* Uses extended address range */
			scat_req->addr += HIF_MBOX0_EXT_WIDTH - scat_req->len;
	}

	/* set command argument */
	ath6kl_sdio_set_cmd53_arg(&cmd.arg, rw, ar_sdio->func->num,
				  CMD53_ARG_BLOCK_BASIS, opcode, scat_req->addr,
				  data.blocks);

	cmd.opcode = SD_IO_RW_EXTENDED;
	cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;

	mmc_req.cmd = &cmd;
	mmc_req.data = &data;

	mmc_set_data_timeout(&data, ar_sdio->func->card);
	/* synchronous call to process request */
	mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);

	status = cmd.error ? cmd.error : data.error;
	scat_req->status = status;

	if (scat_req->status)
		ath6kl_err("Scatter write request failed:%d\n",
			   scat_req->status);

	if (scat_req->req & HIF_ASYNCHRONOUS)
		scat_req->complete(scat_req);

	return status;
}
272
273
/*
 * HIF callback to issue a read-write scatter request.  Synchronous
 * requests are executed immediately under the claimed host; async
 * requests are queued on wr_asyncq and handed to the write worker.
 * Returns 0 for a queued async request (its status is delivered via
 * the completion callback), or the transfer status for sync requests.
 */
static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
					struct hif_scatter_req *scat_req)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	struct hif_scatter_req_priv *req_priv = scat_req->req_priv;
	u32 request = scat_req->req;
	int status = 0;
	unsigned long flags;

	if (!scat_req->len)
		return -EINVAL;

	ath6kl_dbg(ATH6KL_DBG_SCATTER,
		   "hif-scatter: total len: %d scatter entries: %d\n",
		   scat_req->len, scat_req->scat_entries);

	if (request & HIF_SYNCHRONOUS) {
		sdio_claim_host(ar_sdio->func);
		status = ath6kl_sdio_scat_rw(ar_sdio, req_priv->busrequest);
		sdio_release_host(ar_sdio->func);
	} else {
		spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
		list_add_tail(&req_priv->busrequest->list, &ar_sdio->wr_asyncq);
		spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
		queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
	}

	return status;
}
304
/*
 * Clean up scatter support: drain the scatter-request free list,
 * returning each attached bus request to its pool and freeing the
 * request, its DMA buffer and its private blob.  The lock is dropped
 * around the per-entry teardown and retaken to fetch the next entry.
 */
static void ath6kl_sdio_cleanup_scat_resource(struct ath6kl_sdio *ar_sdio)
{
	struct hif_scatter_req *s_req, *tmp_req;
	unsigned long flag;

	/* empty the free list */
	spin_lock_irqsave(&ar_sdio->scat_lock, flag);
	list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
		list_del(&s_req->list);
		spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);

		if (s_req->req_priv && s_req->req_priv->busrequest)
			ath6kl_sdio_free_bus_req(ar_sdio,
						 s_req->req_priv->busrequest);
		kfree(s_req->virt_dma_buf);
		kfree(s_req->req_priv);
		kfree(s_req);

		spin_lock_irqsave(&ar_sdio->scat_lock, flag);
	}
	spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
}
328
/*
 * Set up HIF scatter resources: verify the host controller can take
 * MAX_SCATTER_ENTRIES_PER_REQ segments, then pre-allocate
 * MAX_SCATTER_REQUESTS scatter requests (each with a private blob and
 * a bus request) and add them to the pool.  On failure everything
 * allocated so far is torn down and -ENOMEM is returned.
 */
static int ath6kl_sdio_setup_scat_resource(struct ath6kl_sdio *ar_sdio,
					   struct hif_dev_scat_sup_info *pinfo)
{
	struct hif_scatter_req *s_req;
	struct bus_request *bus_req;
	int i, scat_req_sz, scat_list_sz;

	/* check if host supports scatter and it meets our requirements */
	if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
		ath6kl_err("hif-scatter: host only supports scatter of : %d entries, need: %d\n",
			   ar_sdio->func->card->host->max_segs,
			   MAX_SCATTER_ENTRIES_PER_REQ);
		return -EINVAL;
	}

	ath6kl_dbg(ATH6KL_DBG_ANY,
		   "hif-scatter enabled: max scatter req : %d entries: %d\n",
		   MAX_SCATTER_REQUESTS, MAX_SCATTER_ENTRIES_PER_REQ);

	/* the struct ends in a one-element scatter list, hence the -1 */
	scat_list_sz = (MAX_SCATTER_ENTRIES_PER_REQ - 1) *
		       sizeof(struct hif_scatter_item);
	scat_req_sz = sizeof(*s_req) + scat_list_sz;

	for (i = 0; i < MAX_SCATTER_REQUESTS; i++) {
		/* allocate the scatter request */
		s_req = kzalloc(scat_req_sz, GFP_KERNEL);
		if (!s_req)
			goto fail_setup_scat;

		/* allocate the private request blob */
		s_req->req_priv = kzalloc(sizeof(*s_req->req_priv), GFP_KERNEL);

		if (!s_req->req_priv) {
			kfree(s_req);
			goto fail_setup_scat;
		}

		/* allocate a bus request for this scatter request */
		bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
		if (!bus_req) {
			kfree(s_req->req_priv);
			kfree(s_req);
			goto fail_setup_scat;
		}

		/* assign the scatter request to this bus request */
		bus_req->scat_req = s_req;
		s_req->req_priv->busrequest = bus_req;
		/* add it to the scatter pool */
		hif_scatter_req_add(ar_sdio->ar, s_req);
	}

	/* set scatter function pointers */
	pinfo->rw_scat_func = ath6kl_sdio_async_rw_scatter;
	pinfo->max_scat_entries = MAX_SCATTER_ENTRIES_PER_REQ;
	pinfo->max_xfer_szper_scatreq = MAX_SCATTER_REQ_TRANSFER_SIZE;

	return 0;

fail_setup_scat:
	ath6kl_err("hif-scatter: failed to alloc scatter resources !\n");
	ath6kl_sdio_cleanup_scat_resource(ar_sdio);

	return -ENOMEM;
}
395
/*
 * Synchronous register/mailbox transfer.
 *
 * @request is a bitmask: HIF_WRITE vs read, HIF_FIXED_ADDRESS for a
 * FIFO-style fixed register address, HIF_BLOCK_BASIS to round the
 * length down to the mailbox block size.  Returns 0 or a negative
 * errno from the SDIO core.
 */
static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
				       u32 len, u32 request)
{
	struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
	u8 *tbuf = NULL;
	int ret;
	bool bounced = false;

	if (request & HIF_BLOCK_BASIS)
		len = round_down(len, HIF_MBOX_BLOCK_SIZE);

	/*
	 * Callers may pass buffers that are not DMA-able (e.g. stack
	 * memory); in that case stage the transfer through the driver's
	 * pre-allocated bounce buffer.
	 */
	if (buf_needs_bounce(buf)) {
		if (!ar_sdio->dma_buffer)
			return -ENOMEM;
		tbuf = ar_sdio->dma_buffer;
		memcpy(tbuf, buf, len);
		bounced = true;
	} else
		tbuf = buf;

	sdio_claim_host(ar_sdio->func);
	if (request & HIF_WRITE) {
		/*
		 * For mailbox writes, offset the address so the transfer
		 * ends exactly at the top of the mailbox window --
		 * presumably the last byte is what signals end-of-message
		 * to the target; confirm against the HIF spec.
		 */
		if (addr >= HIF_MBOX_BASE_ADDR &&
		    addr <= HIF_MBOX_END_ADDR)
			addr += (HIF_MBOX_WIDTH - len);

		if (addr == HIF_MBOX0_EXT_BASE_ADDR)
			addr += HIF_MBOX0_EXT_WIDTH - len;

		if (request & HIF_FIXED_ADDRESS)
			ret = sdio_writesb(ar_sdio->func, addr, tbuf, len);
		else
			ret = sdio_memcpy_toio(ar_sdio->func, addr, tbuf, len);
	} else {
		if (request & HIF_FIXED_ADDRESS)
			ret = sdio_readsb(ar_sdio->func, tbuf, addr, len);
		else
			ret = sdio_memcpy_fromio(ar_sdio->func, tbuf,
						 addr, len);
		/* copy read data back out of the bounce buffer */
		if (bounced)
			memcpy(buf, tbuf, len);
	}
	sdio_release_host(ar_sdio->func);

	return ret;
}
442
443static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
444 struct bus_request *req)
445{
446 if (req->scat_req)
447 ath6kl_sdio_scat_rw(ar_sdio, req);
448 else {
449 void *context;
450 int status;
451
452 status = ath6kl_sdio_read_write_sync(ar_sdio->ar, req->address,
453 req->buffer, req->length,
454 req->request);
455 context = req->packet;
456 ath6kl_sdio_free_bus_req(ar_sdio, req);
457 ath6kldev_rw_comp_handler(context, status);
458 }
459}
460
/*
 * Workqueue handler: drain the asynchronous write queue.  Runs with
 * the SDIO host claimed for the whole drain.
 */
static void ath6kl_sdio_write_async_work(struct work_struct *work)
{
	struct ath6kl_sdio *ar_sdio;
	unsigned long flags;
	struct bus_request *req, *tmp_req;

	ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);
	sdio_claim_host(ar_sdio->func);

	spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
	list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
		list_del(&req->list);
		/*
		 * Drop the lock while performing the (sleeping) SDIO
		 * transfer; the entry is already unlinked so other
		 * contexts can keep queueing in the meantime.
		 */
		spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
		__ath6kl_sdio_write_async(ar_sdio, req);
		spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
	}
	spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);

	sdio_release_host(ar_sdio->func);
}
481
/*
 * SDIO function interrupt handler.  Called by the MMC core with the
 * host already claimed; irq_handling is the flag ath6kl_sdio_irq_disable()
 * polls to know when it is safe to release the irq.
 */
static void ath6kl_sdio_irq_handler(struct sdio_func *func)
{
	int status;
	struct ath6kl_sdio *ar_sdio;

	ar_sdio = sdio_get_drvdata(func);
	atomic_set(&ar_sdio->irq_handling, 1);

	/*
	 * Release the host during interrups so we can pick it back up when
	 * we process commands.
	 */
	sdio_release_host(ar_sdio->func);

	status = ath6kldev_intr_bh_handler(ar_sdio->ar);
	/* MMC core expects the host claimed again on return */
	sdio_claim_host(ar_sdio->func);
	atomic_set(&ar_sdio->irq_handling, 0);
	/* -ECANCELED is the only status treated as a normal outcome */
	WARN_ON(status && status != -ECANCELED);
}
501
502static int ath6kl_sdio_power_on(struct ath6kl_sdio *ar_sdio)
503{
504 struct sdio_func *func = ar_sdio->func;
505 int ret = 0;
506
507 if (!ar_sdio->is_disabled)
508 return 0;
509
510 sdio_claim_host(func);
511
512 ret = sdio_enable_func(func);
513 if (ret) {
514 ath6kl_err("Unable to enable sdio func: %d)\n", ret);
515 sdio_release_host(func);
516 return ret;
517 }
518
519 sdio_release_host(func);
520
521 /*
522 * Wait for hardware to initialise. It should take a lot less than
523 * 10 ms but let's be conservative here.
524 */
525 msleep(10);
526
527 ar_sdio->is_disabled = false;
528
529 return ret;
530}
531
532static int ath6kl_sdio_power_off(struct ath6kl_sdio *ar_sdio)
533{
534 int ret;
535
536 if (ar_sdio->is_disabled)
537 return 0;
538
539 /* Disable the card */
540 sdio_claim_host(ar_sdio->func);
541 ret = sdio_disable_func(ar_sdio->func);
542 sdio_release_host(ar_sdio->func);
543
544 if (ret)
545 return ret;
546
547 ar_sdio->is_disabled = true;
548
549 return ret;
550}
551
552static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
553 u32 length, u32 request,
554 struct htc_packet *packet)
555{
556 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
557 struct bus_request *bus_req;
558 unsigned long flags;
559
560 bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
561
562 if (!bus_req)
563 return -ENOMEM;
564
565 bus_req->address = address;
566 bus_req->buffer = buffer;
567 bus_req->length = length;
568 bus_req->request = request;
569 bus_req->packet = packet;
570
571 spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
572 list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
573 spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
574 queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
575
576 return 0;
577}
578
579static void ath6kl_sdio_irq_enable(struct ath6kl *ar)
580{
581 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
582 int ret;
583
584 sdio_claim_host(ar_sdio->func);
585
586 /* Register the isr */
587 ret = sdio_claim_irq(ar_sdio->func, ath6kl_sdio_irq_handler);
588 if (ret)
589 ath6kl_err("Failed to claim sdio irq: %d\n", ret);
590
591 sdio_release_host(ar_sdio->func);
592}
593
594static void ath6kl_sdio_irq_disable(struct ath6kl *ar)
595{
596 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
597 int ret;
598
599 sdio_claim_host(ar_sdio->func);
600
601 /* Mask our function IRQ */
602 while (atomic_read(&ar_sdio->irq_handling)) {
603 sdio_release_host(ar_sdio->func);
604 schedule_timeout(HZ / 10);
605 sdio_claim_host(ar_sdio->func);
606 }
607
608 ret = sdio_release_irq(ar_sdio->func);
609 if (ret)
610 ath6kl_err("Failed to release sdio irq: %d\n", ret);
611
612 sdio_release_host(ar_sdio->func);
613}
614
615static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
616{
617 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
618 struct hif_scatter_req *node = NULL;
619 unsigned long flag;
620
621 spin_lock_irqsave(&ar_sdio->scat_lock, flag);
622
623 if (!list_empty(&ar_sdio->scat_req)) {
624 node = list_first_entry(&ar_sdio->scat_req,
625 struct hif_scatter_req, list);
626 list_del(&node->list);
627 }
628
629 spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
630
631 return node;
632}
633
634static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
635 struct hif_scatter_req *s_req)
636{
637 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
638 unsigned long flag;
639
640 spin_lock_irqsave(&ar_sdio->scat_lock, flag);
641
642 list_add_tail(&s_req->list, &ar_sdio->scat_req);
643
644 spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
645
646}
647
/*
 * HIF hook: allocate the scatter-gather request pool and report the
 * scatter capabilities through @info.
 */
static int ath6kl_sdio_enable_scatter(struct ath6kl *ar,
				      struct hif_dev_scat_sup_info *info)
{
	return ath6kl_sdio_setup_scat_resource(ath6kl_sdio_priv(ar), info);
}
658
/* HIF hook: release every scatter resource owned by this instance. */
static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
{
	ath6kl_sdio_cleanup_scat_resource(ath6kl_sdio_priv(ar));
}
665
/* HIF operations table exported to the ath6kl core via ar->hif_ops */
static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
	.read_write_sync = ath6kl_sdio_read_write_sync,
	.write_async = ath6kl_sdio_write_async,
	.irq_enable = ath6kl_sdio_irq_enable,
	.irq_disable = ath6kl_sdio_irq_disable,
	.scatter_req_get = ath6kl_sdio_scatter_req_get,
	.scatter_req_add = ath6kl_sdio_scatter_req_add,
	.enable_scatter = ath6kl_sdio_enable_scatter,
	.cleanup_scatter = ath6kl_sdio_cleanup_scatter,
};
676
677static int ath6kl_sdio_probe(struct sdio_func *func,
678 const struct sdio_device_id *id)
679{
680 int ret;
681 struct ath6kl_sdio *ar_sdio;
682 struct ath6kl *ar;
683 int count;
684
685 ath6kl_dbg(ATH6KL_DBG_TRC,
686 "%s: func: 0x%X, vendor id: 0x%X, dev id: 0x%X, block size: 0x%X/0x%X\n",
687 __func__, func->num, func->vendor,
688 func->device, func->max_blksize, func->cur_blksize);
689
690 ar_sdio = kzalloc(sizeof(struct ath6kl_sdio), GFP_KERNEL);
691 if (!ar_sdio)
692 return -ENOMEM;
693
694 ar_sdio->dma_buffer = kzalloc(HIF_DMA_BUFFER_SIZE, GFP_KERNEL);
695 if (!ar_sdio->dma_buffer) {
696 ret = -ENOMEM;
697 goto err_hif;
698 }
699
700 ar_sdio->func = func;
701 sdio_set_drvdata(func, ar_sdio);
702
703 ar_sdio->id = id;
704 ar_sdio->is_disabled = true;
705
706 spin_lock_init(&ar_sdio->lock);
707 spin_lock_init(&ar_sdio->scat_lock);
708 spin_lock_init(&ar_sdio->wr_async_lock);
709
710 INIT_LIST_HEAD(&ar_sdio->scat_req);
711 INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
712 INIT_LIST_HEAD(&ar_sdio->wr_asyncq);
713
714 INIT_WORK(&ar_sdio->wr_async_work, ath6kl_sdio_write_async_work);
715
716 for (count = 0; count < BUS_REQUEST_MAX_NUM; count++)
717 ath6kl_sdio_free_bus_req(ar_sdio, &ar_sdio->bus_req[count]);
718
719 ar = ath6kl_core_alloc(&ar_sdio->func->dev);
720 if (!ar) {
721 ath6kl_err("Failed to alloc ath6kl core\n");
722 ret = -ENOMEM;
723 goto err_dma;
724 }
725
726 ar_sdio->ar = ar;
727 ar->hif_priv = ar_sdio;
728 ar->hif_ops = &ath6kl_sdio_ops;
729
730 ath6kl_sdio_set_mbox_info(ar);
731
732 sdio_claim_host(func);
733
734 if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
735 MANUFACTURER_ID_AR6003_BASE) {
736 /* enable 4-bit ASYNC interrupt on AR6003 or later */
737 ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
738 CCCR_SDIO_IRQ_MODE_REG,
739 SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
740 if (ret) {
741 ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
742 ret);
743 sdio_release_host(func);
744 goto err_dma;
745 }
746
747 ath6kl_dbg(ATH6KL_DBG_TRC, "4-bit async irq mode enabled\n");
748 }
749
750 /* give us some time to enable, in ms */
751 func->enable_timeout = 100;
752
753 sdio_release_host(func);
754
755 ret = ath6kl_sdio_power_on(ar_sdio);
756 if (ret)
757 goto err_dma;
758
759 sdio_claim_host(func);
760
761 ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
762 if (ret) {
763 ath6kl_err("Set sdio block size %d failed: %d)\n",
764 HIF_MBOX_BLOCK_SIZE, ret);
765 sdio_release_host(func);
766 goto err_off;
767 }
768
769 sdio_release_host(func);
770
771 ret = ath6kl_core_init(ar);
772 if (ret) {
773 ath6kl_err("Failed to init ath6kl core\n");
774 goto err_off;
775 }
776
777 return ret;
778
779err_off:
780 ath6kl_sdio_power_off(ar_sdio);
781err_dma:
782 kfree(ar_sdio->dma_buffer);
783err_hif:
784 kfree(ar_sdio);
785
786 return ret;
787}
788
/*
 * SDIO remove: stop traffic, flush the async writer, tear the core
 * down, power the function off and free driver state.
 */
static void ath6kl_sdio_remove(struct sdio_func *func)
{
	struct ath6kl_sdio *ar_sdio;

	ar_sdio = sdio_get_drvdata(func);

	/* stop tx/rx before flushing the worker so no new work is queued */
	ath6kl_stop_txrx(ar_sdio->ar);
	cancel_work_sync(&ar_sdio->wr_async_work);

	ath6kl_unavail_ev(ar_sdio->ar);

	ath6kl_sdio_power_off(ar_sdio);

	kfree(ar_sdio->dma_buffer);
	kfree(ar_sdio);
}
805
/* SDIO ids this driver binds to: AR6003 base part and its sibling */
static const struct sdio_device_id ath6kl_sdio_devices[] = {
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))},
	{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))},
	{},
};

MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);
813
/* glue between the MMC core and this driver's probe/remove */
static struct sdio_driver ath6kl_sdio_driver = {
	.name = "ath6kl_sdio",
	.id_table = ath6kl_sdio_devices,
	.probe = ath6kl_sdio_probe,
	.remove = ath6kl_sdio_remove,
};
820
821static int __init ath6kl_sdio_init(void)
822{
823 int ret;
824
825 ret = sdio_register_driver(&ath6kl_sdio_driver);
826 if (ret)
827 ath6kl_err("sdio driver registration failed: %d\n", ret);
828
829 return ret;
830}
831
/* module exit point: unregister from the SDIO bus */
static void __exit ath6kl_sdio_exit(void)
{
	sdio_unregister_driver(&ath6kl_sdio_driver);
}
836
837module_init(ath6kl_sdio_init);
838module_exit(ath6kl_sdio_exit);
839
840MODULE_AUTHOR("Atheros Communications, Inc.");
841MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
842MODULE_LICENSE("Dual BSD/GPL");
843
844MODULE_FIRMWARE(AR6003_REV2_OTP_FILE);
845MODULE_FIRMWARE(AR6003_REV2_FIRMWARE_FILE);
846MODULE_FIRMWARE(AR6003_REV2_PATCH_FILE);
847MODULE_FIRMWARE(AR6003_REV2_BOARD_DATA_FILE);
848MODULE_FIRMWARE(AR6003_REV2_DEFAULT_BOARD_DATA_FILE);
849MODULE_FIRMWARE(AR6003_REV3_OTP_FILE);
850MODULE_FIRMWARE(AR6003_REV3_FIRMWARE_FILE);
851MODULE_FIRMWARE(AR6003_REV3_PATCH_FILE);
852MODULE_FIRMWARE(AR6003_REV3_BOARD_DATA_FILE);
853MODULE_FIRMWARE(AR6003_REV3_DEFAULT_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath6kl/target.h b/drivers/net/wireless/ath/ath6kl/target.h
new file mode 100644
index 000000000000..519a013c9991
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/target.h
@@ -0,0 +1,331 @@
1/*
2 * Copyright (c) 2004-2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef TARGET_H
18#define TARGET_H
19
/* board-data blob sizes in target RAM, in bytes */
#define AR6003_BOARD_DATA_SZ		1024
#define AR6003_BOARD_EXT_DATA_SZ	768

/*
 * Target register map as seen through the host interface.
 * Each FIELD define is a bit mask; the matching FIELD_S define is its
 * shift, consumed by the SM()/MS() pack/unpack macros below.
 */
#define RESET_CONTROL_ADDRESS		0x00000000
#define RESET_CONTROL_COLD_RST		0x00000100
#define RESET_CONTROL_MBOX_RST		0x00000004

#define CPU_CLOCK_STANDARD_S		0
#define CPU_CLOCK_STANDARD		0x00000003
#define CPU_CLOCK_ADDRESS		0x00000020

#define CLOCK_CONTROL_ADDRESS		0x00000028
#define CLOCK_CONTROL_LF_CLK32_S	2
#define CLOCK_CONTROL_LF_CLK32		0x00000004

#define SYSTEM_SLEEP_ADDRESS		0x000000c4
#define SYSTEM_SLEEP_DISABLE_S		0
#define SYSTEM_SLEEP_DISABLE		0x00000001

#define LPO_CAL_ADDRESS			0x000000e0
#define LPO_CAL_ENABLE_S		20
#define LPO_CAL_ENABLE			0x00100000

#define GPIO_PIN10_ADDRESS		0x00000050
#define GPIO_PIN11_ADDRESS		0x00000054
#define GPIO_PIN12_ADDRESS		0x00000058
#define GPIO_PIN13_ADDRESS		0x0000005c

/* host-side interrupt status/enable registers (mailbox window) */
#define HOST_INT_STATUS_ADDRESS		0x00000400
#define HOST_INT_STATUS_ERROR_S		7
#define HOST_INT_STATUS_ERROR		0x00000080

#define HOST_INT_STATUS_CPU_S		6
#define HOST_INT_STATUS_CPU		0x00000040

#define HOST_INT_STATUS_COUNTER_S	4
#define HOST_INT_STATUS_COUNTER		0x00000010

#define CPU_INT_STATUS_ADDRESS		0x00000401

#define ERROR_INT_STATUS_ADDRESS	0x00000402
#define ERROR_INT_STATUS_WAKEUP_S	2
#define ERROR_INT_STATUS_WAKEUP		0x00000004

#define ERROR_INT_STATUS_RX_UNDERFLOW_S	1
#define ERROR_INT_STATUS_RX_UNDERFLOW	0x00000002

#define ERROR_INT_STATUS_TX_OVERFLOW_S	0
#define ERROR_INT_STATUS_TX_OVERFLOW	0x00000001

#define COUNTER_INT_STATUS_ADDRESS	0x00000403
#define COUNTER_INT_STATUS_COUNTER_S	0
#define COUNTER_INT_STATUS_COUNTER	0x000000ff

#define RX_LOOKAHEAD_VALID_ADDRESS	0x00000405

#define INT_STATUS_ENABLE_ADDRESS	0x00000418
#define INT_STATUS_ENABLE_ERROR_S	7
#define INT_STATUS_ENABLE_ERROR		0x00000080

#define INT_STATUS_ENABLE_CPU_S		6
#define INT_STATUS_ENABLE_CPU		0x00000040

#define INT_STATUS_ENABLE_INT_S		5
#define INT_STATUS_ENABLE_INT		0x00000020
#define INT_STATUS_ENABLE_COUNTER_S	4
#define INT_STATUS_ENABLE_COUNTER	0x00000010

#define INT_STATUS_ENABLE_MBOX_DATA_S	0
#define INT_STATUS_ENABLE_MBOX_DATA	0x0000000f

#define CPU_INT_STATUS_ENABLE_ADDRESS	0x00000419
#define CPU_INT_STATUS_ENABLE_BIT_S	0
#define CPU_INT_STATUS_ENABLE_BIT	0x000000ff

#define ERROR_STATUS_ENABLE_ADDRESS	0x0000041a
#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_S	1
#define ERROR_STATUS_ENABLE_RX_UNDERFLOW	0x00000002

#define ERROR_STATUS_ENABLE_TX_OVERFLOW_S	0
#define ERROR_STATUS_ENABLE_TX_OVERFLOW		0x00000001

#define COUNTER_INT_STATUS_ENABLE_ADDRESS	0x0000041b
#define COUNTER_INT_STATUS_ENABLE_BIT_S		0
#define COUNTER_INT_STATUS_ENABLE_BIT		0x000000ff

#define COUNT_ADDRESS			0x00000420

#define COUNT_DEC_ADDRESS		0x00000440

/* diagnostic window: indirect access to arbitrary target addresses */
#define WINDOW_DATA_ADDRESS		0x00000474
#define WINDOW_WRITE_ADDR_ADDRESS	0x00000478
#define WINDOW_READ_ADDR_ADDRESS	0x0000047c
#define CPU_DBG_SEL_ADDRESS		0x00000483
#define CPU_DBG_ADDRESS			0x00000484

#define LOCAL_SCRATCH_ADDRESS		0x000000c0
#define ATH6KL_OPTION_SLEEP_DISABLE	0x08

#define RTC_BASE_ADDRESS		0x00004000
#define GPIO_BASE_ADDRESS		0x00014000
#define MBOX_BASE_ADDRESS		0x00018000
#define ANALOG_INTF_BASE_ADDRESS	0x0001c000

/* real name of the register is unknown */
#define ATH6KL_ANALOG_PLL_REGISTER	(ANALOG_INTF_BASE_ADDRESS + 0x284)

/* pack value v into field f / extract field f from value v */
#define SM(f, v)	(((v) << f##_S) & f)
#define MS(f, v)	(((v) & f) >> f##_S)
130/*
131 * xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the
132 * host_interest structure.
133 *
134 * Host Interest is shared between Host and Target in order to coordinate
135 * between the two, and is intended to remain constant (with additions only
136 * at the end).
137 */
138#define ATH6KL_HI_START_ADDR 0x00540600
139
140/*
141 * These are items that the Host may need to access
142 * via BMI or via the Diagnostic Window. The position
143 * of items in this structure must remain constant.
144 * across firmware revisions!
145 *
146 * Types for each item must be fixed size across target and host platforms.
147 * The structure is used only to calculate offset for each register with
148 * HI_ITEM() macro, no values are stored to it.
149 *
150 * More items may be added at the end.
151 */
152struct host_interest {
153 /*
154 * Pointer to application-defined area, if any.
155 * Set by Target application during startup.
156 */
157 u32 hi_app_host_interest; /* 0x00 */
158
159 /* Pointer to register dump area, valid after Target crash. */
160 u32 hi_failure_state; /* 0x04 */
161
162 /* Pointer to debug logging header */
163 u32 hi_dbglog_hdr; /* 0x08 */
164
165 u32 hi_unused1; /* 0x0c */
166
167 /*
168 * General-purpose flag bits, similar to ATH6KL_OPTION_* flags.
169 * Can be used by application rather than by OS.
170 */
171 u32 hi_option_flag; /* 0x10 */
172
173 /*
174 * Boolean that determines whether or not to
175 * display messages on the serial port.
176 */
177 u32 hi_serial_enable; /* 0x14 */
178
179 /* Start address of DataSet index, if any */
180 u32 hi_dset_list_head; /* 0x18 */
181
182 /* Override Target application start address */
183 u32 hi_app_start; /* 0x1c */
184
185 /* Clock and voltage tuning */
186 u32 hi_skip_clock_init; /* 0x20 */
187 u32 hi_core_clock_setting; /* 0x24 */
188 u32 hi_cpu_clock_setting; /* 0x28 */
189 u32 hi_system_sleep_setting; /* 0x2c */
190 u32 hi_xtal_control_setting; /* 0x30 */
191 u32 hi_pll_ctrl_setting_24ghz; /* 0x34 */
192 u32 hi_pll_ctrl_setting_5ghz; /* 0x38 */
193 u32 hi_ref_voltage_trim_setting; /* 0x3c */
194 u32 hi_clock_info; /* 0x40 */
195
196 /*
197 * Flash configuration overrides, used only
198 * when firmware is not executing from flash.
199 * (When using flash, modify the global variables
200 * with equivalent names.)
201 */
202 u32 hi_bank0_addr_value; /* 0x44 */
203 u32 hi_bank0_read_value; /* 0x48 */
204 u32 hi_bank0_write_value; /* 0x4c */
205 u32 hi_bank0_config_value; /* 0x50 */
206
207 /* Pointer to Board Data */
208 u32 hi_board_data; /* 0x54 */
209 u32 hi_board_data_initialized; /* 0x58 */
210
211 u32 hi_dset_ram_index_tbl; /* 0x5c */
212
213 u32 hi_desired_baud_rate; /* 0x60 */
214 u32 hi_dbglog_config; /* 0x64 */
215 u32 hi_end_ram_reserve_sz; /* 0x68 */
216 u32 hi_mbox_io_block_sz; /* 0x6c */
217
218 u32 hi_num_bpatch_streams; /* 0x70 -- unused */
219 u32 hi_mbox_isr_yield_limit; /* 0x74 */
220
221 u32 hi_refclk_hz; /* 0x78 */
222 u32 hi_ext_clk_detected; /* 0x7c */
223 u32 hi_dbg_uart_txpin; /* 0x80 */
224 u32 hi_dbg_uart_rxpin; /* 0x84 */
225 u32 hi_hci_uart_baud; /* 0x88 */
226 u32 hi_hci_uart_pin_assignments; /* 0x8C */
227 /*
228 * NOTE: byte [0] = tx pin, [1] = rx pin, [2] = rts pin, [3] = cts
229 * pin
230 */
231 u32 hi_hci_uart_baud_scale_val; /* 0x90 */
232 u32 hi_hci_uart_baud_step_val; /* 0x94 */
233
234 u32 hi_allocram_start; /* 0x98 */
235 u32 hi_allocram_sz; /* 0x9c */
236 u32 hi_hci_bridge_flags; /* 0xa0 */
237 u32 hi_hci_uart_support_pins; /* 0xa4 */
238 /*
239 * NOTE: byte [0] = RESET pin (bit 7 is polarity),
240 * bytes[1]..bytes[3] are for future use
241 */
242 u32 hi_hci_uart_pwr_mgmt_params; /* 0xa8 */
243 /*
244 * 0xa8 - [1]: 0 = UART FC active low, 1 = UART FC active high
245 * [31:16]: wakeup timeout in ms
246 */
247
248 /* Pointer to extended board data */
249 u32 hi_board_ext_data; /* 0xac */
250 u32 hi_board_ext_data_config; /* 0xb0 */
251
252 /*
253 * Bit [0] : valid
254 * Bit[31:16: size
255 */
256 /*
257 * hi_reset_flag is used to do some stuff when target reset.
258 * such as restore app_start after warm reset or
259 * preserve host Interest area, or preserve ROM data, literals etc.
260 */
261 u32 hi_reset_flag; /* 0xb4 */
262 /* indicate hi_reset_flag is valid */
263 u32 hi_reset_flag_valid; /* 0xb8 */
264 u32 hi_hci_uart_pwr_mgmt_params_ext; /* 0xbc */
265 /*
266 * 0xbc - [31:0]: idle timeout in ms
267 */
268 /* ACS flags */
269 u32 hi_acs_flags; /* 0xc0 */
270 u32 hi_console_flags; /* 0xc4 */
271 u32 hi_nvram_state; /* 0xc8 */
272 u32 hi_option_flag2; /* 0xcc */
273
274 /* If non-zero, override values sent to Host in WMI_READY event. */
275 u32 hi_sw_version_override; /* 0xd0 */
276 u32 hi_abi_version_override; /* 0xd4 */
277
278 /*
279 * Percentage of high priority RX traffic to total expected RX traffic -
280 * applicable only to ar6004
281 */
282 u32 hi_hp_rx_traffic_ratio; /* 0xd8 */
283
284 /* test applications flags */
285 u32 hi_test_apps_related ; /* 0xdc */
286 /* location of test script */
287 u32 hi_ota_testscript; /* 0xe0 */
288 /* location of CAL data */
289 u32 hi_cal_data; /* 0xe4 */
290 /* Number of packet log buffers */
291 u32 hi_pktlog_num_buffers; /* 0xe8 */
292
293} __packed;
294
/* byte offset of a host_interest field within Target RAM */
#define HI_ITEM(item)  offsetof(struct host_interest, item)

#define HI_OPTION_MAC_ADDR_METHOD_SHIFT	3

/* firmware operating modes encoded in hi_option_flag */
#define HI_OPTION_FW_MODE_IBSS		0x0
#define HI_OPTION_FW_MODE_BSS_STA	0x1
#define HI_OPTION_FW_MODE_AP		0x2

#define HI_OPTION_NUM_DEV_SHIFT		0x9

#define HI_OPTION_FW_BRIDGE_SHIFT	0x04

/*
 * Fw Mode/SubMode Mask
 * |-------------------------------------------------------------------------|
 * |   SUB   |   SUB   |   SUB   |   SUB   |         |         |         |         |
 * | MODE[3] | MODE[2] | MODE[1] | MODE[0] | MODE[3] | MODE[2] | MODE[1] | MODE[0] |
 * |   (2)   |   (2)   |   (2)   |   (2)   |   (2)   |   (2)   |   (2)   |   (2)   |
 * |-------------------------------------------------------------------------|
 */
#define HI_OPTION_FW_MODE_SHIFT		0xC

/* Convert a Target virtual address into a Target physical address */
#define TARG_VTOP(vaddr)		(vaddr & 0x001fffff)

/* per-firmware-revision load addresses and RAM reservations */
#define AR6003_REV2_APP_START_OVERRIDE		0x944C00
#define AR6003_REV2_APP_LOAD_ADDRESS		0x543180
#define AR6003_REV2_BOARD_EXT_DATA_ADDRESS	0x57E500
#define AR6003_REV2_DATASET_PATCH_ADDRESS	0x57e884
#define AR6003_REV2_RAM_RESERVE_SIZE		6912

#define AR6003_REV3_APP_START_OVERRIDE		0x945d00
#define AR6003_REV3_APP_LOAD_ADDRESS		0x545000
#define AR6003_REV3_BOARD_EXT_DATA_ADDRESS	0x542330
#define AR6003_REV3_DATASET_PATCH_ADDRESS	0x57FF74
#define AR6003_REV3_RAM_RESERVE_SIZE		512
331#endif
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
new file mode 100644
index 000000000000..615b46d388f6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -0,0 +1,1452 @@
1/*
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "debug.h"
19
/*
 * Map an outgoing IBSS frame to an HTC endpoint, distributing peers
 * across the data endpoints.  On return *map_no is the 1-based slot
 * in ar->node_map used for this destination (0 for multicast).
 */
static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
			       u32 *map_no)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ethhdr *eth_hdr;
	/*
	 * ep_map is u32 but uses -1 (wraps to 0xffffffff) as a "no free
	 * slot found yet" sentinel; the == -1 comparisons below still
	 * match after the implicit conversion.
	 */
	u32 i, ep_map = -1;
	u8 *datap;

	*map_no = 0;
	datap = skb->data;
	/* frame already carries a WMI data header; ethhdr follows it */
	eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));

	/* all multicast goes to the fixed endpoint */
	if (is_multicast_ether_addr(eth_hdr->h_dest))
		return ENDPOINT_2;

	for (i = 0; i < ar->node_num; i++) {
		if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
			   ETH_ALEN) == 0) {
			/* known peer: reuse its endpoint */
			*map_no = i + 1;
			ar->node_map[i].tx_pend++;
			return ar->node_map[i].ep_id;
		}

		/* remember the first idle slot for possible reuse */
		if ((ep_map == -1) && !ar->node_map[i].tx_pend)
			ep_map = i;
	}

	if (ep_map == -1) {
		/*
		 * No reusable slot: grow the map.  NOTE(review): on the
		 * overflow path node_num stays incremented past
		 * MAX_NODE_NUM -- verify this is intended.
		 */
		ep_map = ar->node_num;
		ar->node_num++;
		if (ar->node_num > MAX_NODE_NUM)
			return ENDPOINT_UNUSED;
	}

	memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);

	/* pick an endpoint with no pending tx, if one exists */
	for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
		if (!ar->tx_pending[i]) {
			ar->node_map[ep_map].ep_id = i;
			break;
		}

		/*
		 * No free endpoint is available, start redistribution on
		 * the inuse endpoints.
		 */
		if (i == ENDPOINT_5) {
			ar->node_map[ep_map].ep_id = ar->next_ep_id;
			ar->next_ep_id++;
			if (ar->next_ep_id > ENDPOINT_5)
				ar->next_ep_id = ENDPOINT_2;
		}
	}

	*map_no = ep_map + 1;
	ar->node_map[ep_map].tx_pend++;

	return ar->node_map[ep_map].ep_id;
}
79
/*
 * AP-mode power-save filtering for an outgoing frame.
 *
 * Returns true when the skb has been consumed (queued for a sleeping
 * station or dropped for an unknown one) and must not be transmitted
 * now; false when the caller should transmit it, with *more_data set
 * if further queued frames remain for the recipient.
 */
static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
				bool *more_data)
{
	struct ethhdr *datap = (struct ethhdr *) skb->data;
	struct ath6kl_sta *conn = NULL;
	bool ps_queued = false, is_psq_empty = false;

	if (is_multicast_ether_addr(datap->h_dest)) {
		u8 ctr = 0;
		bool q_mcast = false;

		/* multicast must be buffered if any station sleeps */
		for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
			if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
				q_mcast = true;
				break;
			}
		}

		if (q_mcast) {
			/*
			 * If this transmit is not because of a Dtim Expiry
			 * q it.
			 */
			if (!test_bit(DTIM_EXPIRED, &ar->flag)) {
				bool is_mcastq_empty = false;

				spin_lock_bh(&ar->mcastpsq_lock);
				is_mcastq_empty =
					skb_queue_empty(&ar->mcastpsq);
				skb_queue_tail(&ar->mcastpsq, skb);
				spin_unlock_bh(&ar->mcastpsq_lock);

				/*
				 * If this is the first Mcast pkt getting
				 * queued indicate to the target to set the
				 * BitmapControl LSB of the TIM IE.
				 */
				if (is_mcastq_empty)
					ath6kl_wmi_set_pvb_cmd(ar->wmi,
							       MCAST_AID, 1);

				ps_queued = true;
			} else {
				/*
				 * This transmit is because of Dtim expiry.
				 * Determine if MoreData bit has to be set.
				 */
				spin_lock_bh(&ar->mcastpsq_lock);
				if (!skb_queue_empty(&ar->mcastpsq))
					*more_data = true;
				spin_unlock_bh(&ar->mcastpsq_lock);
			}
		}
	} else {
		/* unicast: look the destination station up */
		conn = ath6kl_find_sta(ar, datap->h_dest);
		if (!conn) {
			dev_kfree_skb(skb);

			/* Inform the caller that the skb is consumed */
			return true;
		}

		if (conn->sta_flags & STA_PS_SLEEP) {
			if (!(conn->sta_flags & STA_PS_POLLED)) {
				/* Queue the frames if the STA is sleeping */
				spin_lock_bh(&conn->psq_lock);
				is_psq_empty = skb_queue_empty(&conn->psq);
				skb_queue_tail(&conn->psq, skb);
				spin_unlock_bh(&conn->psq_lock);

				/*
				 * If this is the first pkt getting queued
				 * for this STA, update the PVB for this
				 * STA.
				 */
				if (is_psq_empty)
					ath6kl_wmi_set_pvb_cmd(ar->wmi,
							       conn->aid, 1);

				ps_queued = true;
			} else {
				/*
				 * This tx is because of a PsPoll.
				 * Determine if MoreData bit has to be set.
				 */
				spin_lock_bh(&conn->psq_lock);
				if (!skb_queue_empty(&conn->psq))
					*more_data = true;
				spin_unlock_bh(&conn->psq_lock);
			}
		}
	}

	return ps_queued;
}
175
/* Tx functions */

/*
 * Send a control (WMI) packet on @eid.  Takes ownership of @skb: it is
 * either handed to HTC (completion path frees it) or freed here on
 * failure.  Returns 0 on submission, -ENOMEM when no cookie could be
 * allocated (including the deliberate drop when the WMI control
 * endpoint is full).
 */
int ath6kl_control_tx(void *devt, struct sk_buff *skb,
		      enum htc_endpoint_id eid)
{
	struct ath6kl *ar = devt;
	int status = 0;
	struct ath6kl_cookie *cookie = NULL;

	spin_lock_bh(&ar->lock);

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, len=0x%x eid =%d\n", __func__,
		   skb, skb->len, eid);

	if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
		/*
		 * Control endpoint is full, don't allocate resources, we
		 * are just going to drop this packet.
		 */
		cookie = NULL;
		ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
			   skb, skb->len);
	} else
		cookie = ath6kl_alloc_cookie(ar);

	if (cookie == NULL) {
		spin_unlock_bh(&ar->lock);
		status = -ENOMEM;
		goto fail_ctrl_tx;
	}

	/* account for the pending packet while still under ar->lock */
	ar->tx_pending[eid]++;

	if (eid != ar->ctrl_ep)
		ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	cookie->skb = skb;
	cookie->map_no = 0;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, ATH6KL_CONTROL_PKT_TAG);

	/*
	 * This interface is asynchronous, if there is an error, cleanup
	 * will happen in the TX completion callback.
	 */
	htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_ctrl_tx:
	dev_kfree_skb(skb);
	return status;
}
232
/*
 * net_device hard-start-xmit path: encapsulate an outgoing data frame
 * with the WMI header, choose an HTC endpoint and submit it.  Always
 * returns 0 (frames that cannot be sent are dropped and counted in
 * net_stats), matching netdev tx semantics.
 */
int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct ath6kl *ar = ath6kl_priv(dev);
	struct ath6kl_cookie *cookie = NULL;
	enum htc_endpoint_id eid = ENDPOINT_UNUSED;
	u32 map_no = 0;
	u16 htc_tag = ATH6KL_DATA_PKT_TAG;
	u8 ac = 99; /* initialize to unmapped ac */
	bool chk_adhoc_ps_mapping = false, more_data = false;
	struct wmi_tx_meta_v2 meta_v2;
	int ret;

	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
		   "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
		   skb, skb->data, skb->len);

	/* If target is not associated */
	if (!test_bit(CONNECTED, &ar->flag)) {
		dev_kfree_skb(skb);
		return 0;
	}

	if (!test_bit(WMI_READY, &ar->flag))
		goto fail_tx;

	/* AP mode Power saving processing */
	if (ar->nw_type == AP_NETWORK) {
		/* true means the skb was queued or dropped for PS */
		if (ath6kl_powersave_ap(ar, skb, &more_data))
			return 0;
	}

	if (test_bit(WMI_ENABLED, &ar->flag)) {
		/* NOTE(review): meta_v2 is zeroed but apparently unused
		 * in this path -- confirm before removing */
		memset(&meta_v2, 0, sizeof(meta_v2));

		if (skb_headroom(skb) < dev->needed_headroom) {
			WARN_ON(1);
			goto fail_tx;
		}

		/* convert DIX (Ethernet II) framing to 802.3/LLC */
		if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
			ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
			goto fail_tx;
		}

		if (ath6kl_wmi_data_hdr_add(ar->wmi, skb, DATA_MSGTYPE,
					    more_data, 0, 0, NULL)) {
			ath6kl_err("wmi_data_hdr_add failed\n");
			goto fail_tx;
		}

		if ((ar->nw_type == ADHOC_NETWORK) &&
		    ar->ibss_ps_enable && test_bit(CONNECTED, &ar->flag))
			/* IBSS PS: endpoint chosen per destination below */
			chk_adhoc_ps_mapping = true;
		else {
			/* get the stream mapping */
			ret = ath6kl_wmi_implicit_create_pstream(ar->wmi, skb,
				    0, test_bit(WMM_ENABLED, &ar->flag), &ac);
			if (ret)
				goto fail_tx;
		}
	} else
		goto fail_tx;

	spin_lock_bh(&ar->lock);

	if (chk_adhoc_ps_mapping)
		eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
	else
		eid = ar->ac2ep_map[ac];

	if (eid == 0 || eid == ENDPOINT_UNUSED) {
		ath6kl_err("eid %d is not mapped!\n", eid);
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* allocate resource for this packet */
	cookie = ath6kl_alloc_cookie(ar);

	if (!cookie) {
		spin_unlock_bh(&ar->lock);
		goto fail_tx;
	}

	/* update counts while the lock is held */
	ar->tx_pending[eid]++;
	ar->total_tx_data_pend++;

	spin_unlock_bh(&ar->lock);

	cookie->skb = skb;
	cookie->map_no = map_no;
	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
			 eid, htc_tag);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, skb->data, skb->len);

	/*
	 * HTC interface is asynchronous, if this fails, cleanup will
	 * happen in the ath6kl_tx_complete callback.
	 */
	htc_tx(ar->htc_target, &cookie->htc_pkt);

	return 0;

fail_tx:
	dev_kfree_skb(skb);

	ar->net_stats.tx_dropped++;
	ar->net_stats.tx_aborted_errors++;

	return 0;
}
346
/* indicate tx activity or inactivity on a WMI stream */
/*
 * Track which access classes have active traffic and maintain
 * hiac_stream_active_pri, the priority of the highest-priority active
 * stream.  HTC is always notified at the end, since the change may
 * trigger a credit redistribution.
 */
void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
{
	struct ath6kl *ar = devt;
	enum htc_endpoint_id eid;
	int i;

	eid = ar->ac2ep_map[traffic_class];

	/* Without WMI there is no per-AC bookkeeping; just tell HTC. */
	if (!test_bit(WMI_ENABLED, &ar->flag))
		goto notify_htc;

	spin_lock_bh(&ar->lock);

	ar->ac_stream_active[traffic_class] = active;

	if (active) {
		/*
		 * Keep track of the active stream with the highest
		 * priority.
		 */
		if (ar->ac_stream_pri_map[traffic_class] >
		    ar->hiac_stream_active_pri)
			/* set the new highest active priority */
			ar->hiac_stream_active_pri =
					ar->ac_stream_pri_map[traffic_class];

	} else {
		/*
		 * We may have to search for the next active stream
		 * that is the highest priority.
		 */
		if (ar->hiac_stream_active_pri ==
			ar->ac_stream_pri_map[traffic_class]) {
			/*
			 * The highest priority stream just went inactive
			 * reset and search for the "next" highest "active"
			 * priority stream.
			 */
			ar->hiac_stream_active_pri = 0;

			/* Scan all ACs for the highest-priority one still
			 * marked active. */
			for (i = 0; i < WMM_NUM_AC; i++) {
				if (ar->ac_stream_active[i] &&
				    (ar->ac_stream_pri_map[i] >
				     ar->hiac_stream_active_pri))
					/*
					 * Set the new highest active
					 * priority.
					 */
					ar->hiac_stream_active_pri =
						ar->ac_stream_pri_map[i];
			}
		}
	}

	spin_unlock_bh(&ar->lock);

notify_htc:
	/* notify HTC, this may cause credit distribution changes */
	htc_indicate_activity_change(ar->htc_target, eid, active);
}
408
/*
 * HTC callback invoked when an endpoint's TX queue overflows.  Decides
 * whether the overflowing packet should be kept (HTC_SEND_FULL_KEEP) or
 * dropped (HTC_SEND_FULL_DROP), and applies back-pressure by stopping
 * the network queue when appropriate.
 */
enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
					       struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	enum htc_endpoint_id endpoint = packet->endpoint;

	if (endpoint == ar->ctrl_ep) {
		/*
		 * Under normal WMI if this is getting full, then something
		 * is running rampant the host should not be exhausting the
		 * WMI queue with too many commands the only exception to
		 * this is during testing using endpointping.
		 */
		spin_lock_bh(&ar->lock);
		set_bit(WMI_CTRL_EP_FULL, &ar->flag);
		spin_unlock_bh(&ar->lock);
		ath6kl_err("wmi ctrl ep is full\n");
		return HTC_SEND_FULL_KEEP;
	}

	/* Control-tagged packets are never dropped. */
	if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
		return HTC_SEND_FULL_KEEP;

	if (ar->nw_type == ADHOC_NETWORK)
		/*
		 * In adhoc mode, we cannot differentiate traffic
		 * priorities so there is no need to continue, however we
		 * should stop the network.
		 */
		goto stop_net_queues;

	/*
	 * The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
	 * the highest active stream.
	 */
	if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
	    ar->hiac_stream_active_pri &&
	    ar->cookie_count <= MAX_HI_COOKIE_NUM)
		/*
		 * Give preference to the highest priority stream by
		 * dropping the packets which overflowed.
		 */
		return HTC_SEND_FULL_DROP;

stop_net_queues:
	spin_lock_bh(&ar->lock);
	set_bit(NETQ_STOPPED, &ar->flag);
	spin_unlock_bh(&ar->lock);
	netif_stop_queue(ar->net_dev);

	return HTC_SEND_FULL_KEEP;
}
461
/* TODO this needs to be looked at */
/*
 * Drop one pending-TX reference on an IBSS node-map entry (map_no is
 * 1-based; 0 means "no mapping") and, when the freed entry is the last
 * one in the map, shrink the map from the tail past every entry with no
 * packets still in flight.  Only relevant to ad-hoc power-save traffic.
 */
static void ath6kl_tx_clear_node_map(struct ath6kl *ar,
				     enum htc_endpoint_id eid, u32 map_no)
{
	u32 i;

	if (ar->nw_type != ADHOC_NETWORK)
		return;

	if (!ar->ibss_ps_enable)
		return;

	/* Control traffic is never mapped. */
	if (eid == ar->ctrl_ep)
		return;

	if (map_no == 0)
		return;

	/* Convert 1-based map number to array index. */
	map_no--;
	ar->node_map[map_no].tx_pend--;

	if (ar->node_map[map_no].tx_pend)
		return;

	/* Only shrink when the freed slot is the current tail. */
	if (map_no != (ar->node_num - 1))
		return;

	for (i = ar->node_num; i > 0; i--) {
		if (ar->node_map[i - 1].tx_pend)
			break;

		memset(&ar->node_map[i - 1], 0,
		       sizeof(struct ath6kl_node_mapping));
		ar->node_num--;
	}
}
498
/*
 * HTC TX-completion callback: reap every completed packet on
 * packet_queue, update pending counters and statistics, release each
 * packet's cookie and free the skbs.  Wakes the net queue again if we
 * are connected and the completions were not part of a flush, and wakes
 * event_wq waiters once the control endpoint drains.
 */
void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
{
	struct ath6kl *ar = context;
	struct sk_buff_head skb_queue;
	struct htc_packet *packet;
	struct sk_buff *skb;
	struct ath6kl_cookie *ath6kl_cookie;
	u32 map_no = 0;
	int status;
	enum htc_endpoint_id eid;
	bool wake_event = false;
	bool flushing = false;

	skb_queue_head_init(&skb_queue);

	/* lock the driver as we update internal state */
	spin_lock_bh(&ar->lock);

	/* reap completed packets */
	while (!list_empty(packet_queue)) {

		packet = list_first_entry(packet_queue, struct htc_packet,
					  list);
		list_del(&packet->list);

		ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
		if (!ath6kl_cookie)
			goto fatal;

		status = packet->status;
		skb = ath6kl_cookie->skb;
		eid = packet->endpoint;
		map_no = ath6kl_cookie->map_no;

		if (!skb || !skb->data)
			goto fatal;

		packet->buf = skb->data;

		/* Defer freeing until after the lock is dropped. */
		__skb_queue_tail(&skb_queue, skb);

		if (!status && (packet->act_len != skb->len))
			goto fatal;

		ar->tx_pending[eid]--;

		if (eid != ar->ctrl_ep)
			ar->total_tx_data_pend--;

		if (eid == ar->ctrl_ep) {
			if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
				clear_bit(WMI_CTRL_EP_FULL, &ar->flag);

			/* Control endpoint fully drained: wake waiters. */
			if (ar->tx_pending[eid] == 0)
				wake_event = true;
		}

		if (status) {
			if (status == -ECANCELED)
				/* a packet was flushed */
				flushing = true;

			ar->net_stats.tx_errors++;

			if (status != -ENOSPC)
				ath6kl_err("tx error, status: 0x%x\n", status);
			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "error!");
		} else {
			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
				   __func__, skb, packet->buf, packet->act_len,
				   eid, "OK");

			/* Any successful completion cancels flush state. */
			flushing = false;
			ar->net_stats.tx_packets++;
			ar->net_stats.tx_bytes += skb->len;
		}

		ath6kl_tx_clear_node_map(ar, eid, map_no);

		ath6kl_free_cookie(ar, ath6kl_cookie);

		if (test_bit(NETQ_STOPPED, &ar->flag))
			clear_bit(NETQ_STOPPED, &ar->flag);
	}

	spin_unlock_bh(&ar->lock);

	__skb_queue_purge(&skb_queue);

	if (test_bit(CONNECTED, &ar->flag)) {
		if (!flushing)
			netif_wake_queue(ar->net_dev);
	}

	if (wake_event)
		wake_up(&ar->event_wq);

	return;

fatal:
	/*
	 * NOTE(review): the fatal path returns without purging skb_queue,
	 * so skbs reaped before the bad packet appear to be leaked — an
	 * acknowledged "should never happen" path, but worth confirming.
	 */
	WARN_ON(1);
	spin_unlock_bh(&ar->lock);
	return;
}
607
608void ath6kl_tx_data_cleanup(struct ath6kl *ar)
609{
610 int i;
611
612 /* flush all the data (non-control) streams */
613 for (i = 0; i < WMM_NUM_AC; i++)
614 htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
615 ATH6KL_DATA_PKT_TAG);
616}
617
618/* Rx functions */
619
620static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
621 struct sk_buff *skb)
622{
623 if (!skb)
624 return;
625
626 skb->dev = dev;
627
628 if (!(skb->dev->flags & IFF_UP)) {
629 dev_kfree_skb(skb);
630 return;
631 }
632
633 skb->protocol = eth_type_trans(skb, skb->dev);
634
635 netif_rx_ni(skb);
636}
637
638static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
639{
640 struct sk_buff *skb;
641
642 while (num) {
643 skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
644 if (!skb) {
645 ath6kl_err("netbuf allocation failed\n");
646 return;
647 }
648 skb_queue_tail(q, skb);
649 num--;
650 }
651}
652
653static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
654{
655 struct sk_buff *skb = NULL;
656
657 if (skb_queue_len(&p_aggr->free_q) < (AGGR_NUM_OF_FREE_NETBUFS >> 2))
658 ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);
659
660 skb = skb_dequeue(&p_aggr->free_q);
661
662 return skb;
663}
664
665void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
666{
667 struct ath6kl *ar = target->dev->ar;
668 struct sk_buff *skb;
669 int rx_buf;
670 int n_buf_refill;
671 struct htc_packet *packet;
672 struct list_head queue;
673
674 n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
675 htc_get_rxbuf_num(ar->htc_target, endpoint);
676
677 if (n_buf_refill <= 0)
678 return;
679
680 INIT_LIST_HEAD(&queue);
681
682 ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
683 "%s: providing htc with %d buffers at eid=%d\n",
684 __func__, n_buf_refill, endpoint);
685
686 for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
687 skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
688 if (!skb)
689 break;
690
691 packet = (struct htc_packet *) skb->head;
692 set_htc_rxpkt_info(packet, skb, skb->data,
693 ATH6KL_BUFFER_SIZE, endpoint);
694 list_add_tail(&packet->list, &queue);
695 }
696
697 if (!list_empty(&queue))
698 htc_add_rxbuf_multiple(ar->htc_target, &queue);
699}
700
701void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
702{
703 struct htc_packet *packet;
704 struct sk_buff *skb;
705
706 while (count) {
707 skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
708 if (!skb)
709 return;
710
711 packet = (struct htc_packet *) skb->head;
712 set_htc_rxpkt_info(packet, skb, skb->data,
713 ATH6KL_AMSDU_BUFFER_SIZE, 0);
714 spin_lock_bh(&ar->lock);
715 list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
716 spin_unlock_bh(&ar->lock);
717 count--;
718 }
719}
720
/*
 * Callback to allocate a receive buffer for a pending packet. We use a
 * pre-allocated list of buffers of maximum AMSDU size (4K).
 */
/*
 * Returns a buffer from the A-MSDU pool for frames too big for the
 * normal RX buffers, or NULL when the request is out of range or the
 * pool is empty.  The pool is refilled whenever its depth drops by at
 * least ATH6KL_AMSDU_REFILL_THRESHOLD.
 */
struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
					    enum htc_endpoint_id endpoint,
					    int len)
{
	struct ath6kl *ar = target->dev->ar;
	struct htc_packet *packet = NULL;
	struct list_head *pkt_pos;
	int refill_cnt = 0, depth = 0;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
		   __func__, endpoint, len);

	/* Only serve sizes that genuinely need an A-MSDU buffer. */
	if ((len <= ATH6KL_BUFFER_SIZE) ||
	    (len > ATH6KL_AMSDU_BUFFER_SIZE))
		return NULL;

	spin_lock_bh(&ar->lock);

	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
		goto refill_buf;
	}

	packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
				  struct htc_packet, list);
	list_del(&packet->list);
	/* Count what is left to decide how much to top back up. */
	list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
		depth++;

	refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
	spin_unlock_bh(&ar->lock);

	/* set actual endpoint ID */
	packet->endpoint = endpoint;

refill_buf:
	if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
		ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);

	return packet;
}
767
768static void aggr_slice_amsdu(struct aggr_info *p_aggr,
769 struct rxtid *rxtid, struct sk_buff *skb)
770{
771 struct sk_buff *new_skb;
772 struct ethhdr *hdr;
773 u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
774 u8 *framep;
775
776 mac_hdr_len = sizeof(struct ethhdr);
777 framep = skb->data + mac_hdr_len;
778 amsdu_len = skb->len - mac_hdr_len;
779
780 while (amsdu_len > mac_hdr_len) {
781 hdr = (struct ethhdr *) framep;
782 payload_8023_len = ntohs(hdr->h_proto);
783
784 if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
785 payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
786 ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
787 payload_8023_len);
788 break;
789 }
790
791 frame_8023_len = payload_8023_len + mac_hdr_len;
792 new_skb = aggr_get_free_skb(p_aggr);
793 if (!new_skb) {
794 ath6kl_err("no buffer available\n");
795 break;
796 }
797
798 memcpy(new_skb->data, framep, frame_8023_len);
799 skb_put(new_skb, frame_8023_len);
800 if (ath6kl_wmi_dot3_2_dix(new_skb)) {
801 ath6kl_err("dot3_2_dix error\n");
802 dev_kfree_skb(new_skb);
803 break;
804 }
805
806 skb_queue_tail(&rxtid->q, new_skb);
807
808 /* Is this the last subframe within this aggregate ? */
809 if ((amsdu_len - frame_8023_len) == 0)
810 break;
811
812 /* Add the length of A-MSDU subframe padding bytes -
813 * Round to nearest word.
814 */
815 frame_8023_len = ALIGN(frame_8023_len + 3, 3);
816
817 framep += frame_8023_len;
818 amsdu_len -= frame_8023_len;
819 }
820
821 dev_kfree_skb(skb);
822}
823
/*
 * Release in-order frames from a TID's reorder hold queue and deliver
 * them to the network stack.  @seq_no (when non-zero, e.g. on a BAR)
 * overrides the normal window end; @order == 1 stops at the first hole
 * instead of skipping over it.
 */
static void aggr_deque_frms(struct aggr_info *p_aggr, u8 tid,
			    u16 seq_no, u8 order)
{
	struct sk_buff *skb;
	struct rxtid *rxtid;
	struct skb_hold_q *node;
	u16 idx, idx_end, seq_end;
	struct rxtid_stats *stats;

	if (!p_aggr)
		return;

	rxtid = &p_aggr->rx_tid[tid];
	stats = &p_aggr->stat[tid];

	idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);

	/*
	 * idx_end is typically the last possible frame in the window,
	 * but changes to 'the' seq_no, when BAR comes. If seq_no
	 * is non-zero, we will go up to that and stop.
	 * Note: last seq no in current window will occupy the same
	 * index position as index that is just previous to start.
	 * An imp point : if win_sz is 7, for seq_no space of 4095,
	 * then, there would be holes when sequence wrap around occurs.
	 * Target should judiciously choose the win_sz, based on
	 * this condition. For 4095, (TID_WINDOW_SZ = 2 x win_sz
	 * 2, 4, 8, 16 win_sz works fine).
	 * We must deque from "idx" to "idx_end", including both.
	 */
	seq_end = seq_no ? seq_no : rxtid->seq_next;
	idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);

	spin_lock_bh(&rxtid->lock);

	do {
		node = &rxtid->hold_q[idx];
		/* In-order mode: stop at the first missing frame. */
		if ((order == 1) && (!node->skb))
			break;

		if (node->skb) {
			/* A-MSDUs are sliced straight onto rxtid->q. */
			if (node->is_amsdu)
				aggr_slice_amsdu(p_aggr, rxtid, node->skb);
			else
				skb_queue_tail(&rxtid->q, node->skb);
			node->skb = NULL;
		} else
			stats->num_hole++;

		rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
		idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
	} while (idx != idx_end);

	spin_unlock_bh(&rxtid->lock);

	stats->num_delivered += skb_queue_len(&rxtid->q);

	/* Push everything released above up the stack. */
	while ((skb = skb_dequeue(&rxtid->q)))
		ath6kl_deliver_frames_to_nw_stack(p_aggr->dev, skb);
}
884
/*
 * Feed one received frame into the TID's block-ack reorder machinery.
 * Returns true when the frame has been consumed (queued into the hold
 * queue or sliced as an A-MSDU); false means the caller should deliver
 * it to the stack directly.
 */
static bool aggr_process_recv_frm(struct aggr_info *agg_info, u8 tid,
				  u16 seq_no,
				  bool is_amsdu, struct sk_buff *frame)
{
	struct rxtid *rxtid;
	struct rxtid_stats *stats;
	struct sk_buff *skb;
	struct skb_hold_q *node;
	u16 idx, st, cur, end;
	bool is_queued = false;
	u16 extended_end;

	rxtid = &agg_info->rx_tid[tid];
	stats = &agg_info->stat[tid];

	stats->num_into_aggr++;

	/* No aggregation on this TID: only A-MSDUs need handling here. */
	if (!rxtid->aggr) {
		if (is_amsdu) {
			aggr_slice_amsdu(agg_info, rxtid, frame);
			is_queued = true;
			stats->num_amsdu++;
			while ((skb = skb_dequeue(&rxtid->q)))
				ath6kl_deliver_frames_to_nw_stack(agg_info->dev,
								  skb);
		}
		return is_queued;
	}

	/* Check the incoming sequence no, if it's in the window */
	st = rxtid->seq_next;
	cur = seq_no;
	end = (st + rxtid->hold_q_sz-1) & ATH6KL_MAX_SEQ_NO;

	/* Out-of-window frame: slide the window forward so cur fits. */
	if (((st < end) && (cur < st || cur > end)) ||
	    ((st > end) && (cur > end) && (cur < st))) {
		extended_end = (end + rxtid->hold_q_sz - 1) &
			ATH6KL_MAX_SEQ_NO;

		if (((end < extended_end) &&
		     (cur < end || cur > extended_end)) ||
		    ((end > extended_end) && (cur > extended_end) &&
		     (cur < end))) {
			/* Far outside even the extended window: flush
			 * everything and restart the window at cur. */
			aggr_deque_frms(agg_info, tid, 0, 0);
			if (cur >= rxtid->hold_q_sz - 1)
				rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
			else
				rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
						  (rxtid->hold_q_sz - 2 - cur);
		} else {
			/*
			 * Dequeue only those frames that are outside the
			 * new shifted window.
			 */
			if (cur >= rxtid->hold_q_sz - 1)
				st = cur - (rxtid->hold_q_sz - 1);
			else
				st = ATH6KL_MAX_SEQ_NO -
				     (rxtid->hold_q_sz - 2 - cur);

			aggr_deque_frms(agg_info, tid, st, 0);
		}

		stats->num_oow++;
	}

	idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);

	node = &rxtid->hold_q[idx];

	spin_lock_bh(&rxtid->lock);

	/*
	 * Is the cur frame duplicate or something beyond our window(hold_q
	 * -> which is 2x, already)?
	 *
	 * 1. Duplicate is easy - drop incoming frame.
	 * 2. Not falling in current sliding window.
	 * 2a. is the frame_seq_no preceding current tid_seq_no?
	 *     -> drop the frame. perhaps sender did not get our ACK.
	 *        this is taken care of above.
	 * 2b. is the frame_seq_no beyond window(st, TID_WINDOW_SZ);
	 *     -> Taken care of it above, by moving window forward.
	 *
	 * NOTE(review): dev_kfree_skb(NULL) is a no-op, so dropping a
	 * non-existent occupant is safe, but num_dups is incremented on
	 * every insert, not only on true duplicates — confirm whether the
	 * counter is meant to be exact.
	 */
	dev_kfree_skb(node->skb);
	stats->num_dups++;

	node->skb = frame;
	is_queued = true;
	node->is_amsdu = is_amsdu;
	node->seq_no = seq_no;

	if (node->is_amsdu)
		stats->num_amsdu++;
	else
		stats->num_mpdu++;

	spin_unlock_bh(&rxtid->lock);

	/* Release whatever is now in order at the head of the window. */
	aggr_deque_frms(agg_info, tid, 0, 1);

	if (agg_info->timer_scheduled)
		rxtid->progress = true;
	else
		for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) {
			if (rxtid->hold_q[idx].skb) {
				/*
				 * There is a frame in the queue and no
				 * timer so start a timer to ensure that
				 * the frame doesn't remain stuck
				 * forever.
				 */
				agg_info->timer_scheduled = true;
				mod_timer(&agg_info->timer,
					  (jiffies +
					   HZ * (AGGR_RX_TIMEOUT) / 1000));
				rxtid->progress = false;
				rxtid->timer_mon = true;
				break;
			}
		}

	return is_queued;
}
1009
/*
 * Main HTC receive callback.  Strips the HTC and WMI headers, handles
 * AP-mode power-save bookkeeping and intra-BSS forwarding, then hands
 * the frame to the aggregation reorder logic or straight to the stack.
 * The skb is consumed on every path.
 */
void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
{
	struct ath6kl *ar = target->dev->ar;
	struct sk_buff *skb = packet->pkt_cntxt;
	struct wmi_rx_meta_v2 *meta;
	struct wmi_data_hdr *dhdr;
	int min_hdr_len;
	u8 meta_type, dot11_hdr = 0;
	int status = packet->status;
	enum htc_endpoint_id ept = packet->endpoint;
	bool is_amsdu, prev_ps, ps_state = false;
	struct ath6kl_sta *conn = NULL;
	struct sk_buff *skb1 = NULL;
	struct ethhdr *datap = NULL;
	u16 seq_no, offset;
	u8 tid;

	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
		   "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
		   __func__, ar, ept, skb, packet->buf,
		   packet->act_len, status);

	/*
	 * NOTE(review): "!(skb->data + HTC_HDR_LENGTH)" is a pointer plus
	 * a constant and is essentially never NULL — this looks like it
	 * was meant to validate skb->data itself; confirm intent.
	 */
	if (status || !(skb->data + HTC_HDR_LENGTH)) {
		ar->net_stats.rx_errors++;
		dev_kfree_skb(skb);
		return;
	}

	/*
	 * Take lock to protect buffer counts and adaptive power throughput
	 * state.
	 */
	spin_lock_bh(&ar->lock);

	ar->net_stats.rx_packets++;
	ar->net_stats.rx_bytes += packet->act_len;

	skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
	skb_pull(skb, HTC_HDR_LENGTH);

	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, skb->data, skb->len);

	spin_unlock_bh(&ar->lock);

	skb->dev = ar->net_dev;

	/* Raw (endpoint-ping style) traffic when WMI is not up. */
	if (!test_bit(WMI_ENABLED, &ar->flag)) {
		if (EPPING_ALIGNMENT_PAD > 0)
			skb_pull(skb, EPPING_ALIGNMENT_PAD);
		ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
		return;
	}

	/* WMI control messages take a separate path entirely. */
	if (ept == ar->ctrl_ep) {
		ath6kl_wmi_control_rx(ar->wmi, skb);
		return;
	}

	min_hdr_len = sizeof(struct ethhdr);
	min_hdr_len += sizeof(struct wmi_data_hdr) +
		       sizeof(struct ath6kl_llc_snap_hdr);

	dhdr = (struct wmi_data_hdr *) skb->data;

	/*
	 * In the case of AP mode we may receive NULL data frames
	 * that do not have LLC hdr. They are 16 bytes in size.
	 * Allow these frames in the AP mode.
	 */
	if (ar->nw_type != AP_NETWORK &&
	    ((packet->act_len < min_hdr_len) ||
	     (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
		ath6kl_info("frame len is too short or too long\n");
		ar->net_stats.rx_errors++;
		ar->net_stats.rx_length_errors++;
		dev_kfree_skb(skb);
		return;
	}

	/* Get the Power save state of the STA */
	if (ar->nw_type == AP_NETWORK) {
		meta_type = wmi_data_hdr_get_meta(dhdr);

		ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
			      WMI_DATA_HDR_PS_MASK);

		offset = sizeof(struct wmi_data_hdr);

		/* Skip the optional RX meta section to find the 802.3 hdr. */
		switch (meta_type) {
		case 0:
			break;
		case WMI_META_VERSION_1:
			offset += sizeof(struct wmi_rx_meta_v1);
			break;
		case WMI_META_VERSION_2:
			offset += sizeof(struct wmi_rx_meta_v2);
			break;
		default:
			break;
		}

		datap = (struct ethhdr *) (skb->data + offset);
		conn = ath6kl_find_sta(ar, datap->h_source);

		/* Frames from unknown stations are dropped. */
		if (!conn) {
			dev_kfree_skb(skb);
			return;
		}

		/*
		 * If there is a change in PS state of the STA,
		 * take appropriate steps:
		 *
		 * 1. If Sleep-->Awake, flush the psq for the STA
		 *    Clear the PVB for the STA.
		 * 2. If Awake-->Sleep, Starting queueing frames
		 *    the STA.
		 */
		prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);

		if (ps_state)
			conn->sta_flags |= STA_PS_SLEEP;
		else
			conn->sta_flags &= ~STA_PS_SLEEP;

		if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
			if (!(conn->sta_flags & STA_PS_SLEEP)) {
				struct sk_buff *skbuff = NULL;

				/* STA woke up: drain its power-save queue.
				 * The lock is dropped around each TX since
				 * ath6kl_data_tx takes ar->lock itself. */
				spin_lock_bh(&conn->psq_lock);
				while ((skbuff = skb_dequeue(&conn->psq))
				       != NULL) {
					spin_unlock_bh(&conn->psq_lock);
					ath6kl_data_tx(skbuff, ar->net_dev);
					spin_lock_bh(&conn->psq_lock);
				}
				spin_unlock_bh(&conn->psq_lock);
				/* Clear the PVB for this STA */
				ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0);
			}
		}

		/* drop NULL data frames here */
		if ((packet->act_len < min_hdr_len) ||
		    (packet->act_len >
		     WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
			dev_kfree_skb(skb);
			return;
		}
	}

	is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
	tid = wmi_data_hdr_get_up(dhdr);
	seq_no = wmi_data_hdr_get_seqno(dhdr);
	meta_type = wmi_data_hdr_get_meta(dhdr);
	dot11_hdr = wmi_data_hdr_get_dot11(dhdr);

	ath6kl_wmi_data_hdr_remove(ar->wmi, skb);

	switch (meta_type) {
	case WMI_META_VERSION_1:
		skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
		break;
	case WMI_META_VERSION_2:
		meta = (struct wmi_rx_meta_v2 *) skb->data;
		/* Bit 0 flags a firmware-computed checksum. */
		if (meta->csum_flags & 0x1) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (__force __wsum) meta->csum;
		}
		skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
		break;
	default:
		break;
	}

	if (dot11_hdr)
		status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
	else if (!is_amsdu)
		status = ath6kl_wmi_dot3_2_dix(skb);

	if (status) {
		/*
		 * Drop frames that could not be processed (lack of
		 * memory, etc.)
		 */
		dev_kfree_skb(skb);
		return;
	}

	if (!(ar->net_dev->flags & IFF_UP)) {
		dev_kfree_skb(skb);
		return;
	}

	if (ar->nw_type == AP_NETWORK) {
		datap = (struct ethhdr *) skb->data;
		if (is_multicast_ether_addr(datap->h_dest))
			/*
			 * Bcast/Mcast frames should be sent to the
			 * OS stack as well as on the air.
			 */
			skb1 = skb_copy(skb, GFP_ATOMIC);
		else {
			/*
			 * Search for a connected STA with dstMac
			 * as the Mac address. If found send the
			 * frame to it on the air else send the
			 * frame up the stack.
			 */
			struct ath6kl_sta *conn = NULL;
			conn = ath6kl_find_sta(ar, datap->h_dest);

			if (conn && ar->intra_bss) {
				skb1 = skb;
				skb = NULL;
			} else if (conn && !ar->intra_bss) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
		if (skb1)
			ath6kl_data_tx(skb1, ar->net_dev);
	}

	/* Let the aggregation logic consume the frame; deliver directly
	 * if it was not queued for reordering. */
	if (!aggr_process_recv_frm(ar->aggr_cntxt, tid, seq_no,
				   is_amsdu, skb))
		ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
}
1238
/*
 * Reorder-timeout handler: flush every TID whose window made no
 * progress since the timer was armed, then re-arm the timer if any
 * frames are still parked in a hold queue.
 */
static void aggr_timeout(unsigned long arg)
{
	u8 i, j;
	struct aggr_info *p_aggr = (struct aggr_info *) arg;
	struct rxtid *rxtid;
	struct rxtid_stats *stats;

	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &p_aggr->rx_tid[i];
		stats = &p_aggr->stat[i];

		/* Skip TIDs that are inactive, unmonitored, or that made
		 * progress since the timer was scheduled. */
		if (!rxtid->aggr || !rxtid->timer_mon || rxtid->progress)
			continue;

		stats->num_timeouts++;
		ath6kl_err("aggr timeout (st %d end %d)\n",
			   rxtid->seq_next,
			   ((rxtid->seq_next + rxtid->hold_q_sz-1) &
			    ATH6KL_MAX_SEQ_NO));
		aggr_deque_frms(p_aggr, i, 0, 0);
	}

	p_aggr->timer_scheduled = false;

	/* Re-arm if any hold queue still has a frame waiting. */
	for (i = 0; i < NUM_OF_TIDS; i++) {
		rxtid = &p_aggr->rx_tid[i];

		if (rxtid->aggr && rxtid->hold_q) {
			for (j = 0; j < rxtid->hold_q_sz; j++) {
				if (rxtid->hold_q[j].skb) {
					p_aggr->timer_scheduled = true;
					rxtid->timer_mon = true;
					rxtid->progress = false;
					break;
				}
			}

			if (j >= rxtid->hold_q_sz)
				rxtid->timer_mon = false;
		}
	}

	if (p_aggr->timer_scheduled)
		mod_timer(&p_aggr->timer,
			  jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
}
1285
1286static void aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid)
1287{
1288 struct rxtid *rxtid;
1289 struct rxtid_stats *stats;
1290
1291 if (!p_aggr || tid >= NUM_OF_TIDS)
1292 return;
1293
1294 rxtid = &p_aggr->rx_tid[tid];
1295 stats = &p_aggr->stat[tid];
1296
1297 if (rxtid->aggr)
1298 aggr_deque_frms(p_aggr, tid, 0, 0);
1299
1300 rxtid->aggr = false;
1301 rxtid->progress = false;
1302 rxtid->timer_mon = false;
1303 rxtid->win_sz = 0;
1304 rxtid->seq_next = 0;
1305 rxtid->hold_q_sz = 0;
1306
1307 kfree(rxtid->hold_q);
1308 rxtid->hold_q = NULL;
1309
1310 memset(stats, 0, sizeof(struct rxtid_stats));
1311}
1312
1313void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no, u8 win_sz)
1314{
1315 struct aggr_info *p_aggr = ar->aggr_cntxt;
1316 struct rxtid *rxtid;
1317 struct rxtid_stats *stats;
1318 u16 hold_q_size;
1319
1320 if (!p_aggr)
1321 return;
1322
1323 rxtid = &p_aggr->rx_tid[tid];
1324 stats = &p_aggr->stat[tid];
1325
1326 if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
1327 ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
1328 __func__, win_sz, tid);
1329
1330 if (rxtid->aggr)
1331 aggr_delete_tid_state(p_aggr, tid);
1332
1333 rxtid->seq_next = seq_no;
1334 hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
1335 rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
1336 if (!rxtid->hold_q)
1337 return;
1338
1339 rxtid->win_sz = win_sz;
1340 rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
1341 if (!skb_queue_empty(&rxtid->q))
1342 return;
1343
1344 rxtid->aggr = true;
1345}
1346
1347struct aggr_info *aggr_init(struct net_device *dev)
1348{
1349 struct aggr_info *p_aggr = NULL;
1350 struct rxtid *rxtid;
1351 u8 i;
1352
1353 p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
1354 if (!p_aggr) {
1355 ath6kl_err("failed to alloc memory for aggr_node\n");
1356 return NULL;
1357 }
1358
1359 p_aggr->aggr_sz = AGGR_SZ_DEFAULT;
1360 p_aggr->dev = dev;
1361 init_timer(&p_aggr->timer);
1362 p_aggr->timer.function = aggr_timeout;
1363 p_aggr->timer.data = (unsigned long) p_aggr;
1364
1365 p_aggr->timer_scheduled = false;
1366 skb_queue_head_init(&p_aggr->free_q);
1367
1368 ath6kl_alloc_netbufs(&p_aggr->free_q, AGGR_NUM_OF_FREE_NETBUFS);
1369
1370 for (i = 0; i < NUM_OF_TIDS; i++) {
1371 rxtid = &p_aggr->rx_tid[i];
1372 rxtid->aggr = false;
1373 rxtid->progress = false;
1374 rxtid->timer_mon = false;
1375 skb_queue_head_init(&rxtid->q);
1376 spin_lock_init(&rxtid->lock);
1377 }
1378
1379 return p_aggr;
1380}
1381
1382void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid)
1383{
1384 struct aggr_info *p_aggr = ar->aggr_cntxt;
1385 struct rxtid *rxtid;
1386
1387 if (!p_aggr)
1388 return;
1389
1390 rxtid = &p_aggr->rx_tid[tid];
1391
1392 if (rxtid->aggr)
1393 aggr_delete_tid_state(p_aggr, tid);
1394}
1395
1396void aggr_reset_state(struct aggr_info *aggr_info)
1397{
1398 u8 tid;
1399
1400 for (tid = 0; tid < NUM_OF_TIDS; tid++)
1401 aggr_delete_tid_state(aggr_info, tid);
1402}
1403
/* clean up our amsdu buffer list */
void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
{
	struct htc_packet *packet, *tmp_pkt;

	spin_lock_bh(&ar->lock);
	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
		spin_unlock_bh(&ar->lock);
		return;
	}

	/*
	 * The lock is dropped around each free since dev_kfree_skb must
	 * not be called with a spinlock held in _bh context here.
	 * NOTE(review): tmp_pkt is captured before the unlock, so this
	 * relies on no other path touching the queue concurrently during
	 * cleanup — presumably true at teardown time; worth confirming.
	 */
	list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
				 list) {
		list_del(&packet->list);
		spin_unlock_bh(&ar->lock);
		dev_kfree_skb(packet->pkt_cntxt);
		spin_lock_bh(&ar->lock);
	}

	spin_unlock_bh(&ar->lock);
}
1425
1426void aggr_module_destroy(struct aggr_info *aggr_info)
1427{
1428 struct rxtid *rxtid;
1429 u8 i, k;
1430
1431 if (!aggr_info)
1432 return;
1433
1434 if (aggr_info->timer_scheduled) {
1435 del_timer(&aggr_info->timer);
1436 aggr_info->timer_scheduled = false;
1437 }
1438
1439 for (i = 0; i < NUM_OF_TIDS; i++) {
1440 rxtid = &aggr_info->rx_tid[i];
1441 if (rxtid->hold_q) {
1442 for (k = 0; k < rxtid->hold_q_sz; k++)
1443 dev_kfree_skb(rxtid->hold_q[k].skb);
1444 kfree(rxtid->hold_q);
1445 }
1446
1447 skb_queue_purge(&rxtid->q);
1448 }
1449
1450 skb_queue_purge(&aggr_info->free_q);
1451 kfree(aggr_info);
1452}
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
new file mode 100644
index 000000000000..a52d7d201fbd
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -0,0 +1,2762 @@
1/*
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/ip.h>
18#include "core.h"
19#include "debug.h"
20
21static int ath6kl_wmi_sync_point(struct wmi *wmi);
22
/*
 * Bit-rate lookup table indexed by [rate index][SGI flag]; column 0 is
 * the rate without short guard interval, column 1 with SGI.  Values
 * appear to be in kbps — TODO confirm against the firmware interface
 * docs.  The trailing {0, 0} entry terminates the table.
 */
static const s32 wmi_rate_tbl[][2] = {
	/* {W/O SGI, with SGI} */
	{1000, 1000},
	{2000, 2000},
	{5500, 5500},
	{11000, 11000},
	{6000, 6000},
	{9000, 9000},
	{12000, 12000},
	{18000, 18000},
	{24000, 24000},
	{36000, 36000},
	{48000, 48000},
	{54000, 54000},
	{6500, 7200},
	{13000, 14400},
	{19500, 21700},
	{26000, 28900},
	{39000, 43300},
	{52000, 57800},
	{58500, 65000},
	{65000, 72200},
	{13500, 15000},
	{27000, 30000},
	{40500, 45000},
	{54000, 60000},
	{81000, 90000},
	{108000, 120000},
	{121500, 135000},
	{135000, 150000},
	{0, 0}
};
55
/* 802.1d to AC mapping. Refer pg 57 of WMM-test-plan-v1.2 */
/* Indexed by 802.1D user priority 0-7; yields the WMM access category. */
static const u8 up_to_ac[] = {
	WMM_AC_BE,
	WMM_AC_BK,
	WMM_AC_BK,
	WMM_AC_BE,
	WMM_AC_VI,
	WMM_AC_VI,
	WMM_AC_VO,
	WMM_AC_VO,
};
67
68void ath6kl_wmi_set_control_ep(struct wmi *wmi, enum htc_endpoint_id ep_id)
69{
70 if (WARN_ON(ep_id == ENDPOINT_UNUSED || ep_id >= ENDPOINT_MAX))
71 return;
72
73 wmi->ep_id = ep_id;
74}
75
/* Return the HTC endpoint currently used for WMI control traffic. */
enum htc_endpoint_id ath6kl_wmi_get_control_ep(struct wmi *wmi)
{
	return wmi->ep_id;
}
80
/* Performs DIX to 802.3 encapsulation for transmit packets.
 * Assumes the entire DIX header is contigous and that there is
 * enough room in the buffer for a 802.3 mac header and LLC+SNAP headers.
 */
int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb)
{
	struct ath6kl_llc_snap_hdr *llc_hdr;
	struct ethhdr *eth_hdr;
	size_t new_len;
	__be16 type;
	u8 *datap;
	u16 size;

	if (WARN_ON(skb == NULL))
		return -EINVAL;

	/*
	 * Headroom must accommodate both the LLC/SNAP header added here
	 * and the WMI data header that a later stage will push.
	 */
	size = sizeof(struct ath6kl_llc_snap_hdr) + sizeof(struct wmi_data_hdr);
	if (skb_headroom(skb) < size)
		return -ENOMEM;

	eth_hdr = (struct ethhdr *) skb->data;
	type = eth_hdr->h_proto;

	/* A value below the ethertype threshold is already an 802.3 length */
	if (!is_ethertype(be16_to_cpu(type))) {
		ath6kl_dbg(ATH6KL_DBG_WMI,
			"%s: pkt is already in 802.3 format\n", __func__);
		return 0;
	}

	/*
	 * 802.3 carries a length, not an ethertype: the payload size plus
	 * the LLC/SNAP header being inserted.
	 */
	new_len = skb->len - sizeof(*eth_hdr) + sizeof(*llc_hdr);

	skb_push(skb, sizeof(struct ath6kl_llc_snap_hdr));
	datap = skb->data;

	/*
	 * eth_hdr still points at the original header inside the buffer;
	 * rewrite its proto field there, then copy it to the new front.
	 */
	eth_hdr->h_proto = cpu_to_be16(new_len);

	memcpy(datap, eth_hdr, sizeof(*eth_hdr));

	/* Build the LLC/SNAP header carrying the original ethertype. */
	llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap + sizeof(*eth_hdr));
	llc_hdr->dsap = 0xAA;
	llc_hdr->ssap = 0xAA;
	llc_hdr->cntl = 0x03;
	llc_hdr->org_code[0] = 0x0;
	llc_hdr->org_code[1] = 0x0;
	llc_hdr->org_code[2] = 0x0;
	llc_hdr->eth_type = type;

	return 0;
}
130
/*
 * Prepend the requested TX meta-info block to @skb.  @version selects the
 * layout on entry and reports the version actually written on return; an
 * unrecognized version adds nothing and succeeds.
 */
static int ath6kl_wmi_meta_add(struct wmi *wmi, struct sk_buff *skb,
			       u8 *version, void *tx_meta_info)
{
	struct wmi_tx_meta_v1 *v1;
	struct wmi_tx_meta_v2 *v2;

	if (WARN_ON(skb == NULL || version == NULL))
		return -EINVAL;

	switch (*version) {
	case WMI_META_VERSION_1:
		skb_push(skb, WMI_MAX_TX_META_SZ);
		v1 = (struct wmi_tx_meta_v1 *) skb->data;
		/* v1 meta always carries zeroed ids here */
		v1->pkt_id = 0;
		v1->rate_plcy_id = 0;
		*version = WMI_META_VERSION_1;
		break;
	case WMI_META_VERSION_2:
		/* v2 meta is supplied fully formed by the caller */
		skb_push(skb, WMI_MAX_TX_META_SZ);
		v2 = (struct wmi_tx_meta_v2 *) skb->data;
		memcpy(v2, (struct wmi_tx_meta_v2 *) tx_meta_info,
		       sizeof(struct wmi_tx_meta_v2));
		break;
	}

	return 0;
}
158
/*
 * Push meta info (if any) and the WMI data header onto @skb for transmit.
 * @msg_type/@data_type/@more_data are packed into the header's info byte;
 * @meta_ver selects the meta layout written by ath6kl_wmi_meta_add().
 */
int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
			    u8 msg_type, bool more_data,
			    enum wmi_data_hdr_data_type data_type,
			    u8 meta_ver, void *tx_meta_info)
{
	struct wmi_data_hdr *data_hdr;
	int ret;

	if (WARN_ON(skb == NULL))
		return -EINVAL;

	/* meta block must precede the data header in the buffer */
	ret = ath6kl_wmi_meta_add(wmi, skb, &meta_ver, tx_meta_info);
	if (ret)
		return ret;

	skb_push(skb, sizeof(struct wmi_data_hdr));

	data_hdr = (struct wmi_data_hdr *)skb->data;
	memset(data_hdr, 0, sizeof(struct wmi_data_hdr));

	data_hdr->info = msg_type << WMI_DATA_HDR_MSG_TYPE_SHIFT;
	data_hdr->info |= data_type << WMI_DATA_HDR_DATA_TYPE_SHIFT;

	/* tell the target more frames follow in this burst */
	if (more_data)
		data_hdr->info |=
			WMI_DATA_HDR_MORE_MASK << WMI_DATA_HDR_MORE_SHIFT;

	data_hdr->info2 = cpu_to_le16(meta_ver << WMI_DATA_HDR_META_SHIFT);
	data_hdr->info3 = 0;

	return 0;
}
191
192static u8 ath6kl_wmi_determine_user_priority(u8 *pkt, u32 layer2_pri)
193{
194 struct iphdr *ip_hdr = (struct iphdr *) pkt;
195 u8 ip_pri;
196
197 /*
198 * Determine IPTOS priority
199 *
200 * IP-TOS - 8bits
201 * : DSCP(6-bits) ECN(2-bits)
202 * : DSCP - P2 P1 P0 X X X
203 * where (P2 P1 P0) form 802.1D
204 */
205 ip_pri = ip_hdr->tos >> 5;
206 ip_pri &= 0x7;
207
208 if ((layer2_pri & 0x7) > ip_pri)
209 return (u8) layer2_pri & 0x7;
210 else
211 return ip_pri;
212}
213
/*
 * Classify an outgoing frame into a WMM access category (returned via
 * @ac), stamp the user priority into its WMI data header, and implicitly
 * create the fat pipe (pstream) for that AC on first use.
 */
int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb,
				       u32 layer2_priority, bool wmm_enabled,
				       u8 *ac)
{
	struct wmi_data_hdr *data_hdr;
	struct ath6kl_llc_snap_hdr *llc_hdr;
	struct wmi_create_pstream_cmd cmd;
	u32 meta_size, hdr_size;
	u16 ip_type = IP_ETHERTYPE;
	u8 stream_exist, usr_pri;
	u8 traffic_class = WMM_AC_BE;
	u8 *datap;

	if (WARN_ON(skb == NULL))
		return -EINVAL;

	datap = skb->data;
	data_hdr = (struct wmi_data_hdr *) datap;

	/* the meta block (if present) sits between data hdr and MAC hdr */
	meta_size = ((le16_to_cpu(data_hdr->info2) >> WMI_DATA_HDR_META_SHIFT) &
		     WMI_DATA_HDR_META_MASK) ? WMI_MAX_TX_META_SZ : 0;

	if (!wmm_enabled) {
		/* If WMM is disabled all traffic goes as BE traffic */
		usr_pri = 0;
	} else {
		hdr_size = sizeof(struct ethhdr);

		llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap +
							 sizeof(struct
								wmi_data_hdr) +
							 meta_size + hdr_size);

		if (llc_hdr->eth_type == htons(ip_type)) {
			/*
			 * Extract the endpoint info from the TOS field
			 * in the IP header.
			 */
			usr_pri =
			   ath6kl_wmi_determine_user_priority(((u8 *) llc_hdr) +
					sizeof(struct ath6kl_llc_snap_hdr),
					layer2_priority);
		} else
			usr_pri = layer2_priority & 0x7;
	}

	/* workaround for WMM S5 */
	if ((wmi->traffic_class == WMM_AC_VI) &&
	    ((usr_pri == 5) || (usr_pri == 4)))
		usr_pri = 1;

	/* Convert user priority to traffic class */
	traffic_class = up_to_ac[usr_pri & 0x7];

	wmi_data_hdr_set_up(data_hdr, usr_pri);

	/* snapshot under the lock; creation below is best-effort */
	spin_lock_bh(&wmi->lock);
	stream_exist = wmi->fat_pipe_exist;
	spin_unlock_bh(&wmi->lock);

	if (!(stream_exist & (1 << traffic_class))) {
		memset(&cmd, 0, sizeof(cmd));
		cmd.traffic_class = traffic_class;
		cmd.user_pri = usr_pri;
		cmd.inactivity_int =
			cpu_to_le32(WMI_IMPLICIT_PSTREAM_INACTIVITY_INT);
		/* Implicit streams are created with TSID 0xFF */
		cmd.tsid = WMI_IMPLICIT_PSTREAM;
		ath6kl_wmi_create_pstream_cmd(wmi, &cmd);
	}

	*ac = traffic_class;

	return 0;
}
289
/*
 * Replace the 802.11 MAC header (plus LLC/SNAP) of a received frame
 * with an Ethernet header, picking src/dst addresses according to the
 * ToDS/FromDS bits.
 */
int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb)
{
	struct ieee80211_hdr_3addr *pwh, wh;
	struct ath6kl_llc_snap_hdr *llc_hdr;
	struct ethhdr eth_hdr;
	u32 hdr_size;
	u8 *datap;
	__le16 sub_type;

	if (WARN_ON(skb == NULL))
		return -EINVAL;

	datap = skb->data;
	pwh = (struct ieee80211_hdr_3addr *) datap;

	sub_type = pwh->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);

	/* keep a copy: skb_pull below invalidates pwh */
	memcpy((u8 *) &wh, datap, sizeof(struct ieee80211_hdr_3addr));

	/* Strip off the 802.11 header */
	if (sub_type == cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
		/* QoS header is padded to a 4-byte boundary */
		hdr_size = roundup(sizeof(struct ieee80211_qos_hdr),
				   sizeof(u32));
		skb_pull(skb, hdr_size);
	} else if (sub_type == cpu_to_le16(IEEE80211_STYPE_DATA))
		skb_pull(skb, sizeof(struct ieee80211_hdr_3addr));

	datap = skb->data;
	llc_hdr = (struct ath6kl_llc_snap_hdr *)(datap);

	eth_hdr.h_proto = llc_hdr->eth_type;
	memset(eth_hdr.h_dest, 0, sizeof(eth_hdr.h_dest));
	memset(eth_hdr.h_source, 0, sizeof(eth_hdr.h_source));

	/* addr1/2/3 meaning depends on the DS bits; 4-addr (WDS) frames
	 * are left with zeroed addresses */
	switch ((le16_to_cpu(wh.frame_control)) &
		(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
	case 0:
		memcpy(eth_hdr.h_dest, wh.addr1, ETH_ALEN);
		memcpy(eth_hdr.h_source, wh.addr2, ETH_ALEN);
		break;
	case IEEE80211_FCTL_TODS:
		memcpy(eth_hdr.h_dest, wh.addr3, ETH_ALEN);
		memcpy(eth_hdr.h_source, wh.addr2, ETH_ALEN);
		break;
	case IEEE80211_FCTL_FROMDS:
		memcpy(eth_hdr.h_dest, wh.addr1, ETH_ALEN);
		memcpy(eth_hdr.h_source, wh.addr3, ETH_ALEN);
		break;
	case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
		break;
	}

	/* drop LLC/SNAP, then prepend the rebuilt Ethernet header */
	skb_pull(skb, sizeof(struct ath6kl_llc_snap_hdr));
	skb_push(skb, sizeof(eth_hdr));

	datap = skb->data;

	memcpy(datap, &eth_hdr, sizeof(eth_hdr));

	return 0;
}
351
/*
 * Performs 802.3 to DIX encapsulation for received packets.
 * Assumes the entire 802.3 header is contigous.
 */
int ath6kl_wmi_dot3_2_dix(struct sk_buff *skb)
{
	struct ath6kl_llc_snap_hdr *llc_hdr;
	struct ethhdr eth_hdr;
	u8 *datap;

	if (WARN_ON(skb == NULL))
		return -EINVAL;

	datap = skb->data;

	/* copy out the MAC header before the buffer is shifted */
	memcpy(&eth_hdr, datap, sizeof(eth_hdr));

	/* the real ethertype lives in the LLC/SNAP header */
	llc_hdr = (struct ath6kl_llc_snap_hdr *) (datap + sizeof(eth_hdr));
	eth_hdr.h_proto = llc_hdr->eth_type;

	/* drop LLC/SNAP and write the DIX header in its place */
	skb_pull(skb, sizeof(struct ath6kl_llc_snap_hdr));
	datap = skb->data;

	memcpy(datap, &eth_hdr, sizeof(eth_hdr));

	return 0;
}
379
380int ath6kl_wmi_data_hdr_remove(struct wmi *wmi, struct sk_buff *skb)
381{
382 if (WARN_ON(skb == NULL))
383 return -EINVAL;
384
385 skb_pull(skb, sizeof(struct wmi_data_hdr));
386
387 return 0;
388}
389
/* Apply callback @f (with @arg) to every node in the WMI scan table. */
void ath6kl_wmi_iterate_nodes(struct wmi *wmi,
			      void (*f) (void *arg, struct bss *),
			      void *arg)
{
	wlan_iterate_nodes(&wmi->scan_table, f, arg);
}
396
/*
 * Convert the compact wmi_bss_info_hdr2 at @datap into a full
 * wmi_bss_info_hdr, expanding the skb in place.  The 4-byte skb_push
 * presumably matches sizeof(hdr) - sizeof(hdr2) — TODO confirm against
 * wmi.h.
 */
static void ath6kl_wmi_convert_bssinfo_hdr2_to_hdr(struct sk_buff *skb,
						   u8 *datap)
{
	struct wmi_bss_info_hdr2 bih2;
	struct wmi_bss_info_hdr *bih;

	/* copy out first: skb_push shifts skb->data over the source */
	memcpy(&bih2, datap, sizeof(struct wmi_bss_info_hdr2));

	skb_push(skb, 4);
	bih = (struct wmi_bss_info_hdr *) skb->data;

	bih->ch = bih2.ch;
	bih->frame_type = bih2.frame_type;
	bih->snr = bih2.snr;
	/* hdr2 has no rssi; derive it from snr (assumes -95 dBm noise
	 * floor — NOTE(review): confirm) */
	bih->rssi = a_cpu_to_sle16(bih2.snr - 95);
	bih->ie_mask = cpu_to_le32(le16_to_cpu(bih2.ie_mask));
	memcpy(bih->bssid, bih2.bssid, ETH_ALEN);
}
415
/*
 * Debug-log a TX complete event and each of its per-packet status
 * records.  Does nothing beyond logging; the loop is skipped entirely
 * when WMI debugging is disabled.
 */
static int ath6kl_wmi_tx_complete_event_rx(u8 *datap, int len)
{
	struct tx_complete_msg_v1 *msg_v1;
	struct wmi_tx_complete_event *evt;
	int index;
	u16 size;

	evt = (struct wmi_tx_complete_event *) datap;

	ath6kl_dbg(ATH6KL_DBG_WMI, "comp: %d %d %d\n",
		   evt->num_msg, evt->msg_len, evt->msg_type);

	if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_WMI))
		return 0;

	/* NOTE(review): evt->num_msg is not validated against @len —
	 * a malformed event could drive reads past the buffer; confirm
	 * the caller guarantees the length. */
	for (index = 0; index < evt->num_msg; index++) {
		size = sizeof(struct wmi_tx_complete_event) +
		    (index * sizeof(struct tx_complete_msg_v1));
		msg_v1 = (struct tx_complete_msg_v1 *)(datap + size);

		ath6kl_dbg(ATH6KL_DBG_WMI, "msg: %d %d %d %d\n",
			   msg_v1->status, msg_v1->pkt_id,
			   msg_v1->rate_idx, msg_v1->ack_failures);
	}

	return 0;
}
443
444static inline struct sk_buff *ath6kl_wmi_get_new_buf(u32 size)
445{
446 struct sk_buff *skb;
447
448 skb = ath6kl_buf_alloc(size);
449 if (!skb)
450 return NULL;
451
452 skb_put(skb, size);
453 if (size)
454 memset(skb->data, 0, size);
455
456 return skb;
457}
458
459/* Send a "simple" wmi command -- one with no arguments */
460static int ath6kl_wmi_simple_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id)
461{
462 struct sk_buff *skb;
463 int ret;
464
465 skb = ath6kl_wmi_get_new_buf(0);
466 if (!skb)
467 return -ENOMEM;
468
469 ret = ath6kl_wmi_cmd_send(wmi, skb, cmd_id, NO_SYNC_WMIFLAG);
470
471 return ret;
472}
473
474static int ath6kl_wmi_ready_event_rx(struct wmi *wmi, u8 *datap, int len)
475{
476 struct wmi_ready_event_2 *ev = (struct wmi_ready_event_2 *) datap;
477
478 if (len < sizeof(struct wmi_ready_event_2))
479 return -EINVAL;
480
481 wmi->ready = true;
482 ath6kl_ready_event(wmi->parent_dev, ev->mac_addr,
483 le32_to_cpu(ev->sw_version),
484 le32_to_cpu(ev->abi_version));
485
486 return 0;
487}
488
/*
 * Handle a WMI connect event: record the BSSID, scan the assoc-response
 * IEs for a WMM parameter element (enabling WMM if found), and forward
 * the connection details to the driver core.
 */
static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len)
{
	struct wmi_connect_event *ev;
	u8 *pie, *peie;

	if (len < sizeof(struct wmi_connect_event))
		return -EINVAL;

	ev = (struct wmi_connect_event *) datap;

	/* NOTE(review): ev->ch is printed raw here but converted with
	 * le16_to_cpu() below — debug output is byte-swapped on BE hosts */
	ath6kl_dbg(ATH6KL_DBG_WMI, "%s: freq %d bssid %pM\n",
		   __func__, ev->ch, ev->bssid);

	memcpy(wmi->bssid, ev->bssid, ETH_ALEN);

	/* Start of assoc rsp IEs */
	pie = ev->assoc_info + ev->beacon_ie_len +
	      ev->assoc_req_len + (sizeof(u16) * 3); /* capinfo, status, aid */

	/* End of assoc rsp IEs */
	peie = ev->assoc_info + ev->beacon_ie_len + ev->assoc_req_len +
	       ev->assoc_resp_len;

	/* walk the IEs looking for the WMM parameter element
	 * (OUI 00:50:F2, type WMM, subtype parameter) */
	while (pie < peie) {
		switch (*pie) {
		case WLAN_EID_VENDOR_SPECIFIC:
			if (pie[1] > 3 && pie[2] == 0x00 && pie[3] == 0x50 &&
			    pie[4] == 0xf2 && pie[5] == WMM_OUI_TYPE) {
				/* WMM OUT (00:50:F2) */
				if (pie[1] > 5
				    && pie[6] == WMM_PARAM_OUI_SUBTYPE)
					wmi->is_wmm_enabled = true;
			}
			break;
		}

		if (wmi->is_wmm_enabled)
			break;

		/* advance by IE length + 2-byte id/len header */
		pie += pie[1] + 2;
	}

	ath6kl_connect_event(wmi->parent_dev, le16_to_cpu(ev->ch), ev->bssid,
			     le16_to_cpu(ev->listen_intvl),
			     le16_to_cpu(ev->beacon_intvl),
			     le32_to_cpu(ev->nw_type),
			     ev->beacon_ie_len, ev->assoc_req_len,
			     ev->assoc_resp_len, ev->assoc_info);

	return 0;
}
540
541static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len)
542{
543 struct wmi_disconnect_event *ev;
544 wmi->traffic_class = 100;
545
546 if (len < sizeof(struct wmi_disconnect_event))
547 return -EINVAL;
548
549 ev = (struct wmi_disconnect_event *) datap;
550 memset(wmi->bssid, 0, sizeof(wmi->bssid));
551
552 wmi->is_wmm_enabled = false;
553 wmi->pair_crypto_type = NONE_CRYPT;
554 wmi->grp_crypto_type = NONE_CRYPT;
555
556 ath6kl_disconnect_event(wmi->parent_dev, ev->disconn_reason,
557 ev->bssid, ev->assoc_resp_len, ev->assoc_info,
558 le16_to_cpu(ev->proto_reason_status));
559
560 return 0;
561}
562
563static int ath6kl_wmi_peer_node_event_rx(struct wmi *wmi, u8 *datap, int len)
564{
565 struct wmi_peer_node_event *ev;
566
567 if (len < sizeof(struct wmi_peer_node_event))
568 return -EINVAL;
569
570 ev = (struct wmi_peer_node_event *) datap;
571
572 if (ev->event_code == PEER_NODE_JOIN_EVENT)
573 ath6kl_dbg(ATH6KL_DBG_WMI, "joined node with mac addr: %pM\n",
574 ev->peer_mac_addr);
575 else if (ev->event_code == PEER_NODE_LEAVE_EVENT)
576 ath6kl_dbg(ATH6KL_DBG_WMI, "left node with mac addr: %pM\n",
577 ev->peer_mac_addr);
578
579 return 0;
580}
581
582static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len)
583{
584 struct wmi_tkip_micerr_event *ev;
585
586 if (len < sizeof(struct wmi_tkip_micerr_event))
587 return -EINVAL;
588
589 ev = (struct wmi_tkip_micerr_event *) datap;
590
591 ath6kl_tkip_micerr_event(wmi->parent_dev, ev->key_id, ev->is_mcast);
592
593 return 0;
594}
595
/*
 * Parse a beacon/probe-response body into the per-IE pointers of
 * @cie.  Returns -EINVAL when the frame is too short, an ERP IE is
 * malformed, or the mandatory rates/ssid IEs are missing or oversized.
 */
static int ath6kl_wlan_parse_beacon(u8 *buf, int frame_len,
				    struct ath6kl_common_ie *cie)
{
	u8 *frm, *efrm;
	u8 elemid_ssid = false;

	frm = buf;
	efrm = (u8 *) (frm + frame_len);

	/*
	 * beacon/probe response frame format
	 * [8] time stamp
	 * [2] beacon interval
	 * [2] capability information
	 * [tlv] ssid
	 * [tlv] supported rates
	 * [tlv] country information
	 * [tlv] parameter set (FH/DS)
	 * [tlv] erp information
	 * [tlv] extended supported rates
	 * [tlv] WMM
	 * [tlv] WPA or RSN
	 * [tlv] Atheros Advanced Capabilities
	 */
	if ((efrm - frm) < 12)
		return -EINVAL;

	memset(cie, 0, sizeof(*cie));

	/* fixed-size fields precede the TLVs */
	cie->ie_tstamp = frm;
	frm += 8;
	/* NOTE(review): raw unaligned reads, stored without byte-order
	 * conversion — consumers presumably handle LE; confirm */
	cie->ie_beaconInt = *(u16 *) frm;
	frm += 2;
	cie->ie_capInfo = *(u16 *) frm;
	frm += 2;
	cie->ie_chan = 0;

	/* NOTE(review): frm[1] (and IE bodies) are read without checking
	 * they lie inside [frm, efrm) — a truncated final IE can be read
	 * past the buffer; confirm callers pass well-formed frames */
	while (frm < efrm) {
		switch (*frm) {
		case WLAN_EID_SSID:
			/* only the first SSID IE is recorded */
			if (!elemid_ssid) {
				cie->ie_ssid = frm;
				elemid_ssid = true;
			}
			break;
		case WLAN_EID_SUPP_RATES:
			cie->ie_rates = frm;
			break;
		case WLAN_EID_COUNTRY:
			cie->ie_country = frm;
			break;
		case WLAN_EID_FH_PARAMS:
			break;
		case WLAN_EID_DS_PARAMS:
			cie->ie_chan = frm[2];
			break;
		case WLAN_EID_TIM:
			cie->ie_tim = frm;
			break;
		case WLAN_EID_IBSS_PARAMS:
			break;
		case WLAN_EID_EXT_SUPP_RATES:
			cie->ie_xrates = frm;
			break;
		case WLAN_EID_ERP_INFO:
			/* ERP IE must carry exactly one octet */
			if (frm[1] != 1)
				return -EINVAL;

			cie->ie_erp = frm[2];
			break;
		case WLAN_EID_RSN:
			cie->ie_rsn = frm;
			break;
		case WLAN_EID_HT_CAPABILITY:
			cie->ie_htcap = frm;
			break;
		case WLAN_EID_HT_INFORMATION:
			cie->ie_htop = frm;
			break;
		case WLAN_EID_VENDOR_SPECIFIC:
			if (frm[1] > 3 && frm[2] == 0x00 && frm[3] == 0x50 &&
			    frm[4] == 0xf2) {
				/* OUT Type (00:50:F2) */

				if (frm[5] == WPA_OUI_TYPE) {
					/* WPA OUT */
					cie->ie_wpa = frm;
				} else if (frm[5] == WMM_OUI_TYPE) {
					/* WMM OUT */
					cie->ie_wmm = frm;
				} else if (frm[5] == WSC_OUT_TYPE) {
					/* WSC OUT */
					cie->ie_wsc = frm;
				}

			} else if (frm[1] > 3 && frm[2] == 0x00
				   && frm[3] == 0x03 && frm[4] == 0x7f
				   && frm[5] == ATH_OUI_TYPE) {
				/* Atheros OUI (00:03:7f) */
				cie->ie_ath = frm;
			}
			break;
		default:
			break;
		}
		/* advance past id/len header plus body */
		frm += frm[1] + 2;
	}

	/* rates and ssid IEs are mandatory and bounded */
	if ((cie->ie_rates == NULL)
	    || (cie->ie_rates[1] > ATH6KL_RATE_MAXSIZE))
		return -EINVAL;

	if ((cie->ie_ssid == NULL)
	    || (cie->ie_ssid[1] > IEEE80211_MAX_SSID_LEN))
		return -EINVAL;

	return 0;
}
714
715static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len)
716{
717 struct bss *bss = NULL;
718 struct wmi_bss_info_hdr *bih;
719 u8 cached_ssid_len = 0;
720 u8 cached_ssid[IEEE80211_MAX_SSID_LEN] = { 0 };
721 u8 beacon_ssid_len = 0;
722 u8 *buf, *ie_ssid;
723 u8 *ni_buf;
724 int buf_len;
725
726 int ret;
727
728 if (len <= sizeof(struct wmi_bss_info_hdr))
729 return -EINVAL;
730
731 bih = (struct wmi_bss_info_hdr *) datap;
732 bss = wlan_find_node(&wmi->scan_table, bih->bssid);
733
734 if (a_sle16_to_cpu(bih->rssi) > 0) {
735 if (bss == NULL)
736 return 0;
737 else
738 bih->rssi = a_cpu_to_sle16(bss->ni_rssi);
739 }
740
741 buf = datap + sizeof(struct wmi_bss_info_hdr);
742 len -= sizeof(struct wmi_bss_info_hdr);
743
744 ath6kl_dbg(ATH6KL_DBG_WMI,
745 "bss info evt - ch %u, rssi %02x, bssid \"%pM\"\n",
746 bih->ch, a_sle16_to_cpu(bih->rssi), bih->bssid);
747
748 if (bss != NULL) {
749 /*
750 * Free up the node. We are about to allocate a new node.
751 * In case of hidden AP, beacon will not have ssid,
752 * but a directed probe response will have it,
753 * so cache the probe-resp-ssid if already present.
754 */
755 if (wmi->is_probe_ssid && (bih->frame_type == BEACON_FTYPE)) {
756 ie_ssid = bss->ni_cie.ie_ssid;
757 if (ie_ssid && (ie_ssid[1] <= IEEE80211_MAX_SSID_LEN) &&
758 (ie_ssid[2] != 0)) {
759 cached_ssid_len = ie_ssid[1];
760 memcpy(cached_ssid, ie_ssid + 2,
761 cached_ssid_len);
762 }
763 }
764
765 /*
766 * Use the current average rssi of associated AP base on
767 * assumption
768 * 1. Most os with GUI will update RSSI by
769 * ath6kl_wmi_get_stats_cmd() periodically.
770 * 2. ath6kl_wmi_get_stats_cmd(..) will be called when calling
771 * ath6kl_wmi_startscan_cmd(...)
772 * The average value of RSSI give end-user better feeling for
773 * instance value of scan result. It also sync up RSSI info
774 * in GUI between scan result and RSSI signal icon.
775 */
776 if (memcmp(wmi->bssid, bih->bssid, ETH_ALEN) == 0) {
777 bih->rssi = a_cpu_to_sle16(bss->ni_rssi);
778 bih->snr = bss->ni_snr;
779 }
780
781 wlan_node_reclaim(&wmi->scan_table, bss);
782 }
783
784 /*
785 * beacon/probe response frame format
786 * [8] time stamp
787 * [2] beacon interval
788 * [2] capability information
789 * [tlv] ssid
790 */
791 beacon_ssid_len = buf[SSID_IE_LEN_INDEX];
792
793 /*
794 * If ssid is cached for this hidden AP, then change
795 * buffer len accordingly.
796 */
797 if (wmi->is_probe_ssid && (bih->frame_type == BEACON_FTYPE) &&
798 (cached_ssid_len != 0) &&
799 (beacon_ssid_len == 0 || (cached_ssid_len > beacon_ssid_len &&
800 buf[SSID_IE_LEN_INDEX + 1] == 0))) {
801
802 len += (cached_ssid_len - beacon_ssid_len);
803 }
804
805 bss = wlan_node_alloc(len);
806 if (!bss)
807 return -ENOMEM;
808
809 bss->ni_snr = bih->snr;
810 bss->ni_rssi = a_sle16_to_cpu(bih->rssi);
811
812 if (WARN_ON(!bss->ni_buf))
813 return -EINVAL;
814
815 /*
816 * In case of hidden AP, beacon will not have ssid,
817 * but a directed probe response will have it,
818 * so place the cached-ssid(probe-resp) in the bss info.
819 */
820 if (wmi->is_probe_ssid && (bih->frame_type == BEACON_FTYPE) &&
821 (cached_ssid_len != 0) &&
822 (beacon_ssid_len == 0 || (beacon_ssid_len &&
823 buf[SSID_IE_LEN_INDEX + 1] == 0))) {
824 ni_buf = bss->ni_buf;
825 buf_len = len;
826
827 /*
828 * Copy the first 14 bytes:
829 * time-stamp(8), beacon-interval(2),
830 * cap-info(2), ssid-id(1), ssid-len(1).
831 */
832 memcpy(ni_buf, buf, SSID_IE_LEN_INDEX + 1);
833
834 ni_buf[SSID_IE_LEN_INDEX] = cached_ssid_len;
835 ni_buf += (SSID_IE_LEN_INDEX + 1);
836
837 buf += (SSID_IE_LEN_INDEX + 1);
838 buf_len -= (SSID_IE_LEN_INDEX + 1);
839
840 memcpy(ni_buf, cached_ssid, cached_ssid_len);
841 ni_buf += cached_ssid_len;
842
843 buf += beacon_ssid_len;
844 buf_len -= beacon_ssid_len;
845
846 if (cached_ssid_len > beacon_ssid_len)
847 buf_len -= (cached_ssid_len - beacon_ssid_len);
848
849 memcpy(ni_buf, buf, buf_len);
850 } else
851 memcpy(bss->ni_buf, buf, len);
852
853 bss->ni_framelen = len;
854
855 ret = ath6kl_wlan_parse_beacon(bss->ni_buf, len, &bss->ni_cie);
856 if (ret) {
857 wlan_node_free(bss);
858 return -EINVAL;
859 }
860
861 /*
862 * Update the frequency in ie_chan, overwriting of channel number
863 * which is done in ath6kl_wlan_parse_beacon
864 */
865 bss->ni_cie.ie_chan = le16_to_cpu(bih->ch);
866 wlan_setup_node(&wmi->scan_table, bss, bih->bssid);
867
868 return 0;
869}
870
871static int ath6kl_wmi_opt_frame_event_rx(struct wmi *wmi, u8 *datap, int len)
872{
873 struct bss *bss;
874 struct wmi_opt_rx_info_hdr *bih;
875 u8 *buf;
876
877 if (len <= sizeof(struct wmi_opt_rx_info_hdr))
878 return -EINVAL;
879
880 bih = (struct wmi_opt_rx_info_hdr *) datap;
881 buf = datap + sizeof(struct wmi_opt_rx_info_hdr);
882 len -= sizeof(struct wmi_opt_rx_info_hdr);
883
884 ath6kl_dbg(ATH6KL_DBG_WMI, "opt frame event %2.2x:%2.2x\n",
885 bih->bssid[4], bih->bssid[5]);
886
887 bss = wlan_find_node(&wmi->scan_table, bih->bssid);
888 if (bss != NULL) {
889 /* Free up the node. We are about to allocate a new node. */
890 wlan_node_reclaim(&wmi->scan_table, bss);
891 }
892
893 bss = wlan_node_alloc(len);
894 if (!bss)
895 return -ENOMEM;
896
897 bss->ni_snr = bih->snr;
898 bss->ni_cie.ie_chan = le16_to_cpu(bih->ch);
899
900 if (WARN_ON(!bss->ni_buf))
901 return -EINVAL;
902
903 memcpy(bss->ni_buf, buf, len);
904 wlan_setup_node(&wmi->scan_table, bss, bih->bssid);
905
906 return 0;
907}
908
/* Inactivity timeout of a fatpipe(pstream) at the target */
static int ath6kl_wmi_pstream_timeout_event_rx(struct wmi *wmi, u8 *datap,
					       int len)
{
	struct wmi_pstream_timeout_event *ev;

	if (len < sizeof(struct wmi_pstream_timeout_event))
		return -EINVAL;

	ev = (struct wmi_pstream_timeout_event *) datap;

	/*
	 * When the pstream (fat pipe == AC) timesout, it means there were
	 * no thinStreams within this pstream & it got implicitly created
	 * due to data flow on this AC. We start the inactivity timer only
	 * for implicitly created pstream. Just reset the host state.
	 */
	spin_lock_bh(&wmi->lock);
	wmi->stream_exist_for_ac[ev->traffic_class] = 0;
	wmi->fat_pipe_exist &= ~(1 << ev->traffic_class);
	spin_unlock_bh(&wmi->lock);

	/* Indicate inactivity to driver layer for this fatpipe (pstream) */
	ath6kl_indicate_tx_activity(wmi->parent_dev, ev->traffic_class, false);

	return 0;
}
936
/*
 * Reply to a bit-rate query: decode the rate and wake the waiting
 * thread.  NOTE(review): the decoded @rate is never consumed here,
 * and @index is not range-checked against wmi_rate_tbl — presumably
 * harmless for the debug print, but confirm before relying on it.
 */
static int ath6kl_wmi_bitrate_reply_rx(struct wmi *wmi, u8 *datap, int len)
{
	struct wmi_bit_rate_reply *reply;
	s32 rate;
	u32 sgi, index;

	if (len < sizeof(struct wmi_bit_rate_reply))
		return -EINVAL;

	reply = (struct wmi_bit_rate_reply *) datap;

	ath6kl_dbg(ATH6KL_DBG_WMI, "rateindex %d\n", reply->rate_index);

	if (reply->rate_index == (s8) RATE_AUTO) {
		rate = RATE_AUTO;
	} else {
		/* bit 7 flags short guard interval; low bits index the table */
		index = reply->rate_index & 0x7f;
		sgi = (reply->rate_index & 0x80) ? 1 : 0;
		rate = wmi_rate_tbl[index][sgi];
	}

	ath6kl_wakeup_event(wmi->parent_dev);

	return 0;
}
962
/* Fixed-rate-mask reply: just wake the thread waiting on the command. */
static int ath6kl_wmi_ratemask_reply_rx(struct wmi *wmi, u8 *datap, int len)
{
	if (len < sizeof(struct wmi_fix_rates_reply))
		return -EINVAL;

	ath6kl_wakeup_event(wmi->parent_dev);

	return 0;
}
972
/* Channel-list reply: just wake the thread waiting on the command. */
static int ath6kl_wmi_ch_list_reply_rx(struct wmi *wmi, u8 *datap, int len)
{
	if (len < sizeof(struct wmi_channel_list_reply))
		return -EINVAL;

	ath6kl_wakeup_event(wmi->parent_dev);

	return 0;
}
982
/* TX-power reply: forward the reported dBm value to the driver core. */
static int ath6kl_wmi_tx_pwr_reply_rx(struct wmi *wmi, u8 *datap, int len)
{
	struct wmi_tx_pwr_reply *reply;

	if (len < sizeof(struct wmi_tx_pwr_reply))
		return -EINVAL;

	reply = (struct wmi_tx_pwr_reply *) datap;
	ath6kl_txpwr_rx_evt(wmi->parent_dev, reply->dbM);

	return 0;
}
995
/* Keepalive reply: just wake the thread waiting on the command. */
static int ath6kl_wmi_keepalive_reply_rx(struct wmi *wmi, u8 *datap, int len)
{
	if (len < sizeof(struct wmi_get_keepalive_cmd))
		return -EINVAL;

	ath6kl_wakeup_event(wmi->parent_dev);

	return 0;
}
1005
1006static int ath6kl_wmi_scan_complete_rx(struct wmi *wmi, u8 *datap, int len)
1007{
1008 struct wmi_scan_complete_event *ev;
1009
1010 ev = (struct wmi_scan_complete_event *) datap;
1011
1012 if (a_sle32_to_cpu(ev->status) == 0)
1013 wlan_refresh_inactive_nodes(&wmi->scan_table);
1014
1015 ath6kl_scan_complete_evt(wmi->parent_dev, a_sle32_to_cpu(ev->status));
1016 wmi->is_probe_ssid = false;
1017
1018 return 0;
1019}
1020
1021/*
1022 * Target is reporting a programming error. This is for
1023 * developer aid only. Target only checks a few common violations
1024 * and it is responsibility of host to do all error checking.
1025 * Behavior of target after wmi error event is undefined.
1026 * A reset is recommended.
1027 */
1028static int ath6kl_wmi_error_event_rx(struct wmi *wmi, u8 *datap, int len)
1029{
1030 const char *type = "unknown error";
1031 struct wmi_cmd_error_event *ev;
1032 ev = (struct wmi_cmd_error_event *) datap;
1033
1034 switch (ev->err_code) {
1035 case INVALID_PARAM:
1036 type = "invalid parameter";
1037 break;
1038 case ILLEGAL_STATE:
1039 type = "invalid state";
1040 break;
1041 case INTERNAL_ERROR:
1042 type = "internal error";
1043 break;
1044 }
1045
1046 ath6kl_dbg(ATH6KL_DBG_WMI, "programming error, cmd=%d %s\n",
1047 ev->cmd_id, type);
1048
1049 return 0;
1050}
1051
/* Target statistics event: hand the raw payload to the driver core. */
static int ath6kl_wmi_stats_event_rx(struct wmi *wmi, u8 *datap, int len)
{
	ath6kl_tgt_stats_event(wmi->parent_dev, datap, len);

	return 0;
}
1058
1059static u8 ath6kl_wmi_get_upper_threshold(s16 rssi,
1060 struct sq_threshold_params *sq_thresh,
1061 u32 size)
1062{
1063 u32 index;
1064 u8 threshold = (u8) sq_thresh->upper_threshold[size - 1];
1065
1066 /* The list is already in sorted order. Get the next lower value */
1067 for (index = 0; index < size; index++) {
1068 if (rssi < sq_thresh->upper_threshold[index]) {
1069 threshold = (u8) sq_thresh->upper_threshold[index];
1070 break;
1071 }
1072 }
1073
1074 return threshold;
1075}
1076
1077static u8 ath6kl_wmi_get_lower_threshold(s16 rssi,
1078 struct sq_threshold_params *sq_thresh,
1079 u32 size)
1080{
1081 u32 index;
1082 u8 threshold = (u8) sq_thresh->lower_threshold[size - 1];
1083
1084 /* The list is already in sorted order. Get the next lower value */
1085 for (index = 0; index < size; index++) {
1086 if (rssi > sq_thresh->lower_threshold[index]) {
1087 threshold = (u8) sq_thresh->lower_threshold[index];
1088 break;
1089 }
1090 }
1091
1092 return threshold;
1093}
1094
1095static int ath6kl_wmi_send_rssi_threshold_params(struct wmi *wmi,
1096 struct wmi_rssi_threshold_params_cmd *rssi_cmd)
1097{
1098 struct sk_buff *skb;
1099 struct wmi_rssi_threshold_params_cmd *cmd;
1100
1101 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
1102 if (!skb)
1103 return -ENOMEM;
1104
1105 cmd = (struct wmi_rssi_threshold_params_cmd *) skb->data;
1106 memcpy(cmd, rssi_cmd, sizeof(struct wmi_rssi_threshold_params_cmd));
1107
1108 return ath6kl_wmi_cmd_send(wmi, skb, WMI_RSSI_THRESHOLD_PARAMS_CMDID,
1109 NO_SYNC_WMIFLAG);
1110}
1111
/*
 * RSSI threshold crossing event: map the reported rssi onto the band
 * (threshold 1..6, above or below) that was breached, then install a
 * fresh pair of upper/lower thresholds bracketing the current rssi.
 */
static int ath6kl_wmi_rssi_threshold_event_rx(struct wmi *wmi, u8 *datap,
					      int len)
{
	struct wmi_rssi_threshold_event *reply;
	struct wmi_rssi_threshold_params_cmd cmd;
	struct sq_threshold_params *sq_thresh;
	enum wmi_rssi_threshold_val new_threshold;
	u8 upper_rssi_threshold, lower_rssi_threshold;
	s16 rssi;
	int ret;

	if (len < sizeof(struct wmi_rssi_threshold_event))
		return -EINVAL;

	reply = (struct wmi_rssi_threshold_event *) datap;
	/* non-zero range means an upper threshold was crossed */
	new_threshold = (enum wmi_rssi_threshold_val) reply->range;
	rssi = a_sle16_to_cpu(reply->rssi);

	sq_thresh = &wmi->sq_threshld[SIGNAL_QUALITY_METRICS_RSSI];

	/*
	 * Identify the threshold breached and communicate that to the app.
	 * After that install a new set of thresholds based on the signal
	 * quality reported by the target
	 */
	if (new_threshold) {
		/* Upper threshold breached */
		if (rssi < sq_thresh->upper_threshold[0]) {
			ath6kl_dbg(ATH6KL_DBG_WMI,
				"spurious upper rssi threshold event: %d\n",
				rssi);
		} else if ((rssi < sq_thresh->upper_threshold[1]) &&
			   (rssi >= sq_thresh->upper_threshold[0])) {
			new_threshold = WMI_RSSI_THRESHOLD1_ABOVE;
		} else if ((rssi < sq_thresh->upper_threshold[2]) &&
			   (rssi >= sq_thresh->upper_threshold[1])) {
			new_threshold = WMI_RSSI_THRESHOLD2_ABOVE;
		} else if ((rssi < sq_thresh->upper_threshold[3]) &&
			   (rssi >= sq_thresh->upper_threshold[2])) {
			new_threshold = WMI_RSSI_THRESHOLD3_ABOVE;
		} else if ((rssi < sq_thresh->upper_threshold[4]) &&
			   (rssi >= sq_thresh->upper_threshold[3])) {
			new_threshold = WMI_RSSI_THRESHOLD4_ABOVE;
		} else if ((rssi < sq_thresh->upper_threshold[5]) &&
			   (rssi >= sq_thresh->upper_threshold[4])) {
			new_threshold = WMI_RSSI_THRESHOLD5_ABOVE;
		} else if (rssi >= sq_thresh->upper_threshold[5]) {
			new_threshold = WMI_RSSI_THRESHOLD6_ABOVE;
		}
	} else {
		/* Lower threshold breached */
		if (rssi > sq_thresh->lower_threshold[0]) {
			ath6kl_dbg(ATH6KL_DBG_WMI,
				"spurious lower rssi threshold event: %d %d\n",
				rssi, sq_thresh->lower_threshold[0]);
		} else if ((rssi > sq_thresh->lower_threshold[1]) &&
			   (rssi <= sq_thresh->lower_threshold[0])) {
			new_threshold = WMI_RSSI_THRESHOLD6_BELOW;
		} else if ((rssi > sq_thresh->lower_threshold[2]) &&
			   (rssi <= sq_thresh->lower_threshold[1])) {
			new_threshold = WMI_RSSI_THRESHOLD5_BELOW;
		} else if ((rssi > sq_thresh->lower_threshold[3]) &&
			   (rssi <= sq_thresh->lower_threshold[2])) {
			new_threshold = WMI_RSSI_THRESHOLD4_BELOW;
		} else if ((rssi > sq_thresh->lower_threshold[4]) &&
			   (rssi <= sq_thresh->lower_threshold[3])) {
			new_threshold = WMI_RSSI_THRESHOLD3_BELOW;
		} else if ((rssi > sq_thresh->lower_threshold[5]) &&
			   (rssi <= sq_thresh->lower_threshold[4])) {
			new_threshold = WMI_RSSI_THRESHOLD2_BELOW;
		} else if (rssi <= sq_thresh->lower_threshold[5]) {
			new_threshold = WMI_RSSI_THRESHOLD1_BELOW;
		}
	}

	/* Calculate and install the next set of thresholds */
	lower_rssi_threshold = ath6kl_wmi_get_lower_threshold(rssi, sq_thresh,
				       sq_thresh->lower_threshold_valid_count);
	upper_rssi_threshold = ath6kl_wmi_get_upper_threshold(rssi, sq_thresh,
				       sq_thresh->upper_threshold_valid_count);

	/* Issue a wmi command to install the thresholds */
	cmd.thresh_above1_val = a_cpu_to_sle16(upper_rssi_threshold);
	cmd.thresh_below1_val = a_cpu_to_sle16(lower_rssi_threshold);
	cmd.weight = sq_thresh->weight;
	cmd.poll_time = cpu_to_le32(sq_thresh->polling_interval);

	ret = ath6kl_wmi_send_rssi_threshold_params(wmi, &cmd);
	if (ret) {
		ath6kl_err("unable to configure rssi thresholds\n");
		return -EIO;
	}

	return 0;
}
1207
/*
 * Handle a WMI Call Admission Control (CAC) event from the target.
 *
 * Three indications are acted upon:
 *  - ADMISSION_RESP with a non-accepted status: delete the pstream for
 *    the TSID carried in the suggested TSPEC IE.
 *  - NO_RESP (ADDTS timed out): delete the single outstanding pstream
 *    for this AC (assumes only one ADDTS request is pending).
 *  - DELETE (AP removed a QoS stream): clear the TSID bit and, when no
 *    TSIDs remain in the AC, mark the fat pipe inactive and tell the
 *    driver layer.
 *
 * Returns 0 on success or -EINVAL if the event is shorter than expected.
 */
static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len)
{
	struct wmi_cac_event *reply;
	struct ieee80211_tspec_ie *ts;
	u16 active_tsids, tsinfo;
	u8 tsid, index;
	u8 ts_id;

	if (len < sizeof(struct wmi_cac_event))
		return -EINVAL;

	reply = (struct wmi_cac_event *) datap;

	if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) &&
	    (reply->status_code != IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED)) {

		/* Admission was refused: extract the TSID from the TSPEC
		 * the target suggested and tear the stream down. */
		ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
		tsinfo = le16_to_cpu(ts->tsinfo);
		tsid = (tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
			IEEE80211_WMM_IE_TSPEC_TID_MASK;

		ath6kl_wmi_delete_pstream_cmd(wmi, reply->ac, tsid);
	} else if (reply->cac_indication == CAC_INDICATION_NO_RESP) {
		/*
		 * Following assumes that there is only one outstanding
		 * ADDTS request when this event is received
		 */
		spin_lock_bh(&wmi->lock);
		active_tsids = wmi->stream_exist_for_ac[reply->ac];
		spin_unlock_bh(&wmi->lock);

		/* Find the lowest-numbered active TSID in this AC. */
		for (index = 0; index < sizeof(active_tsids) * 8; index++) {
			if ((active_tsids >> index) & 1)
				break;
		}
		if (index < (sizeof(active_tsids) * 8))
			ath6kl_wmi_delete_pstream_cmd(wmi, reply->ac, index);
	}

	/*
	 * Clear active tsids and Add missing handling
	 * for delete qos stream from AP
	 */
	else if (reply->cac_indication == CAC_INDICATION_DELETE) {

		ts = (struct ieee80211_tspec_ie *) &(reply->tspec_suggestion);
		tsinfo = le16_to_cpu(ts->tsinfo);
		ts_id = ((tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
			 IEEE80211_WMM_IE_TSPEC_TID_MASK);

		spin_lock_bh(&wmi->lock);
		wmi->stream_exist_for_ac[reply->ac] &= ~(1 << ts_id);
		active_tsids = wmi->stream_exist_for_ac[reply->ac];
		spin_unlock_bh(&wmi->lock);

		/* Indicate stream inactivity to driver layer only if all tsids
		 * within this AC are deleted.
		 */
		if (!active_tsids) {
			ath6kl_indicate_tx_activity(wmi->parent_dev, reply->ac,
						    false);
			wmi->fat_pipe_exist &= ~(1 << reply->ac);
		}
	}

	return 0;
}
1275
1276static int ath6kl_wmi_send_snr_threshold_params(struct wmi *wmi,
1277 struct wmi_snr_threshold_params_cmd *snr_cmd)
1278{
1279 struct sk_buff *skb;
1280 struct wmi_snr_threshold_params_cmd *cmd;
1281
1282 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
1283 if (!skb)
1284 return -ENOMEM;
1285
1286 cmd = (struct wmi_snr_threshold_params_cmd *) skb->data;
1287 memcpy(cmd, snr_cmd, sizeof(struct wmi_snr_threshold_params_cmd));
1288
1289 return ath6kl_wmi_cmd_send(wmi, skb, WMI_SNR_THRESHOLD_PARAMS_CMDID,
1290 NO_SYNC_WMIFLAG);
1291}
1292
1293static int ath6kl_wmi_snr_threshold_event_rx(struct wmi *wmi, u8 *datap,
1294 int len)
1295{
1296 struct wmi_snr_threshold_event *reply;
1297 struct sq_threshold_params *sq_thresh;
1298 struct wmi_snr_threshold_params_cmd cmd;
1299 enum wmi_snr_threshold_val new_threshold;
1300 u8 upper_snr_threshold, lower_snr_threshold;
1301 s16 snr;
1302 int ret;
1303
1304 if (len < sizeof(struct wmi_snr_threshold_event))
1305 return -EINVAL;
1306
1307 reply = (struct wmi_snr_threshold_event *) datap;
1308
1309 new_threshold = (enum wmi_snr_threshold_val) reply->range;
1310 snr = reply->snr;
1311
1312 sq_thresh = &wmi->sq_threshld[SIGNAL_QUALITY_METRICS_SNR];
1313
1314 /*
1315 * Identify the threshold breached and communicate that to the app.
1316 * After that install a new set of thresholds based on the signal
1317 * quality reported by the target.
1318 */
1319 if (new_threshold) {
1320 /* Upper threshold breached */
1321 if (snr < sq_thresh->upper_threshold[0]) {
1322 ath6kl_dbg(ATH6KL_DBG_WMI,
1323 "spurious upper snr threshold event: %d\n",
1324 snr);
1325 } else if ((snr < sq_thresh->upper_threshold[1]) &&
1326 (snr >= sq_thresh->upper_threshold[0])) {
1327 new_threshold = WMI_SNR_THRESHOLD1_ABOVE;
1328 } else if ((snr < sq_thresh->upper_threshold[2]) &&
1329 (snr >= sq_thresh->upper_threshold[1])) {
1330 new_threshold = WMI_SNR_THRESHOLD2_ABOVE;
1331 } else if ((snr < sq_thresh->upper_threshold[3]) &&
1332 (snr >= sq_thresh->upper_threshold[2])) {
1333 new_threshold = WMI_SNR_THRESHOLD3_ABOVE;
1334 } else if (snr >= sq_thresh->upper_threshold[3]) {
1335 new_threshold = WMI_SNR_THRESHOLD4_ABOVE;
1336 }
1337 } else {
1338 /* Lower threshold breached */
1339 if (snr > sq_thresh->lower_threshold[0]) {
1340 ath6kl_dbg(ATH6KL_DBG_WMI,
1341 "spurious lower snr threshold event: %d\n",
1342 sq_thresh->lower_threshold[0]);
1343 } else if ((snr > sq_thresh->lower_threshold[1]) &&
1344 (snr <= sq_thresh->lower_threshold[0])) {
1345 new_threshold = WMI_SNR_THRESHOLD4_BELOW;
1346 } else if ((snr > sq_thresh->lower_threshold[2]) &&
1347 (snr <= sq_thresh->lower_threshold[1])) {
1348 new_threshold = WMI_SNR_THRESHOLD3_BELOW;
1349 } else if ((snr > sq_thresh->lower_threshold[3]) &&
1350 (snr <= sq_thresh->lower_threshold[2])) {
1351 new_threshold = WMI_SNR_THRESHOLD2_BELOW;
1352 } else if (snr <= sq_thresh->lower_threshold[3]) {
1353 new_threshold = WMI_SNR_THRESHOLD1_BELOW;
1354 }
1355 }
1356
1357 /* Calculate and install the next set of thresholds */
1358 lower_snr_threshold = ath6kl_wmi_get_lower_threshold(snr, sq_thresh,
1359 sq_thresh->lower_threshold_valid_count);
1360 upper_snr_threshold = ath6kl_wmi_get_upper_threshold(snr, sq_thresh,
1361 sq_thresh->upper_threshold_valid_count);
1362
1363 /* Issue a wmi command to install the thresholds */
1364 cmd.thresh_above1_val = upper_snr_threshold;
1365 cmd.thresh_below1_val = lower_snr_threshold;
1366 cmd.weight = sq_thresh->weight;
1367 cmd.poll_time = cpu_to_le32(sq_thresh->polling_interval);
1368
1369 ath6kl_dbg(ATH6KL_DBG_WMI,
1370 "snr: %d, threshold: %d, lower: %d, upper: %d\n",
1371 snr, new_threshold,
1372 lower_snr_threshold, upper_snr_threshold);
1373
1374 ret = ath6kl_wmi_send_snr_threshold_params(wmi, &cmd);
1375 if (ret) {
1376 ath6kl_err("unable to configure snr threshold\n");
1377 return -EIO;
1378 }
1379
1380 return 0;
1381}
1382
1383static int ath6kl_wmi_aplist_event_rx(struct wmi *wmi, u8 *datap, int len)
1384{
1385 u16 ap_info_entry_size;
1386 struct wmi_aplist_event *ev = (struct wmi_aplist_event *) datap;
1387 struct wmi_ap_info_v1 *ap_info_v1;
1388 u8 index;
1389
1390 if (len < sizeof(struct wmi_aplist_event) ||
1391 ev->ap_list_ver != APLIST_VER1)
1392 return -EINVAL;
1393
1394 ap_info_entry_size = sizeof(struct wmi_ap_info_v1);
1395 ap_info_v1 = (struct wmi_ap_info_v1 *) ev->ap_list;
1396
1397 ath6kl_dbg(ATH6KL_DBG_WMI,
1398 "number of APs in aplist event: %d\n", ev->num_ap);
1399
1400 if (len < (int) (sizeof(struct wmi_aplist_event) +
1401 (ev->num_ap - 1) * ap_info_entry_size))
1402 return -EINVAL;
1403
1404 /* AP list version 1 contents */
1405 for (index = 0; index < ev->num_ap; index++) {
1406 ath6kl_dbg(ATH6KL_DBG_WMI, "AP#%d BSSID %pM Channel %d\n",
1407 index, ap_info_v1->bssid, ap_info_v1->channel);
1408 ap_info_v1++;
1409 }
1410
1411 return 0;
1412}
1413
/*
 * Send a WMI control command to the target.
 *
 * Takes ownership of @skb (it is freed here on validation failure;
 * otherwise it is handed to ath6kl_control_tx). Depending on @sync_flag
 * a data sync point is established before and/or after the command, so
 * that already-queued data frames and the command are delivered to the
 * target in the intended order.
 *
 * WMI_OPT_TX_FRAME_CMDID is special-cased: it gets a data header and is
 * sent out on the best-effort data endpoint rather than the control
 * endpoint.
 *
 * Returns 0 on success or a negative errno.
 */
int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb,
			enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum htc_endpoint_id ep_id = wmi->ep_id;
	int ret;

	if (WARN_ON(skb == NULL))
		return -EINVAL;

	if (sync_flag >= END_WMIFLAG) {
		dev_kfree_skb(skb);
		return -EINVAL;
	}

	if ((sync_flag == SYNC_BEFORE_WMIFLAG) ||
	    (sync_flag == SYNC_BOTH_WMIFLAG)) {
		/*
		 * Make sure all data currently queued is transmitted before
		 * the cmd execution. Establish a new sync point.
		 */
		ath6kl_wmi_sync_point(wmi);
	}

	skb_push(skb, sizeof(struct wmi_cmd_hdr));

	cmd_hdr = (struct wmi_cmd_hdr *) skb->data;
	cmd_hdr->cmd_id = cpu_to_le16(cmd_id);
	cmd_hdr->info1 = 0;	/* added for virtual interface */

	/* Only for OPT_TX_CMD, use BE endpoint. */
	if (cmd_id == WMI_OPT_TX_FRAME_CMDID) {
		ret = ath6kl_wmi_data_hdr_add(wmi, skb, OPT_MSGTYPE,
					      false, false, 0, NULL);
		if (ret) {
			dev_kfree_skb(skb);
			return ret;
		}
		ep_id = ath6kl_ac2_endpoint_id(wmi->parent_dev, WMM_AC_BE);
	}

	ath6kl_control_tx(wmi->parent_dev, skb, ep_id);

	if ((sync_flag == SYNC_AFTER_WMIFLAG) ||
	    (sync_flag == SYNC_BOTH_WMIFLAG)) {
		/*
		 * Make sure all new data queued waits for the command to
		 * execute. Establish a new sync point.
		 */
		ath6kl_wmi_sync_point(wmi);
	}

	return 0;
}
1468
1469int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type,
1470 enum dot11_auth_mode dot11_auth_mode,
1471 enum auth_mode auth_mode,
1472 enum crypto_type pairwise_crypto,
1473 u8 pairwise_crypto_len,
1474 enum crypto_type group_crypto,
1475 u8 group_crypto_len, int ssid_len, u8 *ssid,
1476 u8 *bssid, u16 channel, u32 ctrl_flags)
1477{
1478 struct sk_buff *skb;
1479 struct wmi_connect_cmd *cc;
1480 int ret;
1481
1482 wmi->traffic_class = 100;
1483
1484 if ((pairwise_crypto == NONE_CRYPT) && (group_crypto != NONE_CRYPT))
1485 return -EINVAL;
1486
1487 if ((pairwise_crypto != NONE_CRYPT) && (group_crypto == NONE_CRYPT))
1488 return -EINVAL;
1489
1490 skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_connect_cmd));
1491 if (!skb)
1492 return -ENOMEM;
1493
1494 cc = (struct wmi_connect_cmd *) skb->data;
1495
1496 if (ssid_len)
1497 memcpy(cc->ssid, ssid, ssid_len);
1498
1499 cc->ssid_len = ssid_len;
1500 cc->nw_type = nw_type;
1501 cc->dot11_auth_mode = dot11_auth_mode;
1502 cc->auth_mode = auth_mode;
1503 cc->prwise_crypto_type = pairwise_crypto;
1504 cc->prwise_crypto_len = pairwise_crypto_len;
1505 cc->grp_crypto_type = group_crypto;
1506 cc->grp_crypto_len = group_crypto_len;
1507 cc->ch = cpu_to_le16(channel);
1508 cc->ctrl_flags = cpu_to_le32(ctrl_flags);
1509
1510 if (bssid != NULL)
1511 memcpy(cc->bssid, bssid, ETH_ALEN);
1512
1513 wmi->pair_crypto_type = pairwise_crypto;
1514 wmi->grp_crypto_type = group_crypto;
1515
1516 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_CONNECT_CMDID, NO_SYNC_WMIFLAG);
1517
1518 return ret;
1519}
1520
1521int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel)
1522{
1523 struct sk_buff *skb;
1524 struct wmi_reconnect_cmd *cc;
1525 int ret;
1526
1527 wmi->traffic_class = 100;
1528
1529 skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_reconnect_cmd));
1530 if (!skb)
1531 return -ENOMEM;
1532
1533 cc = (struct wmi_reconnect_cmd *) skb->data;
1534 cc->channel = cpu_to_le16(channel);
1535
1536 if (bssid != NULL)
1537 memcpy(cc->bssid, bssid, ETH_ALEN);
1538
1539 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_RECONNECT_CMDID,
1540 NO_SYNC_WMIFLAG);
1541
1542 return ret;
1543}
1544
1545int ath6kl_wmi_disconnect_cmd(struct wmi *wmi)
1546{
1547 int ret;
1548
1549 wmi->traffic_class = 100;
1550
1551 /* Disconnect command does not need to do a SYNC before. */
1552 ret = ath6kl_wmi_simple_cmd(wmi, WMI_DISCONNECT_CMDID);
1553
1554 return ret;
1555}
1556
1557int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type,
1558 u32 force_fgscan, u32 is_legacy,
1559 u32 home_dwell_time, u32 force_scan_interval,
1560 s8 num_chan, u16 *ch_list)
1561{
1562 struct sk_buff *skb;
1563 struct wmi_start_scan_cmd *sc;
1564 s8 size;
1565 int ret;
1566
1567 size = sizeof(struct wmi_start_scan_cmd);
1568
1569 if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN))
1570 return -EINVAL;
1571
1572 if (num_chan > WMI_MAX_CHANNELS)
1573 return -EINVAL;
1574
1575 if (num_chan)
1576 size += sizeof(u16) * (num_chan - 1);
1577
1578 skb = ath6kl_wmi_get_new_buf(size);
1579 if (!skb)
1580 return -ENOMEM;
1581
1582 sc = (struct wmi_start_scan_cmd *) skb->data;
1583 sc->scan_type = scan_type;
1584 sc->force_fg_scan = cpu_to_le32(force_fgscan);
1585 sc->is_legacy = cpu_to_le32(is_legacy);
1586 sc->home_dwell_time = cpu_to_le32(home_dwell_time);
1587 sc->force_scan_intvl = cpu_to_le32(force_scan_interval);
1588 sc->num_ch = num_chan;
1589
1590 if (num_chan)
1591 memcpy(sc->ch_list, ch_list, num_chan * sizeof(u16));
1592
1593 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_START_SCAN_CMDID,
1594 NO_SYNC_WMIFLAG);
1595
1596 return ret;
1597}
1598
1599int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec,
1600 u16 fg_end_sec, u16 bg_sec,
1601 u16 minact_chdw_msec, u16 maxact_chdw_msec,
1602 u16 pas_chdw_msec, u8 short_scan_ratio,
1603 u8 scan_ctrl_flag, u32 max_dfsch_act_time,
1604 u16 maxact_scan_per_ssid)
1605{
1606 struct sk_buff *skb;
1607 struct wmi_scan_params_cmd *sc;
1608 int ret;
1609
1610 skb = ath6kl_wmi_get_new_buf(sizeof(*sc));
1611 if (!skb)
1612 return -ENOMEM;
1613
1614 sc = (struct wmi_scan_params_cmd *) skb->data;
1615 sc->fg_start_period = cpu_to_le16(fg_start_sec);
1616 sc->fg_end_period = cpu_to_le16(fg_end_sec);
1617 sc->bg_period = cpu_to_le16(bg_sec);
1618 sc->minact_chdwell_time = cpu_to_le16(minact_chdw_msec);
1619 sc->maxact_chdwell_time = cpu_to_le16(maxact_chdw_msec);
1620 sc->pas_chdwell_time = cpu_to_le16(pas_chdw_msec);
1621 sc->short_scan_ratio = short_scan_ratio;
1622 sc->scan_ctrl_flags = scan_ctrl_flag;
1623 sc->max_dfsch_act_time = cpu_to_le32(max_dfsch_act_time);
1624 sc->maxact_scan_per_ssid = cpu_to_le16(maxact_scan_per_ssid);
1625
1626 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_SCAN_PARAMS_CMDID,
1627 NO_SYNC_WMIFLAG);
1628 return ret;
1629}
1630
1631int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask)
1632{
1633 struct sk_buff *skb;
1634 struct wmi_bss_filter_cmd *cmd;
1635 int ret;
1636
1637 if (filter >= LAST_BSS_FILTER)
1638 return -EINVAL;
1639
1640 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
1641 if (!skb)
1642 return -ENOMEM;
1643
1644 cmd = (struct wmi_bss_filter_cmd *) skb->data;
1645 cmd->bss_filter = filter;
1646 cmd->ie_mask = cpu_to_le32(ie_mask);
1647
1648 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_BSS_FILTER_CMDID,
1649 NO_SYNC_WMIFLAG);
1650 return ret;
1651}
1652
1653int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag,
1654 u8 ssid_len, u8 *ssid)
1655{
1656 struct sk_buff *skb;
1657 struct wmi_probed_ssid_cmd *cmd;
1658 int ret;
1659
1660 if (index > MAX_PROBED_SSID_INDEX)
1661 return -EINVAL;
1662
1663 if (ssid_len > sizeof(cmd->ssid))
1664 return -EINVAL;
1665
1666 if ((flag & (DISABLE_SSID_FLAG | ANY_SSID_FLAG)) && (ssid_len > 0))
1667 return -EINVAL;
1668
1669 if ((flag & SPECIFIC_SSID_FLAG) && !ssid_len)
1670 return -EINVAL;
1671
1672 if (flag & SPECIFIC_SSID_FLAG)
1673 wmi->is_probe_ssid = true;
1674
1675 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
1676 if (!skb)
1677 return -ENOMEM;
1678
1679 cmd = (struct wmi_probed_ssid_cmd *) skb->data;
1680 cmd->entry_index = index;
1681 cmd->flag = flag;
1682 cmd->ssid_len = ssid_len;
1683 memcpy(cmd->ssid, ssid, ssid_len);
1684
1685 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_PROBED_SSID_CMDID,
1686 NO_SYNC_WMIFLAG);
1687 return ret;
1688}
1689
1690int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval,
1691 u16 listen_beacons)
1692{
1693 struct sk_buff *skb;
1694 struct wmi_listen_int_cmd *cmd;
1695 int ret;
1696
1697 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
1698 if (!skb)
1699 return -ENOMEM;
1700
1701 cmd = (struct wmi_listen_int_cmd *) skb->data;
1702 cmd->listen_intvl = cpu_to_le16(listen_interval);
1703 cmd->num_beacons = cpu_to_le16(listen_beacons);
1704
1705 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_LISTEN_INT_CMDID,
1706 NO_SYNC_WMIFLAG);
1707 return ret;
1708}
1709
1710int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode)
1711{
1712 struct sk_buff *skb;
1713 struct wmi_power_mode_cmd *cmd;
1714 int ret;
1715
1716 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
1717 if (!skb)
1718 return -ENOMEM;
1719
1720 cmd = (struct wmi_power_mode_cmd *) skb->data;
1721 cmd->pwr_mode = pwr_mode;
1722 wmi->pwr_mode = pwr_mode;
1723
1724 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_POWER_MODE_CMDID,
1725 NO_SYNC_WMIFLAG);
1726 return ret;
1727}
1728
1729int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period,
1730 u16 ps_poll_num, u16 dtim_policy,
1731 u16 tx_wakeup_policy, u16 num_tx_to_wakeup,
1732 u16 ps_fail_event_policy)
1733{
1734 struct sk_buff *skb;
1735 struct wmi_power_params_cmd *pm;
1736 int ret;
1737
1738 skb = ath6kl_wmi_get_new_buf(sizeof(*pm));
1739 if (!skb)
1740 return -ENOMEM;
1741
1742 pm = (struct wmi_power_params_cmd *)skb->data;
1743 pm->idle_period = cpu_to_le16(idle_period);
1744 pm->pspoll_number = cpu_to_le16(ps_poll_num);
1745 pm->dtim_policy = cpu_to_le16(dtim_policy);
1746 pm->tx_wakeup_policy = cpu_to_le16(tx_wakeup_policy);
1747 pm->num_tx_to_wakeup = cpu_to_le16(num_tx_to_wakeup);
1748 pm->ps_fail_event_policy = cpu_to_le16(ps_fail_event_policy);
1749
1750 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_POWER_PARAMS_CMDID,
1751 NO_SYNC_WMIFLAG);
1752 return ret;
1753}
1754
1755int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout)
1756{
1757 struct sk_buff *skb;
1758 struct wmi_disc_timeout_cmd *cmd;
1759 int ret;
1760
1761 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
1762 if (!skb)
1763 return -ENOMEM;
1764
1765 cmd = (struct wmi_disc_timeout_cmd *) skb->data;
1766 cmd->discon_timeout = timeout;
1767
1768 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_DISC_TIMEOUT_CMDID,
1769 NO_SYNC_WMIFLAG);
1770 return ret;
1771}
1772
1773int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index,
1774 enum crypto_type key_type,
1775 u8 key_usage, u8 key_len,
1776 u8 *key_rsc, u8 *key_material,
1777 u8 key_op_ctrl, u8 *mac_addr,
1778 enum wmi_sync_flag sync_flag)
1779{
1780 struct sk_buff *skb;
1781 struct wmi_add_cipher_key_cmd *cmd;
1782 int ret;
1783
1784 if ((key_index > WMI_MAX_KEY_INDEX) || (key_len > WMI_MAX_KEY_LEN) ||
1785 (key_material == NULL))
1786 return -EINVAL;
1787
1788 if ((WEP_CRYPT != key_type) && (NULL == key_rsc))
1789 return -EINVAL;
1790
1791 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
1792 if (!skb)
1793 return -ENOMEM;
1794
1795 cmd = (struct wmi_add_cipher_key_cmd *) skb->data;
1796 cmd->key_index = key_index;
1797 cmd->key_type = key_type;
1798 cmd->key_usage = key_usage;
1799 cmd->key_len = key_len;
1800 memcpy(cmd->key, key_material, key_len);
1801
1802 if (key_rsc != NULL)
1803 memcpy(cmd->key_rsc, key_rsc, sizeof(cmd->key_rsc));
1804
1805 cmd->key_op_ctrl = key_op_ctrl;
1806
1807 if (mac_addr)
1808 memcpy(cmd->key_mac_addr, mac_addr, ETH_ALEN);
1809
1810 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_ADD_CIPHER_KEY_CMDID,
1811 sync_flag);
1812
1813 return ret;
1814}
1815
1816int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk)
1817{
1818 struct sk_buff *skb;
1819 struct wmi_add_krk_cmd *cmd;
1820 int ret;
1821
1822 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
1823 if (!skb)
1824 return -ENOMEM;
1825
1826 cmd = (struct wmi_add_krk_cmd *) skb->data;
1827 memcpy(cmd->krk, krk, WMI_KRK_LEN);
1828
1829 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_ADD_KRK_CMDID, NO_SYNC_WMIFLAG);
1830
1831 return ret;
1832}
1833
1834int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index)
1835{
1836 struct sk_buff *skb;
1837 struct wmi_delete_cipher_key_cmd *cmd;
1838 int ret;
1839
1840 if (key_index > WMI_MAX_KEY_INDEX)
1841 return -EINVAL;
1842
1843 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
1844 if (!skb)
1845 return -ENOMEM;
1846
1847 cmd = (struct wmi_delete_cipher_key_cmd *) skb->data;
1848 cmd->key_index = key_index;
1849
1850 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_DELETE_CIPHER_KEY_CMDID,
1851 NO_SYNC_WMIFLAG);
1852
1853 return ret;
1854}
1855
1856int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid,
1857 const u8 *pmkid, bool set)
1858{
1859 struct sk_buff *skb;
1860 struct wmi_setpmkid_cmd *cmd;
1861 int ret;
1862
1863 if (bssid == NULL)
1864 return -EINVAL;
1865
1866 if (set && pmkid == NULL)
1867 return -EINVAL;
1868
1869 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
1870 if (!skb)
1871 return -ENOMEM;
1872
1873 cmd = (struct wmi_setpmkid_cmd *) skb->data;
1874 memcpy(cmd->bssid, bssid, ETH_ALEN);
1875 if (set) {
1876 memcpy(cmd->pmkid, pmkid, sizeof(cmd->pmkid));
1877 cmd->enable = PMKID_ENABLE;
1878 } else {
1879 memset(cmd->pmkid, 0, sizeof(cmd->pmkid));
1880 cmd->enable = PMKID_DISABLE;
1881 }
1882
1883 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_PMKID_CMDID,
1884 NO_SYNC_WMIFLAG);
1885
1886 return ret;
1887}
1888
1889static int ath6kl_wmi_data_sync_send(struct wmi *wmi, struct sk_buff *skb,
1890 enum htc_endpoint_id ep_id)
1891{
1892 struct wmi_data_hdr *data_hdr;
1893 int ret;
1894
1895 if (WARN_ON(skb == NULL || ep_id == wmi->ep_id))
1896 return -EINVAL;
1897
1898 skb_push(skb, sizeof(struct wmi_data_hdr));
1899
1900 data_hdr = (struct wmi_data_hdr *) skb->data;
1901 data_hdr->info = SYNC_MSGTYPE << WMI_DATA_HDR_MSG_TYPE_SHIFT;
1902 data_hdr->info3 = 0;
1903
1904 ret = ath6kl_control_tx(wmi->parent_dev, skb, ep_id);
1905
1906 return ret;
1907}
1908
/*
 * Establish a WMI sync point.
 *
 * Sends a WMI_SYNCHRONIZE command on the control endpoint carrying a
 * bitmap of the currently active fat pipes, then sends one empty sync
 * data frame on each of those data endpoints. The target uses the pair
 * to order queued data relative to subsequent commands.
 *
 * Buffer ownership is delicate here: once ath6kl_wmi_cmd_send() is
 * called the command skb belongs to it (skb is NULLed), and each data
 * sync skb is disowned only after a successful send; everything still
 * owned at 'free_skb' is released there.
 *
 * Returns 0 on success or a negative errno.
 */
static int ath6kl_wmi_sync_point(struct wmi *wmi)
{
	struct sk_buff *skb;
	struct wmi_sync_cmd *cmd;
	struct wmi_data_sync_bufs data_sync_bufs[WMM_NUM_AC];
	enum htc_endpoint_id ep_id;
	u8 index, num_pri_streams = 0;
	int ret = 0;

	memset(data_sync_bufs, 0, sizeof(data_sync_bufs));

	/* Snapshot the set of active fat pipes under the lock. */
	spin_lock_bh(&wmi->lock);

	for (index = 0; index < WMM_NUM_AC; index++) {
		if (wmi->fat_pipe_exist & (1 << index)) {
			num_pri_streams++;
			data_sync_bufs[num_pri_streams - 1].traffic_class =
			    index;
		}
	}

	spin_unlock_bh(&wmi->lock);

	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
	if (!skb) {
		ret = -ENOMEM;
		goto free_skb;
	}

	cmd = (struct wmi_sync_cmd *) skb->data;

	/*
	 * In the SYNC cmd sent on the control Ep, send a bitmap
	 * of the data eps on which the Data Sync will be sent
	 */
	cmd->data_sync_map = wmi->fat_pipe_exist;

	/* Pre-allocate one empty sync frame per active stream. */
	for (index = 0; index < num_pri_streams; index++) {
		data_sync_bufs[index].skb = ath6kl_buf_alloc(0);
		if (data_sync_bufs[index].skb == NULL) {
			ret = -ENOMEM;
			break;
		}
	}

	/*
	 * If buffer allocation for any of the dataSync fails,
	 * then do not send the Synchronize cmd on the control ep
	 */
	if (ret)
		goto free_skb;

	/*
	 * Send sync cmd followed by sync data messages on all
	 * endpoints being used
	 */
	ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SYNCHRONIZE_CMDID,
				  NO_SYNC_WMIFLAG);

	if (ret)
		goto free_skb;

	/* cmd buffer sent, we no longer own it */
	skb = NULL;

	for (index = 0; index < num_pri_streams; index++) {

		if (WARN_ON(!data_sync_bufs[index].skb))
			break;

		ep_id = ath6kl_ac2_endpoint_id(wmi->parent_dev,
					       data_sync_bufs[index].
					       traffic_class);
		ret =
		    ath6kl_wmi_data_sync_send(wmi, data_sync_bufs[index].skb,
					      ep_id);

		if (ret)
			break;

		/* sent successfully: no longer ours to free */
		data_sync_bufs[index].skb = NULL;
	}

free_skb:
	/* free up any resources left over (possibly due to an error) */
	if (skb)
		dev_kfree_skb(skb);

	for (index = 0; index < num_pri_streams; index++) {
		if (data_sync_bufs[index].skb != NULL) {
			dev_kfree_skb((struct sk_buff *)data_sync_bufs[index].
				      skb);
		}
	}

	return ret;
}
2006
2007int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi,
2008 struct wmi_create_pstream_cmd *params)
2009{
2010 struct sk_buff *skb;
2011 struct wmi_create_pstream_cmd *cmd;
2012 u8 fatpipe_exist_for_ac = 0;
2013 s32 min_phy = 0;
2014 s32 nominal_phy = 0;
2015 int ret;
2016
2017 if (!((params->user_pri < 8) &&
2018 (params->user_pri <= 0x7) &&
2019 (up_to_ac[params->user_pri & 0x7] == params->traffic_class) &&
2020 (params->traffic_direc == UPLINK_TRAFFIC ||
2021 params->traffic_direc == DNLINK_TRAFFIC ||
2022 params->traffic_direc == BIDIR_TRAFFIC) &&
2023 (params->traffic_type == TRAFFIC_TYPE_APERIODIC ||
2024 params->traffic_type == TRAFFIC_TYPE_PERIODIC) &&
2025 (params->voice_psc_cap == DISABLE_FOR_THIS_AC ||
2026 params->voice_psc_cap == ENABLE_FOR_THIS_AC ||
2027 params->voice_psc_cap == ENABLE_FOR_ALL_AC) &&
2028 (params->tsid == WMI_IMPLICIT_PSTREAM ||
2029 params->tsid <= WMI_MAX_THINSTREAM))) {
2030 return -EINVAL;
2031 }
2032
2033 /*
2034 * Check nominal PHY rate is >= minimalPHY,
2035 * so that DUT can allow TSRS IE
2036 */
2037
2038 /* Get the physical rate (units of bps) */
2039 min_phy = ((le32_to_cpu(params->min_phy_rate) / 1000) / 1000);
2040
2041 /* Check minimal phy < nominal phy rate */
2042 if (params->nominal_phy >= min_phy) {
2043 /* unit of 500 kbps */
2044 nominal_phy = (params->nominal_phy * 1000) / 500;
2045 ath6kl_dbg(ATH6KL_DBG_WMI,
2046 "TSRS IE enabled::MinPhy %x->NominalPhy ===> %x\n",
2047 min_phy, nominal_phy);
2048
2049 params->nominal_phy = nominal_phy;
2050 } else {
2051 params->nominal_phy = 0;
2052 }
2053
2054 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
2055 if (!skb)
2056 return -ENOMEM;
2057
2058 ath6kl_dbg(ATH6KL_DBG_WMI,
2059 "sending create_pstream_cmd: ac=%d tsid:%d\n",
2060 params->traffic_class, params->tsid);
2061
2062 cmd = (struct wmi_create_pstream_cmd *) skb->data;
2063 memcpy(cmd, params, sizeof(*cmd));
2064
2065 /* This is an implicitly created Fat pipe */
2066 if ((u32) params->tsid == (u32) WMI_IMPLICIT_PSTREAM) {
2067 spin_lock_bh(&wmi->lock);
2068 fatpipe_exist_for_ac = (wmi->fat_pipe_exist &
2069 (1 << params->traffic_class));
2070 wmi->fat_pipe_exist |= (1 << params->traffic_class);
2071 spin_unlock_bh(&wmi->lock);
2072 } else {
2073 /* explicitly created thin stream within a fat pipe */
2074 spin_lock_bh(&wmi->lock);
2075 fatpipe_exist_for_ac = (wmi->fat_pipe_exist &
2076 (1 << params->traffic_class));
2077 wmi->stream_exist_for_ac[params->traffic_class] |=
2078 (1 << params->tsid);
2079 /*
2080 * If a thinstream becomes active, the fat pipe automatically
2081 * becomes active
2082 */
2083 wmi->fat_pipe_exist |= (1 << params->traffic_class);
2084 spin_unlock_bh(&wmi->lock);
2085 }
2086
2087 /*
2088 * Indicate activty change to driver layer only if this is the
2089 * first TSID to get created in this AC explicitly or an implicit
2090 * fat pipe is getting created.
2091 */
2092 if (!fatpipe_exist_for_ac)
2093 ath6kl_indicate_tx_activity(wmi->parent_dev,
2094 params->traffic_class, true);
2095
2096 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_CREATE_PSTREAM_CMDID,
2097 NO_SYNC_WMIFLAG);
2098 return ret;
2099}
2100
/*
 * Delete a priority stream (TSID) within the given traffic class (AC).
 *
 * The command is sent with SYNC_BEFORE_WMIFLAG so queued data is
 * flushed first. When the last TSID of the AC goes away, the fat pipe
 * is marked inactive and the driver layer is notified.
 *
 * Returns 0 on success, -EINVAL for an invalid traffic class, -ENODATA
 * if the TSID is not active, or the error from the command send.
 */
int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid)
{
	struct sk_buff *skb;
	struct wmi_delete_pstream_cmd *cmd;
	u16 active_tsids = 0;
	int ret;

	if (traffic_class > 3) {
		ath6kl_err("invalid traffic class: %d\n", traffic_class);
		return -EINVAL;
	}

	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_delete_pstream_cmd *) skb->data;
	cmd->traffic_class = traffic_class;
	cmd->tsid = tsid;

	spin_lock_bh(&wmi->lock);
	active_tsids = wmi->stream_exist_for_ac[traffic_class];
	spin_unlock_bh(&wmi->lock);

	/* Nothing to delete if this TSID was never created. */
	if (!(active_tsids & (1 << tsid))) {
		dev_kfree_skb(skb);
		ath6kl_dbg(ATH6KL_DBG_WMI,
			   "TSID %d doesn't exist for traffic class: %d\n",
			   tsid, traffic_class);
		return -ENODATA;
	}

	ath6kl_dbg(ATH6KL_DBG_WMI,
		   "sending delete_pstream_cmd: traffic class: %d tsid=%d\n",
		   traffic_class, tsid);

	ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_DELETE_PSTREAM_CMDID,
				  SYNC_BEFORE_WMIFLAG);

	/* Clear the TSID bit and re-read the remaining set atomically. */
	spin_lock_bh(&wmi->lock);
	wmi->stream_exist_for_ac[traffic_class] &= ~(1 << tsid);
	active_tsids = wmi->stream_exist_for_ac[traffic_class];
	spin_unlock_bh(&wmi->lock);

	/*
	 * Indicate stream inactivity to driver layer only if all tsids
	 * within this AC are deleted.
	 */
	if (!active_tsids) {
		ath6kl_indicate_tx_activity(wmi->parent_dev,
					    traffic_class, false);
		wmi->fat_pipe_exist &= ~(1 << traffic_class);
	}

	return ret;
}
2157
2158int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd)
2159{
2160 struct sk_buff *skb;
2161 struct wmi_set_ip_cmd *cmd;
2162 int ret;
2163
2164 /* Multicast address are not valid */
2165 if ((*((u8 *) &ip_cmd->ips[0]) >= 0xE0) ||
2166 (*((u8 *) &ip_cmd->ips[1]) >= 0xE0))
2167 return -EINVAL;
2168
2169 skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_ip_cmd));
2170 if (!skb)
2171 return -ENOMEM;
2172
2173 cmd = (struct wmi_set_ip_cmd *) skb->data;
2174 memcpy(cmd, ip_cmd, sizeof(struct wmi_set_ip_cmd));
2175
2176 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_IP_CMDID, NO_SYNC_WMIFLAG);
2177 return ret;
2178}
2179
2180static int ath6kl_wmi_get_wow_list_event_rx(struct wmi *wmi, u8 * datap,
2181 int len)
2182{
2183 if (len < sizeof(struct wmi_get_wow_list_reply))
2184 return -EINVAL;
2185
2186 return 0;
2187}
2188
2189static int ath6kl_wmi_cmd_send_xtnd(struct wmi *wmi, struct sk_buff *skb,
2190 enum wmix_command_id cmd_id,
2191 enum wmi_sync_flag sync_flag)
2192{
2193 struct wmix_cmd_hdr *cmd_hdr;
2194 int ret;
2195
2196 skb_push(skb, sizeof(struct wmix_cmd_hdr));
2197
2198 cmd_hdr = (struct wmix_cmd_hdr *) skb->data;
2199 cmd_hdr->cmd_id = cpu_to_le32(cmd_id);
2200
2201 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_EXTENSION_CMDID, sync_flag);
2202
2203 return ret;
2204}
2205
2206int ath6kl_wmi_get_challenge_resp_cmd(struct wmi *wmi, u32 cookie, u32 source)
2207{
2208 struct sk_buff *skb;
2209 struct wmix_hb_challenge_resp_cmd *cmd;
2210 int ret;
2211
2212 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
2213 if (!skb)
2214 return -ENOMEM;
2215
2216 cmd = (struct wmix_hb_challenge_resp_cmd *) skb->data;
2217 cmd->cookie = cpu_to_le32(cookie);
2218 cmd->source = cpu_to_le32(source);
2219
2220 ret = ath6kl_wmi_cmd_send_xtnd(wmi, skb, WMIX_HB_CHALLENGE_RESP_CMDID,
2221 NO_SYNC_WMIFLAG);
2222 return ret;
2223}
2224
/* Request target statistics (fire-and-forget WMI_GET_STATISTICS_CMDID;
 * the data presumably comes back in a separate WMI event — see the
 * event handlers elsewhere in this file). */
int ath6kl_wmi_get_stats_cmd(struct wmi *wmi)
{
	return ath6kl_wmi_simple_cmd(wmi, WMI_GET_STATISTICS_CMDID);
}
2229
2230int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM)
2231{
2232 struct sk_buff *skb;
2233 struct wmi_set_tx_pwr_cmd *cmd;
2234 int ret;
2235
2236 skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_tx_pwr_cmd));
2237 if (!skb)
2238 return -ENOMEM;
2239
2240 cmd = (struct wmi_set_tx_pwr_cmd *) skb->data;
2241 cmd->dbM = dbM;
2242
2243 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_TX_PWR_CMDID,
2244 NO_SYNC_WMIFLAG);
2245
2246 return ret;
2247}
2248
/* Query the target's current transmit power (fire-and-forget command;
 * the value presumably arrives via a WMI reply event). */
int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi)
{
	return ath6kl_wmi_simple_cmd(wmi, WMI_GET_TX_PWR_CMDID);
}
2253
2254void ath6kl_wmi_get_current_bssid(struct wmi *wmi, u8 *bssid)
2255{
2256 if (bssid)
2257 memcpy(bssid, wmi->bssid, ETH_ALEN);
2258}
2259
2260int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status, u8 preamble_policy)
2261{
2262 struct sk_buff *skb;
2263 struct wmi_set_lpreamble_cmd *cmd;
2264 int ret;
2265
2266 skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_lpreamble_cmd));
2267 if (!skb)
2268 return -ENOMEM;
2269
2270 cmd = (struct wmi_set_lpreamble_cmd *) skb->data;
2271 cmd->status = status;
2272 cmd->preamble_policy = preamble_policy;
2273
2274 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_LPREAMBLE_CMDID,
2275 NO_SYNC_WMIFLAG);
2276 return ret;
2277}
2278
2279int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold)
2280{
2281 struct sk_buff *skb;
2282 struct wmi_set_rts_cmd *cmd;
2283 int ret;
2284
2285 skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_rts_cmd));
2286 if (!skb)
2287 return -ENOMEM;
2288
2289 cmd = (struct wmi_set_rts_cmd *) skb->data;
2290 cmd->threshold = cpu_to_le16(threshold);
2291
2292 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_RTS_CMDID, NO_SYNC_WMIFLAG);
2293 return ret;
2294}
2295
2296int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg)
2297{
2298 struct sk_buff *skb;
2299 struct wmi_set_wmm_txop_cmd *cmd;
2300 int ret;
2301
2302 if (!((cfg == WMI_TXOP_DISABLED) || (cfg == WMI_TXOP_ENABLED)))
2303 return -EINVAL;
2304
2305 skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_set_wmm_txop_cmd));
2306 if (!skb)
2307 return -ENOMEM;
2308
2309 cmd = (struct wmi_set_wmm_txop_cmd *) skb->data;
2310 cmd->txop_enable = cfg;
2311
2312 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_WMM_TXOP_CMDID,
2313 NO_SYNC_WMIFLAG);
2314 return ret;
2315}
2316
2317int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl)
2318{
2319 struct sk_buff *skb;
2320 struct wmi_set_keepalive_cmd *cmd;
2321 int ret;
2322
2323 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
2324 if (!skb)
2325 return -ENOMEM;
2326
2327 cmd = (struct wmi_set_keepalive_cmd *) skb->data;
2328 cmd->keep_alive_intvl = keep_alive_intvl;
2329 wmi->keep_alive_intvl = keep_alive_intvl;
2330
2331 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_KEEPALIVE_CMDID,
2332 NO_SYNC_WMIFLAG);
2333 return ret;
2334}
2335
2336s32 ath6kl_wmi_get_rate(s8 rate_index)
2337{
2338 if (rate_index == RATE_AUTO)
2339 return 0;
2340
2341 return wmi_rate_tbl[(u32) rate_index][0];
2342}
2343
2344void ath6kl_wmi_node_return(struct wmi *wmi, struct bss *bss)
2345{
2346 if (bss)
2347 wlan_node_return(&wmi->scan_table, bss);
2348}
2349
2350struct bss *ath6kl_wmi_find_ssid_node(struct wmi *wmi, u8 * ssid,
2351 u32 ssid_len, bool is_wpa2,
2352 bool match_ssid)
2353{
2354 struct bss *node = NULL;
2355
2356 node = wlan_find_ssid_node(&wmi->scan_table, ssid,
2357 ssid_len, is_wpa2, match_ssid);
2358 return node;
2359}
2360
2361struct bss *ath6kl_wmi_find_node(struct wmi *wmi, const u8 * mac_addr)
2362{
2363 struct bss *ni = NULL;
2364
2365 ni = wlan_find_node(&wmi->scan_table, mac_addr);
2366
2367 return ni;
2368}
2369
2370void ath6kl_wmi_node_free(struct wmi *wmi, const u8 * mac_addr)
2371{
2372 struct bss *ni = NULL;
2373
2374 ni = wlan_find_node(&wmi->scan_table, mac_addr);
2375 if (ni != NULL)
2376 wlan_node_reclaim(&wmi->scan_table, ni);
2377
2378 return;
2379}
2380
2381static int ath6kl_wmi_get_pmkid_list_event_rx(struct wmi *wmi, u8 *datap,
2382 u32 len)
2383{
2384 struct wmi_pmkid_list_reply *reply;
2385 u32 expected_len;
2386
2387 if (len < sizeof(struct wmi_pmkid_list_reply))
2388 return -EINVAL;
2389
2390 reply = (struct wmi_pmkid_list_reply *)datap;
2391 expected_len = sizeof(reply->num_pmkid) +
2392 le32_to_cpu(reply->num_pmkid) * WMI_PMKID_LEN;
2393
2394 if (len < expected_len)
2395 return -EINVAL;
2396
2397 return 0;
2398}
2399
2400static int ath6kl_wmi_addba_req_event_rx(struct wmi *wmi, u8 *datap, int len)
2401{
2402 struct wmi_addba_req_event *cmd = (struct wmi_addba_req_event *) datap;
2403
2404 aggr_recv_addba_req_evt(wmi->parent_dev, cmd->tid,
2405 le16_to_cpu(cmd->st_seq_no), cmd->win_sz);
2406
2407 return 0;
2408}
2409
2410static int ath6kl_wmi_delba_req_event_rx(struct wmi *wmi, u8 *datap, int len)
2411{
2412 struct wmi_delba_event *cmd = (struct wmi_delba_event *) datap;
2413
2414 aggr_recv_delba_req_evt(wmi->parent_dev, cmd->tid);
2415
2416 return 0;
2417}
2418
2419/* AP mode functions */
2420static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len)
2421{
2422 struct wmi_pspoll_event *ev;
2423
2424 if (len < sizeof(struct wmi_pspoll_event))
2425 return -EINVAL;
2426
2427 ev = (struct wmi_pspoll_event *) datap;
2428
2429 ath6kl_pspoll_event(wmi->parent_dev, le16_to_cpu(ev->aid));
2430
2431 return 0;
2432}
2433
/*
 * AP mode: DTIM expiry notification from the target. The event carries no
 * payload that is used here; it is simply forwarded to the core.
 */
static int ath6kl_wmi_dtimexpiry_event_rx(struct wmi *wmi, u8 *datap, int len)
{
	ath6kl_dtimexpiry_event(wmi->parent_dev);

	return 0;
}
2440
2441int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag)
2442{
2443 struct sk_buff *skb;
2444 struct wmi_ap_set_pvb_cmd *cmd;
2445 int ret;
2446
2447 skb = ath6kl_wmi_get_new_buf(sizeof(struct wmi_ap_set_pvb_cmd));
2448 if (!skb)
2449 return -ENOMEM;
2450
2451 cmd = (struct wmi_ap_set_pvb_cmd *) skb->data;
2452 cmd->aid = cpu_to_le16(aid);
2453 cmd->flag = cpu_to_le32(flag);
2454
2455 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_AP_SET_PVB_CMDID,
2456 NO_SYNC_WMIFLAG);
2457
2458 return 0;
2459}
2460
2461int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_ver,
2462 bool rx_dot11_hdr, bool defrag_on_host)
2463{
2464 struct sk_buff *skb;
2465 struct wmi_rx_frame_format_cmd *cmd;
2466 int ret;
2467
2468 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
2469 if (!skb)
2470 return -ENOMEM;
2471
2472 cmd = (struct wmi_rx_frame_format_cmd *) skb->data;
2473 cmd->dot11_hdr = rx_dot11_hdr ? 1 : 0;
2474 cmd->defrag_on_host = defrag_on_host ? 1 : 0;
2475 cmd->meta_ver = rx_meta_ver;
2476
2477 /* Delete the local aggr state, on host */
2478 ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_RX_FRAME_FORMAT_CMDID,
2479 NO_SYNC_WMIFLAG);
2480
2481 return ret;
2482}
2483
2484static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
2485{
2486 struct wmix_cmd_hdr *cmd;
2487 u32 len;
2488 u16 id;
2489 u8 *datap;
2490 int ret = 0;
2491
2492 if (skb->len < sizeof(struct wmix_cmd_hdr)) {
2493 ath6kl_err("bad packet 1\n");
2494 wmi->stat.cmd_len_err++;
2495 return -EINVAL;
2496 }
2497
2498 cmd = (struct wmix_cmd_hdr *) skb->data;
2499 id = le32_to_cpu(cmd->cmd_id);
2500
2501 skb_pull(skb, sizeof(struct wmix_cmd_hdr));
2502
2503 datap = skb->data;
2504 len = skb->len;
2505
2506 switch (id) {
2507 case WMIX_HB_CHALLENGE_RESP_EVENTID:
2508 break;
2509 case WMIX_DBGLOG_EVENTID:
2510 break;
2511 default:
2512 ath6kl_err("unknown cmd id 0x%x\n", id);
2513 wmi->stat.cmd_id_err++;
2514 ret = -EINVAL;
2515 break;
2516 }
2517
2518 return ret;
2519}
2520
2521/* Control Path */
/*
 * Main WMI control-path receive dispatcher.
 *
 * Strips the wmi_cmd_hdr from @skb and routes the payload to the handler
 * for the command/event id. Takes ownership of @skb: it is freed before
 * returning on every path (including the short-packet error path).
 *
 * Returns 0 on success or a negative errno; unknown ids and short packets
 * bump the corresponding wmi->stat counters and return -EINVAL.
 */
int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd;
	u32 len;
	u16 id;
	u8 *datap;
	int ret = 0;

	if (WARN_ON(skb == NULL))
		return -EINVAL;

	/* Must at least hold the WMI header. */
	if (skb->len < sizeof(struct wmi_cmd_hdr)) {
		ath6kl_err("bad packet 1\n");
		dev_kfree_skb(skb);
		wmi->stat.cmd_len_err++;
		return -EINVAL;
	}

	cmd = (struct wmi_cmd_hdr *) skb->data;
	id = le16_to_cpu(cmd->cmd_id);

	/* datap/len describe the payload after the WMI header. */
	skb_pull(skb, sizeof(struct wmi_cmd_hdr));

	datap = skb->data;
	len = skb->len;

	ath6kl_dbg(ATH6KL_DBG_WMI, "%s: wmi id: %d\n", __func__, id);
	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "msg payload ", datap, len);

	switch (id) {
	case WMI_GET_BITRATE_CMDID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_BITRATE_CMDID\n");
		ret = ath6kl_wmi_bitrate_reply_rx(wmi, datap, len);
		break;
	case WMI_GET_CHANNEL_LIST_CMDID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_CHANNEL_LIST_CMDID\n");
		ret = ath6kl_wmi_ch_list_reply_rx(wmi, datap, len);
		break;
	case WMI_GET_TX_PWR_CMDID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_TX_PWR_CMDID\n");
		ret = ath6kl_wmi_tx_pwr_reply_rx(wmi, datap, len);
		break;
	case WMI_READY_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_READY_EVENTID\n");
		ret = ath6kl_wmi_ready_event_rx(wmi, datap, len);
		break;
	case WMI_CONNECT_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CONNECT_EVENTID\n");
		ret = ath6kl_wmi_connect_event_rx(wmi, datap, len);
		break;
	case WMI_DISCONNECT_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DISCONNECT_EVENTID\n");
		ret = ath6kl_wmi_disconnect_event_rx(wmi, datap, len);
		break;
	case WMI_PEER_NODE_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PEER_NODE_EVENTID\n");
		ret = ath6kl_wmi_peer_node_event_rx(wmi, datap, len);
		break;
	case WMI_TKIP_MICERR_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TKIP_MICERR_EVENTID\n");
		ret = ath6kl_wmi_tkip_micerr_event_rx(wmi, datap, len);
		break;
	case WMI_BSSINFO_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_BSSINFO_EVENTID\n");
		/* Header is rewritten in place; use skb->data/len after. */
		ath6kl_wmi_convert_bssinfo_hdr2_to_hdr(skb, datap);
		ret = ath6kl_wmi_bssinfo_event_rx(wmi, skb->data, skb->len);
		break;
	case WMI_REGDOMAIN_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REGDOMAIN_EVENTID\n");
		break;
	case WMI_PSTREAM_TIMEOUT_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PSTREAM_TIMEOUT_EVENTID\n");
		ret = ath6kl_wmi_pstream_timeout_event_rx(wmi, datap, len);
		break;
	case WMI_NEIGHBOR_REPORT_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_NEIGHBOR_REPORT_EVENTID\n");
		break;
	case WMI_SCAN_COMPLETE_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SCAN_COMPLETE_EVENTID\n");
		ret = ath6kl_wmi_scan_complete_rx(wmi, datap, len);
		break;
	case WMI_CMDERROR_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CMDERROR_EVENTID\n");
		ret = ath6kl_wmi_error_event_rx(wmi, datap, len);
		break;
	case WMI_REPORT_STATISTICS_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_STATISTICS_EVENTID\n");
		ret = ath6kl_wmi_stats_event_rx(wmi, datap, len);
		break;
	case WMI_RSSI_THRESHOLD_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RSSI_THRESHOLD_EVENTID\n");
		ret = ath6kl_wmi_rssi_threshold_event_rx(wmi, datap, len);
		break;
	case WMI_ERROR_REPORT_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ERROR_REPORT_EVENTID\n");
		break;
	case WMI_OPT_RX_FRAME_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_OPT_RX_FRAME_EVENTID\n");
		ret = ath6kl_wmi_opt_frame_event_rx(wmi, datap, len);
		break;
	case WMI_REPORT_ROAM_TBL_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_ROAM_TBL_EVENTID\n");
		break;
	case WMI_EXTENSION_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_EXTENSION_EVENTID\n");
		/* WMIX events carry a second header; pass the whole skb. */
		ret = ath6kl_wmi_control_rx_xtnd(wmi, skb);
		break;
	case WMI_CAC_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CAC_EVENTID\n");
		ret = ath6kl_wmi_cac_event_rx(wmi, datap, len);
		break;
	case WMI_CHANNEL_CHANGE_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CHANNEL_CHANGE_EVENTID\n");
		break;
	case WMI_REPORT_ROAM_DATA_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_ROAM_DATA_EVENTID\n");
		break;
	case WMI_GET_FIXRATES_CMDID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_FIXRATES_CMDID\n");
		ret = ath6kl_wmi_ratemask_reply_rx(wmi, datap, len);
		break;
	case WMI_TX_RETRY_ERR_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_RETRY_ERR_EVENTID\n");
		break;
	case WMI_SNR_THRESHOLD_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SNR_THRESHOLD_EVENTID\n");
		ret = ath6kl_wmi_snr_threshold_event_rx(wmi, datap, len);
		break;
	case WMI_LQ_THRESHOLD_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_LQ_THRESHOLD_EVENTID\n");
		break;
	case WMI_APLIST_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_APLIST_EVENTID\n");
		ret = ath6kl_wmi_aplist_event_rx(wmi, datap, len);
		break;
	case WMI_GET_KEEPALIVE_CMDID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_KEEPALIVE_CMDID\n");
		ret = ath6kl_wmi_keepalive_reply_rx(wmi, datap, len);
		break;
	case WMI_GET_WOW_LIST_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_WOW_LIST_EVENTID\n");
		ret = ath6kl_wmi_get_wow_list_event_rx(wmi, datap, len);
		break;
	case WMI_GET_PMKID_LIST_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_PMKID_LIST_EVENTID\n");
		ret = ath6kl_wmi_get_pmkid_list_event_rx(wmi, datap, len);
		break;
	case WMI_PSPOLL_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PSPOLL_EVENTID\n");
		ret = ath6kl_wmi_pspoll_event_rx(wmi, datap, len);
		break;
	case WMI_DTIMEXPIRY_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DTIMEXPIRY_EVENTID\n");
		ret = ath6kl_wmi_dtimexpiry_event_rx(wmi, datap, len);
		break;
	case WMI_SET_PARAMS_REPLY_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SET_PARAMS_REPLY_EVENTID\n");
		break;
	case WMI_ADDBA_REQ_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_REQ_EVENTID\n");
		ret = ath6kl_wmi_addba_req_event_rx(wmi, datap, len);
		break;
	case WMI_ADDBA_RESP_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_RESP_EVENTID\n");
		break;
	case WMI_DELBA_REQ_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DELBA_REQ_EVENTID\n");
		ret = ath6kl_wmi_delba_req_event_rx(wmi, datap, len);
		break;
	case WMI_REPORT_BTCOEX_CONFIG_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI,
			   "WMI_REPORT_BTCOEX_CONFIG_EVENTID\n");
		break;
	case WMI_REPORT_BTCOEX_STATS_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI,
			   "WMI_REPORT_BTCOEX_STATS_EVENTID\n");
		break;
	case WMI_TX_COMPLETE_EVENTID:
		ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_COMPLETE_EVENTID\n");
		ret = ath6kl_wmi_tx_complete_event_rx(datap, len);
		break;
	default:
		ath6kl_dbg(ATH6KL_DBG_WMI, "unknown cmd id 0x%x\n", id);
		wmi->stat.cmd_id_err++;
		ret = -EINVAL;
		break;
	}

	dev_kfree_skb(skb);

	return ret;
}
2714
2715static void ath6kl_wmi_qos_state_init(struct wmi *wmi)
2716{
2717 if (!wmi)
2718 return;
2719
2720 spin_lock_bh(&wmi->lock);
2721
2722 wmi->fat_pipe_exist = 0;
2723 memset(wmi->stream_exist_for_ac, 0, sizeof(wmi->stream_exist_for_ac));
2724
2725 spin_unlock_bh(&wmi->lock);
2726}
2727
2728void *ath6kl_wmi_init(void *dev)
2729{
2730 struct wmi *wmi;
2731
2732 wmi = kzalloc(sizeof(struct wmi), GFP_KERNEL);
2733 if (!wmi)
2734 return NULL;
2735
2736 spin_lock_init(&wmi->lock);
2737
2738 wmi->parent_dev = dev;
2739
2740 wlan_node_table_init(wmi, &wmi->scan_table);
2741 ath6kl_wmi_qos_state_init(wmi);
2742
2743 wmi->pwr_mode = REC_POWER;
2744 wmi->phy_mode = WMI_11G_MODE;
2745
2746 wmi->pair_crypto_type = NONE_CRYPT;
2747 wmi->grp_crypto_type = NONE_CRYPT;
2748
2749 wmi->ht_allowed[A_BAND_24GHZ] = 1;
2750 wmi->ht_allowed[A_BAND_5GHZ] = 1;
2751
2752 return wmi;
2753}
2754
2755void ath6kl_wmi_shutdown(struct wmi *wmi)
2756{
2757 if (!wmi)
2758 return;
2759
2760 wlan_node_table_cleanup(&wmi->scan_table);
2761 kfree(wmi);
2762}
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
new file mode 100644
index 000000000000..bbaa7049f4a8
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -0,0 +1,2024 @@
1/*
2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17/*
18 * This file contains the definitions of the WMI protocol specified in the
19 * Wireless Module Interface (WMI). It includes definitions of all the
20 * commands and events. Commands are messages from the host to the WM.
21 * Events and Replies are messages from the WM to the host.
22 */
23
24#ifndef WMI_H
25#define WMI_H
26
27#include <linux/ieee80211.h>
28
29#include "htc.h"
30
31#define HTC_PROTOCOL_VERSION 0x0002
32#define WMI_PROTOCOL_VERSION 0x0002
33#define WMI_CONTROL_MSG_MAX_LEN 256
34#define is_ethertype(type_or_len) ((type_or_len) >= 0x0600)
35
36#define IP_ETHERTYPE 0x0800
37
38#define WMI_IMPLICIT_PSTREAM 0xFF
39#define WMI_MAX_THINSTREAM 15
40
41#define SSID_IE_LEN_INDEX 13
42
43/* Host side link management data structures */
44#define SIG_QUALITY_THRESH_LVLS 6
45#define SIG_QUALITY_UPPER_THRESH_LVLS SIG_QUALITY_THRESH_LVLS
46#define SIG_QUALITY_LOWER_THRESH_LVLS SIG_QUALITY_THRESH_LVLS
47
48#define A_BAND_24GHZ 0
49#define A_BAND_5GHZ 1
50#define A_NUM_BANDS 2
51
52/* in ms */
53#define WMI_IMPLICIT_PSTREAM_INACTIVITY_INT 5000
54
55/*
56 * There are no signed versions of __le16 and __le32, so for a temporary
57 * solution come up with our own version. The idea is from fs/ntfs/types.h.
58 *
59 * Use a_ prefix so that it doesn't conflict if we get proper support to
60 * linux/types.h.
61 */
62typedef __s16 __bitwise a_sle16;
63typedef __s32 __bitwise a_sle32;
64
/* Convert a CPU-order s32 to a little-endian signed 32-bit value. */
static inline a_sle32 a_cpu_to_sle32(s32 val)
{
	return (__force a_sle32) cpu_to_le32(val);
}
69
/* Convert a little-endian signed 32-bit value to a CPU-order s32. */
static inline s32 a_sle32_to_cpu(a_sle32 val)
{
	return le32_to_cpu((__force __le32) val);
}
74
/* Convert a CPU-order s16 to a little-endian signed 16-bit value. */
static inline a_sle16 a_cpu_to_sle16(s16 val)
{
	return (__force a_sle16) cpu_to_le16(val);
}
79
/* Convert a little-endian signed 16-bit value to a CPU-order s16. */
static inline s16 a_sle16_to_cpu(a_sle16 val)
{
	return le16_to_cpu((__force __le16) val);
}
84
85struct sq_threshold_params {
86 s16 upper_threshold[SIG_QUALITY_UPPER_THRESH_LVLS];
87 s16 lower_threshold[SIG_QUALITY_LOWER_THRESH_LVLS];
88 u32 upper_threshold_valid_count;
89 u32 lower_threshold_valid_count;
90 u32 polling_interval;
91 u8 weight;
92 u8 last_rssi;
93 u8 last_rssi_poll_event;
94};
95
96struct wmi_stats {
97 u32 cmd_len_err;
98 u32 cmd_id_err;
99};
100
101struct wmi_data_sync_bufs {
102 u8 traffic_class;
103 struct sk_buff *skb;
104};
105
106/* WMM stream classes */
107#define WMM_NUM_AC 4
108#define WMM_AC_BE 0 /* best effort */
109#define WMM_AC_BK 1 /* background */
110#define WMM_AC_VI 2 /* video */
111#define WMM_AC_VO 3 /* voice */
112
/* Per-device WMI state; created by ath6kl_wmi_init(). */
struct wmi {
	bool ready;
	/* per-AC stream flags; protected by @lock (see qos_state_init) */
	u16 stream_exist_for_ac[WMM_NUM_AC];
	/* bitmap of active fat pipes; protected by @lock */
	u8 fat_pipe_exist;
	/* opaque device pointer passed to ath6kl_wmi_init() */
	void *parent_dev;
	/* rx error counters (bad length / unknown id) */
	struct wmi_stats stat;
	struct ath6kl_node_table scan_table;
	/* current BSSID, exported via ath6kl_wmi_get_current_bssid() */
	u8 bssid[ETH_ALEN];
	u8 pwr_mode;		/* defaults to REC_POWER */
	u8 phy_mode;		/* defaults to WMI_11G_MODE */
	/* cached copy of the value sent by set_keepalive_cmd */
	u8 keep_alive_intvl;
	spinlock_t lock;
	enum htc_endpoint_id ep_id;
	struct sq_threshold_params
	    sq_threshld[SIGNAL_QUALITY_METRICS_NUM_MAX];
	enum crypto_type pair_crypto_type;	/* defaults to NONE_CRYPT */
	enum crypto_type grp_crypto_type;	/* defaults to NONE_CRYPT */
	bool is_wmm_enabled;
	/* HT enable per band; both bands default to 1 */
	u8 ht_allowed[A_NUM_BANDS];
	u8 traffic_class;
	bool is_probe_ssid;
};
135
136struct host_app_area {
137 u32 wmi_protocol_ver;
138};
139
140enum wmi_msg_type {
141 DATA_MSGTYPE = 0x0,
142 CNTL_MSGTYPE,
143 SYNC_MSGTYPE,
144 OPT_MSGTYPE,
145};
146
147/*
148 * Macros for operating on WMI_DATA_HDR (info) field
149 */
150
151#define WMI_DATA_HDR_MSG_TYPE_MASK 0x03
152#define WMI_DATA_HDR_MSG_TYPE_SHIFT 0
153#define WMI_DATA_HDR_UP_MASK 0x07
154#define WMI_DATA_HDR_UP_SHIFT 2
155
156/* In AP mode, the same bit (b5) is used to indicate Power save state in
157 * the Rx dir and More data bit state in the tx direction.
158 */
159#define WMI_DATA_HDR_PS_MASK 0x1
160#define WMI_DATA_HDR_PS_SHIFT 5
161
162#define WMI_DATA_HDR_MORE_MASK 0x1
163#define WMI_DATA_HDR_MORE_SHIFT 5
164
165enum wmi_data_hdr_data_type {
166 WMI_DATA_HDR_DATA_TYPE_802_3 = 0,
167 WMI_DATA_HDR_DATA_TYPE_802_11,
168
169 /* used to be used for the PAL */
170 WMI_DATA_HDR_DATA_TYPE_ACL,
171};
172
173#define WMI_DATA_HDR_DATA_TYPE_MASK 0x3
174#define WMI_DATA_HDR_DATA_TYPE_SHIFT 6
175
176/* Macros for operating on WMI_DATA_HDR (info2) field */
177#define WMI_DATA_HDR_SEQNO_MASK 0xFFF
178#define WMI_DATA_HDR_SEQNO_SHIFT 0
179
180#define WMI_DATA_HDR_AMSDU_MASK 0x1
181#define WMI_DATA_HDR_AMSDU_SHIFT 12
182
183#define WMI_DATA_HDR_META_MASK 0x7
184#define WMI_DATA_HDR_META_SHIFT 13
185
186struct wmi_data_hdr {
187 s8 rssi;
188
189 /*
190 * usage of 'info' field(8-bit):
191 *
192 * b1:b0 - WMI_MSG_TYPE
193 * b4:b3:b2 - UP(tid)
194 * b5 - Used in AP mode.
195 * More-data in tx dir, PS in rx.
196 * b7:b6 - Dot3 header(0),
197 * Dot11 Header(1),
198 * ACL data(2)
199 */
200 u8 info;
201
202 /*
203 * usage of 'info2' field(16-bit):
204 *
205 * b11:b0 - seq_no
206 * b12 - A-MSDU?
207 * b15:b13 - META_DATA_VERSION 0 - 7
208 */
209 __le16 info2;
210 __le16 info3;
211} __packed;
212
/* Extract the user priority (tid), bits b4:b2 of the info field. */
static inline u8 wmi_data_hdr_get_up(struct wmi_data_hdr *dhdr)
{
	return (dhdr->info >> WMI_DATA_HDR_UP_SHIFT) & WMI_DATA_HDR_UP_MASK;
}
217
/* Store the user priority (tid) into bits b4:b2 of the info field. */
static inline void wmi_data_hdr_set_up(struct wmi_data_hdr *dhdr,
				       u8 usr_pri)
{
	dhdr->info &= ~(WMI_DATA_HDR_UP_MASK << WMI_DATA_HDR_UP_SHIFT);
	dhdr->info |= usr_pri << WMI_DATA_HDR_UP_SHIFT;
}
224
/*
 * Return non-zero if the frame carries an 802.11 header (data-type field,
 * bits b7:b6 of info, equals WMI_DATA_HDR_DATA_TYPE_802_11).
 */
static inline u8 wmi_data_hdr_get_dot11(struct wmi_data_hdr *dhdr)
{
	u8 data_type;

	data_type = (dhdr->info >> WMI_DATA_HDR_DATA_TYPE_SHIFT) &
				   WMI_DATA_HDR_DATA_TYPE_MASK;
	return (data_type == WMI_DATA_HDR_DATA_TYPE_802_11);
}
233
/* Extract the 12-bit sequence number from the little-endian info2 field. */
static inline u16 wmi_data_hdr_get_seqno(struct wmi_data_hdr *dhdr)
{
	return (le16_to_cpu(dhdr->info2) >> WMI_DATA_HDR_SEQNO_SHIFT) &
				WMI_DATA_HDR_SEQNO_MASK;
}
239
/* Return non-zero if the A-MSDU bit (b12 of info2) is set. */
static inline u8 wmi_data_hdr_is_amsdu(struct wmi_data_hdr *dhdr)
{
	return (le16_to_cpu(dhdr->info2) >> WMI_DATA_HDR_AMSDU_SHIFT) &
			       WMI_DATA_HDR_AMSDU_MASK;
}
245
/* Extract the meta-data version (bits b15:b13 of info2), range 0-7. */
static inline u8 wmi_data_hdr_get_meta(struct wmi_data_hdr *dhdr)
{
	return (le16_to_cpu(dhdr->info2) >> WMI_DATA_HDR_META_SHIFT) &
			      WMI_DATA_HDR_META_MASK;
}
251
252/* Tx meta version definitions */
253#define WMI_MAX_TX_META_SZ 12
254#define WMI_META_VERSION_1 0x01
255#define WMI_META_VERSION_2 0x02
256
257struct wmi_tx_meta_v1 {
258 /* packet ID to identify the tx request */
259 u8 pkt_id;
260
261 /* rate policy to be used for the tx of this frame */
262 u8 rate_plcy_id;
263} __packed;
264
265struct wmi_tx_meta_v2 {
266 /*
267 * Offset from start of the WMI header for csum calculation to
268 * begin.
269 */
270 u8 csum_start;
271
272 /* offset from start of WMI header where final csum goes */
273 u8 csum_dest;
274
275 /* no of bytes over which csum is calculated */
276 u8 csum_flags;
277} __packed;
278
279struct wmi_rx_meta_v1 {
280 u8 status;
281
282 /* rate index mapped to rate at which this packet was received. */
283 u8 rix;
284
285 /* rssi of packet */
286 u8 rssi;
287
288 /* rf channel during packet reception */
289 u8 channel;
290
291 __le16 flags;
292} __packed;
293
294struct wmi_rx_meta_v2 {
295 __le16 csum;
296
297 /* bit 0 set -partial csum valid bit 1 set -test mode */
298 u8 csum_flags;
299} __packed;
300
301/* Control Path */
302struct wmi_cmd_hdr {
303 __le16 cmd_id;
304
305 /* info1 - 16 bits
306 * b03:b00 - id
307 * b15:b04 - unused */
308 __le16 info1;
309
310 /* for alignment */
311 __le16 reserved;
312} __packed;
313
314/* List of WMI commands */
315enum wmi_cmd_id {
316 WMI_CONNECT_CMDID = 0x0001,
317 WMI_RECONNECT_CMDID,
318 WMI_DISCONNECT_CMDID,
319 WMI_SYNCHRONIZE_CMDID,
320 WMI_CREATE_PSTREAM_CMDID,
321 WMI_DELETE_PSTREAM_CMDID,
322 WMI_START_SCAN_CMDID,
323 WMI_SET_SCAN_PARAMS_CMDID,
324 WMI_SET_BSS_FILTER_CMDID,
325 WMI_SET_PROBED_SSID_CMDID, /* 10 */
326 WMI_SET_LISTEN_INT_CMDID,
327 WMI_SET_BMISS_TIME_CMDID,
328 WMI_SET_DISC_TIMEOUT_CMDID,
329 WMI_GET_CHANNEL_LIST_CMDID,
330 WMI_SET_BEACON_INT_CMDID,
331 WMI_GET_STATISTICS_CMDID,
332 WMI_SET_CHANNEL_PARAMS_CMDID,
333 WMI_SET_POWER_MODE_CMDID,
334 WMI_SET_IBSS_PM_CAPS_CMDID,
335 WMI_SET_POWER_PARAMS_CMDID, /* 20 */
336 WMI_SET_POWERSAVE_TIMERS_POLICY_CMDID,
337 WMI_ADD_CIPHER_KEY_CMDID,
338 WMI_DELETE_CIPHER_KEY_CMDID,
339 WMI_ADD_KRK_CMDID,
340 WMI_DELETE_KRK_CMDID,
341 WMI_SET_PMKID_CMDID,
342 WMI_SET_TX_PWR_CMDID,
343 WMI_GET_TX_PWR_CMDID,
344 WMI_SET_ASSOC_INFO_CMDID,
345 WMI_ADD_BAD_AP_CMDID, /* 30 */
346 WMI_DELETE_BAD_AP_CMDID,
347 WMI_SET_TKIP_COUNTERMEASURES_CMDID,
348 WMI_RSSI_THRESHOLD_PARAMS_CMDID,
349 WMI_TARGET_ERROR_REPORT_BITMASK_CMDID,
350 WMI_SET_ACCESS_PARAMS_CMDID,
351 WMI_SET_RETRY_LIMITS_CMDID,
352 WMI_SET_OPT_MODE_CMDID,
353 WMI_OPT_TX_FRAME_CMDID,
354 WMI_SET_VOICE_PKT_SIZE_CMDID,
355 WMI_SET_MAX_SP_LEN_CMDID, /* 40 */
356 WMI_SET_ROAM_CTRL_CMDID,
357 WMI_GET_ROAM_TBL_CMDID,
358 WMI_GET_ROAM_DATA_CMDID,
359 WMI_ENABLE_RM_CMDID,
360 WMI_SET_MAX_OFFHOME_DURATION_CMDID,
361 WMI_EXTENSION_CMDID, /* Non-wireless extensions */
362 WMI_SNR_THRESHOLD_PARAMS_CMDID,
363 WMI_LQ_THRESHOLD_PARAMS_CMDID,
364 WMI_SET_LPREAMBLE_CMDID,
365 WMI_SET_RTS_CMDID, /* 50 */
366 WMI_CLR_RSSI_SNR_CMDID,
367 WMI_SET_FIXRATES_CMDID,
368 WMI_GET_FIXRATES_CMDID,
369 WMI_SET_AUTH_MODE_CMDID,
370 WMI_SET_REASSOC_MODE_CMDID,
371 WMI_SET_WMM_CMDID,
372 WMI_SET_WMM_TXOP_CMDID,
373 WMI_TEST_CMDID,
374
375 /* COEX AR6002 only */
376 WMI_SET_BT_STATUS_CMDID,
377 WMI_SET_BT_PARAMS_CMDID, /* 60 */
378
379 WMI_SET_KEEPALIVE_CMDID,
380 WMI_GET_KEEPALIVE_CMDID,
381 WMI_SET_APPIE_CMDID,
382 WMI_GET_APPIE_CMDID,
383 WMI_SET_WSC_STATUS_CMDID,
384
385 /* Wake on Wireless */
386 WMI_SET_HOST_SLEEP_MODE_CMDID,
387 WMI_SET_WOW_MODE_CMDID,
388 WMI_GET_WOW_LIST_CMDID,
389 WMI_ADD_WOW_PATTERN_CMDID,
390 WMI_DEL_WOW_PATTERN_CMDID, /* 70 */
391
392 WMI_SET_FRAMERATES_CMDID,
393 WMI_SET_AP_PS_CMDID,
394 WMI_SET_QOS_SUPP_CMDID,
395
396 /* WMI_THIN_RESERVED_... mark the start and end
397 * values for WMI_THIN_RESERVED command IDs. These
398 * command IDs can be found in wmi_thin.h */
399 WMI_THIN_RESERVED_START = 0x8000,
400 WMI_THIN_RESERVED_END = 0x8fff,
401
402 /* Developer commands starts at 0xF000 */
403 WMI_SET_BITRATE_CMDID = 0xF000,
404 WMI_GET_BITRATE_CMDID,
405 WMI_SET_WHALPARAM_CMDID,
406 WMI_SET_MAC_ADDRESS_CMDID,
407 WMI_SET_AKMP_PARAMS_CMDID,
408 WMI_SET_PMKID_LIST_CMDID,
409 WMI_GET_PMKID_LIST_CMDID,
410 WMI_ABORT_SCAN_CMDID,
411 WMI_SET_TARGET_EVENT_REPORT_CMDID,
412
413 /* Unused */
414 WMI_UNUSED1,
415 WMI_UNUSED2,
416
417 /* AP mode commands */
418 WMI_AP_HIDDEN_SSID_CMDID,
419 WMI_AP_SET_NUM_STA_CMDID,
420 WMI_AP_ACL_POLICY_CMDID,
421 WMI_AP_ACL_MAC_LIST_CMDID,
422 WMI_AP_CONFIG_COMMIT_CMDID,
423 WMI_AP_SET_MLME_CMDID,
424 WMI_AP_SET_PVB_CMDID,
425 WMI_AP_CONN_INACT_CMDID,
426 WMI_AP_PROT_SCAN_TIME_CMDID,
427 WMI_AP_SET_COUNTRY_CMDID,
428 WMI_AP_SET_DTIM_CMDID,
429 WMI_AP_MODE_STAT_CMDID,
430
431 WMI_SET_IP_CMDID,
432 WMI_SET_PARAMS_CMDID,
433 WMI_SET_MCAST_FILTER_CMDID,
434 WMI_DEL_MCAST_FILTER_CMDID,
435
436 WMI_ALLOW_AGGR_CMDID,
437 WMI_ADDBA_REQ_CMDID,
438 WMI_DELBA_REQ_CMDID,
439 WMI_SET_HT_CAP_CMDID,
440 WMI_SET_HT_OP_CMDID,
441 WMI_SET_TX_SELECT_RATES_CMDID,
442 WMI_SET_TX_SGI_PARAM_CMDID,
443 WMI_SET_RATE_POLICY_CMDID,
444
445 WMI_HCI_CMD_CMDID,
446 WMI_RX_FRAME_FORMAT_CMDID,
447 WMI_SET_THIN_MODE_CMDID,
448 WMI_SET_BT_WLAN_CONN_PRECEDENCE_CMDID,
449
450 WMI_AP_SET_11BG_RATESET_CMDID,
451 WMI_SET_PMK_CMDID,
452 WMI_MCAST_FILTER_CMDID,
453
454 /* COEX CMDID AR6003 */
455 WMI_SET_BTCOEX_FE_ANT_CMDID,
456 WMI_SET_BTCOEX_COLOCATED_BT_DEV_CMDID,
457 WMI_SET_BTCOEX_SCO_CONFIG_CMDID,
458 WMI_SET_BTCOEX_A2DP_CONFIG_CMDID,
459 WMI_SET_BTCOEX_ACLCOEX_CONFIG_CMDID,
460 WMI_SET_BTCOEX_BTINQUIRY_PAGE_CONFIG_CMDID,
461 WMI_SET_BTCOEX_DEBUG_CMDID,
462 WMI_SET_BTCOEX_BT_OPERATING_STATUS_CMDID,
463 WMI_GET_BTCOEX_STATS_CMDID,
464 WMI_GET_BTCOEX_CONFIG_CMDID,
465
466 WMI_SET_DFS_ENABLE_CMDID, /* F034 */
467 WMI_SET_DFS_MINRSSITHRESH_CMDID,
468 WMI_SET_DFS_MAXPULSEDUR_CMDID,
469 WMI_DFS_RADAR_DETECTED_CMDID,
470
471 /* P2P commands */
472 WMI_P2P_SET_CONFIG_CMDID, /* F038 */
473 WMI_WPS_SET_CONFIG_CMDID,
474 WMI_SET_REQ_DEV_ATTR_CMDID,
475 WMI_P2P_FIND_CMDID,
476 WMI_P2P_STOP_FIND_CMDID,
477 WMI_P2P_GO_NEG_START_CMDID,
478 WMI_P2P_LISTEN_CMDID,
479
480 WMI_CONFIG_TX_MAC_RULES_CMDID, /* F040 */
481 WMI_SET_PROMISCUOUS_MODE_CMDID,
482 WMI_RX_FRAME_FILTER_CMDID,
483 WMI_SET_CHANNEL_CMDID,
484
485 /* WAC commands */
486 WMI_ENABLE_WAC_CMDID,
487 WMI_WAC_SCAN_REPLY_CMDID,
488 WMI_WAC_CTRL_REQ_CMDID,
489 WMI_SET_DIV_PARAMS_CMDID,
490
491 WMI_GET_PMK_CMDID,
492 WMI_SET_PASSPHRASE_CMDID,
493 WMI_SEND_ASSOC_RES_CMDID,
494 WMI_SET_ASSOC_REQ_RELAY_CMDID,
495 WMI_GET_RFKILL_MODE_CMDID,
496
497 /* ACS command, consists of sub-commands */
498 WMI_ACS_CTRL_CMDID,
499
500 /* Ultra low power store / recall commands */
501 WMI_STORERECALL_CONFIGURE_CMDID,
502 WMI_STORERECALL_RECALL_CMDID,
503 WMI_STORERECALL_HOST_READY_CMDID,
504 WMI_FORCE_TARGET_ASSERT_CMDID,
505 WMI_SET_EXCESS_TX_RETRY_THRES_CMDID,
506};
507
508/* WMI_CONNECT_CMDID */
509enum network_type {
510 INFRA_NETWORK = 0x01,
511 ADHOC_NETWORK = 0x02,
512 ADHOC_CREATOR = 0x04,
513 AP_NETWORK = 0x10,
514};
515
516enum dot11_auth_mode {
517 OPEN_AUTH = 0x01,
518 SHARED_AUTH = 0x02,
519
520 /* different from IEEE_AUTH_MODE definitions */
521 LEAP_AUTH = 0x04,
522};
523
524enum {
525 AUTH_IDLE,
526 AUTH_OPEN_IN_PROGRESS,
527};
528
529enum auth_mode {
530 NONE_AUTH = 0x01,
531 WPA_AUTH = 0x02,
532 WPA2_AUTH = 0x04,
533 WPA_PSK_AUTH = 0x08,
534 WPA2_PSK_AUTH = 0x10,
535 WPA_AUTH_CCKM = 0x20,
536 WPA2_AUTH_CCKM = 0x40,
537};
538
539#define WMI_MIN_CRYPTO_TYPE NONE_CRYPT
540#define WMI_MAX_CRYPTO_TYPE (AES_CRYPT + 1)
541
542#define WMI_MIN_KEY_INDEX 0
543#define WMI_MAX_KEY_INDEX 3
544
545#define WMI_MAX_KEY_LEN 32
546
547/*
548 * NB: these values are ordered carefully; there are lots of
549 * of implications in any reordering. In particular beware
550 * that 4 is not used to avoid conflicting with IEEE80211_F_PRIVACY.
551 */
552#define ATH6KL_CIPHER_WEP 0
553#define ATH6KL_CIPHER_TKIP 1
554#define ATH6KL_CIPHER_AES_OCB 2
555#define ATH6KL_CIPHER_AES_CCM 3
556#define ATH6KL_CIPHER_CKIP 5
557#define ATH6KL_CIPHER_CCKM_KRK 6
558#define ATH6KL_CIPHER_NONE 7 /* pseudo value */
559
560/*
561 * 802.11 rate set.
562 */
563#define ATH6KL_RATE_MAXSIZE 15 /* max rates we'll handle */
564
565#define ATH_OUI_TYPE 0x01
566#define WPA_OUI_TYPE 0x01
567#define WMM_PARAM_OUI_SUBTYPE 0x01
568#define WMM_OUI_TYPE 0x02
569#define WSC_OUT_TYPE 0x04
570
571enum wmi_connect_ctrl_flags_bits {
572 CONNECT_ASSOC_POLICY_USER = 0x0001,
573 CONNECT_SEND_REASSOC = 0x0002,
574 CONNECT_IGNORE_WPAx_GROUP_CIPHER = 0x0004,
575 CONNECT_PROFILE_MATCH_DONE = 0x0008,
576 CONNECT_IGNORE_AAC_BEACON = 0x0010,
577 CONNECT_CSA_FOLLOW_BSS = 0x0020,
578 CONNECT_DO_WPA_OFFLOAD = 0x0040,
579 CONNECT_DO_NOT_DEAUTH = 0x0080,
580};
581
582struct wmi_connect_cmd {
583 u8 nw_type;
584 u8 dot11_auth_mode;
585 u8 auth_mode;
586 u8 prwise_crypto_type;
587 u8 prwise_crypto_len;
588 u8 grp_crypto_type;
589 u8 grp_crypto_len;
590 u8 ssid_len;
591 u8 ssid[IEEE80211_MAX_SSID_LEN];
592 __le16 ch;
593 u8 bssid[ETH_ALEN];
594 __le32 ctrl_flags;
595} __packed;
596
597/* WMI_RECONNECT_CMDID */
598struct wmi_reconnect_cmd {
599 /* channel hint */
600 __le16 channel;
601
602 /* mandatory if set */
603 u8 bssid[ETH_ALEN];
604} __packed;
605
606/* WMI_ADD_CIPHER_KEY_CMDID */
607enum key_usage {
608 PAIRWISE_USAGE = 0x00,
609 GROUP_USAGE = 0x01,
610
611 /* default Tx Key - static WEP only */
612 TX_USAGE = 0x02,
613};
614
615/*
616 * Bit Flag
617 * Bit 0 - Initialise TSC - default is Initialize
618 */
619#define KEY_OP_INIT_TSC 0x01
620#define KEY_OP_INIT_RSC 0x02
621
622/* default initialise the TSC & RSC */
623#define KEY_OP_INIT_VAL 0x03
624#define KEY_OP_VALID_MASK 0x03
625
626struct wmi_add_cipher_key_cmd {
627 u8 key_index;
628 u8 key_type;
629
630 /* enum key_usage */
631 u8 key_usage;
632
633 u8 key_len;
634
635 /* key replay sequence counter */
636 u8 key_rsc[8];
637
638 u8 key[WLAN_MAX_KEY_LEN];
639
640 /* additional key control info */
641 u8 key_op_ctrl;
642
643 u8 key_mac_addr[ETH_ALEN];
644} __packed;
645
646/* WMI_DELETE_CIPHER_KEY_CMDID */
647struct wmi_delete_cipher_key_cmd {
648 u8 key_index;
649} __packed;
650
#define WMI_KRK_LEN 16

/* WMI_ADD_KRK_CMDID */
struct wmi_add_krk_cmd {
	u8 krk[WMI_KRK_LEN];
} __packed;

/* WMI_SETPMKID_CMDID */

#define WMI_PMKID_LEN 16

enum pmkid_enable_flg {
	PMKID_DISABLE = 0,
	PMKID_ENABLE = 1,
};

struct wmi_setpmkid_cmd {
	u8 bssid[ETH_ALEN];

	/* enum pmkid_enable_flg */
	u8 enable;

	u8 pmkid[WMI_PMKID_LEN];
} __packed;
675
/* WMI_START_SCAN_CMD */
enum wmi_scan_type {
	WMI_LONG_SCAN = 0,
	WMI_SHORT_SCAN = 1,
};

struct wmi_start_scan_cmd {
	__le32 force_fg_scan;

	/* for legacy cisco AP compatibility */
	__le32 is_legacy;

	/* max duration in the home channel(msec) */
	__le32 home_dwell_time;

	/* time interval between scans (msec) */
	__le32 force_scan_intvl;

	/* enum wmi_scan_type */
	u8 scan_type;

	/* how many channels follow */
	u8 num_ch;

	/*
	 * channels in MHz; variable-length tail, num_ch entries
	 * (one-element array idiom)
	 */
	__le16 ch_list[1];
} __packed;
703
/* WMI_SET_SCAN_PARAMS_CMDID */
#define WMI_SHORTSCANRATIO_DEFAULT 3

/*
 * Warning: scan control flag value of 0xFF is used to disable
 * all flags in WMI_SCAN_PARAMS_CMD. Do not add any more
 * flags here
 */
enum wmi_scan_ctrl_flags_bits {

	/* set if can scan in the connect cmd */
	CONNECT_SCAN_CTRL_FLAGS = 0x01,

	/* set if scan for the SSID it is already connected to */
	SCAN_CONNECTED_CTRL_FLAGS = 0x02,

	/* set if enable active scan */
	ACTIVE_SCAN_CTRL_FLAGS = 0x04,

	/* set if enable roam scan when bmiss and lowrssi */
	ROAM_SCAN_CTRL_FLAGS = 0x08,

	/* set if follows customer BSSINFO reporting rule */
	REPORT_BSSINFO_CTRL_FLAGS = 0x10,

	/* if disabled, target doesn't scan after a disconnect event */
	ENABLE_AUTO_CTRL_FLAGS = 0x20,

	/*
	 * Scan complete event with canceled status will be generated when
	 * a scan is preempted before it gets completed.
	 */
	ENABLE_SCAN_ABORT_EVENT = 0x40
};

#define DEFAULT_SCAN_CTRL_FLAGS \
	(CONNECT_SCAN_CTRL_FLAGS | \
	 SCAN_CONNECTED_CTRL_FLAGS | \
	 ACTIVE_SCAN_CTRL_FLAGS | \
	 ROAM_SCAN_CTRL_FLAGS | \
	 ENABLE_AUTO_CTRL_FLAGS)

struct wmi_scan_params_cmd {
	/* sec */
	__le16 fg_start_period;

	/* sec */
	__le16 fg_end_period;

	/* sec */
	__le16 bg_period;

	/* msec */
	__le16 maxact_chdwell_time;

	/* msec */
	__le16 pas_chdwell_time;

	/* how many shorts scan for one long */
	u8 short_scan_ratio;

	/* see, enum wmi_scan_ctrl_flags_bits */
	u8 scan_ctrl_flags;

	/* msec */
	__le16 minact_chdwell_time;

	/* max active scans per ssid */
	__le16 maxact_scan_per_ssid;

	/* msecs */
	__le32 max_dfsch_act_time;
} __packed;
776
/* WMI_SET_BSS_FILTER_CMDID */
enum wmi_bss_filter {
	/* no beacons forwarded */
	NONE_BSS_FILTER = 0x0,

	/* all beacons forwarded */
	ALL_BSS_FILTER,

	/* only beacons matching profile */
	PROFILE_FILTER,

	/* all but beacons matching profile */
	ALL_BUT_PROFILE_FILTER,

	/* only beacons matching current BSS */
	CURRENT_BSS_FILTER,

	/* all but beacons matching BSS */
	ALL_BUT_BSS_FILTER,

	/* beacons matching probed ssid */
	PROBED_SSID_FILTER,

	/* marker only */
	LAST_BSS_FILTER,
};

struct wmi_bss_filter_cmd {
	/* see, enum wmi_bss_filter */
	u8 bss_filter;

	/* for alignment */
	u8 reserved1;

	/* for alignment */
	__le16 reserved2;

	__le32 ie_mask;
} __packed;
816
/* WMI_SET_PROBED_SSID_CMDID */
#define MAX_PROBED_SSID_INDEX 9

enum wmi_ssid_flag {
	/* disables entry */
	DISABLE_SSID_FLAG = 0,

	/* probes specified ssid */
	SPECIFIC_SSID_FLAG = 0x01,

	/* probes for any ssid */
	ANY_SSID_FLAG = 0x02,
};

struct wmi_probed_ssid_cmd {
	/* 0 to MAX_PROBED_SSID_INDEX */
	u8 entry_index;

	/* see, enum wmi_ssid_flag */
	u8 flag;

	u8 ssid_len;
	u8 ssid[IEEE80211_MAX_SSID_LEN];
} __packed;
841
/*
 * WMI_SET_LISTEN_INT_CMDID
 * The Listen interval is between 15 and 3000 TUs
 */
struct wmi_listen_int_cmd {
	__le16 listen_intvl;
	__le16 num_beacons;
} __packed;

/* WMI_SET_POWER_MODE_CMDID */
enum wmi_power_mode {
	REC_POWER = 0x01,
	MAX_PERF_POWER,
};

struct wmi_power_mode_cmd {
	/* see, enum wmi_power_mode */
	u8 pwr_mode;
} __packed;
861
/*
 * Policy to determine whether power save failure event should be sent to
 * host during scanning
 */
enum power_save_fail_event_policy {
	SEND_POWER_SAVE_FAIL_EVENT_ALWAYS = 1,
	IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN = 2,
};

/* WMI_SET_POWER_PARAMS_CMDID */
struct wmi_power_params_cmd {
	/* msec */
	__le16 idle_period;

	__le16 pspoll_number;
	__le16 dtim_policy;
	__le16 tx_wakeup_policy;
	__le16 num_tx_to_wakeup;

	/* see, enum power_save_fail_event_policy */
	__le16 ps_fail_event_policy;
} __packed;

/* WMI_SET_DISC_TIMEOUT_CMDID */
struct wmi_disc_timeout_cmd {
	/* seconds */
	u8 discon_timeout;
} __packed;
887
/* Traffic direction of a pstream */
enum dir_type {
	UPLINK_TRAFFIC = 0,
	DNLINK_TRAFFIC = 1,
	BIDIR_TRAFFIC = 2,
};

/* Voice power-save capability per access class */
enum voiceps_cap_type {
	DISABLE_FOR_THIS_AC = 0,
	ENABLE_FOR_THIS_AC = 1,
	ENABLE_FOR_ALL_AC = 2,
};

enum traffic_type {
	TRAFFIC_TYPE_APERIODIC = 0,
	TRAFFIC_TYPE_PERIODIC = 1,
};

/* WMI_SYNCHRONIZE_CMDID */
struct wmi_sync_cmd {
	u8 data_sync_map;
} __packed;
909
/* WMI_CREATE_PSTREAM_CMDID */
struct wmi_create_pstream_cmd {
	/* msec */
	__le32 min_service_int;

	/* msec */
	__le32 max_service_int;

	/* msec */
	__le32 inactivity_int;

	/* msec */
	__le32 suspension_int;

	__le32 service_start_time;

	/* in bps */
	__le32 min_data_rate;

	/* in bps */
	__le32 mean_data_rate;

	/* in bps */
	__le32 peak_data_rate;

	__le32 max_burst_size;
	__le32 delay_bound;

	/* in bps */
	__le32 min_phy_rate;

	__le32 sba;
	__le32 medium_time;

	/* in octets */
	__le16 nominal_msdu;

	/* in octets */
	__le16 max_msdu;

	u8 traffic_class;

	/* see, enum dir_type */
	u8 traffic_direc;

	u8 rx_queue_num;

	/* see, enum traffic_type */
	u8 traffic_type;

	/* see, enum voiceps_cap_type */
	u8 voice_psc_cap;
	u8 tsid;

	/* 802.1D user priority */
	u8 user_pri;

	/* nominal phy rate */
	u8 nominal_phy;
} __packed;

/* WMI_DELETE_PSTREAM_CMDID */
struct wmi_delete_pstream_cmd {
	u8 tx_queue_num;
	u8 rx_queue_num;
	u8 traffic_direc;
	u8 traffic_class;
	u8 tsid;
} __packed;
979
/* WMI_SET_CHANNEL_PARAMS_CMDID */
enum wmi_phy_mode {
	WMI_11A_MODE = 0x1,
	WMI_11G_MODE = 0x2,
	WMI_11AG_MODE = 0x3,
	WMI_11B_MODE = 0x4,
	WMI_11GONLY_MODE = 0x5,
};

#define WMI_MAX_CHANNELS 32
990
/*
 * WMI_RSSI_THRESHOLD_PARAMS_CMDID
 * Setting the polltime to 0 would disable polling. Threshold values are
 * in the ascending order, and should agree to:
 * (lowThreshold_lowerVal < lowThreshold_upperVal < highThreshold_lowerVal
 * < highThreshold_upperVal)
 */

struct wmi_rssi_threshold_params_cmd {
	/* polling time as a factor of LI */
	__le32 poll_time;

	/* lowest of upper */
	a_sle16 thresh_above1_val;

	a_sle16 thresh_above2_val;
	a_sle16 thresh_above3_val;
	a_sle16 thresh_above4_val;
	a_sle16 thresh_above5_val;

	/* highest of upper */
	a_sle16 thresh_above6_val;

	/* lowest of below */
	a_sle16 thresh_below1_val;

	a_sle16 thresh_below2_val;
	a_sle16 thresh_below3_val;
	a_sle16 thresh_below4_val;
	a_sle16 thresh_below5_val;

	/* highest of below */
	a_sle16 thresh_below6_val;

	/* "alpha" */
	u8 weight;

	u8 reserved[3];
} __packed;
1030
/*
 * WMI_SNR_THRESHOLD_PARAMS_CMDID
 * Setting the polltime to 0 would disable polling.
 */

struct wmi_snr_threshold_params_cmd {
	/* polling time as a factor of LI */
	__le32 poll_time;

	/* "alpha" */
	u8 weight;

	/* lowest of upper */
	u8 thresh_above1_val;

	u8 thresh_above2_val;
	u8 thresh_above3_val;

	/* highest of upper */
	u8 thresh_above4_val;

	/* lowest of below */
	u8 thresh_below1_val;

	u8 thresh_below2_val;
	u8 thresh_below3_val;

	/* highest of below */
	u8 thresh_below4_val;

	u8 reserved[3];
} __packed;
1063
/* WMI_SET_LPREAMBLE_CMDID */
enum wmi_preamble_policy {
	WMI_IGNORE_BARKER_IN_ERP = 0,
	WMI_DONOT_IGNORE_BARKER_IN_ERP
};

struct wmi_set_lpreamble_cmd {
	u8 status;

	/* see, enum wmi_preamble_policy */
	u8 preamble_policy;
} __packed;

/* WMI_SET_RTS_CMDID */
struct wmi_set_rts_cmd {
	__le16 threshold;
} __packed;
1077
/* WMI_SET_TX_PWR_CMDID */
struct wmi_set_tx_pwr_cmd {
	/* in dbM units */
	u8 dbM;
} __packed;

/* WMI_GET_TX_PWR_CMDID reply */
struct wmi_tx_pwr_reply {
	/* in dbM units */
	u8 dbM;
} __packed;
1088
1089struct wmi_report_sleep_state_event {
1090 __le32 sleep_state;
1091};
1092
/* values for wmi_report_sleep_state_event.sleep_state */
enum wmi_report_sleep_status {
	WMI_REPORT_SLEEP_STATUS_IS_DEEP_SLEEP = 0,
	WMI_REPORT_SLEEP_STATUS_IS_AWAKE
};

enum target_event_report_config {
	/* default */
	DISCONN_EVT_IN_RECONN = 0,

	NO_DISCONN_EVT_IN_RECONN
};
1103
/* Command Replies */

/* WMI_GET_CHANNEL_LIST_CMDID reply */
struct wmi_channel_list_reply {
	u8 reserved;

	/* number of channels in reply */
	u8 num_ch;

	/* channel in MHz; variable-length tail, num_ch entries */
	__le16 ch_list[1];
} __packed;
1116
/* List of Events (target to host) */
enum wmi_event_id {
	WMI_READY_EVENTID = 0x1001,
	WMI_CONNECT_EVENTID,
	WMI_DISCONNECT_EVENTID,
	WMI_BSSINFO_EVENTID,
	WMI_CMDERROR_EVENTID,
	WMI_REGDOMAIN_EVENTID,
	WMI_PSTREAM_TIMEOUT_EVENTID,
	WMI_NEIGHBOR_REPORT_EVENTID,
	WMI_TKIP_MICERR_EVENTID,
	WMI_SCAN_COMPLETE_EVENTID,	/* 0x100a */
	WMI_REPORT_STATISTICS_EVENTID,
	WMI_RSSI_THRESHOLD_EVENTID,
	WMI_ERROR_REPORT_EVENTID,
	WMI_OPT_RX_FRAME_EVENTID,
	WMI_REPORT_ROAM_TBL_EVENTID,
	WMI_EXTENSION_EVENTID,
	WMI_CAC_EVENTID,
	WMI_SNR_THRESHOLD_EVENTID,
	WMI_LQ_THRESHOLD_EVENTID,
	WMI_TX_RETRY_ERR_EVENTID,	/* 0x1014 */
	WMI_REPORT_ROAM_DATA_EVENTID,
	WMI_TEST_EVENTID,
	WMI_APLIST_EVENTID,
	WMI_GET_WOW_LIST_EVENTID,
	WMI_GET_PMKID_LIST_EVENTID,
	WMI_CHANNEL_CHANGE_EVENTID,
	WMI_PEER_NODE_EVENTID,
	WMI_PSPOLL_EVENTID,
	WMI_DTIMEXPIRY_EVENTID,
	WMI_WLAN_VERSION_EVENTID,
	WMI_SET_PARAMS_REPLY_EVENTID,
	WMI_ADDBA_REQ_EVENTID,		/*0x1020 */
	WMI_ADDBA_RESP_EVENTID,
	WMI_DELBA_REQ_EVENTID,
	WMI_TX_COMPLETE_EVENTID,
	WMI_HCI_EVENT_EVENTID,
	WMI_ACL_DATA_EVENTID,
	WMI_REPORT_SLEEP_STATE_EVENTID,
	WMI_REPORT_BTCOEX_STATS_EVENTID,
	WMI_REPORT_BTCOEX_CONFIG_EVENTID,
	WMI_GET_PMK_EVENTID,

	/* DFS Events */
	WMI_DFS_HOST_ATTACH_EVENTID,
	WMI_DFS_HOST_INIT_EVENTID,
	WMI_DFS_RESET_DELAYLINES_EVENTID,
	WMI_DFS_RESET_RADARQ_EVENTID,
	WMI_DFS_RESET_AR_EVENTID,
	WMI_DFS_RESET_ARQ_EVENTID,
	WMI_DFS_SET_DUR_MULTIPLIER_EVENTID,
	WMI_DFS_SET_BANGRADAR_EVENTID,
	WMI_DFS_SET_DEBUGLEVEL_EVENTID,
	WMI_DFS_PHYERR_EVENTID,

	/* CCX Events */
	WMI_CCX_RM_STATUS_EVENTID,

	/* P2P Events */
	WMI_P2P_GO_NEG_RESULT_EVENTID,

	WMI_WAC_SCAN_DONE_EVENTID,
	WMI_WAC_REPORT_BSS_EVENTID,
	WMI_WAC_START_WPS_EVENTID,
	WMI_WAC_CTRL_REQ_REPLY_EVENTID,

	/* RFKILL Events */
	WMI_RFKILL_STATE_CHANGE_EVENTID,
	WMI_RFKILL_GET_MODE_CMD_EVENTID,
	WMI_THIN_RESERVED_START_EVENTID = 0x8000,

	/*
	 * Events in this range are reserved for thinmode
	 * See wmi_thin.h for actual definitions
	 */
	WMI_THIN_RESERVED_END_EVENTID = 0x8fff,

	WMI_SET_CHANNEL_EVENTID,
	WMI_ASSOC_REQ_EVENTID,

	/* Generic ACS event */
	WMI_ACS_EVENTID,
	WMI_REPORT_WMM_PARAMS_EVENTID
};
1202
/* WMI_READY_EVENTID */
struct wmi_ready_event_2 {
	__le32 sw_version;
	__le32 abi_version;
	u8 mac_addr[ETH_ALEN];
	u8 phy_cap;
} __packed;

/* Connect Event */
struct wmi_connect_event {
	__le16 ch;
	u8 bssid[ETH_ALEN];
	__le16 listen_intvl;
	__le16 beacon_intvl;
	__le32 nw_type;
	u8 beacon_ie_len;
	u8 assoc_req_len;
	u8 assoc_resp_len;

	/* variable-length tail (one-element array idiom) */
	u8 assoc_info[1];
} __packed;
1222
/* Disconnect Event */
enum wmi_disconnect_reason {
	NO_NETWORK_AVAIL = 0x01,

	/* bmiss */
	LOST_LINK = 0x02,

	DISCONNECT_CMD = 0x03,
	BSS_DISCONNECTED = 0x04,
	AUTH_FAILED = 0x05,
	ASSOC_FAILED = 0x06,
	NO_RESOURCES_AVAIL = 0x07,
	CSERV_DISCONNECT = 0x08,
	INVALID_PROFILE = 0x0a,
	DOT11H_CHANNEL_SWITCH = 0x0b,
	PROFILE_MISMATCH = 0x0c,
	CONNECTION_EVICTED = 0x0d,
	IBSS_MERGE = 0xe,
};

struct wmi_disconnect_event {
	/* reason code, see 802.11 spec. */
	__le16 proto_reason_status;

	/* set if known */
	u8 bssid[ETH_ALEN];

	/* see WMI_DISCONNECT_REASON */
	u8 disconn_reason;

	u8 assoc_resp_len;

	/* variable-length tail (one-element array idiom) */
	u8 assoc_info[1];
} __packed;
1256
/*
 * BSS Info Event.
 * Mechanism used to inform host of the presence and characteristic of
 * wireless networks present. Consists of bss info header followed by
 * the beacon or probe-response frame body. The 802.11 header is not
 * included.
 */
enum wmi_bi_ftype {
	BEACON_FTYPE = 0x1,
	PROBERESP_FTYPE,
	ACTION_MGMT_FTYPE,
	PROBEREQ_FTYPE,
};

struct wmi_bss_info_hdr {
	__le16 ch;

	/* see, enum wmi_bi_ftype */
	u8 frame_type;

	u8 snr;
	a_sle16 rssi;
	u8 bssid[ETH_ALEN];
	__le32 ie_mask;
} __packed;

/*
 * BSS INFO HDR version 2.0
 * With 6 bytes HTC header and 6 bytes of WMI header
 * WMI_BSS_INFO_HDR cannot be accommodated in the removed 802.11 management
 * header space.
 * - Reduce the ie_mask to 2 bytes as only two bit flags are used
 * - Remove rssi and compute it on the host. rssi = snr - 95
 */
struct wmi_bss_info_hdr2 {
	__le16 ch;

	/* see, enum wmi_bi_ftype */
	u8 frame_type;

	u8 snr;
	u8 bssid[ETH_ALEN];
	__le16 ie_mask;
} __packed;
1300
/* Command Error Event */
enum wmi_error_code {
	INVALID_PARAM = 0x01,
	ILLEGAL_STATE = 0x02,
	INTERNAL_ERROR = 0x03,
};

struct wmi_cmd_error_event {
	__le16 cmd_id;

	/* see, enum wmi_error_code */
	u8 err_code;
} __packed;

/* WMI_PSTREAM_TIMEOUT_EVENTID */
struct wmi_pstream_timeout_event {
	u8 tx_queue_num;
	u8 rx_queue_num;
	u8 traffic_direc;
	u8 traffic_class;
} __packed;
1319
/*
 * The WMI_NEIGHBOR_REPORT Event is generated by the target to inform
 * the host of BSS's it has found that matches the current profile.
 * It can be used by the host to cache PMKs and/to initiate pre-authentication
 * if the BSS supports it. The first bssid is always the current associated
 * BSS.
 * The bssid and bssFlags information repeats according to the number
 * of APs reported.
 */
enum wmi_bss_flags {
	WMI_DEFAULT_BSS_FLAGS = 0x00,
	WMI_PREAUTH_CAPABLE_BSS = 0x01,
	WMI_PMKID_VALID_BSS = 0x02,
};

/* TKIP MIC Error Event */
struct wmi_tkip_micerr_event {
	u8 key_id;
	u8 is_mcast;
} __packed;
1340
/* WMI_SCAN_COMPLETE_EVENTID */
struct wmi_scan_complete_event {
	a_sle32 status;
} __packed;

#define MAX_OPT_DATA_LEN 1400

/*
 * Special frame receive Event.
 * Mechanism used to inform host of the reception of the special frames.
 * Consists of special frame info header followed by special frame body.
 * The 802.11 header is not included.
 */
struct wmi_opt_rx_info_hdr {
	__le16 ch;
	u8 frame_type;
	s8 snr;
	u8 src_addr[ETH_ALEN];
	u8 bssid[ETH_ALEN];
} __packed;
1361
/* Reporting statistic */
struct tx_stats {
	__le32 pkt;
	__le32 byte;
	__le32 ucast_pkt;
	__le32 ucast_byte;
	__le32 mcast_pkt;
	__le32 mcast_byte;
	__le32 bcast_pkt;
	__le32 bcast_byte;
	__le32 rts_success_cnt;
	__le32 pkt_per_ac[4];
	__le32 err_per_ac[4];

	__le32 err;
	__le32 fail_cnt;
	__le32 retry_cnt;
	__le32 mult_retry_cnt;
	__le32 rts_fail_cnt;
	a_sle32 ucast_rate;
} __packed;

struct rx_stats {
	__le32 pkt;
	__le32 byte;
	__le32 ucast_pkt;
	__le32 ucast_byte;
	__le32 mcast_pkt;
	__le32 mcast_byte;
	__le32 bcast_pkt;
	__le32 bcast_byte;
	__le32 frgment_pkt;

	__le32 err;
	__le32 crc_err;
	__le32 key_cache_miss;
	__le32 decrypt_err;
	__le32 dupl_frame;
	a_sle32 ucast_rate;
} __packed;

struct tkip_ccmp_stats {
	__le32 tkip_local_mic_fail;
	__le32 tkip_cnter_measures_invoked;
	__le32 tkip_replays;
	__le32 tkip_fmt_err;
	__le32 ccmp_fmt_err;
	__le32 ccmp_replays;
} __packed;

struct pm_stats {
	__le32 pwr_save_failure_cnt;
	__le16 stop_tx_failure_cnt;
	__le16 atim_tx_failure_cnt;
	__le16 atim_rx_failure_cnt;
	__le16 bcn_rx_failure_cnt;
} __packed;

struct cserv_stats {
	__le32 cs_bmiss_cnt;
	__le32 cs_low_rssi_cnt;
	__le16 cs_connect_cnt;
	__le16 cs_discon_cnt;
	a_sle16 cs_ave_beacon_rssi;
	__le16 cs_roam_count;
	a_sle16 cs_rssi;
	u8 cs_snr;
	u8 cs_ave_beacon_snr;
	u8 cs_last_roam_msec;
} __packed;

struct wlan_net_stats {
	struct tx_stats tx;
	struct rx_stats rx;
	struct tkip_ccmp_stats tkip_ccmp_stats;
} __packed;

struct arp_stats {
	__le32 arp_received;
	__le32 arp_matched;
	__le32 arp_replied;
} __packed;

struct wlan_wow_stats {
	__le32 wow_pkt_dropped;
	__le16 wow_evt_discarded;
	u8 wow_host_pkt_wakeups;
	u8 wow_host_evt_wakeups;
} __packed;

/* WMI_REPORT_STATISTICS_EVENTID payload */
struct wmi_target_stats {
	__le32 lq_val;
	a_sle32 noise_floor_calib;
	struct pm_stats pm_stats;
	struct wlan_net_stats stats;
	struct wlan_wow_stats wow_stats;
	struct arp_stats arp_stats;
	struct cserv_stats cserv_stats;
} __packed;
1461
/*
 * WMI_RSSI_THRESHOLD_EVENTID.
 * Indicate the RSSI events to host. Events are indicated when we breach a
 * threshold value.
 */
enum wmi_rssi_threshold_val {
	WMI_RSSI_THRESHOLD1_ABOVE = 0,
	WMI_RSSI_THRESHOLD2_ABOVE,
	WMI_RSSI_THRESHOLD3_ABOVE,
	WMI_RSSI_THRESHOLD4_ABOVE,
	WMI_RSSI_THRESHOLD5_ABOVE,
	WMI_RSSI_THRESHOLD6_ABOVE,
	WMI_RSSI_THRESHOLD1_BELOW,
	WMI_RSSI_THRESHOLD2_BELOW,
	WMI_RSSI_THRESHOLD3_BELOW,
	WMI_RSSI_THRESHOLD4_BELOW,
	WMI_RSSI_THRESHOLD5_BELOW,
	WMI_RSSI_THRESHOLD6_BELOW
};

struct wmi_rssi_threshold_event {
	a_sle16 rssi;

	/* see, enum wmi_rssi_threshold_val */
	u8 range;
} __packed;

enum wmi_snr_threshold_val {
	WMI_SNR_THRESHOLD1_ABOVE = 1,
	WMI_SNR_THRESHOLD1_BELOW,
	WMI_SNR_THRESHOLD2_ABOVE,
	WMI_SNR_THRESHOLD2_BELOW,
	WMI_SNR_THRESHOLD3_ABOVE,
	WMI_SNR_THRESHOLD3_BELOW,
	WMI_SNR_THRESHOLD4_ABOVE,
	WMI_SNR_THRESHOLD4_BELOW
};

struct wmi_snr_threshold_event {
	/* see, enum wmi_snr_threshold_val */
	u8 range;

	u8 snr;
} __packed;
1504
/* WMI_REPORT_ROAM_TBL_EVENTID */
#define MAX_ROAM_TBL_CAND 5

struct wmi_bss_roam_info {
	a_sle32 roam_util;
	u8 bssid[ETH_ALEN];
	s8 rssi;
	s8 rssidt;
	s8 last_rssi;
	s8 util;
	s8 bias;

	/* for alignment */
	u8 reserved;
} __packed;

/* WMI_CAC_EVENTID */
enum cac_indication {
	CAC_INDICATION_ADMISSION = 0x00,
	CAC_INDICATION_ADMISSION_RESP = 0x01,
	CAC_INDICATION_DELETE = 0x02,
	CAC_INDICATION_NO_RESP = 0x03,
};

#define WMM_TSPEC_IE_LEN 63

struct wmi_cac_event {
	u8 ac;

	/* see, enum cac_indication */
	u8 cac_indication;

	u8 status_code;
	u8 tspec_suggestion[WMM_TSPEC_IE_LEN];
} __packed;
1537
/* WMI_APLIST_EVENTID */

enum aplist_ver {
	APLIST_VER1 = 1,
};

struct wmi_ap_info_v1 {
	u8 bssid[ETH_ALEN];
	__le16 channel;
} __packed;

union wmi_ap_info {
	struct wmi_ap_info_v1 ap_info_v1;
} __packed;

struct wmi_aplist_event {
	/* see, enum aplist_ver */
	u8 ap_list_ver;

	u8 num_ap;

	/* variable-length tail, num_ap entries (one-element array idiom) */
	union wmi_ap_info ap_list[1];
} __packed;
1558
/* Developer Commands */

/*
 * WMI_SET_BITRATE_CMDID
 *
 * Get bit rate cmd uses same definition as set bit rate cmd
 */
enum wmi_bit_rate {
	RATE_AUTO = -1,
	RATE_1Mb = 0,
	RATE_2Mb = 1,
	RATE_5_5Mb = 2,
	RATE_11Mb = 3,
	RATE_6Mb = 4,
	RATE_9Mb = 5,
	RATE_12Mb = 6,
	RATE_18Mb = 7,
	RATE_24Mb = 8,
	RATE_36Mb = 9,
	RATE_48Mb = 10,
	RATE_54Mb = 11,
	RATE_MCS_0_20 = 12,
	RATE_MCS_1_20 = 13,
	RATE_MCS_2_20 = 14,
	RATE_MCS_3_20 = 15,
	RATE_MCS_4_20 = 16,
	RATE_MCS_5_20 = 17,
	RATE_MCS_6_20 = 18,
	RATE_MCS_7_20 = 19,
	RATE_MCS_0_40 = 20,
	RATE_MCS_1_40 = 21,
	RATE_MCS_2_40 = 22,
	RATE_MCS_3_40 = 23,
	RATE_MCS_4_40 = 24,
	RATE_MCS_5_40 = 25,
	RATE_MCS_6_40 = 26,
	RATE_MCS_7_40 = 27,
};

struct wmi_bit_rate_reply {
	/* see, enum wmi_bit_rate */
	s8 rate_index;
} __packed;

/*
 * WMI_SET_FIXRATES_CMDID
 *
 * Get fix rates cmd uses same definition as set fix rates cmd
 */
struct wmi_fix_rates_reply {
	/* see wmi_bit_rate */
	__le32 fix_rate_mask;
} __packed;
1612
enum roam_data_type {
	/* get the roam time data */
	ROAM_DATA_TIME = 1,
};

/* WMI_REPORT_ROAM_DATA_EVENTID (ROAM_DATA_TIME) payload */
struct wmi_target_roam_time {
	__le32 disassoc_time;
	__le32 no_txrx_time;
	__le32 assoc_time;
	__le32 allow_txrx_time;
	u8 disassoc_bssid[ETH_ALEN];
	s8 disassoc_bss_rssi;
	u8 assoc_bssid[ETH_ALEN];
	s8 assoc_bss_rssi;
} __packed;

enum wmi_txop_cfg {
	WMI_TXOP_DISABLED = 0,
	WMI_TXOP_ENABLED
};

struct wmi_set_wmm_txop_cmd {
	/* see, enum wmi_txop_cfg */
	u8 txop_enable;
} __packed;

struct wmi_set_keepalive_cmd {
	u8 keep_alive_intvl;
} __packed;

struct wmi_get_keepalive_cmd {
	__le32 configured;
	u8 keep_alive_intvl;
} __packed;
1646
/* Notify the WSC registration status to the target */
#define WSC_REG_ACTIVE 1
#define WSC_REG_INACTIVE 0

/* Wake-on-Wireless packet filtering */
#define WOW_MAX_FILTER_LISTS 1
#define WOW_MAX_FILTERS_PER_LIST 4
#define WOW_PATTERN_SIZE 64
#define WOW_MASK_SIZE 64

#define MAC_MAX_FILTERS_PER_LIST 4

struct wow_filter {
	u8 wow_valid_filter;
	u8 wow_filter_id;
	u8 wow_filter_size;
	u8 wow_filter_offset;
	u8 wow_filter_mask[WOW_MASK_SIZE];
	u8 wow_filter_pattern[WOW_PATTERN_SIZE];
} __packed;

#define MAX_IP_ADDRS 2

struct wmi_set_ip_cmd {
	/* IP in network byte order */
	__le32 ips[MAX_IP_ADDRS];
} __packed;

/* WMI_GET_WOW_LIST_CMD reply */
struct wmi_get_wow_list_reply {
	/* number of patterns in reply */
	u8 num_filters;

	/* this is filter # x of total num_filters */
	u8 this_filter_num;

	u8 wow_mode;
	u8 host_mode;

	/* variable-length tail (one-element array idiom) */
	struct wow_filter wow_filters[1];
} __packed;

/* WMI_SET_AKMP_PARAMS_CMD */

struct wmi_pmkid {
	u8 pmkid[WMI_PMKID_LEN];
} __packed;

/* WMI_GET_PMKID_LIST_CMD Reply */
struct wmi_pmkid_list_reply {
	__le32 num_pmkid;
	u8 bssid_list[ETH_ALEN][1];
	struct wmi_pmkid pmkid_list[1];
} __packed;
1698} __packed;
1699
1700/* WMI_ADDBA_REQ_EVENTID */
1701struct wmi_addba_req_event {
1702 u8 tid;
1703 u8 win_sz;
1704 __le16 st_seq_no;
1705
1706 /* f/w response for ADDBA Req; OK (0) or failure (!=0) */
1707 u8 status;
1708} __packed;
1709
1710/* WMI_ADDBA_RESP_EVENTID */
1711struct wmi_addba_resp_event {
1712 u8 tid;
1713
1714 /* OK (0), failure (!=0) */
1715 u8 status;
1716
1717 /* three values: not supported(0), 3839, 8k */
1718 __le16 amsdu_sz;
1719} __packed;
1720
1721/* WMI_DELBA_EVENTID
1722 * f/w received a DELBA for peer and processed it.
1723 * Host is notified of this
1724 */
1725struct wmi_delba_event {
1726 u8 tid;
1727 u8 is_peer_initiator;
1728 __le16 reason_code;
1729} __packed;
1730
1731#define PEER_NODE_JOIN_EVENT 0x00
1732#define PEER_NODE_LEAVE_EVENT 0x01
1733#define PEER_FIRST_NODE_JOIN_EVENT 0x10
1734#define PEER_LAST_NODE_LEAVE_EVENT 0x11
1735
1736struct wmi_peer_node_event {
1737 u8 event_code;
1738 u8 peer_mac_addr[ETH_ALEN];
1739} __packed;
1740
/* Transmit complete event data structure(s) */

/* version 1 of tx complete msg */
struct tx_complete_msg_v1 {
#define TX_COMPLETE_STATUS_SUCCESS 0
#define TX_COMPLETE_STATUS_RETRIES 1
#define TX_COMPLETE_STATUS_NOLINK 2
#define TX_COMPLETE_STATUS_TIMEOUT 3
#define TX_COMPLETE_STATUS_OTHER 4

	u8 status;

	/* packet ID to identify parent packet */
	u8 pkt_id;

	/* rate index on successful transmission */
	u8 rate_idx;

	/* number of ACK failures in tx attempt */
	u8 ack_failures;
} __packed;

struct wmi_tx_complete_event {
	/* no of tx comp msgs following this struct */
	u8 num_msg;

	/* length in bytes for each individual msg following this struct */
	u8 msg_len;

	/* version of tx complete msg data following this struct */
	u8 msg_type;

	/* individual messages follow this header */
	u8 reserved;
} __packed;
1776
/*
 * ------- AP Mode definitions --------------
 */

/*
 * !!! Warning !!!
 * -Changing the following values needs compilation of both driver and firmware
 */
#define AP_MAX_NUM_STA 8

/* Spl. AID used to set DTIM flag in the beacons */
#define MCAST_AID 0xFF

#define DEF_AP_COUNTRY_CODE "US "

/* Used with WMI_AP_SET_NUM_STA_CMDID */

struct wmi_ap_set_pvb_cmd {
	__le32 flag;
	__le16 aid;
} __packed;

struct wmi_rx_frame_format_cmd {
	/* version of meta data for rx packets <0 = default> (0-7 = valid) */
	u8 meta_ver;

	/*
	 * 1 == leave .11 header intact,
	 * 0 == replace .11 header with .3 <default>
	 */
	u8 dot11_hdr;

	/*
	 * 1 == defragmentation is performed by host,
	 * 0 == performed by target <default>
	 */
	u8 defrag_on_host;

	/* for alignment */
	u8 reserved[1];
} __packed;

/* AP mode events */

/* WMI_PS_POLL_EVENT */
struct wmi_pspoll_event {
	__le16 aid;
} __packed;

/* per-station traffic counters for AP mode statistics */
struct wmi_per_sta_stat {
	__le32 tx_bytes;
	__le32 tx_pkts;
	__le32 tx_error;
	__le32 tx_discard;
	__le32 rx_bytes;
	__le32 rx_pkts;
	__le32 rx_error;
	__le32 rx_discard;
	__le32 aid;
} __packed;

struct wmi_ap_mode_stat {
	__le32 action;

	/* index AP_MAX_NUM_STA is the AP's own entry */
	struct wmi_per_sta_stat sta[AP_MAX_NUM_STA + 1];
} __packed;
1842
/* End of AP mode definitions */

/* Extended WMI (WMIX)
 *
 * Extended WMIX commands are encapsulated in a WMI message with
 * cmd=WMI_EXTENSION_CMD.
 *
 * Extended WMI commands are those that are needed during wireless
 * operation, but which are not really wireless commands. This allows,
 * for instance, platform-specific commands. Extended WMI commands are
 * embedded in a WMI command message with WMI_COMMAND_ID=WMI_EXTENSION_CMDID.
 * Extended WMI events are similarly embedded in a WMI event message with
 * WMI_EVENT_ID=WMI_EXTENSION_EVENTID.
 */
struct wmix_cmd_hdr {
	__le32 cmd_id;
} __packed;

enum wmix_command_id {
	WMIX_DSETOPEN_REPLY_CMDID = 0x2001,
	WMIX_DSETDATA_REPLY_CMDID,
	WMIX_GPIO_OUTPUT_SET_CMDID,
	WMIX_GPIO_INPUT_GET_CMDID,
	WMIX_GPIO_REGISTER_SET_CMDID,
	WMIX_GPIO_REGISTER_GET_CMDID,
	WMIX_GPIO_INTR_ACK_CMDID,
	WMIX_HB_CHALLENGE_RESP_CMDID,
	WMIX_DBGLOG_CFG_MODULE_CMDID,
	WMIX_PROF_CFG_CMDID,	/* 0x200a */
	WMIX_PROF_ADDR_SET_CMDID,
	WMIX_PROF_START_CMDID,
	WMIX_PROF_STOP_CMDID,
	WMIX_PROF_COUNT_GET_CMDID,
};

enum wmix_event_id {
	WMIX_DSETOPENREQ_EVENTID = 0x3001,
	WMIX_DSETCLOSE_EVENTID,
	WMIX_DSETDATAREQ_EVENTID,
	WMIX_GPIO_INTR_EVENTID,
	WMIX_GPIO_DATA_EVENTID,
	WMIX_GPIO_ACK_EVENTID,
	WMIX_HB_CHALLENGE_RESP_EVENTID,
	WMIX_DBGLOG_EVENTID,
	WMIX_PROF_COUNT_EVENTID,
};

/*
 * ------Error Detection support-------
 */

/*
 * WMIX_HB_CHALLENGE_RESP_CMDID
 * Heartbeat Challenge Response command
 */
struct wmix_hb_challenge_resp_cmd {
	__le32 cookie;
	__le32 source;
} __packed;

/* End of Extended WMI (WMIX) */

/* Ordering of a WMI command relative to queued data frames */
enum wmi_sync_flag {
	NO_SYNC_WMIFLAG = 0,

	/* transmit all queued data before cmd */
	SYNC_BEFORE_WMIFLAG,

	/* any new data waits until cmd execs */
	SYNC_AFTER_WMIFLAG,

	SYNC_BOTH_WMIFLAG,

	/* end marker */
	END_WMIFLAG
};
1919
1920enum htc_endpoint_id ath6kl_wmi_get_control_ep(struct wmi *wmi);
1921void ath6kl_wmi_set_control_ep(struct wmi *wmi, enum htc_endpoint_id ep_id);
1922int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb);
1923int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
1924 u8 msg_type, bool more_data,
1925 enum wmi_data_hdr_data_type data_type,
1926 u8 meta_ver, void *tx_meta_info);
1927
1928int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb);
1929int ath6kl_wmi_dot3_2_dix(struct sk_buff *skb);
1930int ath6kl_wmi_data_hdr_remove(struct wmi *wmi, struct sk_buff *skb);
1931int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb,
1932 u32 layer2_priority, bool wmm_enabled,
1933 u8 *ac);
1934
1935int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb);
1936void ath6kl_wmi_iterate_nodes(struct wmi *wmi,
1937 void (*f) (void *arg, struct bss *),
1938 void *arg);
1939struct bss *ath6kl_wmi_find_node(struct wmi *wmi, const u8 *mac_addr);
1940void ath6kl_wmi_node_free(struct wmi *wmi, const u8 *mac_addr);
1941
1942int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb,
1943 enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag);
1944
/* Connection management. */
int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type,
			   enum dot11_auth_mode dot11_auth_mode,
			   enum auth_mode auth_mode,
			   enum crypto_type pairwise_crypto,
			   u8 pairwise_crypto_len,
			   enum crypto_type group_crypto,
			   u8 group_crypto_len, int ssid_len, u8 *ssid,
			   u8 *bssid, u16 channel, u32 ctrl_flags);

int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel);
int ath6kl_wmi_disconnect_cmd(struct wmi *wmi);

/* Scan control: start a scan and configure scan timing and filtering. */
int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type,
			     u32 force_fgscan, u32 is_legacy,
			     u32 home_dwell_time, u32 force_scan_interval,
			     s8 num_chan, u16 *ch_list);
int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec,
			      u16 fg_end_sec, u16 bg_sec,
			      u16 minact_chdw_msec, u16 maxact_chdw_msec,
			      u16 pas_chdw_msec, u8 short_scan_ratio,
			      u8 scan_ctrl_flag, u32 max_dfsch_act_time,
			      u16 maxact_scan_per_ssid);
int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask);
int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag,
			      u8 ssid_len, u8 *ssid);

/* Power-save configuration. */
int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval,
				  u16 listen_beacons);
int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode);
int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period,
			    u16 ps_poll_num, u16 dtim_policy,
			    u16 tx_wakup_policy, u16 num_tx_to_wakeup,
			    u16 ps_fail_event_policy);
int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout);

/* Priority stream (traffic class) creation and deletion. */
int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi,
				  struct wmi_create_pstream_cmd *pstream);
int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid);

int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold);
int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status,
				 u8 preamble_policy);

int ath6kl_wmi_get_challenge_resp_cmd(struct wmi *wmi, u32 cookie, u32 source);

/* Request statistics from the target. */
int ath6kl_wmi_get_stats_cmd(struct wmi *wmi);

/* Cipher key and PMKID management. */
int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index,
			  enum crypto_type key_type,
			  u8 key_usage, u8 key_len,
			  u8 *key_rsc, u8 *key_material,
			  u8 key_op_ctrl, u8 *mac_addr,
			  enum wmi_sync_flag sync_flag);
int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk);
int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index);
int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid,
			    const u8 *pmkid, bool set);

/* Transmit power control; dbM presumably carries units of dBm — confirm. */
int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM);
int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi);
void ath6kl_wmi_get_current_bssid(struct wmi *wmi, u8 *bssid);
2004
/* Return the rate corresponding to a firmware rate index. */
s32 ath6kl_wmi_get_rate(s8 rate_index);

int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd);

/*
 * Look up a BSS node by SSID; is_wpa2 and match_ssid refine matching —
 * exact semantics are defined by the implementation in wmi.c.
 */
struct bss *ath6kl_wmi_find_ssid_node(struct wmi *wmi, u8 *ssid,
				      u32 ssid_len, bool is_wpa2,
				      bool match_ssid);

/* Return a BSS node previously obtained from the node table. */
void ath6kl_wmi_node_return(struct wmi *wmi, struct bss *bss);

/* AP mode */
int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag);

int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_version,
				       bool rx_dot11_hdr, bool defrag_on_host);

/*
 * WMI layer lifetime: ath6kl_wmi_init() allocates the wmi context for the
 * given device handle (returned as an opaque pointer); ath6kl_wmi_shutdown()
 * tears it down.
 */
void *ath6kl_wmi_init(void *devt);
void ath6kl_wmi_shutdown(struct wmi *wmi);
2023
2024#endif /* WMI_H */