author     Christian Lamparter <chunkeey@googlemail.com>    2010-09-05 19:09:20 -0400
committer  John W. Linville <linville@tuxdriver.com>        2010-09-14 16:03:42 -0400
commit     a84fab3cbfdc427e7d366f1cc844f27b2084c26c (patch)
tree       2cb808ec415a02bc91cf1a8228142bc2ff98f1cd
parent     319da621d5c4e9bd8c34feeb200e864e87d91fe7 (diff)
carl9170: 802.11 rx/tx processing and usb backend
Signed-off-by: Christian Lamparter <chunkeey@googlemail.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
-rw-r--r--  drivers/net/wireless/ath/carl9170/rx.c  |  909
-rw-r--r--  drivers/net/wireless/ath/carl9170/tx.c  | 1373
-rw-r--r--  drivers/net/wireless/ath/carl9170/usb.c | 1137
3 files changed, 3419 insertions, 0 deletions
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
new file mode 100644
index 000000000000..671dbc429547
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -0,0 +1,909 @@
1 | /* | ||
2 | * Atheros CARL9170 driver | ||
3 | * | ||
4 | * 802.11 & command trap routines | ||
5 | * | ||
6 | * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> | ||
7 | * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; see the file COPYING. If not, see | ||
21 | * http://www.gnu.org/licenses/. | ||
22 | * | ||
23 | * This file incorporates work covered by the following copyright and | ||
24 | * permission notice: | ||
25 | * Copyright (c) 2007-2008 Atheros Communications, Inc. | ||
26 | * | ||
27 | * Permission to use, copy, modify, and/or distribute this software for any | ||
28 | * purpose with or without fee is hereby granted, provided that the above | ||
29 | * copyright notice and this permission notice appear in all copies. | ||
30 | * | ||
31 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
32 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
33 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
34 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
35 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
36 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
37 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
38 | */ | ||
39 | |||
40 | #include <linux/init.h> | ||
41 | #include <linux/slab.h> | ||
42 | #include <linux/module.h> | ||
43 | #include <linux/etherdevice.h> | ||
44 | #include <linux/crc32.h> | ||
45 | #include <net/mac80211.h> | ||
46 | #include "carl9170.h" | ||
47 | #include "hw.h" | ||
48 | #include "cmd.h" | ||
49 | |||
50 | static void carl9170_dbg_message(struct ar9170 *ar, const char *buf, u32 len) | ||
51 | { | ||
52 | bool restart = false; | ||
53 | enum carl9170_restart_reasons reason = CARL9170_RR_NO_REASON; | ||
54 | |||
55 | if (len > 3) { | ||
56 | if (memcmp(buf, CARL9170_ERR_MAGIC, 3) == 0) { | ||
57 | ar->fw.err_counter++; | ||
58 | if (ar->fw.err_counter > 3) { | ||
59 | restart = true; | ||
60 | reason = CARL9170_RR_TOO_MANY_FIRMWARE_ERRORS; | ||
61 | } | ||
62 | } | ||
63 | |||
64 | if (memcmp(buf, CARL9170_BUG_MAGIC, 3) == 0) { | ||
65 | ar->fw.bug_counter++; | ||
66 | restart = true; | ||
67 | reason = CARL9170_RR_FATAL_FIRMWARE_ERROR; | ||
68 | } | ||
69 | } | ||
70 | |||
71 | wiphy_info(ar->hw->wiphy, "FW: %.*s\n", len, buf); | ||
72 | |||
73 | if (restart) | ||
74 | carl9170_restart(ar, reason); | ||
75 | } | ||
76 | |||
77 | static void carl9170_handle_ps(struct ar9170 *ar, struct carl9170_rsp *rsp) | ||
78 | { | ||
79 | u32 ps; | ||
80 | bool new_ps; | ||
81 | |||
82 | ps = le32_to_cpu(rsp->psm.state); | ||
83 | |||
84 | new_ps = (ps & CARL9170_PSM_COUNTER) != CARL9170_PSM_WAKE; | ||
85 | if (ar->ps.state != new_ps) { | ||
86 | if (!new_ps) { | ||
87 | ar->ps.sleep_ms = jiffies_to_msecs(jiffies - | ||
88 | ar->ps.last_action); | ||
89 | } | ||
90 | |||
91 | ar->ps.last_action = jiffies; | ||
92 | |||
93 | ar->ps.state = new_ps; | ||
94 | } | ||
95 | } | ||
96 | |||
97 | static int carl9170_check_sequence(struct ar9170 *ar, unsigned int seq) | ||
98 | { | ||
99 | if (ar->cmd_seq < -1) | ||
100 | return 0; | ||
101 | |||
102 | /* | ||
103 | * Initialize Counter | ||
104 | */ | ||
105 | if (ar->cmd_seq < 0) | ||
106 | ar->cmd_seq = seq; | ||
107 | |||
108 | /* | ||
109 | * The sequence is strictly monotonically increasing and it never skips! | ||
110 | * | ||
111 | * Therefore we can safely assume that whenever we receive an | ||
112 | * unexpected sequence, we have lost some valuable data. | ||
113 | */ | ||
114 | if (seq != ar->cmd_seq) { | ||
115 | int count; | ||
116 | |||
117 | count = (seq - ar->cmd_seq) % ar->fw.cmd_bufs; | ||
118 | |||
119 | wiphy_err(ar->hw->wiphy, "lost %d command responses/traps! " | ||
120 | "w:%d g:%d\n", count, ar->cmd_seq, seq); | ||
121 | |||
122 | carl9170_restart(ar, CARL9170_RR_LOST_RSP); | ||
123 | return -EIO; | ||
124 | } | ||
125 | |||
126 | ar->cmd_seq = (ar->cmd_seq + 1) % ar->fw.cmd_bufs; | ||
127 | return 0; | ||
128 | } | ||
129 | |||
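For illustration, the wrap-around arithmetic of the sequence check above can be exercised in a small standalone program (a sketch only; the helper name and the buffer count of 8 are made up for the example):

#include <stdio.h>

/* Toy model of carl9170_check_sequence(): "expected" starts at -1
 * ("not synced yet") and then follows the firmware's response counter,
 * which wraps after cmd_bufs responses. A mismatch means responses
 * were lost on the way. */
static int check_sequence(int *expected, unsigned int seq, unsigned int cmd_bufs)
{
	if (*expected < 0) {
		*expected = seq;
	} else if ((unsigned int) *expected != seq) {
		printf("lost %u responses (wanted %d, got %u)\n",
		       (seq - (unsigned int) *expected) % cmd_bufs, *expected, seq);
		return -1;
	}

	*expected = (*expected + 1) % cmd_bufs;
	return 0;
}

int main(void)
{
	unsigned int rx[] = { 5, 6, 7, 0, 2 };	/* "2" arrives after "1" was lost */
	int expected = -1;

	for (unsigned int i = 0; i < sizeof(rx) / sizeof(rx[0]); i++)
		check_sequence(&expected, rx[i], 8);

	return 0;
}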
130 | static void carl9170_cmd_callback(struct ar9170 *ar, u32 len, void *buffer) | ||
131 | { | ||
132 | /* | ||
133 | * Some commands may have a variable response length | ||
134 | * and we cannot predict the correct length in advance. | ||
135 | * So we only check if we provided enough space for the data. | ||
136 | */ | ||
137 | if (unlikely(ar->readlen != (len - 4))) { | ||
138 | dev_warn(&ar->udev->dev, "received invalid command response: " | ||
139 | "got %d, instead of %d\n", len - 4, ar->readlen); | ||
140 | print_hex_dump_bytes("carl9170 cmd:", DUMP_PREFIX_OFFSET, | ||
141 | ar->cmd_buf, (ar->cmd.hdr.len + 4) & 0x3f); | ||
142 | print_hex_dump_bytes("carl9170 rsp:", DUMP_PREFIX_OFFSET, | ||
143 | buffer, len); | ||
144 | /* | ||
145 | * Do not complete. The command times out, | ||
146 | * and we get a stack trace from there. | ||
147 | */ | ||
148 | carl9170_restart(ar, CARL9170_RR_INVALID_RSP); | ||
149 | } | ||
150 | |||
151 | spin_lock(&ar->cmd_lock); | ||
152 | if (ar->readbuf) { | ||
153 | if (len >= 4) | ||
154 | memcpy(ar->readbuf, buffer + 4, len - 4); | ||
155 | |||
156 | ar->readbuf = NULL; | ||
157 | } | ||
158 | complete(&ar->cmd_wait); | ||
159 | spin_unlock(&ar->cmd_lock); | ||
160 | } | ||
161 | |||
162 | void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len) | ||
163 | { | ||
164 | struct carl9170_rsp *cmd = (void *) buf; | ||
165 | struct ieee80211_vif *vif; | ||
166 | |||
167 | if (carl9170_check_sequence(ar, cmd->hdr.seq)) | ||
168 | return; | ||
169 | |||
170 | if ((cmd->hdr.cmd & CARL9170_RSP_FLAG) != CARL9170_RSP_FLAG) { | ||
171 | if (!(cmd->hdr.cmd & CARL9170_CMD_ASYNC_FLAG)) | ||
172 | carl9170_cmd_callback(ar, len, buf); | ||
173 | |||
174 | return; | ||
175 | } | ||
176 | |||
177 | if (unlikely(cmd->hdr.len != (len - 4))) { | ||
178 | if (net_ratelimit()) { | ||
179 | wiphy_err(ar->hw->wiphy, "FW: received over-/under" | ||
180 | "sized event %x (%d, but should be %d).\n", | ||
181 | cmd->hdr.cmd, cmd->hdr.len, len - 4); | ||
182 | |||
183 | print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE, | ||
184 | buf, len); | ||
185 | } | ||
186 | |||
187 | return; | ||
188 | } | ||
189 | |||
190 | /* hardware event handlers */ | ||
191 | switch (cmd->hdr.cmd) { | ||
192 | case CARL9170_RSP_PRETBTT: | ||
193 | /* pre-TBTT event */ | ||
194 | rcu_read_lock(); | ||
195 | vif = carl9170_get_main_vif(ar); | ||
196 | |||
197 | if (!vif) { | ||
198 | rcu_read_unlock(); | ||
199 | break; | ||
200 | } | ||
201 | |||
202 | switch (vif->type) { | ||
203 | case NL80211_IFTYPE_STATION: | ||
204 | carl9170_handle_ps(ar, cmd); | ||
205 | break; | ||
206 | |||
207 | case NL80211_IFTYPE_AP: | ||
208 | case NL80211_IFTYPE_ADHOC: | ||
209 | carl9170_update_beacon(ar, true); | ||
210 | break; | ||
211 | |||
212 | default: | ||
213 | break; | ||
214 | } | ||
215 | rcu_read_unlock(); | ||
216 | |||
217 | break; | ||
218 | |||
219 | |||
220 | case CARL9170_RSP_TXCOMP: | ||
221 | /* TX status notification */ | ||
222 | carl9170_tx_process_status(ar, cmd); | ||
223 | break; | ||
224 | |||
225 | case CARL9170_RSP_BEACON_CONFIG: | ||
226 | /* | ||
227 | * (IBSS) beacon send notification | ||
228 | * bytes: 04 c2 XX YY B4 B3 B2 B1 | ||
229 | * | ||
230 | * XX always 80 | ||
231 | * YY always 00 | ||
232 | * B1-B4 "should" be the number of sent out beacons. | ||
233 | */ | ||
234 | break; | ||
235 | |||
236 | case CARL9170_RSP_ATIM: | ||
237 | /* End of the ATIM window */ | ||
238 | break; | ||
239 | |||
240 | case CARL9170_RSP_WATCHDOG: | ||
241 | /* Watchdog Interrupt */ | ||
242 | carl9170_restart(ar, CARL9170_RR_WATCHDOG); | ||
243 | break; | ||
244 | |||
245 | case CARL9170_RSP_TEXT: | ||
246 | /* firmware debug */ | ||
247 | carl9170_dbg_message(ar, (char *)buf + 4, len - 4); | ||
248 | break; | ||
249 | |||
250 | case CARL9170_RSP_HEXDUMP: | ||
251 | wiphy_dbg(ar->hw->wiphy, "FW: HD %d\n", len - 4); | ||
252 | print_hex_dump_bytes("FW:", DUMP_PREFIX_NONE, | ||
253 | (char *)buf + 4, len - 4); | ||
254 | break; | ||
255 | |||
256 | case CARL9170_RSP_RADAR: | ||
257 | if (!net_ratelimit()) | ||
258 | break; | ||
259 | |||
260 | wiphy_info(ar->hw->wiphy, "FW: RADAR! Please report this " | ||
261 | "incident to linux-wireless@vger.kernel.org !\n"); | ||
262 | break; | ||
263 | |||
264 | case CARL9170_RSP_GPIO: | ||
265 | #ifdef CONFIG_CARL9170_WPC | ||
266 | if (ar->wps.pbc) { | ||
267 | bool state = !!(cmd->gpio.gpio & cpu_to_le32( | ||
268 | AR9170_GPIO_PORT_WPS_BUTTON_PRESSED)); | ||
269 | |||
270 | if (state != ar->wps.pbc_state) { | ||
271 | ar->wps.pbc_state = state; | ||
272 | input_report_key(ar->wps.pbc, KEY_WPS_BUTTON, | ||
273 | state); | ||
274 | input_sync(ar->wps.pbc); | ||
275 | } | ||
276 | } | ||
277 | #endif /* CONFIG_CARL9170_WPC */ | ||
278 | break; | ||
279 | |||
280 | case CARL9170_RSP_BOOT: | ||
281 | complete(&ar->fw_boot_wait); | ||
282 | break; | ||
283 | |||
284 | default: | ||
285 | wiphy_err(ar->hw->wiphy, "FW: received unhandled event %x\n", | ||
286 | cmd->hdr.cmd); | ||
287 | print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE, buf, len); | ||
288 | break; | ||
289 | } | ||
290 | } | ||
291 | |||
292 | static int carl9170_rx_mac_status(struct ar9170 *ar, | ||
293 | struct ar9170_rx_head *head, struct ar9170_rx_macstatus *mac, | ||
294 | struct ieee80211_rx_status *status) | ||
295 | { | ||
296 | struct ieee80211_channel *chan; | ||
297 | u8 error, decrypt; | ||
298 | |||
299 | BUILD_BUG_ON(sizeof(struct ar9170_rx_head) != 12); | ||
300 | BUILD_BUG_ON(sizeof(struct ar9170_rx_macstatus) != 4); | ||
301 | |||
302 | error = mac->error; | ||
303 | |||
304 | if (error & AR9170_RX_ERROR_WRONG_RA) { | ||
305 | if (!ar->sniffer_enabled) | ||
306 | return -EINVAL; | ||
307 | } | ||
308 | |||
309 | if (error & AR9170_RX_ERROR_PLCP) { | ||
310 | if (!(ar->filter_state & FIF_PLCPFAIL)) | ||
311 | return -EINVAL; | ||
312 | |||
313 | status->flag |= RX_FLAG_FAILED_PLCP_CRC; | ||
314 | } | ||
315 | |||
316 | if (error & AR9170_RX_ERROR_FCS) { | ||
317 | ar->tx_fcs_errors++; | ||
318 | |||
319 | if (!(ar->filter_state & FIF_FCSFAIL)) | ||
320 | return -EINVAL; | ||
321 | |||
322 | status->flag |= RX_FLAG_FAILED_FCS_CRC; | ||
323 | } | ||
324 | |||
325 | decrypt = ar9170_get_decrypt_type(mac); | ||
326 | if (!(decrypt & AR9170_RX_ENC_SOFTWARE) && | ||
327 | decrypt != AR9170_ENC_ALG_NONE) { | ||
328 | if ((decrypt == AR9170_ENC_ALG_TKIP) && | ||
329 | (error & AR9170_RX_ERROR_MMIC)) | ||
330 | status->flag |= RX_FLAG_MMIC_ERROR; | ||
331 | |||
332 | status->flag |= RX_FLAG_DECRYPTED; | ||
333 | } | ||
334 | |||
335 | if (error & AR9170_RX_ERROR_DECRYPT && !ar->sniffer_enabled) | ||
336 | return -ENODATA; | ||
337 | |||
338 | error &= ~(AR9170_RX_ERROR_MMIC | | ||
339 | AR9170_RX_ERROR_FCS | | ||
340 | AR9170_RX_ERROR_WRONG_RA | | ||
341 | AR9170_RX_ERROR_DECRYPT | | ||
342 | AR9170_RX_ERROR_PLCP); | ||
343 | |||
344 | /* drop any other error frames */ | ||
345 | if (unlikely(error)) { | ||
346 | /* TODO: update netdevice's RX dropped/errors statistics */ | ||
347 | |||
348 | if (net_ratelimit()) | ||
349 | wiphy_dbg(ar->hw->wiphy, "received frame with " | ||
350 | "suspicious error code (%#x).\n", error); | ||
351 | |||
352 | return -EINVAL; | ||
353 | } | ||
354 | |||
355 | chan = ar->channel; | ||
356 | if (chan) { | ||
357 | status->band = chan->band; | ||
358 | status->freq = chan->center_freq; | ||
359 | } | ||
360 | |||
361 | switch (mac->status & AR9170_RX_STATUS_MODULATION) { | ||
362 | case AR9170_RX_STATUS_MODULATION_CCK: | ||
363 | if (mac->status & AR9170_RX_STATUS_SHORT_PREAMBLE) | ||
364 | status->flag |= RX_FLAG_SHORTPRE; | ||
365 | switch (head->plcp[0]) { | ||
366 | case AR9170_RX_PHY_RATE_CCK_1M: | ||
367 | status->rate_idx = 0; | ||
368 | break; | ||
369 | case AR9170_RX_PHY_RATE_CCK_2M: | ||
370 | status->rate_idx = 1; | ||
371 | break; | ||
372 | case AR9170_RX_PHY_RATE_CCK_5M: | ||
373 | status->rate_idx = 2; | ||
374 | break; | ||
375 | case AR9170_RX_PHY_RATE_CCK_11M: | ||
376 | status->rate_idx = 3; | ||
377 | break; | ||
378 | default: | ||
379 | if (net_ratelimit()) { | ||
380 | wiphy_err(ar->hw->wiphy, "invalid plcp cck " | ||
381 | "rate (%x).\n", head->plcp[0]); | ||
382 | } | ||
383 | |||
384 | return -EINVAL; | ||
385 | } | ||
386 | break; | ||
387 | |||
388 | case AR9170_RX_STATUS_MODULATION_DUPOFDM: | ||
389 | case AR9170_RX_STATUS_MODULATION_OFDM: | ||
390 | switch (head->plcp[0] & 0xf) { | ||
391 | case AR9170_TXRX_PHY_RATE_OFDM_6M: | ||
392 | status->rate_idx = 0; | ||
393 | break; | ||
394 | case AR9170_TXRX_PHY_RATE_OFDM_9M: | ||
395 | status->rate_idx = 1; | ||
396 | break; | ||
397 | case AR9170_TXRX_PHY_RATE_OFDM_12M: | ||
398 | status->rate_idx = 2; | ||
399 | break; | ||
400 | case AR9170_TXRX_PHY_RATE_OFDM_18M: | ||
401 | status->rate_idx = 3; | ||
402 | break; | ||
403 | case AR9170_TXRX_PHY_RATE_OFDM_24M: | ||
404 | status->rate_idx = 4; | ||
405 | break; | ||
406 | case AR9170_TXRX_PHY_RATE_OFDM_36M: | ||
407 | status->rate_idx = 5; | ||
408 | break; | ||
409 | case AR9170_TXRX_PHY_RATE_OFDM_48M: | ||
410 | status->rate_idx = 6; | ||
411 | break; | ||
412 | case AR9170_TXRX_PHY_RATE_OFDM_54M: | ||
413 | status->rate_idx = 7; | ||
414 | break; | ||
415 | default: | ||
416 | if (net_ratelimit()) { | ||
417 | wiphy_err(ar->hw->wiphy, "invalid plcp ofdm " | ||
418 | "rate (%x).\n", head->plcp[0]); | ||
419 | } | ||
420 | |||
421 | return -EINVAL; | ||
422 | } | ||
423 | if (status->band == IEEE80211_BAND_2GHZ) | ||
424 | status->rate_idx += 4; | ||
425 | break; | ||
426 | |||
427 | case AR9170_RX_STATUS_MODULATION_HT: | ||
428 | if (head->plcp[3] & 0x80) | ||
429 | status->flag |= RX_FLAG_40MHZ; | ||
430 | if (head->plcp[6] & 0x80) | ||
431 | status->flag |= RX_FLAG_SHORT_GI; | ||
432 | |||
433 | status->rate_idx = clamp(head->plcp[3] & 0x7f, 0, 75); | ||
434 | status->flag |= RX_FLAG_HT; | ||
435 | break; | ||
436 | |||
437 | default: | ||
438 | BUG(); | ||
439 | return -ENOSYS; | ||
440 | } | ||
441 | |||
442 | return 0; | ||
443 | } | ||
444 | |||
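A short reference note on the rate_idx values assigned above, derived from the switch itself:

/*
 * rate_idx is an index into mac80211's per-band bitrate table:
 *   CCK  1/2/5.5/11M -> 0..3  (2.4 GHz only)
 *   OFDM 6...54M     -> 0..7 on 5 GHz, 4..11 on 2.4 GHz, because the
 *                       2.4 GHz table lists the four CCK rates first
 *   HT               -> the MCS index taken from the PLCP header
 */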
445 | static void carl9170_rx_phy_status(struct ar9170 *ar, | ||
446 | struct ar9170_rx_phystatus *phy, struct ieee80211_rx_status *status) | ||
447 | { | ||
448 | int i; | ||
449 | |||
450 | BUILD_BUG_ON(sizeof(struct ar9170_rx_phystatus) != 20); | ||
451 | |||
452 | for (i = 0; i < 3; i++) | ||
453 | if (phy->rssi[i] != 0x80) | ||
454 | status->antenna |= BIT(i); | ||
455 | |||
456 | /* post-process RSSI */ | ||
457 | for (i = 0; i < 7; i++) | ||
458 | if (phy->rssi[i] & 0x80) | ||
459 | phy->rssi[i] = ((phy->rssi[i] & 0x7f) + 1) & 0x7f; | ||
460 | |||
461 | /* TODO: we could do something with phy_errors */ | ||
462 | status->signal = ar->noise[0] + phy->rssi_combined; | ||
463 | } | ||
464 | |||
465 | static struct sk_buff *carl9170_rx_copy_data(u8 *buf, int len) | ||
466 | { | ||
467 | struct sk_buff *skb; | ||
468 | int reserved = 0; | ||
469 | struct ieee80211_hdr *hdr = (void *) buf; | ||
470 | |||
471 | if (ieee80211_is_data_qos(hdr->frame_control)) { | ||
472 | u8 *qc = ieee80211_get_qos_ctl(hdr); | ||
473 | reserved += NET_IP_ALIGN; | ||
474 | |||
475 | if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT) | ||
476 | reserved += NET_IP_ALIGN; | ||
477 | } | ||
478 | |||
479 | if (ieee80211_has_a4(hdr->frame_control)) | ||
480 | reserved += NET_IP_ALIGN; | ||
481 | |||
482 | reserved = 32 + (reserved & NET_IP_ALIGN); | ||
483 | |||
484 | skb = dev_alloc_skb(len + reserved); | ||
485 | if (likely(skb)) { | ||
486 | skb_reserve(skb, reserved); | ||
487 | memcpy(skb_put(skb, len), buf, len); | ||
488 | } | ||
489 | |||
490 | return skb; | ||
491 | } | ||
492 | |||
493 | static u8 *carl9170_find_ie(u8 *data, unsigned int len, u8 ie) | ||
494 | { | ||
495 | struct ieee80211_mgmt *mgmt = (void *)data; | ||
496 | u8 *pos, *end; | ||
497 | |||
498 | pos = (u8 *)mgmt->u.beacon.variable; | ||
499 | end = data + len; | ||
500 | while (pos < end) { | ||
501 | if (pos + 2 + pos[1] > end) | ||
502 | return NULL; | ||
503 | |||
504 | if (pos[0] == ie) | ||
505 | return pos; | ||
506 | |||
507 | pos += 2 + pos[1]; | ||
508 | } | ||
509 | return NULL; | ||
510 | } | ||
511 | |||
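carl9170_find_ie() walks the management frame body as a flat list of id/length/value elements. A self-contained sketch of the same walk over a plain byte buffer (the sample data is made up; 5 is the 802.11 element ID of the TIM):

#include <stdint.h>
#include <stdio.h>

/* Same id/len/value walk as carl9170_find_ie(), without the
 * ieee80211_mgmt header handling. */
static const uint8_t *find_ie(const uint8_t *pos, unsigned int len, uint8_t ie)
{
	const uint8_t *end = pos + len;

	while (pos + 2 <= end) {
		if (pos + 2 + pos[1] > end)
			return NULL;	/* element runs past the buffer */

		if (pos[0] == ie)
			return pos;

		pos += 2 + pos[1];
	}

	return NULL;
}

int main(void)
{
	/* SSID "x", then a minimal TIM element (count, period, ctrl, bitmap) */
	static const uint8_t ies[] = { 0, 1, 'x', 5, 4, 0, 3, 0, 0 };
	const uint8_t *tim = find_ie(ies, sizeof(ies), 5);

	if (tim)
		printf("TIM: dtim_count=%u dtim_period=%u\n", tim[2], tim[3]);

	return 0;
}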
512 | /* | ||
513 | * NOTE: | ||
514 | * | ||
515 | * The firmware is in charge of waking up the device just before | ||
516 | * the AP is expected to transmit the next beacon. | ||
517 | * | ||
518 | * This leaves the driver with the important task of deciding when | ||
519 | * to set the PHY back to bed again. | ||
520 | */ | ||
521 | static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len) | ||
522 | { | ||
523 | struct ieee80211_hdr *hdr = (void *) data; | ||
524 | struct ieee80211_tim_ie *tim_ie; | ||
525 | u8 *tim; | ||
526 | u8 tim_len; | ||
527 | bool cam; | ||
528 | |||
529 | if (likely(!(ar->hw->conf.flags & IEEE80211_CONF_PS))) | ||
530 | return; | ||
531 | |||
532 | /* check if this really is a beacon */ | ||
533 | if (!ieee80211_is_beacon(hdr->frame_control)) | ||
534 | return; | ||
535 | |||
536 | /* min. beacon length + FCS_LEN */ | ||
537 | if (len <= 40 + FCS_LEN) | ||
538 | return; | ||
539 | |||
540 | /* and only beacons from the associated BSSID, please */ | ||
541 | if (compare_ether_addr(hdr->addr3, ar->common.curbssid) || | ||
542 | !ar->common.curaid) | ||
543 | return; | ||
544 | |||
545 | ar->ps.last_beacon = jiffies; | ||
546 | |||
547 | tim = carl9170_find_ie(data, len - FCS_LEN, WLAN_EID_TIM); | ||
548 | if (!tim) | ||
549 | return; | ||
550 | |||
551 | if (tim[1] < sizeof(*tim_ie)) | ||
552 | return; | ||
553 | |||
554 | tim_len = tim[1]; | ||
555 | tim_ie = (struct ieee80211_tim_ie *) &tim[2]; | ||
556 | |||
557 | if (!WARN_ON_ONCE(!ar->hw->conf.ps_dtim_period)) | ||
558 | ar->ps.dtim_counter = (tim_ie->dtim_count - 1) % | ||
559 | ar->hw->conf.ps_dtim_period; | ||
560 | |||
561 | /* Check whether the PHY can be turned off again. */ | ||
562 | |||
563 | /* 1. What about buffered unicast traffic for our AID? */ | ||
564 | cam = ieee80211_check_tim(tim_ie, tim_len, ar->common.curaid); | ||
565 | |||
566 | /* 2. Maybe the AP wants to send multicast/broadcast data? */ | ||
567 | cam |= !!(tim_ie->bitmap_ctrl & 0x01); | ||
568 | |||
569 | if (!cam) { | ||
570 | /* back to low-power land. */ | ||
571 | ar->ps.off_override &= ~PS_OFF_BCN; | ||
572 | carl9170_ps_check(ar); | ||
573 | } else { | ||
574 | /* force CAM */ | ||
575 | ar->ps.off_override |= PS_OFF_BCN; | ||
576 | } | ||
577 | } | ||
578 | |||
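The stay-awake decision at the end of carl9170_ps_beacon() reduces to two facts read from the TIM element. A minimal sketch of that rule (names are illustrative only, not driver API):

#include <stdbool.h>
#include <stdint.h>

/*
 * After a beacon, the PHY may go back to sleep only if both of these
 * are false:
 *  - our AID bit is set in the TIM's partial virtual bitmap
 *    (buffered unicast traffic for us), or
 *  - bit 0 of the bitmap control octet is set
 *    (buffered multicast/broadcast traffic is pending at the AP).
 */
static bool must_stay_awake(bool aid_bit_set, uint8_t bitmap_ctrl)
{
	return aid_bit_set || (bitmap_ctrl & 0x01);
}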
579 | /* | ||
580 | * If the frame alignment is right (or the kernel has | ||
581 | * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), and there | ||
582 | * is only a single MPDU in the USB frame, then we could | ||
583 | * submit the SKB to mac80211 directly. However, since | ||
584 | * there may be multiple packets in one SKB in stream | ||
585 | * mode, and we need to observe the proper ordering, | ||
586 | * this is non-trivial. | ||
587 | */ | ||
588 | |||
589 | static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len) | ||
590 | { | ||
591 | struct ar9170_rx_head *head; | ||
592 | struct ar9170_rx_macstatus *mac; | ||
593 | struct ar9170_rx_phystatus *phy = NULL; | ||
594 | struct ieee80211_rx_status status; | ||
595 | struct sk_buff *skb; | ||
596 | int mpdu_len; | ||
597 | |||
598 | if (!IS_STARTED(ar)) | ||
599 | return; | ||
600 | |||
601 | if (unlikely(len < sizeof(*mac))) { | ||
602 | ar->rx_dropped++; | ||
603 | return; | ||
604 | } | ||
605 | |||
606 | mpdu_len = len - sizeof(*mac); | ||
607 | |||
608 | mac = (void *)(buf + mpdu_len); | ||
609 | if (unlikely(mac->error & AR9170_RX_ERROR_FATAL)) { | ||
610 | ar->rx_dropped++; | ||
611 | return; | ||
612 | } | ||
613 | |||
614 | switch (mac->status & AR9170_RX_STATUS_MPDU) { | ||
615 | case AR9170_RX_STATUS_MPDU_FIRST: | ||
616 | /* Aggregated MPDUs start with a PLCP header */ | ||
617 | if (likely(mpdu_len >= sizeof(struct ar9170_rx_head))) { | ||
618 | head = (void *) buf; | ||
619 | |||
620 | /* | ||
621 | * The PLCP header needs to be cached for the | ||
622 | * following MIDDLE + LAST A-MPDU packets. | ||
623 | * | ||
624 | * So, if you are wondering why all frames seem | ||
625 | * to share common RX status information, | ||
626 | * then you have the answer right here... | ||
627 | */ | ||
628 | memcpy(&ar->rx_plcp, (void *) buf, | ||
629 | sizeof(struct ar9170_rx_head)); | ||
630 | |||
631 | mpdu_len -= sizeof(struct ar9170_rx_head); | ||
632 | buf += sizeof(struct ar9170_rx_head); | ||
633 | |||
634 | ar->rx_has_plcp = true; | ||
635 | } else { | ||
636 | if (net_ratelimit()) { | ||
637 | wiphy_err(ar->hw->wiphy, "plcp info " | ||
638 | "is clipped.\n"); | ||
639 | } | ||
640 | |||
641 | ar->rx_dropped++; | ||
642 | return; | ||
643 | } | ||
644 | break; | ||
645 | |||
646 | case AR9170_RX_STATUS_MPDU_LAST: | ||
647 | /* | ||
648 | * The last frame of an A-MPDU has an extra tail | ||
649 | * which contains the phy status of the whole | ||
650 | * aggregate. | ||
651 | */ | ||
652 | |||
653 | if (likely(mpdu_len >= sizeof(struct ar9170_rx_phystatus))) { | ||
654 | mpdu_len -= sizeof(struct ar9170_rx_phystatus); | ||
655 | phy = (void *)(buf + mpdu_len); | ||
656 | } else { | ||
657 | if (net_ratelimit()) { | ||
658 | wiphy_err(ar->hw->wiphy, "frame tail " | ||
659 | "is clipped.\n"); | ||
660 | } | ||
661 | |||
662 | ar->rx_dropped++; | ||
663 | return; | ||
664 | } | ||
665 | |||
666 | case AR9170_RX_STATUS_MPDU_MIDDLE: | ||
667 | /* These are just data + mac status */ | ||
668 | if (unlikely(!ar->rx_has_plcp)) { | ||
669 | if (!net_ratelimit()) | ||
670 | return; | ||
671 | |||
672 | wiphy_err(ar->hw->wiphy, "rx stream does not start " | ||
673 | "with a first_mpdu frame tag.\n"); | ||
674 | |||
675 | ar->rx_dropped++; | ||
676 | return; | ||
677 | } | ||
678 | |||
679 | head = &ar->rx_plcp; | ||
680 | break; | ||
681 | |||
682 | case AR9170_RX_STATUS_MPDU_SINGLE: | ||
683 | /* single mpdu has both: plcp (head) and phy status (tail) */ | ||
684 | head = (void *) buf; | ||
685 | |||
686 | mpdu_len -= sizeof(struct ar9170_rx_head); | ||
687 | mpdu_len -= sizeof(struct ar9170_rx_phystatus); | ||
688 | |||
689 | buf += sizeof(struct ar9170_rx_head); | ||
690 | phy = (void *)(buf + mpdu_len); | ||
691 | break; | ||
692 | |||
693 | default: | ||
694 | BUG_ON(1); | ||
695 | break; | ||
696 | } | ||
697 | |||
698 | /* FC + DU + RA + FCS */ | ||
699 | if (unlikely(mpdu_len < (2 + 2 + 6 + FCS_LEN))) { | ||
700 | ar->rx_dropped++; | ||
701 | return; | ||
702 | } | ||
703 | |||
704 | memset(&status, 0, sizeof(status)); | ||
705 | if (unlikely(carl9170_rx_mac_status(ar, head, mac, &status))) { | ||
706 | ar->rx_dropped++; | ||
707 | return; | ||
708 | } | ||
709 | |||
710 | if (phy) | ||
711 | carl9170_rx_phy_status(ar, phy, &status); | ||
712 | |||
713 | carl9170_ps_beacon(ar, buf, mpdu_len); | ||
714 | |||
715 | skb = carl9170_rx_copy_data(buf, mpdu_len); | ||
716 | if (likely(skb)) { | ||
717 | memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); | ||
718 | ieee80211_rx(ar->hw, skb); | ||
719 | } else { | ||
720 | ar->rx_dropped++; | ||
721 | } | ||
722 | } | ||
723 | |||
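For reference, the sub-frame layouts that the switch in carl9170_handle_mpdu() decodes, written out as a comment (sizes are the ones asserted by the BUILD_BUG_ONs in carl9170_rx_mac_status() and carl9170_rx_phy_status()):

/*
 *   FIRST : [ rx_head (12) ][ MPDU .................... ][ macstatus (4) ]
 *   MIDDLE: [ MPDU ..................................... ][ macstatus (4) ]
 *   LAST  : [ MPDU .............. ][ phystatus (20) ][ macstatus (4) ]
 *   SINGLE: [ rx_head (12) ][ MPDU ][ phystatus (20) ][ macstatus (4) ]
 *
 * The PLCP header of the FIRST sub-frame is cached in ar->rx_plcp for the
 * MIDDLE and LAST sub-frames; the PHY status of the LAST sub-frame covers
 * the whole aggregate.
 */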
724 | static void carl9170_rx_untie_cmds(struct ar9170 *ar, const u8 *respbuf, | ||
725 | const unsigned int resplen) | ||
726 | { | ||
727 | struct carl9170_rsp *cmd; | ||
728 | int i = 0; | ||
729 | |||
730 | while (i < resplen) { | ||
731 | cmd = (void *) &respbuf[i]; | ||
732 | |||
733 | i += cmd->hdr.len + 4; | ||
734 | if (unlikely(i > resplen)) | ||
735 | break; | ||
736 | |||
737 | carl9170_handle_command_response(ar, cmd, cmd->hdr.len + 4); | ||
738 | } | ||
739 | |||
740 | if (unlikely(i != resplen)) { | ||
741 | if (!net_ratelimit()) | ||
742 | return; | ||
743 | |||
744 | wiphy_err(ar->hw->wiphy, "malformed firmware trap:\n"); | ||
745 | print_hex_dump_bytes("rxcmd:", DUMP_PREFIX_OFFSET, | ||
746 | respbuf, resplen); | ||
747 | } | ||
748 | } | ||
749 | |||
750 | static void __carl9170_rx(struct ar9170 *ar, u8 *buf, unsigned int len) | ||
751 | { | ||
752 | unsigned int i = 0; | ||
753 | |||
754 | /* weird thing, but this is the same in the original driver */ | ||
755 | while (len > 2 && i < 12 && buf[0] == 0xff && buf[1] == 0xff) { | ||
756 | i += 2; | ||
757 | len -= 2; | ||
758 | buf += 2; | ||
759 | } | ||
760 | |||
761 | if (unlikely(len < 4)) | ||
762 | return; | ||
763 | |||
764 | /* found the 6 * 0xffff marker? */ | ||
765 | if (i == 12) | ||
766 | carl9170_rx_untie_cmds(ar, buf, len); | ||
767 | else | ||
768 | carl9170_handle_mpdu(ar, buf, len); | ||
769 | } | ||
770 | |||
771 | static void carl9170_rx_stream(struct ar9170 *ar, void *buf, unsigned int len) | ||
772 | { | ||
773 | unsigned int tlen, wlen = 0, clen = 0; | ||
774 | struct ar9170_stream *rx_stream; | ||
775 | u8 *tbuf; | ||
776 | |||
777 | tbuf = buf; | ||
778 | tlen = len; | ||
779 | |||
780 | while (tlen >= 4) { | ||
781 | rx_stream = (void *) tbuf; | ||
782 | clen = le16_to_cpu(rx_stream->length); | ||
783 | wlen = ALIGN(clen, 4); | ||
784 | |||
785 | /* check if this stream has a valid tag. */ | ||
786 | if (rx_stream->tag != cpu_to_le16(AR9170_RX_STREAM_TAG)) { | ||
787 | /* | ||
788 | * TODO: handle the highly unlikely event that the | ||
789 | * corrupted stream has the TAG at the right position. | ||
790 | */ | ||
791 | |||
792 | /* check if the frame can be repaired. */ | ||
793 | if (!ar->rx_failover_missing) { | ||
794 | |||
795 | /* this is not a "short read". */ | ||
796 | if (net_ratelimit()) { | ||
797 | wiphy_err(ar->hw->wiphy, | ||
798 | "missing tag!\n"); | ||
799 | } | ||
800 | |||
801 | __carl9170_rx(ar, tbuf, tlen); | ||
802 | return; | ||
803 | } | ||
804 | |||
805 | if (ar->rx_failover_missing > tlen) { | ||
806 | if (net_ratelimit()) { | ||
807 | wiphy_err(ar->hw->wiphy, | ||
808 | "possible multi " | ||
809 | "stream corruption!\n"); | ||
810 | goto err_telluser; | ||
811 | } else { | ||
812 | goto err_silent; | ||
813 | } | ||
814 | } | ||
815 | |||
816 | memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen); | ||
817 | ar->rx_failover_missing -= tlen; | ||
818 | |||
819 | if (ar->rx_failover_missing <= 0) { | ||
820 | /* | ||
821 | * nested carl9170_rx_stream call! | ||
822 | * | ||
823 | * termination is guaranteed, even when the | ||
824 | * combined frame also has an element with | ||
825 | * a bad tag. | ||
826 | */ | ||
827 | |||
828 | ar->rx_failover_missing = 0; | ||
829 | carl9170_rx_stream(ar, ar->rx_failover->data, | ||
830 | ar->rx_failover->len); | ||
831 | |||
832 | skb_reset_tail_pointer(ar->rx_failover); | ||
833 | skb_trim(ar->rx_failover, 0); | ||
834 | } | ||
835 | |||
836 | return; | ||
837 | } | ||
838 | |||
839 | /* check if stream is clipped */ | ||
840 | if (wlen > tlen - 4) { | ||
841 | if (ar->rx_failover_missing) { | ||
842 | /* TODO: handle double stream corruption. */ | ||
843 | if (net_ratelimit()) { | ||
844 | wiphy_err(ar->hw->wiphy, "double rx " | ||
845 | "stream corruption!\n"); | ||
846 | goto err_telluser; | ||
847 | } else { | ||
848 | goto err_silent; | ||
849 | } | ||
850 | } | ||
851 | |||
852 | /* | ||
853 | * Save the incomplete data set. | ||
854 | * The firmware will resend the missing bits when | ||
855 | * the rx descriptor comes round again. | ||
856 | */ | ||
857 | |||
858 | memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen); | ||
859 | ar->rx_failover_missing = clen - tlen; | ||
860 | return; | ||
861 | } | ||
862 | __carl9170_rx(ar, rx_stream->payload, clen); | ||
863 | |||
864 | tbuf += wlen + 4; | ||
865 | tlen -= wlen + 4; | ||
866 | } | ||
867 | |||
868 | if (tlen) { | ||
869 | if (net_ratelimit()) { | ||
870 | wiphy_err(ar->hw->wiphy, "%d bytes of unprocessed " | ||
871 | "data left in rx stream!\n", tlen); | ||
872 | } | ||
873 | |||
874 | goto err_telluser; | ||
875 | } | ||
876 | |||
877 | return; | ||
878 | |||
879 | err_telluser: | ||
880 | wiphy_err(ar->hw->wiphy, "damaged RX stream data [want:%d, " | ||
881 | "data:%d, rx:%d, pending:%d ]\n", clen, wlen, tlen, | ||
882 | ar->rx_failover_missing); | ||
883 | |||
884 | if (ar->rx_failover_missing) | ||
885 | print_hex_dump_bytes("rxbuf:", DUMP_PREFIX_OFFSET, | ||
886 | ar->rx_failover->data, | ||
887 | ar->rx_failover->len); | ||
888 | |||
889 | print_hex_dump_bytes("stream:", DUMP_PREFIX_OFFSET, | ||
890 | buf, len); | ||
891 | |||
892 | wiphy_err(ar->hw->wiphy, "please check your hardware and cables if " | ||
893 | "you see this message frequently.\n"); | ||
894 | |||
895 | err_silent: | ||
896 | if (ar->rx_failover_missing) { | ||
897 | skb_reset_tail_pointer(ar->rx_failover); | ||
898 | skb_trim(ar->rx_failover, 0); | ||
899 | ar->rx_failover_missing = 0; | ||
900 | } | ||
901 | } | ||
902 | |||
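In rx-stream mode each USB transfer carries a sequence of records: a 4-byte header (a 16-bit payload length and a 16-bit tag, as in the driver's struct ar9170_stream; the field order here is an assumption) followed by the payload padded to a multiple of four bytes. A stripped-down, standalone parser for that framing (the tag value is a placeholder and the failover/repair path is omitted):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define STREAM_TAG 0x4e00	/* placeholder; the driver compares against AR9170_RX_STREAM_TAG */

/* Walk the 4-byte-aligned records of an rx stream buffer and hand each
 * un-padded payload to the callback, as carl9170_rx_stream() does for
 * the well-formed case. */
static void parse_stream(const uint8_t *buf, size_t len,
			 void (*cb)(const uint8_t *payload, size_t plen))
{
	while (len >= 4) {
		uint16_t clen, tag;
		size_t wlen;

		memcpy(&clen, buf, 2);		/* little-endian on the wire; */
		memcpy(&tag, buf + 2, 2);	/* assume a little-endian host here */

		wlen = ((size_t)clen + 3) & ~(size_t)3;	/* payload is padded to 4 bytes */

		if (tag != STREAM_TAG || wlen > len - 4) {
			fprintf(stderr, "bad tag or clipped record\n");
			return;
		}

		cb(buf + 4, clen);

		buf += wlen + 4;
		len -= wlen + 4;
	}
}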
903 | void carl9170_rx(struct ar9170 *ar, void *buf, unsigned int len) | ||
904 | { | ||
905 | if (ar->fw.rx_stream) | ||
906 | carl9170_rx_stream(ar, buf, len); | ||
907 | else | ||
908 | __carl9170_rx(ar, buf, len); | ||
909 | } | ||
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
new file mode 100644
index 000000000000..e0d2374e0c77
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -0,0 +1,1373 @@
1 | /* | ||
2 | * Atheros CARL9170 driver | ||
3 | * | ||
4 | * 802.11 xmit & status routines | ||
5 | * | ||
6 | * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> | ||
7 | * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; see the file COPYING. If not, see | ||
21 | * http://www.gnu.org/licenses/. | ||
22 | * | ||
23 | * This file incorporates work covered by the following copyright and | ||
24 | * permission notice: | ||
25 | * Copyright (c) 2007-2008 Atheros Communications, Inc. | ||
26 | * | ||
27 | * Permission to use, copy, modify, and/or distribute this software for any | ||
28 | * purpose with or without fee is hereby granted, provided that the above | ||
29 | * copyright notice and this permission notice appear in all copies. | ||
30 | * | ||
31 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
32 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
33 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
34 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
35 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
36 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
37 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
38 | */ | ||
39 | |||
40 | #include <linux/init.h> | ||
41 | #include <linux/slab.h> | ||
42 | #include <linux/module.h> | ||
43 | #include <linux/etherdevice.h> | ||
44 | #include <net/mac80211.h> | ||
45 | #include "carl9170.h" | ||
46 | #include "hw.h" | ||
47 | #include "cmd.h" | ||
48 | |||
49 | static inline unsigned int __carl9170_get_queue(struct ar9170 *ar, | ||
50 | unsigned int queue) | ||
51 | { | ||
52 | if (unlikely(modparam_noht)) { | ||
53 | return queue; | ||
54 | } else { | ||
55 | /* | ||
56 | * This is just another workaround, until | ||
57 | * someone figures out how to get QoS and | ||
58 | * AMPDU to play nicely together. | ||
59 | */ | ||
60 | |||
61 | return 2; /* AC_BE */ | ||
62 | } | ||
63 | } | ||
64 | |||
65 | static inline unsigned int carl9170_get_queue(struct ar9170 *ar, | ||
66 | struct sk_buff *skb) | ||
67 | { | ||
68 | return __carl9170_get_queue(ar, skb_get_queue_mapping(skb)); | ||
69 | } | ||
70 | |||
71 | static bool is_mem_full(struct ar9170 *ar) | ||
72 | { | ||
73 | return (DIV_ROUND_UP(IEEE80211_MAX_FRAME_LEN, ar->fw.mem_block_size) > | ||
74 | atomic_read(&ar->mem_free_blocks)); | ||
75 | } | ||
76 | |||
77 | static void carl9170_tx_accounting(struct ar9170 *ar, struct sk_buff *skb) | ||
78 | { | ||
79 | int queue, i; | ||
80 | bool mem_full; | ||
81 | |||
82 | atomic_inc(&ar->tx_total_queued); | ||
83 | |||
84 | queue = skb_get_queue_mapping(skb); | ||
85 | spin_lock_bh(&ar->tx_stats_lock); | ||
86 | |||
87 | /* | ||
88 | * The driver has to accept the frame, regardless of whether the queue | ||
89 | * is full to the brim or not. We have to do the queuing internally, | ||
90 | * since mac80211 assumes that a driver which can operate with | ||
91 | * aggregated frames does not reject frames for this reason. | ||
92 | */ | ||
93 | ar->tx_stats[queue].len++; | ||
94 | ar->tx_stats[queue].count++; | ||
95 | |||
96 | mem_full = is_mem_full(ar); | ||
97 | for (i = 0; i < ar->hw->queues; i++) { | ||
98 | if (mem_full || ar->tx_stats[i].len >= ar->tx_stats[i].limit) { | ||
99 | ieee80211_stop_queue(ar->hw, i); | ||
100 | ar->queue_stop_timeout[i] = jiffies; | ||
101 | } | ||
102 | } | ||
103 | |||
104 | spin_unlock_bh(&ar->tx_stats_lock); | ||
105 | } | ||
106 | |||
107 | static void carl9170_tx_accounting_free(struct ar9170 *ar, struct sk_buff *skb) | ||
108 | { | ||
109 | struct ieee80211_tx_info *txinfo; | ||
110 | int queue; | ||
111 | |||
112 | txinfo = IEEE80211_SKB_CB(skb); | ||
113 | queue = skb_get_queue_mapping(skb); | ||
114 | |||
115 | spin_lock_bh(&ar->tx_stats_lock); | ||
116 | |||
117 | ar->tx_stats[queue].len--; | ||
118 | |||
119 | if (!is_mem_full(ar)) { | ||
120 | unsigned int i; | ||
121 | for (i = 0; i < ar->hw->queues; i++) { | ||
122 | if (ar->tx_stats[i].len >= CARL9170_NUM_TX_LIMIT_SOFT) | ||
123 | continue; | ||
124 | |||
125 | if (ieee80211_queue_stopped(ar->hw, i)) { | ||
126 | unsigned long tmp; | ||
127 | |||
128 | tmp = jiffies - ar->queue_stop_timeout[i]; | ||
129 | if (tmp > ar->max_queue_stop_timeout[i]) | ||
130 | ar->max_queue_stop_timeout[i] = tmp; | ||
131 | } | ||
132 | |||
133 | ieee80211_wake_queue(ar->hw, i); | ||
134 | } | ||
135 | } | ||
136 | |||
137 | spin_unlock_bh(&ar->tx_stats_lock); | ||
138 | if (atomic_dec_and_test(&ar->tx_total_queued)) | ||
139 | complete(&ar->tx_flush); | ||
140 | } | ||
141 | |||
142 | static int carl9170_alloc_dev_space(struct ar9170 *ar, struct sk_buff *skb) | ||
143 | { | ||
144 | struct _carl9170_tx_superframe *super = (void *) skb->data; | ||
145 | unsigned int chunks; | ||
146 | int cookie = -1; | ||
147 | |||
148 | atomic_inc(&ar->mem_allocs); | ||
149 | |||
150 | chunks = DIV_ROUND_UP(skb->len, ar->fw.mem_block_size); | ||
151 | if (unlikely(atomic_sub_return(chunks, &ar->mem_free_blocks) < 0)) { | ||
152 | atomic_add(chunks, &ar->mem_free_blocks); | ||
153 | return -ENOSPC; | ||
154 | } | ||
155 | |||
156 | spin_lock_bh(&ar->mem_lock); | ||
157 | cookie = bitmap_find_free_region(ar->mem_bitmap, ar->fw.mem_blocks, 0); | ||
158 | spin_unlock_bh(&ar->mem_lock); | ||
159 | |||
160 | if (unlikely(cookie < 0)) { | ||
161 | atomic_add(chunks, &ar->mem_free_blocks); | ||
162 | return -ENOSPC; | ||
163 | } | ||
164 | |||
165 | super = (void *) skb->data; | ||
166 | |||
167 | /* | ||
168 | * Cookie #0 serves two special purposes: | ||
169 | * 1. The firmware might use it to generate BlockAck frames | ||
170 | * in response to incoming BlockAckReqs. | ||
171 | * | ||
172 | * 2. Prevent double-free bugs. | ||
173 | */ | ||
174 | super->s.cookie = (u8) cookie + 1; | ||
175 | return 0; | ||
176 | } | ||
177 | |||
178 | static void carl9170_release_dev_space(struct ar9170 *ar, struct sk_buff *skb) | ||
179 | { | ||
180 | struct _carl9170_tx_superframe *super = (void *) skb->data; | ||
181 | int cookie; | ||
182 | |||
183 | /* make a local copy of the cookie */ | ||
184 | cookie = super->s.cookie; | ||
185 | /* invalidate cookie */ | ||
186 | super->s.cookie = 0; | ||
187 | |||
188 | /* | ||
189 | * Do an out-of-bounds check on the cookie: | ||
190 | * | ||
191 | * * cookie "0" is reserved and won't be assigned to any | ||
192 | * out-going frame. Internally however, it is used to | ||
193 | * mark no longer/un-accounted frames and serves as a | ||
194 | * cheap way of preventing frames from being freed | ||
195 | * twice by _accident_. NB: There is a tiny race... | ||
196 | * | ||
197 | * * Obviously, the cookie number is limited by the amount | ||
198 | * of available memory blocks, so the number can | ||
199 | * never exceed the mem_blocks count. | ||
200 | */ | ||
201 | if (unlikely(WARN_ON_ONCE(cookie == 0) || | ||
202 | WARN_ON_ONCE(cookie > ar->fw.mem_blocks))) | ||
203 | return; | ||
204 | |||
205 | atomic_add(DIV_ROUND_UP(skb->len, ar->fw.mem_block_size), | ||
206 | &ar->mem_free_blocks); | ||
207 | |||
208 | spin_lock_bh(&ar->mem_lock); | ||
209 | bitmap_release_region(ar->mem_bitmap, cookie - 1, 0); | ||
210 | spin_unlock_bh(&ar->mem_lock); | ||
211 | } | ||
212 | |||
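carl9170_alloc_dev_space() and carl9170_release_dev_space() pair a free-block counter with a bitmap whose slot index, plus one, becomes the frame's cookie. A toy, single-threaded, one-block-per-frame version of the same idea (no locking, no multi-chunk accounting; 64 blocks is an arbitrary pool size):

#include <stdint.h>

#define MEM_BLOCKS 64		/* arbitrary pool size for this sketch */

static uint64_t mem_bitmap;	/* bit i set = block i is in use */
static int mem_free_blocks = MEM_BLOCKS;

/* Returns a cookie in 1..MEM_BLOCKS, or 0 when the pool is exhausted. */
static unsigned int alloc_block(void)
{
	unsigned int i;

	if (mem_free_blocks <= 0)
		return 0;

	for (i = 0; i < MEM_BLOCKS; i++) {
		if (!(mem_bitmap & (1ULL << i))) {
			mem_bitmap |= 1ULL << i;
			mem_free_blocks--;
			return i + 1;	/* cookie 0 stays reserved, as in the driver */
		}
	}

	return 0;
}

static void release_block(unsigned int cookie)
{
	/* reserved or out-of-range cookies are ignored (cf. the WARN_ON_ONCEs) */
	if (cookie == 0 || cookie > MEM_BLOCKS)
		return;

	mem_bitmap &= ~(1ULL << (cookie - 1));
	mem_free_blocks++;
}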
213 | /* Called from any context */ | ||
214 | static void carl9170_tx_release(struct kref *ref) | ||
215 | { | ||
216 | struct ar9170 *ar; | ||
217 | struct carl9170_tx_info *arinfo; | ||
218 | struct ieee80211_tx_info *txinfo; | ||
219 | struct sk_buff *skb; | ||
220 | |||
221 | arinfo = container_of(ref, struct carl9170_tx_info, ref); | ||
222 | txinfo = container_of((void *) arinfo, struct ieee80211_tx_info, | ||
223 | rate_driver_data); | ||
224 | skb = container_of((void *) txinfo, struct sk_buff, cb); | ||
225 | |||
226 | ar = arinfo->ar; | ||
227 | if (WARN_ON_ONCE(!ar)) | ||
228 | return; | ||
229 | |||
230 | BUILD_BUG_ON( | ||
231 | offsetof(struct ieee80211_tx_info, status.ampdu_ack_len) != 23); | ||
232 | |||
233 | memset(&txinfo->status.ampdu_ack_len, 0, | ||
234 | sizeof(struct ieee80211_tx_info) - | ||
235 | offsetof(struct ieee80211_tx_info, status.ampdu_ack_len)); | ||
236 | |||
237 | if (atomic_read(&ar->tx_total_queued)) | ||
238 | ar->tx_schedule = true; | ||
239 | |||
240 | if (txinfo->flags & IEEE80211_TX_CTL_AMPDU) { | ||
241 | if (!atomic_read(&ar->tx_ampdu_upload)) | ||
242 | ar->tx_ampdu_schedule = true; | ||
243 | |||
244 | if (txinfo->flags & IEEE80211_TX_STAT_AMPDU) { | ||
245 | txinfo->status.ampdu_len = txinfo->pad[0]; | ||
246 | txinfo->status.ampdu_ack_len = txinfo->pad[1]; | ||
247 | txinfo->pad[0] = txinfo->pad[1] = 0; | ||
248 | } else if (txinfo->flags & IEEE80211_TX_STAT_ACK) { | ||
249 | /* | ||
250 | * drop redundant tx_status reports: | ||
251 | * | ||
252 | * 1. ampdu_ack_len of the final tx_status does | ||
253 | * include the feedback of this particular frame. | ||
254 | * | ||
255 | * 2. tx_status_irqsafe only queues up to 128 | ||
256 | * tx feedback reports and discards the rest. | ||
257 | * | ||
258 | * 3. minstrel_ht is picky, it only accepts | ||
259 | * reports of frames with the TX_STATUS_AMPDU flag. | ||
260 | */ | ||
261 | |||
262 | dev_kfree_skb_any(skb); | ||
263 | return; | ||
264 | } else { | ||
265 | /* | ||
266 | * Frame has failed, but we want to keep it in | ||
267 | * case it was lost due to a power-state | ||
268 | * transition. | ||
269 | */ | ||
270 | } | ||
271 | } | ||
272 | |||
273 | skb_pull(skb, sizeof(struct _carl9170_tx_superframe)); | ||
274 | ieee80211_tx_status_irqsafe(ar->hw, skb); | ||
275 | } | ||
276 | |||
277 | void carl9170_tx_get_skb(struct sk_buff *skb) | ||
278 | { | ||
279 | struct carl9170_tx_info *arinfo = (void *) | ||
280 | (IEEE80211_SKB_CB(skb))->rate_driver_data; | ||
281 | kref_get(&arinfo->ref); | ||
282 | } | ||
283 | |||
284 | int carl9170_tx_put_skb(struct sk_buff *skb) | ||
285 | { | ||
286 | struct carl9170_tx_info *arinfo = (void *) | ||
287 | (IEEE80211_SKB_CB(skb))->rate_driver_data; | ||
288 | |||
289 | return kref_put(&arinfo->ref, carl9170_tx_release); | ||
290 | } | ||
291 | |||
292 | /* Caller must hold the tid_info->lock & rcu_read_lock */ | ||
293 | static void carl9170_tx_shift_bm(struct ar9170 *ar, | ||
294 | struct carl9170_sta_tid *tid_info, u16 seq) | ||
295 | { | ||
296 | u16 off; | ||
297 | |||
298 | off = SEQ_DIFF(seq, tid_info->bsn); | ||
299 | |||
300 | if (WARN_ON_ONCE(off >= CARL9170_BAW_BITS)) | ||
301 | return; | ||
302 | |||
303 | /* | ||
304 | * Sanity check. For each MPDU we set the bit in the bitmap and | ||
305 | * clear it once we receive the tx_status. | ||
306 | * But if the bit is already cleared then we've been bitten | ||
307 | * by a bug. | ||
308 | */ | ||
309 | WARN_ON_ONCE(!test_and_clear_bit(off, tid_info->bitmap)); | ||
310 | |||
311 | off = SEQ_DIFF(tid_info->snx, tid_info->bsn); | ||
312 | if (WARN_ON_ONCE(off >= CARL9170_BAW_BITS)) | ||
313 | return; | ||
314 | |||
315 | if (!bitmap_empty(tid_info->bitmap, off)) | ||
316 | off = find_first_bit(tid_info->bitmap, off); | ||
317 | |||
318 | tid_info->bsn += off; | ||
319 | tid_info->bsn &= 0x0fff; | ||
320 | |||
321 | bitmap_shift_right(tid_info->bitmap, tid_info->bitmap, | ||
322 | off, CARL9170_BAW_BITS); | ||
323 | } | ||
324 | |||
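The block-ack window bookkeeping in carl9170_tx_shift_bm() can be modelled with a single 64-bit word (a simplified sketch: the real window is CARL9170_BAW_BITS wide and the slide is additionally bounded by the next-to-send sequence snx):

#include <stdint.h>

struct baw {
	uint64_t bitmap;	/* bit i = MPDU (bsn + i) still awaiting its tx status */
	uint16_t bsn;		/* sequence number at the start of the window */
};

/* Process a tx status for seq: clear its bit, then slide the window
 * start past the leading run of completed MPDUs. */
static void baw_complete(struct baw *w, uint16_t seq)
{
	uint16_t off = (seq - w->bsn) & 0x0fff;	/* 12-bit sequence space */

	if (off >= 64)
		return;				/* outside the window: ignore */

	w->bitmap &= ~(1ULL << off);

	while (w->bitmap && !(w->bitmap & 1)) {
		w->bitmap >>= 1;
		w->bsn = (w->bsn + 1) & 0x0fff;
	}
}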
325 | static void carl9170_tx_status_process_ampdu(struct ar9170 *ar, | ||
326 | struct sk_buff *skb, struct ieee80211_tx_info *txinfo) | ||
327 | { | ||
328 | struct _carl9170_tx_superframe *super = (void *) skb->data; | ||
329 | struct ieee80211_hdr *hdr = (void *) super->frame_data; | ||
330 | struct ieee80211_tx_info *tx_info; | ||
331 | struct carl9170_tx_info *ar_info; | ||
332 | struct carl9170_sta_info *sta_info; | ||
333 | struct ieee80211_sta *sta; | ||
334 | struct carl9170_sta_tid *tid_info; | ||
335 | struct ieee80211_vif *vif; | ||
336 | unsigned int vif_id; | ||
337 | u8 tid; | ||
338 | |||
339 | if (!(txinfo->flags & IEEE80211_TX_CTL_AMPDU) || | ||
340 | txinfo->flags & IEEE80211_TX_CTL_INJECTED) | ||
341 | return; | ||
342 | |||
343 | tx_info = IEEE80211_SKB_CB(skb); | ||
344 | ar_info = (void *) tx_info->rate_driver_data; | ||
345 | |||
346 | vif_id = (super->s.misc & CARL9170_TX_SUPER_MISC_VIF_ID) >> | ||
347 | CARL9170_TX_SUPER_MISC_VIF_ID_S; | ||
348 | |||
349 | if (WARN_ON_ONCE(vif_id >= AR9170_MAX_VIRTUAL_MAC)) | ||
350 | return; | ||
351 | |||
352 | rcu_read_lock(); | ||
353 | vif = rcu_dereference(ar->vif_priv[vif_id].vif); | ||
354 | if (unlikely(!vif)) | ||
355 | goto out_rcu; | ||
356 | |||
357 | /* | ||
358 | * Normally we should use wrappers like ieee80211_get_DA to get | ||
359 | * the correct peer ieee80211_sta. | ||
360 | * | ||
361 | * But there is a problem with indirect traffic (broadcasts, or | ||
362 | * data which is designated for other stations) in station mode. | ||
363 | * The frame will be directed to the AP for distribution and not | ||
364 | * to the actual destination. | ||
365 | */ | ||
366 | sta = ieee80211_find_sta(vif, hdr->addr1); | ||
367 | if (unlikely(!sta)) | ||
368 | goto out_rcu; | ||
369 | |||
370 | tid = get_tid_h(hdr); | ||
371 | |||
372 | sta_info = (void *) sta->drv_priv; | ||
373 | tid_info = rcu_dereference(sta_info->agg[tid]); | ||
374 | if (!tid_info) | ||
375 | goto out_rcu; | ||
376 | |||
377 | spin_lock_bh(&tid_info->lock); | ||
378 | if (likely(tid_info->state >= CARL9170_TID_STATE_IDLE)) | ||
379 | carl9170_tx_shift_bm(ar, tid_info, get_seq_h(hdr)); | ||
380 | |||
381 | if (sta_info->stats[tid].clear) { | ||
382 | sta_info->stats[tid].clear = false; | ||
383 | sta_info->stats[tid].ampdu_len = 0; | ||
384 | sta_info->stats[tid].ampdu_ack_len = 0; | ||
385 | } | ||
386 | |||
387 | sta_info->stats[tid].ampdu_len++; | ||
388 | if (txinfo->status.rates[0].count == 1) | ||
389 | sta_info->stats[tid].ampdu_ack_len++; | ||
390 | |||
391 | if (super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_IMM_BA)) { | ||
392 | txinfo->pad[0] = sta_info->stats[tid].ampdu_len; | ||
393 | txinfo->pad[1] = sta_info->stats[tid].ampdu_ack_len; | ||
394 | txinfo->flags |= IEEE80211_TX_STAT_AMPDU; | ||
395 | sta_info->stats[tid].clear = true; | ||
396 | } | ||
397 | spin_unlock_bh(&tid_info->lock); | ||
398 | |||
399 | out_rcu: | ||
400 | rcu_read_unlock(); | ||
401 | } | ||
402 | |||
403 | void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb, | ||
404 | const bool success) | ||
405 | { | ||
406 | struct ieee80211_tx_info *txinfo; | ||
407 | |||
408 | carl9170_tx_accounting_free(ar, skb); | ||
409 | |||
410 | txinfo = IEEE80211_SKB_CB(skb); | ||
411 | |||
412 | if (success) | ||
413 | txinfo->flags |= IEEE80211_TX_STAT_ACK; | ||
414 | else | ||
415 | ar->tx_ack_failures++; | ||
416 | |||
417 | if (txinfo->flags & IEEE80211_TX_CTL_AMPDU) | ||
418 | carl9170_tx_status_process_ampdu(ar, skb, txinfo); | ||
419 | |||
420 | carl9170_tx_put_skb(skb); | ||
421 | } | ||
422 | |||
423 | /* This function may be called from any context */ | ||
424 | void carl9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb) | ||
425 | { | ||
426 | struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb); | ||
427 | |||
428 | atomic_dec(&ar->tx_total_pending); | ||
429 | |||
430 | if (txinfo->flags & IEEE80211_TX_CTL_AMPDU) | ||
431 | atomic_dec(&ar->tx_ampdu_upload); | ||
432 | |||
433 | if (carl9170_tx_put_skb(skb)) | ||
434 | tasklet_hi_schedule(&ar->usb_tasklet); | ||
435 | } | ||
436 | |||
437 | static struct sk_buff *carl9170_get_queued_skb(struct ar9170 *ar, u8 cookie, | ||
438 | struct sk_buff_head *queue) | ||
439 | { | ||
440 | struct sk_buff *skb; | ||
441 | |||
442 | spin_lock_bh(&queue->lock); | ||
443 | skb_queue_walk(queue, skb) { | ||
444 | struct _carl9170_tx_superframe *txc = (void *) skb->data; | ||
445 | |||
446 | if (txc->s.cookie != cookie) | ||
447 | continue; | ||
448 | |||
449 | __skb_unlink(skb, queue); | ||
450 | spin_unlock_bh(&queue->lock); | ||
451 | |||
452 | carl9170_release_dev_space(ar, skb); | ||
453 | return skb; | ||
454 | } | ||
455 | spin_unlock_bh(&queue->lock); | ||
456 | |||
457 | return NULL; | ||
458 | } | ||
459 | |||
460 | static void carl9170_tx_fill_rateinfo(struct ar9170 *ar, unsigned int rix, | ||
461 | unsigned int tries, struct ieee80211_tx_info *txinfo) | ||
462 | { | ||
463 | unsigned int i; | ||
464 | |||
465 | for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { | ||
466 | if (txinfo->status.rates[i].idx < 0) | ||
467 | break; | ||
468 | |||
469 | if (i == rix) { | ||
470 | txinfo->status.rates[i].count = tries; | ||
471 | i++; | ||
472 | break; | ||
473 | } | ||
474 | } | ||
475 | |||
476 | for (; i < IEEE80211_TX_MAX_RATES; i++) { | ||
477 | txinfo->status.rates[i].idx = -1; | ||
478 | txinfo->status.rates[i].count = 0; | ||
479 | } | ||
480 | } | ||
481 | |||
482 | static void carl9170_check_queue_stop_timeout(struct ar9170 *ar) | ||
483 | { | ||
484 | int i; | ||
485 | struct sk_buff *skb; | ||
486 | struct ieee80211_tx_info *txinfo; | ||
487 | struct carl9170_tx_info *arinfo; | ||
488 | bool restart = false; | ||
489 | |||
490 | for (i = 0; i < ar->hw->queues; i++) { | ||
491 | spin_lock_bh(&ar->tx_status[i].lock); | ||
492 | |||
493 | skb = skb_peek(&ar->tx_status[i]); | ||
494 | |||
495 | if (!skb) | ||
496 | goto next; | ||
497 | |||
498 | txinfo = IEEE80211_SKB_CB(skb); | ||
499 | arinfo = (void *) txinfo->rate_driver_data; | ||
500 | |||
501 | if (time_is_before_jiffies(arinfo->timeout + | ||
502 | msecs_to_jiffies(CARL9170_QUEUE_STUCK_TIMEOUT)) == true) | ||
503 | restart = true; | ||
504 | |||
505 | next: | ||
506 | spin_unlock_bh(&ar->tx_status[i].lock); | ||
507 | } | ||
508 | |||
509 | if (restart) { | ||
510 | /* | ||
511 | * At least one queue has been stuck for long enough. | ||
512 | * Give the device a kick and hope it gets back to | ||
513 | * work. | ||
514 | * | ||
515 | * possible reasons may include: | ||
516 | * - frames got lost/corrupted (bad connection to the device) | ||
517 | * - stalled rx processing/usb controller hiccups | ||
518 | * - firmware errors/bugs | ||
519 | * - every bug you can think of. | ||
520 | * - all bugs you can't... | ||
521 | * - ... | ||
522 | */ | ||
523 | carl9170_restart(ar, CARL9170_RR_STUCK_TX); | ||
524 | } | ||
525 | } | ||
526 | |||
527 | void carl9170_tx_janitor(struct work_struct *work) | ||
528 | { | ||
529 | struct ar9170 *ar = container_of(work, struct ar9170, | ||
530 | tx_janitor.work); | ||
531 | if (!IS_STARTED(ar)) | ||
532 | return; | ||
533 | |||
534 | ar->tx_janitor_last_run = jiffies; | ||
535 | |||
536 | carl9170_check_queue_stop_timeout(ar); | ||
537 | |||
538 | if (!atomic_read(&ar->tx_total_queued)) | ||
539 | return; | ||
540 | |||
541 | ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor, | ||
542 | msecs_to_jiffies(CARL9170_TX_TIMEOUT)); | ||
543 | } | ||
544 | |||
545 | static void __carl9170_tx_process_status(struct ar9170 *ar, | ||
546 | const uint8_t cookie, const uint8_t info) | ||
547 | { | ||
548 | struct sk_buff *skb; | ||
549 | struct ieee80211_tx_info *txinfo; | ||
550 | struct carl9170_tx_info *arinfo; | ||
551 | unsigned int r, t, q; | ||
552 | bool success = true; | ||
553 | |||
554 | q = ar9170_qmap[info & CARL9170_TX_STATUS_QUEUE]; | ||
555 | |||
556 | skb = carl9170_get_queued_skb(ar, cookie, &ar->tx_status[q]); | ||
557 | if (!skb) { | ||
558 | /* | ||
559 | * We have lost the race to another thread. | ||
560 | */ | ||
561 | |||
562 | return; | ||
563 | } | ||
564 | |||
565 | txinfo = IEEE80211_SKB_CB(skb); | ||
566 | arinfo = (void *) txinfo->rate_driver_data; | ||
567 | |||
568 | if (!(info & CARL9170_TX_STATUS_SUCCESS)) | ||
569 | success = false; | ||
570 | |||
571 | r = (info & CARL9170_TX_STATUS_RIX) >> CARL9170_TX_STATUS_RIX_S; | ||
572 | t = (info & CARL9170_TX_STATUS_TRIES) >> CARL9170_TX_STATUS_TRIES_S; | ||
573 | |||
574 | carl9170_tx_fill_rateinfo(ar, r, t, txinfo); | ||
575 | carl9170_tx_status(ar, skb, success); | ||
576 | } | ||
577 | |||
578 | void carl9170_tx_process_status(struct ar9170 *ar, | ||
579 | const struct carl9170_rsp *cmd) | ||
580 | { | ||
581 | unsigned int i; | ||
582 | |||
583 | for (i = 0; i < cmd->hdr.ext; i++) { | ||
584 | if (WARN_ON(i > ((cmd->hdr.len / 2) + 1))) { | ||
585 | print_hex_dump_bytes("UU:", DUMP_PREFIX_NONE, | ||
586 | (void *) cmd, cmd->hdr.len + 4); | ||
587 | break; | ||
588 | } | ||
589 | |||
590 | __carl9170_tx_process_status(ar, cmd->_tx_status[i].cookie, | ||
591 | cmd->_tx_status[i].info); | ||
592 | } | ||
593 | } | ||
594 | |||
595 | static __le32 carl9170_tx_physet(struct ar9170 *ar, | ||
596 | struct ieee80211_tx_info *info, struct ieee80211_tx_rate *txrate) | ||
597 | { | ||
598 | struct ieee80211_rate *rate = NULL; | ||
599 | u32 power, chains; | ||
600 | __le32 tmp; | ||
601 | |||
602 | tmp = cpu_to_le32(0); | ||
603 | |||
604 | if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) | ||
605 | tmp |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ << | ||
606 | AR9170_TX_PHY_BW_S); | ||
607 | /* this works because 40 MHz is 2 and dup is 3 */ | ||
608 | if (txrate->flags & IEEE80211_TX_RC_DUP_DATA) | ||
609 | tmp |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ_DUP << | ||
610 | AR9170_TX_PHY_BW_S); | ||
611 | |||
612 | if (txrate->flags & IEEE80211_TX_RC_SHORT_GI) | ||
613 | tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_GI); | ||
614 | |||
615 | if (txrate->flags & IEEE80211_TX_RC_MCS) { | ||
616 | u32 r = txrate->idx; | ||
617 | u8 *txpower; | ||
618 | |||
619 | /* heavy clip control */ | ||
620 | tmp |= cpu_to_le32((r & 0x7) << | ||
621 | AR9170_TX_PHY_TX_HEAVY_CLIP_S); | ||
622 | |||
623 | if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) { | ||
624 | if (info->band == IEEE80211_BAND_5GHZ) | ||
625 | txpower = ar->power_5G_ht40; | ||
626 | else | ||
627 | txpower = ar->power_2G_ht40; | ||
628 | } else { | ||
629 | if (info->band == IEEE80211_BAND_5GHZ) | ||
630 | txpower = ar->power_5G_ht20; | ||
631 | else | ||
632 | txpower = ar->power_2G_ht20; | ||
633 | } | ||
634 | |||
635 | power = txpower[r & 7]; | ||
636 | |||
637 | /* +1 dBm for HT40 (tx power values are in 0.5 dBm steps) */ | ||
638 | if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) | ||
639 | power += 2; | ||
640 | |||
641 | r <<= AR9170_TX_PHY_MCS_S; | ||
642 | BUG_ON(r & ~AR9170_TX_PHY_MCS); | ||
643 | |||
644 | tmp |= cpu_to_le32(r & AR9170_TX_PHY_MCS); | ||
645 | tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_HT); | ||
646 | |||
647 | /* | ||
648 | * green field preamble does not work. | ||
649 | * | ||
650 | * if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD) | ||
651 | * tmp |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD); | ||
652 | */ | ||
653 | } else { | ||
654 | u8 *txpower; | ||
655 | u32 mod; | ||
656 | u32 phyrate; | ||
657 | u8 idx = txrate->idx; | ||
658 | |||
659 | if (info->band != IEEE80211_BAND_2GHZ) { | ||
660 | idx += 4; | ||
661 | txpower = ar->power_5G_leg; | ||
662 | mod = AR9170_TX_PHY_MOD_OFDM; | ||
663 | } else { | ||
664 | if (idx < 4) { | ||
665 | txpower = ar->power_2G_cck; | ||
666 | mod = AR9170_TX_PHY_MOD_CCK; | ||
667 | } else { | ||
668 | mod = AR9170_TX_PHY_MOD_OFDM; | ||
669 | txpower = ar->power_2G_ofdm; | ||
670 | } | ||
671 | } | ||
672 | |||
673 | rate = &__carl9170_ratetable[idx]; | ||
674 | |||
675 | phyrate = rate->hw_value & 0xF; | ||
676 | power = txpower[(rate->hw_value & 0x30) >> 4]; | ||
677 | phyrate <<= AR9170_TX_PHY_MCS_S; | ||
678 | |||
679 | tmp |= cpu_to_le32(mod); | ||
680 | tmp |= cpu_to_le32(phyrate); | ||
681 | |||
682 | /* | ||
683 | * short preamble seems to be broken too. | ||
684 | * | ||
685 | * if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) | ||
686 | * tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_PREAMBLE); | ||
687 | */ | ||
688 | } | ||
689 | power <<= AR9170_TX_PHY_TX_PWR_S; | ||
690 | power &= AR9170_TX_PHY_TX_PWR; | ||
691 | tmp |= cpu_to_le32(power); | ||
692 | |||
693 | /* set TX chains */ | ||
694 | if (ar->eeprom.tx_mask == 1) { | ||
695 | chains = AR9170_TX_PHY_TXCHAIN_1; | ||
696 | } else { | ||
697 | chains = AR9170_TX_PHY_TXCHAIN_2; | ||
698 | |||
699 | /* >= 36M legacy OFDM - use only one chain */ | ||
700 | if (rate && rate->bitrate >= 360 && | ||
701 | !(txrate->flags & IEEE80211_TX_RC_MCS)) | ||
702 | chains = AR9170_TX_PHY_TXCHAIN_1; | ||
703 | } | ||
704 | tmp |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_S); | ||
705 | |||
706 | return tmp; | ||
707 | } | ||
708 | |||
709 | static bool carl9170_tx_rts_check(struct ar9170 *ar, | ||
710 | struct ieee80211_tx_rate *rate, | ||
711 | bool ampdu, bool multi) | ||
712 | { | ||
713 | switch (ar->erp_mode) { | ||
714 | case CARL9170_ERP_AUTO: | ||
715 | if (ampdu) | ||
716 | break; | ||
717 | |||
718 | case CARL9170_ERP_MAC80211: | ||
719 | if (!(rate->flags & IEEE80211_TX_RC_USE_RTS_CTS)) | ||
720 | break; | ||
721 | |||
722 | case CARL9170_ERP_RTS: | ||
723 | if (likely(!multi)) | ||
724 | return true; | ||
725 | |||
726 | default: | ||
727 | break; | ||
728 | } | ||
729 | |||
730 | return false; | ||
731 | } | ||
732 | |||
733 | static bool carl9170_tx_cts_check(struct ar9170 *ar, | ||
734 | struct ieee80211_tx_rate *rate) | ||
735 | { | ||
736 | switch (ar->erp_mode) { | ||
737 | case CARL9170_ERP_AUTO: | ||
738 | case CARL9170_ERP_MAC80211: | ||
739 | if (!(rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)) | ||
740 | break; | ||
741 | |||
742 | case CARL9170_ERP_CTS: | ||
743 | return true; | ||
744 | |||
745 | default: | ||
746 | break; | ||
747 | } | ||
748 | |||
749 | return false; | ||
750 | } | ||
751 | |||
752 | static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb) | ||
753 | { | ||
754 | struct ieee80211_hdr *hdr; | ||
755 | struct _carl9170_tx_superframe *txc; | ||
756 | struct carl9170_vif_info *cvif; | ||
757 | struct ieee80211_tx_info *info; | ||
758 | struct ieee80211_tx_rate *txrate; | ||
759 | struct ieee80211_sta *sta; | ||
760 | struct carl9170_tx_info *arinfo; | ||
761 | unsigned int hw_queue; | ||
762 | int i; | ||
763 | u16 keytype = 0; | ||
764 | u16 len, icv = 0; | ||
765 | bool ampdu, no_ack; | ||
766 | |||
767 | BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data)); | ||
768 | BUILD_BUG_ON(sizeof(struct _carl9170_tx_superdesc) != | ||
769 | CARL9170_TX_SUPERDESC_LEN); | ||
770 | |||
771 | BUILD_BUG_ON(sizeof(struct _ar9170_tx_hwdesc) != | ||
772 | AR9170_TX_HWDESC_LEN); | ||
773 | |||
774 | BUILD_BUG_ON(IEEE80211_TX_MAX_RATES < CARL9170_TX_MAX_RATES); | ||
775 | |||
776 | hw_queue = ar9170_qmap[carl9170_get_queue(ar, skb)]; | ||
777 | |||
778 | hdr = (void *)skb->data; | ||
779 | info = IEEE80211_SKB_CB(skb); | ||
780 | len = skb->len; | ||
781 | |||
782 | /* | ||
783 | * Note: If the frame was sent through a monitor interface, | ||
784 | * the ieee80211_vif pointer can be NULL. | ||
785 | */ | ||
786 | if (likely(info->control.vif)) | ||
787 | cvif = (void *) info->control.vif->drv_priv; | ||
788 | else | ||
789 | cvif = NULL; | ||
790 | |||
791 | sta = info->control.sta; | ||
792 | |||
793 | txc = (void *)skb_push(skb, sizeof(*txc)); | ||
794 | memset(txc, 0, sizeof(*txc)); | ||
795 | |||
796 | ampdu = !!(info->flags & IEEE80211_TX_CTL_AMPDU); | ||
797 | no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK); | ||
798 | |||
799 | if (info->control.hw_key) { | ||
800 | icv = info->control.hw_key->icv_len; | ||
801 | |||
802 | switch (info->control.hw_key->cipher) { | ||
803 | case WLAN_CIPHER_SUITE_WEP40: | ||
804 | case WLAN_CIPHER_SUITE_WEP104: | ||
805 | case WLAN_CIPHER_SUITE_TKIP: | ||
806 | keytype = AR9170_TX_MAC_ENCR_RC4; | ||
807 | break; | ||
808 | case WLAN_CIPHER_SUITE_CCMP: | ||
809 | keytype = AR9170_TX_MAC_ENCR_AES; | ||
810 | break; | ||
811 | default: | ||
812 | WARN_ON(1); | ||
813 | goto err_out; | ||
814 | } | ||
815 | } | ||
816 | |||
817 | BUILD_BUG_ON(AR9170_MAX_VIRTUAL_MAC > | ||
818 | ((CARL9170_TX_SUPER_MISC_VIF_ID >> | ||
819 | CARL9170_TX_SUPER_MISC_VIF_ID_S) + 1)); | ||
820 | |||
821 | txc->s.len = cpu_to_le16(len + sizeof(*txc)); | ||
822 | txc->f.length = cpu_to_le16(len + icv + 4); | ||
823 | SET_VAL(CARL9170_TX_SUPER_MISC_VIF_ID, txc->s.misc, | ||
824 | cvif ? cvif->id : 0); | ||
825 | |||
826 | txc->f.mac_control = cpu_to_le16(AR9170_TX_MAC_HW_DURATION | | ||
827 | AR9170_TX_MAC_BACKOFF); | ||
828 | |||
829 | SET_VAL(CARL9170_TX_SUPER_MISC_QUEUE, txc->s.misc, hw_queue); | ||
830 | |||
831 | txc->f.mac_control |= cpu_to_le16(hw_queue << AR9170_TX_MAC_QOS_S); | ||
832 | txc->f.mac_control |= cpu_to_le16(keytype); | ||
833 | txc->f.phy_control = cpu_to_le32(0); | ||
834 | |||
835 | if (no_ack) | ||
836 | txc->f.mac_control |= cpu_to_le16(AR9170_TX_MAC_NO_ACK); | ||
837 | |||
838 | if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) | ||
839 | txc->s.misc |= CARL9170_TX_SUPER_MISC_CAB; | ||
840 | |||
841 | txrate = &info->control.rates[0]; | ||
842 | if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack)) | ||
843 | txc->f.mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS); | ||
844 | else if (carl9170_tx_cts_check(ar, txrate)) | ||
845 | txc->f.mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS); | ||
846 | |||
847 | SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[0], txrate->count); | ||
848 | txc->f.phy_control |= carl9170_tx_physet(ar, info, txrate); | ||
849 | |||
850 | if (info->flags & IEEE80211_TX_CTL_AMPDU) { | ||
851 | for (i = 1; i < CARL9170_TX_MAX_RATES; i++) { | ||
852 | txrate = &info->control.rates[i]; | ||
853 | if (txrate->idx >= 0) | ||
854 | continue; | ||
855 | |||
856 | txrate->idx = 0; | ||
857 | txrate->count = ar->hw->max_rate_tries; | ||
858 | } | ||
859 | } | ||
860 | |||
861 | /* | ||
862 | * NOTE: For the first rate, the ERP & AMPDU flags are directly | ||
863 | * taken from mac_control. For all fallback rates, the firmware | ||
864 | * updates the mac_control flags from the rate info field. | ||
865 | */ | ||
866 | for (i = 1; i < CARL9170_TX_MAX_RATES; i++) { | ||
867 | txrate = &info->control.rates[i]; | ||
868 | if (txrate->idx < 0) | ||
869 | break; | ||
870 | |||
871 | SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i], | ||
872 | txrate->count); | ||
873 | |||
874 | if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack)) | ||
875 | txc->s.ri[i] |= (AR9170_TX_MAC_PROT_RTS << | ||
876 | CARL9170_TX_SUPER_RI_ERP_PROT_S); | ||
877 | else if (carl9170_tx_cts_check(ar, txrate)) | ||
878 | txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS << | ||
879 | CARL9170_TX_SUPER_RI_ERP_PROT_S); | ||
880 | |||
881 | /* | ||
882 | * unaggregated fallback, in case aggregation | ||
883 | * proves to be unsuccessful and unreliable. | ||
884 | */ | ||
885 | if (ampdu && i < 3) | ||
886 | txc->s.ri[i] |= CARL9170_TX_SUPER_RI_AMPDU; | ||
887 | |||
888 | txc->s.rr[i - 1] = carl9170_tx_physet(ar, info, txrate); | ||
889 | } | ||
890 | |||
891 | if (ieee80211_is_probe_resp(hdr->frame_control)) | ||
892 | txc->s.misc |= CARL9170_TX_SUPER_MISC_FILL_IN_TSF; | ||
893 | |||
894 | if (ampdu) { | ||
895 | unsigned int density, factor; | ||
896 | |||
897 | if (unlikely(!sta || !cvif)) | ||
898 | goto err_out; | ||
899 | |||
900 | density = info->control.sta->ht_cap.ampdu_density; | ||
901 | factor = info->control.sta->ht_cap.ampdu_factor; | ||
902 | |||
903 | if (density) { | ||
904 | /* | ||
905 | * Watch out! | ||
906 | * | ||
907 | * Otus uses slightly different density values than | ||
908 | * those from the 802.11n spec. | ||
909 | */ | ||
910 | |||
911 | density = max_t(unsigned int, density + 1, 7u); | ||
912 | } | ||
913 | |||
914 | factor = min_t(unsigned int, 1u, factor); | ||
915 | |||
916 | SET_VAL(CARL9170_TX_SUPER_AMPDU_DENSITY, | ||
917 | txc->s.ampdu_settings, density); | ||
918 | |||
919 | SET_VAL(CARL9170_TX_SUPER_AMPDU_FACTOR, | ||
920 | txc->s.ampdu_settings, factor); | ||
921 | |||
922 | if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS) { | ||
923 | txc->f.mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR); | ||
924 | } else { | ||
925 | /* | ||
926 | * Not sure if it's even possible to aggregate | ||
927 | * non-ht rates with this HW. | ||
928 | */ | ||
929 | WARN_ON_ONCE(1); | ||
930 | } | ||
931 | } | ||
932 | |||
933 | arinfo = (void *)info->rate_driver_data; | ||
934 | arinfo->timeout = jiffies; | ||
935 | arinfo->ar = ar; | ||
936 | kref_init(&arinfo->ref); | ||
937 | return 0; | ||
938 | |||
939 | err_out: | ||
940 | skb_pull(skb, sizeof(*txc)); | ||
941 | return -EINVAL; | ||
942 | } | ||
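carl9170_tx_prepare() starts with a batch of BUILD_BUG_ON() checks so that any drift between the C structs and the descriptor sizes (or the space mac80211 reserves in rate_driver_data) breaks the build instead of corrupting frames at runtime. A minimal sketch of the same compile-time guard, with an invented wire struct (my_wire_header) standing in for the real descriptors:

#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>

struct my_wire_header {			/* invented on-the-wire layout */
	__le16 len;
	__le16 flags;
	u8 payload[8];
} __packed;

#define MY_WIRE_HEADER_LEN	12	/* what the firmware expects */

static inline void my_check_layout(void)
{
	/* fails to compile if the struct no longer matches the wire format */
	BUILD_BUG_ON(sizeof(struct my_wire_header) != MY_WIRE_HEADER_LEN);
}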
943 | |||
944 | static void carl9170_set_immba(struct ar9170 *ar, struct sk_buff *skb) | ||
945 | { | ||
946 | struct _carl9170_tx_superframe *super; | ||
947 | |||
948 | super = (void *) skb->data; | ||
949 | super->f.mac_control |= cpu_to_le16(AR9170_TX_MAC_IMM_BA); | ||
950 | } | ||
951 | |||
952 | static void carl9170_set_ampdu_params(struct ar9170 *ar, struct sk_buff *skb) | ||
953 | { | ||
954 | struct _carl9170_tx_superframe *super; | ||
955 | int tmp; | ||
956 | |||
957 | super = (void *) skb->data; | ||
958 | |||
959 | tmp = (super->s.ampdu_settings & CARL9170_TX_SUPER_AMPDU_DENSITY) << | ||
960 | CARL9170_TX_SUPER_AMPDU_DENSITY_S; | ||
961 | |||
962 | /* | ||
963 | * If you haven't noticed, carl9170_tx_prepare has already filled | ||
964 | * in all ampdu spacing & factor parameters. | ||
965 | * Now is the time to check whether the settings have to be | ||
966 | * updated by the firmware, or if everything is still the same. | ||
967 | * | ||
968 | * There's no sane way to handle different density values with | ||
969 | * this hardware, so we may as well just do the compare in the | ||
970 | * driver. | ||
971 | */ | ||
972 | |||
973 | if (tmp != ar->current_density) { | ||
974 | ar->current_density = tmp; | ||
975 | super->s.ampdu_settings |= | ||
976 | CARL9170_TX_SUPER_AMPDU_COMMIT_DENSITY; | ||
977 | } | ||
978 | |||
979 | tmp = (super->s.ampdu_settings & CARL9170_TX_SUPER_AMPDU_FACTOR) << | ||
980 | CARL9170_TX_SUPER_AMPDU_FACTOR_S; | ||
981 | |||
982 | if (tmp != ar->current_factor) { | ||
983 | ar->current_factor = tmp; | ||
984 | super->s.ampdu_settings |= | ||
985 | CARL9170_TX_SUPER_AMPDU_COMMIT_FACTOR; | ||
986 | } | ||
987 | } | ||
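carl9170_set_ampdu_params() only sets the COMMIT_DENSITY/COMMIT_FACTOR bits when the new values actually differ from what the firmware is currently using, so back-to-back aggregates with identical settings cost nothing extra. A tiny stand-alone sketch of that cache-and-flag-on-change pattern (my_ctx and my_update_density are invented names):

#include <stdbool.h>
#include <stdint.h>

struct my_ctx {
	uint8_t current_density;	/* last value handed to the device */
};

/* returns true if the caller must tell the device about a new density */
static bool my_update_density(struct my_ctx *ctx, uint8_t density)
{
	if (density == ctx->current_density)
		return false;		/* unchanged, no commit needed */

	ctx->current_density = density;
	return true;			/* caller sets its COMMIT flag */
}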
988 | |||
989 | static bool carl9170_tx_rate_check(struct ar9170 *ar, struct sk_buff *_dest, | ||
990 | struct sk_buff *_src) | ||
991 | { | ||
992 | struct _carl9170_tx_superframe *dest, *src; | ||
993 | |||
994 | dest = (void *) _dest->data; | ||
995 | src = (void *) _src->data; | ||
996 | |||
997 | /* | ||
998 | * The mac80211 rate control algorithm expects that all MPDUs in | ||
999 | * an AMPDU share the same tx vectors. | ||
1000 | * This is not really obvious right now, because the hardware | ||
1001 | * does the AMPDU setup according to its own rulebook. | ||
1002 | * Our nicely assembled, strictly monotonically increasing mpdu | ||
1003 | * chains will be broken up, mashed back together... | ||
1004 | */ | ||
1005 | |||
1006 | return (dest->f.phy_control == src->f.phy_control); | ||
1007 | } | ||
1008 | |||
1009 | static void carl9170_tx_ampdu(struct ar9170 *ar) | ||
1010 | { | ||
1011 | struct sk_buff_head agg; | ||
1012 | struct carl9170_sta_tid *tid_info; | ||
1013 | struct sk_buff *skb, *first; | ||
1014 | unsigned int i = 0, done_ampdus = 0; | ||
1015 | u16 seq, queue, tmpssn; | ||
1016 | |||
1017 | atomic_inc(&ar->tx_ampdu_scheduler); | ||
1018 | ar->tx_ampdu_schedule = false; | ||
1019 | |||
1020 | if (atomic_read(&ar->tx_ampdu_upload)) | ||
1021 | return; | ||
1022 | |||
1023 | if (!ar->tx_ampdu_list_len) | ||
1024 | return; | ||
1025 | |||
1026 | __skb_queue_head_init(&agg); | ||
1027 | |||
1028 | rcu_read_lock(); | ||
1029 | tid_info = rcu_dereference(ar->tx_ampdu_iter); | ||
1030 | if (WARN_ON_ONCE(!tid_info)) { | ||
1031 | rcu_read_unlock(); | ||
1032 | return; | ||
1033 | } | ||
1034 | |||
1035 | retry: | ||
1036 | list_for_each_entry_continue_rcu(tid_info, &ar->tx_ampdu_list, list) { | ||
1037 | i++; | ||
1038 | |||
1039 | if (tid_info->state < CARL9170_TID_STATE_PROGRESS) | ||
1040 | continue; | ||
1041 | |||
1042 | queue = TID_TO_WME_AC(tid_info->tid); | ||
1043 | |||
1044 | spin_lock_bh(&tid_info->lock); | ||
1045 | if (tid_info->state != CARL9170_TID_STATE_XMIT) { | ||
1046 | first = skb_peek(&tid_info->queue); | ||
1047 | if (first) { | ||
1048 | struct ieee80211_tx_info *txinfo; | ||
1049 | struct carl9170_tx_info *arinfo; | ||
1050 | |||
1051 | txinfo = IEEE80211_SKB_CB(first); | ||
1052 | arinfo = (void *) txinfo->rate_driver_data; | ||
1053 | |||
1054 | if (time_is_after_jiffies(arinfo->timeout + | ||
1055 | msecs_to_jiffies(CARL9170_QUEUE_TIMEOUT)) | ||
1056 | == true) | ||
1057 | goto processed; | ||
1058 | |||
1059 | /* | ||
1060 | * We've been waiting for the frame which | ||
1061 | * matches "snx" (start sequence of the | ||
1062 | * next aggregate) for some time now. | ||
1063 | * | ||
1064 | * But it never arrived. Therefore | ||
1065 | * jump to the next available frame | ||
1066 | * and kick-start the transmission. | ||
1067 | * | ||
1068 | * Note: This might induce odd latency | ||
1069 | * spikes because the receiver will be | ||
1070 | * waiting for the lost frame too. | ||
1071 | */ | ||
1072 | ar->tx_ampdu_timeout++; | ||
1073 | |||
1074 | tid_info->snx = carl9170_get_seq(first); | ||
1075 | tid_info->state = CARL9170_TID_STATE_XMIT; | ||
1076 | } else { | ||
1077 | goto processed; | ||
1078 | } | ||
1079 | } | ||
1080 | |||
1081 | tid_info->counter++; | ||
1082 | first = skb_peek(&tid_info->queue); | ||
1083 | tmpssn = carl9170_get_seq(first); | ||
1084 | seq = tid_info->snx; | ||
1085 | |||
1086 | if (unlikely(tmpssn != seq)) { | ||
1087 | tid_info->state = CARL9170_TID_STATE_IDLE; | ||
1088 | |||
1089 | goto processed; | ||
1090 | } | ||
1091 | |||
1092 | while ((skb = skb_peek(&tid_info->queue))) { | ||
1093 | /* strict 0, 1, ..., n - 1, n frame sequence order */ | ||
1094 | if (unlikely(carl9170_get_seq(skb) != seq)) | ||
1095 | break; | ||
1096 | |||
1097 | /* don't upload more than AMPDU FACTOR allows. */ | ||
1098 | if (unlikely(SEQ_DIFF(tid_info->snx, tid_info->bsn) >= | ||
1099 | (tid_info->max - 1))) | ||
1100 | break; | ||
1101 | |||
1102 | if (!carl9170_tx_rate_check(ar, skb, first)) | ||
1103 | break; | ||
1104 | |||
1105 | atomic_inc(&ar->tx_ampdu_upload); | ||
1106 | tid_info->snx = seq = SEQ_NEXT(seq); | ||
1107 | __skb_unlink(skb, &tid_info->queue); | ||
1108 | |||
1109 | __skb_queue_tail(&agg, skb); | ||
1110 | |||
1111 | if (skb_queue_len(&agg) >= CARL9170_NUM_TX_AGG_MAX) | ||
1112 | break; | ||
1113 | } | ||
1114 | |||
1115 | if (skb_queue_empty(&tid_info->queue) || | ||
1116 | carl9170_get_seq(skb_peek(&tid_info->queue)) != | ||
1117 | tid_info->snx) { | ||
1118 | /* | ||
1119 | * stop the TID if A-MPDU frames are still missing, | ||
1120 | * or whenever the queue is empty. | ||
1121 | */ | ||
1122 | |||
1123 | tid_info->state = CARL9170_TID_STATE_IDLE; | ||
1124 | } | ||
1125 | done_ampdus++; | ||
1126 | |||
1127 | processed: | ||
1128 | spin_unlock_bh(&tid_info->lock); | ||
1129 | |||
1130 | if (skb_queue_empty(&agg)) | ||
1131 | continue; | ||
1132 | |||
1133 | /* apply ampdu spacing & factor settings */ | ||
1134 | carl9170_set_ampdu_params(ar, skb_peek(&agg)); | ||
1135 | |||
1136 | /* set aggregation push bit */ | ||
1137 | carl9170_set_immba(ar, skb_peek_tail(&agg)); | ||
1138 | |||
1139 | spin_lock_bh(&ar->tx_pending[queue].lock); | ||
1140 | skb_queue_splice_tail_init(&agg, &ar->tx_pending[queue]); | ||
1141 | spin_unlock_bh(&ar->tx_pending[queue].lock); | ||
1142 | ar->tx_schedule = true; | ||
1143 | } | ||
1144 | if ((done_ampdus++ == 0) && (i++ == 0)) | ||
1145 | goto retry; | ||
1146 | |||
1147 | rcu_assign_pointer(ar->tx_ampdu_iter, tid_info); | ||
1148 | rcu_read_unlock(); | ||
1149 | } | ||
1150 | |||
1151 | static struct sk_buff *carl9170_tx_pick_skb(struct ar9170 *ar, | ||
1152 | struct sk_buff_head *queue) | ||
1153 | { | ||
1154 | struct sk_buff *skb; | ||
1155 | struct ieee80211_tx_info *info; | ||
1156 | struct carl9170_tx_info *arinfo; | ||
1157 | |||
1158 | BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data)); | ||
1159 | |||
1160 | spin_lock_bh(&queue->lock); | ||
1161 | skb = skb_peek(queue); | ||
1162 | if (unlikely(!skb)) | ||
1163 | goto err_unlock; | ||
1164 | |||
1165 | if (carl9170_alloc_dev_space(ar, skb)) | ||
1166 | goto err_unlock; | ||
1167 | |||
1168 | __skb_unlink(skb, queue); | ||
1169 | spin_unlock_bh(&queue->lock); | ||
1170 | |||
1171 | info = IEEE80211_SKB_CB(skb); | ||
1172 | arinfo = (void *) info->rate_driver_data; | ||
1173 | |||
1174 | arinfo->timeout = jiffies; | ||
1175 | |||
1176 | /* | ||
1177 | * increase ref count to "2". | ||
1178 | * Ref counting is the easiest way to solve the race between | ||
1179 | * the urb's completion routine: carl9170_tx_callback and the | ||
1180 | * wlan tx status functions: carl9170_tx_status/janitor. | ||
1181 | */ | ||
1182 | carl9170_tx_get_skb(skb); | ||
1183 | |||
1184 | return skb; | ||
1185 | |||
1186 | err_unlock: | ||
1187 | spin_unlock_bh(&queue->lock); | ||
1188 | return NULL; | ||
1189 | } | ||
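The comment above describes the core trick of the tx path: the skb's driver info holds a kref that is raised to two, so the USB completion callback and the wlan tx-status/janitor path can each drop their reference independently, and only the last one actually releases the frame. A generic sketch of that pattern with the stock kref API (my_frame and its helpers are invented names, not driver functions):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_frame {
	struct kref ref;
	/* ... frame payload and status ... */
};

static void my_frame_release(struct kref *ref)
{
	struct my_frame *f = container_of(ref, struct my_frame, ref);

	kfree(f);
}

/* called before the frame is handed to two independent completion paths */
static void my_frame_share(struct my_frame *f)
{
	kref_get(&f->ref);		/* refcount: 1 -> 2 */
}

/* each path calls this when it is done; the last caller frees the frame */
static void my_frame_put(struct my_frame *f)
{
	kref_put(&f->ref, my_frame_release);
}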
1190 | |||
1191 | void carl9170_tx_drop(struct ar9170 *ar, struct sk_buff *skb) | ||
1192 | { | ||
1193 | struct _carl9170_tx_superframe *super; | ||
1194 | uint8_t q = 0; | ||
1195 | |||
1196 | ar->tx_dropped++; | ||
1197 | |||
1198 | super = (void *)skb->data; | ||
1199 | SET_VAL(CARL9170_TX_SUPER_MISC_QUEUE, q, | ||
1200 | ar9170_qmap[carl9170_get_queue(ar, skb)]); | ||
1201 | __carl9170_tx_process_status(ar, super->s.cookie, q); | ||
1202 | } | ||
1203 | |||
1204 | static void carl9170_tx(struct ar9170 *ar) | ||
1205 | { | ||
1206 | struct sk_buff *skb; | ||
1207 | unsigned int i, q; | ||
1208 | bool schedule_garbagecollector = false; | ||
1209 | |||
1210 | ar->tx_schedule = false; | ||
1211 | |||
1212 | if (unlikely(!IS_STARTED(ar))) | ||
1213 | return; | ||
1214 | |||
1215 | carl9170_usb_handle_tx_err(ar); | ||
1216 | |||
1217 | for (i = 0; i < ar->hw->queues; i++) { | ||
1218 | while (!skb_queue_empty(&ar->tx_pending[i])) { | ||
1219 | skb = carl9170_tx_pick_skb(ar, &ar->tx_pending[i]); | ||
1220 | if (unlikely(!skb)) | ||
1221 | break; | ||
1222 | |||
1223 | atomic_inc(&ar->tx_total_pending); | ||
1224 | |||
1225 | q = __carl9170_get_queue(ar, i); | ||
1226 | /* | ||
1227 | * NB: tx_status[i] vs. tx_status[q], | ||
1228 | * TODO: Move into pick_skb or alloc_dev_space. | ||
1229 | */ | ||
1230 | skb_queue_tail(&ar->tx_status[q], skb); | ||
1231 | |||
1232 | carl9170_usb_tx(ar, skb); | ||
1233 | schedule_garbagecollector = true; | ||
1234 | } | ||
1235 | } | ||
1236 | |||
1237 | if (!schedule_garbagecollector) | ||
1238 | return; | ||
1239 | |||
1240 | ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor, | ||
1241 | msecs_to_jiffies(CARL9170_TX_TIMEOUT)); | ||
1242 | } | ||
1243 | |||
1244 | static bool carl9170_tx_ampdu_queue(struct ar9170 *ar, | ||
1245 | struct ieee80211_sta *sta, struct sk_buff *skb) | ||
1246 | { | ||
1247 | struct carl9170_sta_info *sta_info; | ||
1248 | struct carl9170_sta_tid *agg; | ||
1249 | struct sk_buff *iter; | ||
1250 | unsigned int max; | ||
1251 | u16 tid, seq, qseq, off; | ||
1252 | bool run = false; | ||
1253 | |||
1254 | tid = carl9170_get_tid(skb); | ||
1255 | seq = carl9170_get_seq(skb); | ||
1256 | sta_info = (void *) sta->drv_priv; | ||
1257 | |||
1258 | rcu_read_lock(); | ||
1259 | agg = rcu_dereference(sta_info->agg[tid]); | ||
1260 | max = sta_info->ampdu_max_len; | ||
1261 | |||
1262 | if (!agg) | ||
1263 | goto err_unlock_rcu; | ||
1264 | |||
1265 | spin_lock_bh(&agg->lock); | ||
1266 | if (unlikely(agg->state < CARL9170_TID_STATE_IDLE)) | ||
1267 | goto err_unlock; | ||
1268 | |||
1269 | /* check if sequence is within the BA window */ | ||
1270 | if (unlikely(!BAW_WITHIN(agg->bsn, CARL9170_BAW_BITS, seq))) | ||
1271 | goto err_unlock; | ||
1272 | |||
1273 | if (WARN_ON_ONCE(!BAW_WITHIN(agg->snx, CARL9170_BAW_BITS, seq))) | ||
1274 | goto err_unlock; | ||
1275 | |||
1276 | off = SEQ_DIFF(seq, agg->bsn); | ||
1277 | if (WARN_ON_ONCE(test_and_set_bit(off, agg->bitmap))) | ||
1278 | goto err_unlock; | ||
1279 | |||
1280 | if (likely(BAW_WITHIN(agg->hsn, CARL9170_BAW_BITS, seq))) { | ||
1281 | __skb_queue_tail(&agg->queue, skb); | ||
1282 | agg->hsn = seq; | ||
1283 | goto queued; | ||
1284 | } | ||
1285 | |||
1286 | skb_queue_reverse_walk(&agg->queue, iter) { | ||
1287 | qseq = carl9170_get_seq(iter); | ||
1288 | |||
1289 | if (BAW_WITHIN(qseq, CARL9170_BAW_BITS, seq)) { | ||
1290 | __skb_queue_after(&agg->queue, iter, skb); | ||
1291 | goto queued; | ||
1292 | } | ||
1293 | } | ||
1294 | |||
1295 | __skb_queue_head(&agg->queue, skb); | ||
1296 | queued: | ||
1297 | |||
1298 | if (unlikely(agg->state != CARL9170_TID_STATE_XMIT)) { | ||
1299 | if (agg->snx == carl9170_get_seq(skb_peek(&agg->queue))) { | ||
1300 | agg->state = CARL9170_TID_STATE_XMIT; | ||
1301 | run = true; | ||
1302 | } | ||
1303 | } | ||
1304 | |||
1305 | spin_unlock_bh(&agg->lock); | ||
1306 | rcu_read_unlock(); | ||
1307 | |||
1308 | return run; | ||
1309 | |||
1310 | err_unlock: | ||
1311 | spin_unlock_bh(&agg->lock); | ||
1312 | |||
1313 | err_unlock_rcu: | ||
1314 | rcu_read_unlock(); | ||
1315 | carl9170_tx_status(ar, skb, false); | ||
1316 | ar->tx_dropped++; | ||
1317 | return false; | ||
1318 | } | ||
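All of the BAW_WITHIN()/SEQ_DIFF() checks above operate on 802.11 sequence numbers, which are 12-bit counters that wrap at 4096; both the distance between two frames and the "inside the block-ack window" test therefore have to be computed modulo 4096. A self-contained sketch of that arithmetic (seq_diff and baw_within here are local helpers for illustration, not the driver's macros):

#include <stdbool.h>
#include <stdint.h>

#define SEQ_MASK	0x0fff		/* 802.11 sequence numbers are 12 bit */

/* how far 'seq' lies ahead of 'start', modulo 4096 */
static uint16_t seq_diff(uint16_t seq, uint16_t start)
{
	return (seq - start) & SEQ_MASK;
}

/* true if 'seq' falls inside a window of 'bits' frames starting at 'start' */
static bool baw_within(uint16_t start, uint16_t bits, uint16_t seq)
{
	return seq_diff(seq, start) < bits;
}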
1319 | |||
1320 | int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) | ||
1321 | { | ||
1322 | struct ar9170 *ar = hw->priv; | ||
1323 | struct ieee80211_tx_info *info; | ||
1324 | struct ieee80211_sta *sta; | ||
1325 | bool run; | ||
1326 | |||
1327 | if (unlikely(!IS_STARTED(ar))) | ||
1328 | goto err_free; | ||
1329 | |||
1330 | info = IEEE80211_SKB_CB(skb); | ||
1331 | sta = info->control.sta; | ||
1332 | |||
1333 | if (unlikely(carl9170_tx_prepare(ar, skb))) | ||
1334 | goto err_free; | ||
1335 | |||
1336 | carl9170_tx_accounting(ar, skb); | ||
1337 | /* | ||
1338 | * from now on, one has to use carl9170_tx_status to free | ||
1339 | * all resources which are associated with the frame. | ||
1340 | */ | ||
1341 | |||
1342 | if (info->flags & IEEE80211_TX_CTL_AMPDU) { | ||
1343 | if (WARN_ON_ONCE(!sta)) | ||
1344 | goto err_free; | ||
1345 | |||
1346 | run = carl9170_tx_ampdu_queue(ar, sta, skb); | ||
1347 | if (run) | ||
1348 | carl9170_tx_ampdu(ar); | ||
1349 | |||
1350 | } else { | ||
1351 | unsigned int queue = skb_get_queue_mapping(skb); | ||
1352 | |||
1353 | skb_queue_tail(&ar->tx_pending[queue], skb); | ||
1354 | } | ||
1355 | |||
1356 | carl9170_tx(ar); | ||
1357 | return NETDEV_TX_OK; | ||
1358 | |||
1359 | err_free: | ||
1360 | ar->tx_dropped++; | ||
1361 | dev_kfree_skb_any(skb); | ||
1362 | return NETDEV_TX_OK; | ||
1363 | } | ||
1364 | |||
1365 | void carl9170_tx_scheduler(struct ar9170 *ar) | ||
1366 | { | ||
1367 | |||
1368 | if (ar->tx_ampdu_schedule) | ||
1369 | carl9170_tx_ampdu(ar); | ||
1370 | |||
1371 | if (ar->tx_schedule) | ||
1372 | carl9170_tx(ar); | ||
1373 | } | ||
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c new file mode 100644 index 000000000000..fde918d0120b --- /dev/null +++ b/drivers/net/wireless/ath/carl9170/usb.c | |||
@@ -0,0 +1,1137 @@ | |||
1 | /* | ||
2 | * Atheros CARL9170 driver | ||
3 | * | ||
4 | * USB - frontend | ||
5 | * | ||
6 | * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> | ||
7 | * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; see the file COPYING. If not, see | ||
21 | * http://www.gnu.org/licenses/. | ||
22 | * | ||
23 | * This file incorporates work covered by the following copyright and | ||
24 | * permission notice: | ||
25 | * Copyright (c) 2007-2008 Atheros Communications, Inc. | ||
26 | * | ||
27 | * Permission to use, copy, modify, and/or distribute this software for any | ||
28 | * purpose with or without fee is hereby granted, provided that the above | ||
29 | * copyright notice and this permission notice appear in all copies. | ||
30 | * | ||
31 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
32 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
33 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
34 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
35 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
36 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
37 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
38 | */ | ||
39 | |||
40 | #include <linux/module.h> | ||
41 | #include <linux/slab.h> | ||
42 | #include <linux/usb.h> | ||
43 | #include <linux/firmware.h> | ||
44 | #include <linux/etherdevice.h> | ||
45 | #include <linux/device.h> | ||
46 | #include <net/mac80211.h> | ||
47 | #include "carl9170.h" | ||
48 | #include "cmd.h" | ||
49 | #include "hw.h" | ||
50 | #include "fwcmd.h" | ||
51 | |||
52 | MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>"); | ||
53 | MODULE_AUTHOR("Christian Lamparter <chunkeey@googlemail.com>"); | ||
54 | MODULE_LICENSE("GPL"); | ||
55 | MODULE_DESCRIPTION("Atheros AR9170 802.11n USB wireless"); | ||
56 | MODULE_FIRMWARE(CARL9170FW_NAME); | ||
57 | MODULE_ALIAS("ar9170usb"); | ||
58 | MODULE_ALIAS("arusb_lnx"); | ||
59 | |||
60 | /* | ||
61 | * Note: | ||
62 | * | ||
63 | * Always update our wiki's device list (located at: | ||
64 | * http://wireless.kernel.org/en/users/Drivers/ar9170/devices ), | ||
65 | * whenever you add a new device. | ||
66 | */ | ||
67 | static struct usb_device_id carl9170_usb_ids[] = { | ||
68 | /* Atheros 9170 */ | ||
69 | { USB_DEVICE(0x0cf3, 0x9170) }, | ||
70 | /* Atheros TG121N */ | ||
71 | { USB_DEVICE(0x0cf3, 0x1001) }, | ||
72 | /* TP-Link TL-WN821N v2 */ | ||
73 | { USB_DEVICE(0x0cf3, 0x1002), .driver_info = CARL9170_WPS_BUTTON | | ||
74 | CARL9170_ONE_LED }, | ||
75 | /* 3Com Dual Band 802.11n USB Adapter */ | ||
76 | { USB_DEVICE(0x0cf3, 0x1010) }, | ||
77 | /* H3C Dual Band 802.11n USB Adapter */ | ||
78 | { USB_DEVICE(0x0cf3, 0x1011) }, | ||
79 | /* Cace Airpcap NX */ | ||
80 | { USB_DEVICE(0xcace, 0x0300) }, | ||
81 | /* D-Link DWA 160 A1 */ | ||
82 | { USB_DEVICE(0x07d1, 0x3c10) }, | ||
83 | /* D-Link DWA 160 A2 */ | ||
84 | { USB_DEVICE(0x07d1, 0x3a09) }, | ||
85 | /* Netgear WNA1000 */ | ||
86 | { USB_DEVICE(0x0846, 0x9040) }, | ||
87 | /* Netgear WNDA3100 */ | ||
88 | { USB_DEVICE(0x0846, 0x9010) }, | ||
89 | /* Netgear WN111 v2 */ | ||
90 | { USB_DEVICE(0x0846, 0x9001), .driver_info = CARL9170_ONE_LED }, | ||
91 | /* Zydas ZD1221 */ | ||
92 | { USB_DEVICE(0x0ace, 0x1221) }, | ||
93 | /* Proxim ORiNOCO 802.11n USB */ | ||
94 | { USB_DEVICE(0x1435, 0x0804) }, | ||
95 | /* WNC Generic 11n USB Dongle */ | ||
96 | { USB_DEVICE(0x1435, 0x0326) }, | ||
97 | /* ZyXEL NWD271N */ | ||
98 | { USB_DEVICE(0x0586, 0x3417) }, | ||
99 | /* Z-Com UB81 BG */ | ||
100 | { USB_DEVICE(0x0cde, 0x0023) }, | ||
101 | /* Z-Com UB82 ABG */ | ||
102 | { USB_DEVICE(0x0cde, 0x0026) }, | ||
103 | /* Sphairon Homelink 1202 */ | ||
104 | { USB_DEVICE(0x0cde, 0x0027) }, | ||
105 | /* Arcadyan WN7512 */ | ||
106 | { USB_DEVICE(0x083a, 0xf522) }, | ||
107 | /* Planex GWUS300 */ | ||
108 | { USB_DEVICE(0x2019, 0x5304) }, | ||
109 | /* IO-Data WNGDNUS2 */ | ||
110 | { USB_DEVICE(0x04bb, 0x093f) }, | ||
111 | /* NEC WL300NU-G */ | ||
112 | { USB_DEVICE(0x0409, 0x0249) }, | ||
113 | /* AVM FRITZ!WLAN USB Stick N */ | ||
114 | { USB_DEVICE(0x057c, 0x8401) }, | ||
115 | /* AVM FRITZ!WLAN USB Stick N 2.4 */ | ||
116 | { USB_DEVICE(0x057c, 0x8402) }, | ||
117 | /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */ | ||
118 | { USB_DEVICE(0x1668, 0x1200) }, | ||
119 | |||
120 | /* terminate */ | ||
121 | {} | ||
122 | }; | ||
123 | MODULE_DEVICE_TABLE(usb, carl9170_usb_ids); | ||
124 | |||
125 | static void carl9170_usb_submit_data_urb(struct ar9170 *ar) | ||
126 | { | ||
127 | struct urb *urb; | ||
128 | int err; | ||
129 | |||
130 | if (atomic_inc_return(&ar->tx_anch_urbs) > AR9170_NUM_TX_URBS) | ||
131 | goto err_acc; | ||
132 | |||
133 | urb = usb_get_from_anchor(&ar->tx_wait); | ||
134 | if (!urb) | ||
135 | goto err_acc; | ||
136 | |||
137 | usb_anchor_urb(urb, &ar->tx_anch); | ||
138 | |||
139 | err = usb_submit_urb(urb, GFP_ATOMIC); | ||
140 | if (unlikely(err)) { | ||
141 | if (net_ratelimit()) { | ||
142 | dev_err(&ar->udev->dev, "tx submit failed (%d)\n", | ||
143 | urb->status); | ||
144 | } | ||
145 | |||
146 | usb_unanchor_urb(urb); | ||
147 | usb_anchor_urb(urb, &ar->tx_err); | ||
148 | } | ||
149 | |||
150 | usb_free_urb(urb); | ||
151 | |||
152 | if (likely(err == 0)) | ||
153 | return; | ||
154 | |||
155 | err_acc: | ||
156 | atomic_dec(&ar->tx_anch_urbs); | ||
157 | } | ||
158 | |||
159 | static void carl9170_usb_tx_data_complete(struct urb *urb) | ||
160 | { | ||
161 | struct ar9170 *ar = (struct ar9170 *) | ||
162 | usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); | ||
163 | |||
164 | if (WARN_ON_ONCE(!ar)) { | ||
165 | dev_kfree_skb_irq(urb->context); | ||
166 | return; | ||
167 | } | ||
168 | |||
169 | atomic_dec(&ar->tx_anch_urbs); | ||
170 | |||
171 | switch (urb->status) { | ||
172 | /* everything is fine */ | ||
173 | case 0: | ||
174 | carl9170_tx_callback(ar, (void *)urb->context); | ||
175 | break; | ||
176 | |||
177 | /* disconnect */ | ||
178 | case -ENOENT: | ||
179 | case -ECONNRESET: | ||
180 | case -ENODEV: | ||
181 | case -ESHUTDOWN: | ||
182 | /* | ||
183 | * Defer the frame clean-up to the tasklet worker. | ||
184 | * This is necessary, because carl9170_tx_drop | ||
186 | * does not work in hard interrupt (irqsave) context. | ||
186 | */ | ||
187 | usb_anchor_urb(urb, &ar->tx_err); | ||
188 | return; | ||
189 | |||
190 | /* a random transmission error has occurred? */ | ||
191 | default: | ||
192 | if (net_ratelimit()) { | ||
193 | dev_err(&ar->udev->dev, "tx failed (%d)\n", | ||
194 | urb->status); | ||
195 | } | ||
196 | |||
197 | usb_anchor_urb(urb, &ar->tx_err); | ||
198 | break; | ||
199 | } | ||
200 | |||
201 | if (likely(IS_STARTED(ar))) | ||
202 | carl9170_usb_submit_data_urb(ar); | ||
203 | } | ||
204 | |||
205 | static int carl9170_usb_submit_cmd_urb(struct ar9170 *ar) | ||
206 | { | ||
207 | struct urb *urb; | ||
208 | int err; | ||
209 | |||
210 | if (atomic_inc_return(&ar->tx_cmd_urbs) != 1) { | ||
211 | atomic_dec(&ar->tx_cmd_urbs); | ||
212 | return 0; | ||
213 | } | ||
214 | |||
215 | urb = usb_get_from_anchor(&ar->tx_cmd); | ||
216 | if (!urb) { | ||
217 | atomic_dec(&ar->tx_cmd_urbs); | ||
218 | return 0; | ||
219 | } | ||
220 | |||
221 | usb_anchor_urb(urb, &ar->tx_anch); | ||
222 | err = usb_submit_urb(urb, GFP_ATOMIC); | ||
223 | if (unlikely(err)) { | ||
224 | usb_unanchor_urb(urb); | ||
225 | atomic_dec(&ar->tx_cmd_urbs); | ||
226 | } | ||
227 | usb_free_urb(urb); | ||
228 | |||
229 | return err; | ||
230 | } | ||
231 | |||
232 | static void carl9170_usb_cmd_complete(struct urb *urb) | ||
233 | { | ||
234 | struct ar9170 *ar = urb->context; | ||
235 | int err = 0; | ||
236 | |||
237 | if (WARN_ON_ONCE(!ar)) | ||
238 | return; | ||
239 | |||
240 | atomic_dec(&ar->tx_cmd_urbs); | ||
241 | |||
242 | switch (urb->status) { | ||
243 | /* everything is fine */ | ||
244 | case 0: | ||
245 | break; | ||
246 | |||
247 | /* disconnect */ | ||
248 | case -ENOENT: | ||
249 | case -ECONNRESET: | ||
250 | case -ENODEV: | ||
251 | case -ESHUTDOWN: | ||
252 | return; | ||
253 | |||
254 | default: | ||
255 | err = urb->status; | ||
256 | break; | ||
257 | } | ||
258 | |||
259 | if (!IS_INITIALIZED(ar)) | ||
260 | return; | ||
261 | |||
262 | if (err) | ||
263 | dev_err(&ar->udev->dev, "submit cmd cb failed (%d).\n", err); | ||
264 | |||
265 | err = carl9170_usb_submit_cmd_urb(ar); | ||
266 | if (err) | ||
267 | dev_err(&ar->udev->dev, "submit cmd failed (%d).\n", err); | ||
268 | } | ||
269 | |||
270 | static void carl9170_usb_rx_irq_complete(struct urb *urb) | ||
271 | { | ||
272 | struct ar9170 *ar = urb->context; | ||
273 | |||
274 | if (WARN_ON_ONCE(!ar)) | ||
275 | return; | ||
276 | |||
277 | switch (urb->status) { | ||
278 | /* everything is fine */ | ||
279 | case 0: | ||
280 | break; | ||
281 | |||
282 | /* disconnect */ | ||
283 | case -ENOENT: | ||
284 | case -ECONNRESET: | ||
285 | case -ENODEV: | ||
286 | case -ESHUTDOWN: | ||
287 | return; | ||
288 | |||
289 | default: | ||
290 | goto resubmit; | ||
291 | } | ||
292 | |||
293 | carl9170_handle_command_response(ar, urb->transfer_buffer, | ||
294 | urb->actual_length); | ||
295 | |||
296 | resubmit: | ||
297 | usb_anchor_urb(urb, &ar->rx_anch); | ||
298 | if (unlikely(usb_submit_urb(urb, GFP_ATOMIC))) | ||
299 | usb_unanchor_urb(urb); | ||
300 | } | ||
301 | |||
302 | static int carl9170_usb_submit_rx_urb(struct ar9170 *ar, gfp_t gfp) | ||
303 | { | ||
304 | struct urb *urb; | ||
305 | int err = 0, runs = 0; | ||
306 | |||
307 | while ((atomic_read(&ar->rx_anch_urbs) < AR9170_NUM_RX_URBS) && | ||
308 | (runs++ < AR9170_NUM_RX_URBS)) { | ||
309 | err = -ENOSPC; | ||
310 | urb = usb_get_from_anchor(&ar->rx_pool); | ||
311 | if (urb) { | ||
312 | usb_anchor_urb(urb, &ar->rx_anch); | ||
313 | err = usb_submit_urb(urb, gfp); | ||
314 | if (unlikely(err)) { | ||
315 | usb_unanchor_urb(urb); | ||
316 | usb_anchor_urb(urb, &ar->rx_pool); | ||
317 | } else { | ||
318 | atomic_dec(&ar->rx_pool_urbs); | ||
319 | atomic_inc(&ar->rx_anch_urbs); | ||
320 | } | ||
321 | usb_free_urb(urb); | ||
322 | } | ||
323 | } | ||
324 | |||
325 | return err; | ||
326 | } | ||
327 | |||
328 | static void carl9170_usb_rx_work(struct ar9170 *ar) | ||
329 | { | ||
330 | struct urb *urb; | ||
331 | int i; | ||
332 | |||
333 | for (i = 0; i < AR9170_NUM_RX_URBS_POOL; i++) { | ||
334 | urb = usb_get_from_anchor(&ar->rx_work); | ||
335 | if (!urb) | ||
336 | break; | ||
337 | |||
338 | atomic_dec(&ar->rx_work_urbs); | ||
339 | if (IS_INITIALIZED(ar)) { | ||
340 | carl9170_rx(ar, urb->transfer_buffer, | ||
341 | urb->actual_length); | ||
342 | } | ||
343 | |||
344 | usb_anchor_urb(urb, &ar->rx_pool); | ||
345 | atomic_inc(&ar->rx_pool_urbs); | ||
346 | |||
347 | usb_free_urb(urb); | ||
348 | |||
349 | carl9170_usb_submit_rx_urb(ar, GFP_ATOMIC); | ||
350 | } | ||
351 | } | ||
352 | |||
353 | void carl9170_usb_handle_tx_err(struct ar9170 *ar) | ||
354 | { | ||
355 | struct urb *urb; | ||
356 | |||
357 | while ((urb = usb_get_from_anchor(&ar->tx_err))) { | ||
358 | struct sk_buff *skb = (void *)urb->context; | ||
359 | |||
360 | carl9170_tx_drop(ar, skb); | ||
361 | carl9170_tx_callback(ar, skb); | ||
362 | usb_free_urb(urb); | ||
363 | } | ||
364 | } | ||
365 | |||
366 | static void carl9170_usb_tasklet(unsigned long data) | ||
367 | { | ||
368 | struct ar9170 *ar = (struct ar9170 *) data; | ||
369 | |||
370 | carl9170_usb_rx_work(ar); | ||
371 | |||
372 | /* | ||
373 | * Strictly speaking: The tx scheduler is not part of the USB system. | ||
374 | * But the rx worker returns frames back to the mac80211-stack and | ||
375 | * this is the _perfect_ place to generate the next transmissions. | ||
376 | */ | ||
377 | if (IS_STARTED(ar)) | ||
378 | carl9170_tx_scheduler(ar); | ||
379 | } | ||
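The tasklet is what moves the heavy rx processing and the tx scheduler out of the URB completion handlers (hard-irq context) into softirq context. A minimal sketch of the classic tasklet API used here, wired to an invented my_dev structure:

#include <linux/interrupt.h>

struct my_dev {
	struct tasklet_struct bh;
	/* ... rx queues, tx state ... */
};

static void my_bh(unsigned long data)
{
	struct my_dev *dev = (struct my_dev *)data;

	/* deferred work runs here, in softirq context */
	(void)dev;
}

static void my_dev_init(struct my_dev *dev)
{
	tasklet_init(&dev->bh, my_bh, (unsigned long)dev);
}

/* from an URB completion handler (hard-irq context) */
static void my_dev_kick(struct my_dev *dev)
{
	tasklet_hi_schedule(&dev->bh);	/* or tasklet_schedule() */
}

static void my_dev_teardown(struct my_dev *dev)
{
	tasklet_kill(&dev->bh);		/* waits for a running tasklet */
}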
380 | |||
381 | static void carl9170_usb_rx_complete(struct urb *urb) | ||
382 | { | ||
383 | struct ar9170 *ar = (struct ar9170 *)urb->context; | ||
384 | int err; | ||
385 | |||
386 | if (WARN_ON_ONCE(!ar)) | ||
387 | return; | ||
388 | |||
389 | atomic_dec(&ar->rx_anch_urbs); | ||
390 | |||
391 | switch (urb->status) { | ||
392 | case 0: | ||
393 | /* rx path */ | ||
394 | usb_anchor_urb(urb, &ar->rx_work); | ||
395 | atomic_inc(&ar->rx_work_urbs); | ||
396 | break; | ||
397 | |||
398 | case -ENOENT: | ||
399 | case -ECONNRESET: | ||
400 | case -ENODEV: | ||
401 | case -ESHUTDOWN: | ||
402 | /* handle disconnect events */ | ||
403 | return; | ||
404 | |||
405 | default: | ||
406 | /* handle all other errors */ | ||
407 | usb_anchor_urb(urb, &ar->rx_pool); | ||
408 | atomic_inc(&ar->rx_pool_urbs); | ||
409 | break; | ||
410 | } | ||
411 | |||
412 | err = carl9170_usb_submit_rx_urb(ar, GFP_ATOMIC); | ||
413 | if (unlikely(err)) { | ||
414 | /* | ||
415 | * usb_submit_rx_urb reported a problem. | ||
416 | * In case this is due to an rx buffer shortage, | ||
417 | * elevate the tasklet worker priority to | ||
418 | * the highest available level. | ||
419 | */ | ||
420 | tasklet_hi_schedule(&ar->usb_tasklet); | ||
421 | |||
422 | if (atomic_read(&ar->rx_anch_urbs) == 0) { | ||
423 | /* | ||
424 | * The system is too slow to cope with | ||
425 | * the enormous workload. We have simply | ||
426 | * run out of active rx urbs and this | ||
427 | * unfortunately leads to an unpredictable | ||
428 | * device. | ||
429 | */ | ||
430 | |||
431 | carl9170_restart(ar, CARL9170_RR_SLOW_SYSTEM); | ||
432 | } | ||
433 | } else { | ||
434 | /* | ||
435 | * Using anything less than _high_ priority absolutely | ||
437 | * kills the rx performance on my UP system... | ||
437 | */ | ||
438 | tasklet_hi_schedule(&ar->usb_tasklet); | ||
439 | } | ||
440 | } | ||
441 | |||
442 | static struct urb *carl9170_usb_alloc_rx_urb(struct ar9170 *ar, gfp_t gfp) | ||
443 | { | ||
444 | struct urb *urb; | ||
445 | void *buf; | ||
446 | |||
447 | buf = kmalloc(ar->fw.rx_size, gfp); | ||
448 | if (!buf) | ||
449 | return NULL; | ||
450 | |||
451 | urb = usb_alloc_urb(0, gfp); | ||
452 | if (!urb) { | ||
453 | kfree(buf); | ||
454 | return NULL; | ||
455 | } | ||
456 | |||
457 | usb_fill_bulk_urb(urb, ar->udev, usb_rcvbulkpipe(ar->udev, | ||
458 | AR9170_USB_EP_RX), buf, ar->fw.rx_size, | ||
459 | carl9170_usb_rx_complete, ar); | ||
460 | |||
461 | urb->transfer_flags |= URB_FREE_BUFFER; | ||
462 | |||
463 | return urb; | ||
464 | } | ||
465 | |||
466 | static int carl9170_usb_send_rx_irq_urb(struct ar9170 *ar) | ||
467 | { | ||
468 | struct urb *urb = NULL; | ||
469 | void *ibuf; | ||
470 | int err = -ENOMEM; | ||
471 | |||
472 | urb = usb_alloc_urb(0, GFP_KERNEL); | ||
473 | if (!urb) | ||
474 | goto out; | ||
475 | |||
476 | ibuf = kmalloc(AR9170_USB_EP_CTRL_MAX, GFP_KERNEL); | ||
477 | if (!ibuf) | ||
478 | goto out; | ||
479 | |||
480 | usb_fill_int_urb(urb, ar->udev, usb_rcvintpipe(ar->udev, | ||
481 | AR9170_USB_EP_IRQ), ibuf, AR9170_USB_EP_CTRL_MAX, | ||
482 | carl9170_usb_rx_irq_complete, ar, 1); | ||
483 | |||
484 | urb->transfer_flags |= URB_FREE_BUFFER; | ||
485 | |||
486 | usb_anchor_urb(urb, &ar->rx_anch); | ||
487 | err = usb_submit_urb(urb, GFP_KERNEL); | ||
488 | if (err) | ||
489 | usb_unanchor_urb(urb); | ||
490 | |||
491 | out: | ||
492 | usb_free_urb(urb); | ||
493 | return err; | ||
494 | } | ||
495 | |||
496 | static int carl9170_usb_init_rx_bulk_urbs(struct ar9170 *ar) | ||
497 | { | ||
498 | struct urb *urb; | ||
499 | int i, err = -EINVAL; | ||
500 | |||
501 | /* | ||
502 | * The driver actively maintains a second shadow | ||
503 | * pool for inactive, but fully-prepared rx urbs. | ||
504 | * | ||
505 | * The pool should help the driver absorb huge | ||
506 | * workload spikes without running the risk of | ||
507 | * undersupplying the hardware or wasting time by | ||
508 | * processing rx data (streams) inside the urb | ||
509 | * completion (hardirq context). | ||
510 | */ | ||
511 | for (i = 0; i < AR9170_NUM_RX_URBS_POOL; i++) { | ||
512 | urb = carl9170_usb_alloc_rx_urb(ar, GFP_KERNEL); | ||
513 | if (!urb) { | ||
514 | err = -ENOMEM; | ||
515 | goto err_out; | ||
516 | } | ||
517 | |||
518 | usb_anchor_urb(urb, &ar->rx_pool); | ||
519 | atomic_inc(&ar->rx_pool_urbs); | ||
520 | usb_free_urb(urb); | ||
521 | } | ||
522 | |||
523 | err = carl9170_usb_submit_rx_urb(ar, GFP_KERNEL); | ||
524 | if (err) | ||
525 | goto err_out; | ||
526 | |||
527 | /* the device is now waiting for the firmware. */ | ||
528 | carl9170_set_state_when(ar, CARL9170_STOPPED, CARL9170_IDLE); | ||
529 | return 0; | ||
530 | |||
531 | err_out: | ||
532 | |||
533 | usb_scuttle_anchored_urbs(&ar->rx_pool); | ||
534 | usb_scuttle_anchored_urbs(&ar->rx_work); | ||
535 | usb_kill_anchored_urbs(&ar->rx_anch); | ||
536 | return err; | ||
537 | } | ||
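The rx machinery above is built entirely on USB anchors: rx_pool holds idle but fully prepared URBs, rx_anch holds the URBs currently submitted, and rx_work holds completed buffers waiting for the tasklet. Anchors are just reference-counted URB containers, so "moving" an URB means taking it from one anchor and re-anchoring it on another. A minimal sketch of that refill step with the stock USB core helpers (my_refill is an invented name):

#include <linux/usb.h>
#include <linux/errno.h>

/* move one idle URB from 'pool' to 'busy' and submit it */
static int my_refill(struct usb_anchor *pool, struct usb_anchor *busy)
{
	struct urb *urb;
	int err;

	urb = usb_get_from_anchor(pool);	/* takes its own reference */
	if (!urb)
		return -ENOSPC;

	usb_anchor_urb(urb, busy);
	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err) {
		usb_unanchor_urb(urb);
		usb_anchor_urb(urb, pool);	/* put it back for later */
	}

	usb_free_urb(urb);	/* drop the usb_get_from_anchor reference */
	return err;
}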
538 | |||
539 | static int carl9170_usb_flush(struct ar9170 *ar) | ||
540 | { | ||
541 | struct urb *urb; | ||
542 | int ret, err = 0; | ||
543 | |||
544 | while ((urb = usb_get_from_anchor(&ar->tx_wait))) { | ||
545 | struct sk_buff *skb = (void *)urb->context; | ||
546 | carl9170_tx_drop(ar, skb); | ||
547 | carl9170_tx_callback(ar, skb); | ||
548 | usb_free_urb(urb); | ||
549 | } | ||
550 | |||
551 | ret = usb_wait_anchor_empty_timeout(&ar->tx_cmd, HZ); | ||
552 | if (ret == 0) | ||
553 | err = -ETIMEDOUT; | ||
554 | |||
555 | /* let's wait a while until the tx queues have dried out */ | ||
556 | ret = usb_wait_anchor_empty_timeout(&ar->tx_anch, HZ); | ||
557 | if (ret == 0) | ||
558 | err = -ETIMEDOUT; | ||
559 | |||
560 | usb_kill_anchored_urbs(&ar->tx_anch); | ||
561 | carl9170_usb_handle_tx_err(ar); | ||
562 | |||
563 | return err; | ||
564 | } | ||
565 | |||
566 | static void carl9170_usb_cancel_urbs(struct ar9170 *ar) | ||
567 | { | ||
568 | int err; | ||
569 | |||
570 | carl9170_set_state(ar, CARL9170_UNKNOWN_STATE); | ||
571 | |||
572 | err = carl9170_usb_flush(ar); | ||
573 | if (err) | ||
574 | dev_err(&ar->udev->dev, "stuck tx urbs!\n"); | ||
575 | |||
576 | usb_poison_anchored_urbs(&ar->tx_anch); | ||
577 | carl9170_usb_handle_tx_err(ar); | ||
578 | usb_poison_anchored_urbs(&ar->rx_anch); | ||
579 | |||
580 | tasklet_kill(&ar->usb_tasklet); | ||
581 | |||
582 | usb_scuttle_anchored_urbs(&ar->rx_work); | ||
583 | usb_scuttle_anchored_urbs(&ar->rx_pool); | ||
584 | usb_scuttle_anchored_urbs(&ar->tx_cmd); | ||
585 | } | ||
586 | |||
587 | int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd, | ||
588 | const bool free_buf) | ||
589 | { | ||
590 | struct urb *urb; | ||
591 | |||
592 | if (!IS_INITIALIZED(ar)) | ||
593 | return -EPERM; | ||
594 | |||
595 | if (WARN_ON(cmd->hdr.len > CARL9170_MAX_CMD_LEN - 4)) | ||
596 | return -EINVAL; | ||
597 | |||
598 | urb = usb_alloc_urb(0, GFP_ATOMIC); | ||
599 | if (!urb) | ||
600 | return -ENOMEM; | ||
601 | |||
602 | usb_fill_int_urb(urb, ar->udev, usb_sndintpipe(ar->udev, | ||
603 | AR9170_USB_EP_CMD), cmd, cmd->hdr.len + 4, | ||
604 | carl9170_usb_cmd_complete, ar, 1); | ||
605 | |||
606 | urb->transfer_flags |= URB_ZERO_PACKET; | ||
607 | |||
608 | if (free_buf) | ||
609 | urb->transfer_flags |= URB_FREE_BUFFER; | ||
610 | |||
611 | usb_anchor_urb(urb, &ar->tx_cmd); | ||
612 | usb_free_urb(urb); | ||
613 | |||
614 | return carl9170_usb_submit_cmd_urb(ar); | ||
615 | } | ||
616 | |||
617 | int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd, | ||
618 | unsigned int plen, void *payload, unsigned int outlen, void *out) | ||
619 | { | ||
620 | int err = -ENOMEM; | ||
621 | |||
622 | if (!IS_ACCEPTING_CMD(ar)) | ||
623 | return -EIO; | ||
624 | |||
625 | if (!(cmd & CARL9170_CMD_ASYNC_FLAG)) | ||
626 | might_sleep(); | ||
627 | |||
628 | ar->cmd.hdr.len = plen; | ||
629 | ar->cmd.hdr.cmd = cmd; | ||
630 | /* writing multiple regs fills this buffer already */ | ||
631 | if (plen && payload != (u8 *)(ar->cmd.data)) | ||
632 | memcpy(ar->cmd.data, payload, plen); | ||
633 | |||
634 | spin_lock_bh(&ar->cmd_lock); | ||
635 | ar->readbuf = (u8 *)out; | ||
636 | ar->readlen = outlen; | ||
637 | spin_unlock_bh(&ar->cmd_lock); | ||
638 | |||
639 | err = __carl9170_exec_cmd(ar, &ar->cmd, false); | ||
640 | |||
641 | if (!(cmd & CARL9170_CMD_ASYNC_FLAG)) { | ||
642 | err = wait_for_completion_timeout(&ar->cmd_wait, HZ); | ||
643 | if (err == 0) { | ||
644 | err = -ETIMEDOUT; | ||
645 | goto err_unbuf; | ||
646 | } | ||
647 | |||
648 | if (ar->readlen != outlen) { | ||
649 | err = -EMSGSIZE; | ||
650 | goto err_unbuf; | ||
651 | } | ||
652 | } | ||
653 | |||
654 | return 0; | ||
655 | |||
656 | err_unbuf: | ||
657 | /* Maybe the device was removed while we were waiting? */ | ||
658 | if (IS_STARTED(ar)) { | ||
659 | dev_err(&ar->udev->dev, "no command feedback " | ||
660 | "received (%d).\n", err); | ||
661 | |||
662 | /* provide some maybe useful debug information */ | ||
663 | print_hex_dump_bytes("carl9170 cmd: ", DUMP_PREFIX_NONE, | ||
664 | &ar->cmd, plen + 4); | ||
665 | |||
666 | carl9170_restart(ar, CARL9170_RR_COMMAND_TIMEOUT); | ||
667 | } | ||
668 | |||
669 | /* invalidate to avoid completing the next command prematurely */ | ||
670 | spin_lock_bh(&ar->cmd_lock); | ||
671 | ar->readbuf = NULL; | ||
672 | ar->readlen = 0; | ||
673 | spin_unlock_bh(&ar->cmd_lock); | ||
674 | |||
675 | return err; | ||
676 | } | ||
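One detail worth calling out in carl9170_exec_cmd() is that wait_for_completion_timeout() does not return an errno: it returns 0 on timeout and the number of jiffies left on success, which is why the code rewrites err to -ETIMEDOUT by hand. A small sketch of that calling convention (my_wait_for_reply is an invented helper):

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int my_wait_for_reply(struct completion *done)
{
	unsigned long remaining;

	remaining = wait_for_completion_timeout(done,
						msecs_to_jiffies(1000));
	if (remaining == 0)
		return -ETIMEDOUT;	/* timed out, nothing completed */

	return 0;			/* completed with time to spare */
}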
677 | |||
678 | void carl9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb) | ||
679 | { | ||
680 | struct urb *urb; | ||
681 | struct ar9170_stream *tx_stream; | ||
682 | void *data; | ||
683 | unsigned int len; | ||
684 | |||
685 | if (!IS_STARTED(ar)) | ||
686 | goto err_drop; | ||
687 | |||
688 | urb = usb_alloc_urb(0, GFP_ATOMIC); | ||
689 | if (!urb) | ||
690 | goto err_drop; | ||
691 | |||
692 | if (ar->fw.tx_stream) { | ||
693 | tx_stream = (void *) (skb->data - sizeof(*tx_stream)); | ||
694 | |||
695 | len = skb->len + sizeof(*tx_stream); | ||
696 | tx_stream->length = cpu_to_le16(len); | ||
697 | tx_stream->tag = cpu_to_le16(AR9170_TX_STREAM_TAG); | ||
698 | data = tx_stream; | ||
699 | } else { | ||
700 | data = skb->data; | ||
701 | len = skb->len; | ||
702 | } | ||
703 | |||
704 | usb_fill_bulk_urb(urb, ar->udev, usb_sndbulkpipe(ar->udev, | ||
705 | AR9170_USB_EP_TX), data, len, | ||
706 | carl9170_usb_tx_data_complete, skb); | ||
707 | |||
708 | urb->transfer_flags |= URB_ZERO_PACKET; | ||
709 | |||
710 | usb_anchor_urb(urb, &ar->tx_wait); | ||
711 | |||
712 | usb_free_urb(urb); | ||
713 | |||
714 | carl9170_usb_submit_data_urb(ar); | ||
715 | return; | ||
716 | |||
717 | err_drop: | ||
718 | carl9170_tx_drop(ar, skb); | ||
719 | carl9170_tx_callback(ar, skb); | ||
720 | } | ||
721 | |||
722 | static void carl9170_release_firmware(struct ar9170 *ar) | ||
723 | { | ||
724 | if (ar->fw.fw) { | ||
725 | release_firmware(ar->fw.fw); | ||
726 | memset(&ar->fw, 0, sizeof(ar->fw)); | ||
727 | } | ||
728 | } | ||
729 | |||
730 | void carl9170_usb_stop(struct ar9170 *ar) | ||
731 | { | ||
732 | int ret; | ||
733 | |||
734 | carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STOPPED); | ||
735 | |||
736 | ret = carl9170_usb_flush(ar); | ||
737 | if (ret) | ||
738 | dev_err(&ar->udev->dev, "kill pending tx urbs.\n"); | ||
739 | |||
740 | usb_poison_anchored_urbs(&ar->tx_anch); | ||
741 | carl9170_usb_handle_tx_err(ar); | ||
742 | |||
743 | /* kill any pending command */ | ||
744 | spin_lock_bh(&ar->cmd_lock); | ||
745 | ar->readlen = 0; | ||
746 | spin_unlock_bh(&ar->cmd_lock); | ||
747 | complete_all(&ar->cmd_wait); | ||
748 | |||
749 | /* This is required to prevent an early completion on _start */ | ||
750 | INIT_COMPLETION(ar->cmd_wait); | ||
751 | |||
752 | /* | ||
753 | * Note: | ||
754 | * So far we freed all tx urbs, but we won't dare to touch any rx urbs. | ||
755 | * Else we would end up with an unresponsive device... | ||
756 | */ | ||
757 | } | ||
758 | |||
759 | int carl9170_usb_open(struct ar9170 *ar) | ||
760 | { | ||
761 | usb_unpoison_anchored_urbs(&ar->tx_anch); | ||
762 | |||
763 | carl9170_set_state_when(ar, CARL9170_STOPPED, CARL9170_IDLE); | ||
764 | return 0; | ||
765 | } | ||
766 | |||
767 | static int carl9170_usb_load_firmware(struct ar9170 *ar) | ||
768 | { | ||
769 | const u8 *data; | ||
770 | u8 *buf; | ||
771 | unsigned int transfer; | ||
772 | size_t len; | ||
773 | u32 addr; | ||
774 | int err = 0; | ||
775 | |||
776 | buf = kmalloc(4096, GFP_KERNEL); | ||
777 | if (!buf) { | ||
778 | err = -ENOMEM; | ||
779 | goto err_out; | ||
780 | } | ||
781 | |||
782 | data = ar->fw.fw->data; | ||
783 | len = ar->fw.fw->size; | ||
784 | addr = ar->fw.address; | ||
785 | |||
786 | /* this removes the miniboot image */ | ||
787 | data += ar->fw.offset; | ||
788 | len -= ar->fw.offset; | ||
789 | |||
790 | while (len) { | ||
791 | transfer = min_t(unsigned int, len, 4096u); | ||
792 | memcpy(buf, data, transfer); | ||
793 | |||
794 | err = usb_control_msg(ar->udev, usb_sndctrlpipe(ar->udev, 0), | ||
795 | 0x30 /* FW DL */, 0x40 | USB_DIR_OUT, | ||
796 | addr >> 8, 0, buf, transfer, 100); | ||
797 | |||
798 | if (err < 0) { | ||
799 | kfree(buf); | ||
800 | goto err_out; | ||
801 | } | ||
802 | |||
803 | len -= transfer; | ||
804 | data += transfer; | ||
805 | addr += transfer; | ||
806 | } | ||
807 | kfree(buf); | ||
808 | |||
809 | err = usb_control_msg(ar->udev, usb_sndctrlpipe(ar->udev, 0), | ||
810 | 0x31 /* FW DL COMPLETE */, | ||
811 | 0x40 | USB_DIR_OUT, 0, 0, NULL, 0, 200); | ||
812 | |||
813 | if (wait_for_completion_timeout(&ar->fw_boot_wait, HZ) == 0) { | ||
814 | err = -ETIMEDOUT; | ||
815 | goto err_out; | ||
816 | } | ||
817 | |||
818 | err = carl9170_echo_test(ar, 0x4a110123); | ||
819 | if (err) | ||
820 | goto err_out; | ||
821 | |||
822 | /* firmware restarts cmd counter */ | ||
823 | ar->cmd_seq = -1; | ||
824 | |||
825 | return 0; | ||
826 | |||
827 | err_out: | ||
828 | dev_err(&ar->udev->dev, "firmware upload failed (%d).\n", err); | ||
829 | return err; | ||
830 | } | ||
831 | |||
832 | int carl9170_usb_restart(struct ar9170 *ar) | ||
833 | { | ||
834 | int err = 0; | ||
835 | |||
836 | if (ar->intf->condition != USB_INTERFACE_BOUND) | ||
837 | return 0; | ||
838 | |||
839 | /* Disable command response sequence counter. */ | ||
840 | ar->cmd_seq = -2; | ||
841 | |||
842 | err = carl9170_reboot(ar); | ||
843 | |||
844 | carl9170_usb_stop(ar); | ||
845 | |||
846 | carl9170_set_state(ar, CARL9170_UNKNOWN_STATE); | ||
847 | |||
848 | if (err) | ||
849 | goto err_out; | ||
850 | |||
851 | tasklet_schedule(&ar->usb_tasklet); | ||
852 | |||
853 | /* The reboot procedure can take quite a while to complete. */ | ||
854 | msleep(1100); | ||
855 | |||
856 | err = carl9170_usb_open(ar); | ||
857 | if (err) | ||
858 | goto err_out; | ||
859 | |||
860 | err = carl9170_usb_load_firmware(ar); | ||
861 | if (err) | ||
862 | goto err_out; | ||
863 | |||
864 | return 0; | ||
865 | |||
866 | err_out: | ||
867 | carl9170_usb_cancel_urbs(ar); | ||
868 | return err; | ||
869 | } | ||
870 | |||
871 | void carl9170_usb_reset(struct ar9170 *ar) | ||
872 | { | ||
873 | /* | ||
874 | * This is the last resort to get the device going again | ||
875 | * without any *user replugging action*. | ||
876 | * | ||
877 | * But there is a catch: usb_reset really is like a physical | ||
878 | * *reconnect*. The mac80211 state will be lost in the process. | ||
879 | * Therefore a userspace application, which is monitoring | ||
880 | * the link must step in. | ||
881 | */ | ||
882 | carl9170_usb_cancel_urbs(ar); | ||
883 | |||
884 | carl9170_usb_stop(ar); | ||
885 | |||
886 | usb_queue_reset_device(ar->intf); | ||
887 | } | ||
888 | |||
889 | static int carl9170_usb_init_device(struct ar9170 *ar) | ||
890 | { | ||
891 | int err; | ||
892 | |||
893 | err = carl9170_usb_send_rx_irq_urb(ar); | ||
894 | if (err) | ||
895 | goto err_out; | ||
896 | |||
897 | err = carl9170_usb_init_rx_bulk_urbs(ar); | ||
898 | if (err) | ||
899 | goto err_unrx; | ||
900 | |||
901 | mutex_lock(&ar->mutex); | ||
902 | err = carl9170_usb_load_firmware(ar); | ||
903 | mutex_unlock(&ar->mutex); | ||
904 | if (err) | ||
905 | goto err_unrx; | ||
906 | |||
907 | return 0; | ||
908 | |||
909 | err_unrx: | ||
910 | carl9170_usb_cancel_urbs(ar); | ||
911 | |||
912 | err_out: | ||
913 | return err; | ||
914 | } | ||
915 | |||
916 | static void carl9170_usb_firmware_failed(struct ar9170 *ar) | ||
917 | { | ||
918 | struct device *parent = ar->udev->dev.parent; | ||
919 | struct usb_device *udev; | ||
920 | |||
921 | /* | ||
922 | * Store a copy of the usb_device pointer locally. | ||
923 | * This is because device_release_driver initiates | ||
924 | * carl9170_usb_disconnect, which in turn frees our | ||
925 | * driver context (ar). | ||
926 | */ | ||
927 | udev = ar->udev; | ||
928 | |||
929 | complete(&ar->fw_load_wait); | ||
930 | |||
931 | /* unbind, since the firmware setup failed */ | ||
932 | if (parent) | ||
933 | device_lock(parent); | ||
934 | |||
935 | device_release_driver(&udev->dev); | ||
936 | if (parent) | ||
937 | device_unlock(parent); | ||
938 | |||
939 | usb_put_dev(udev); | ||
940 | } | ||
941 | |||
942 | static void carl9170_usb_firmware_finish(struct ar9170 *ar) | ||
943 | { | ||
944 | int err; | ||
945 | |||
946 | err = carl9170_parse_firmware(ar); | ||
947 | if (err) | ||
948 | goto err_freefw; | ||
949 | |||
950 | err = carl9170_usb_init_device(ar); | ||
951 | if (err) | ||
952 | goto err_freefw; | ||
953 | |||
954 | err = carl9170_usb_open(ar); | ||
955 | if (err) | ||
956 | goto err_unrx; | ||
957 | |||
958 | err = carl9170_register(ar); | ||
959 | |||
960 | carl9170_usb_stop(ar); | ||
961 | if (err) | ||
962 | goto err_unrx; | ||
963 | |||
964 | complete(&ar->fw_load_wait); | ||
965 | usb_put_dev(ar->udev); | ||
966 | return; | ||
967 | |||
968 | err_unrx: | ||
969 | carl9170_usb_cancel_urbs(ar); | ||
970 | |||
971 | err_freefw: | ||
972 | carl9170_release_firmware(ar); | ||
973 | carl9170_usb_firmware_failed(ar); | ||
974 | } | ||
975 | |||
976 | static void carl9170_usb_firmware_step2(const struct firmware *fw, | ||
977 | void *context) | ||
978 | { | ||
979 | struct ar9170 *ar = context; | ||
980 | |||
981 | if (fw) { | ||
982 | ar->fw.fw = fw; | ||
983 | carl9170_usb_firmware_finish(ar); | ||
984 | return; | ||
985 | } | ||
986 | |||
987 | dev_err(&ar->udev->dev, "firmware not found.\n"); | ||
988 | carl9170_usb_firmware_failed(ar); | ||
989 | } | ||
990 | |||
991 | static int carl9170_usb_probe(struct usb_interface *intf, | ||
992 | const struct usb_device_id *id) | ||
993 | { | ||
994 | struct ar9170 *ar; | ||
995 | struct usb_device *udev; | ||
996 | int err; | ||
997 | |||
998 | err = usb_reset_device(interface_to_usbdev(intf)); | ||
999 | if (err) | ||
1000 | return err; | ||
1001 | |||
1002 | ar = carl9170_alloc(sizeof(*ar)); | ||
1003 | if (IS_ERR(ar)) | ||
1004 | return PTR_ERR(ar); | ||
1005 | |||
1006 | udev = interface_to_usbdev(intf); | ||
1007 | usb_get_dev(udev); | ||
1008 | ar->udev = udev; | ||
1009 | ar->intf = intf; | ||
1010 | ar->features = id->driver_info; | ||
1011 | |||
1012 | usb_set_intfdata(intf, ar); | ||
1013 | SET_IEEE80211_DEV(ar->hw, &intf->dev); | ||
1014 | |||
1015 | init_usb_anchor(&ar->rx_anch); | ||
1016 | init_usb_anchor(&ar->rx_pool); | ||
1017 | init_usb_anchor(&ar->rx_work); | ||
1018 | init_usb_anchor(&ar->tx_wait); | ||
1019 | init_usb_anchor(&ar->tx_anch); | ||
1020 | init_usb_anchor(&ar->tx_cmd); | ||
1021 | init_usb_anchor(&ar->tx_err); | ||
1022 | init_completion(&ar->cmd_wait); | ||
1023 | init_completion(&ar->fw_boot_wait); | ||
1024 | init_completion(&ar->fw_load_wait); | ||
1025 | tasklet_init(&ar->usb_tasklet, carl9170_usb_tasklet, | ||
1026 | (unsigned long)ar); | ||
1027 | |||
1028 | atomic_set(&ar->tx_cmd_urbs, 0); | ||
1029 | atomic_set(&ar->tx_anch_urbs, 0); | ||
1030 | atomic_set(&ar->rx_work_urbs, 0); | ||
1031 | atomic_set(&ar->rx_anch_urbs, 0); | ||
1032 | atomic_set(&ar->rx_pool_urbs, 0); | ||
1033 | ar->cmd_seq = -2; | ||
1034 | |||
1035 | usb_get_dev(ar->udev); | ||
1036 | |||
1037 | carl9170_set_state(ar, CARL9170_STOPPED); | ||
1038 | |||
1039 | return request_firmware_nowait(THIS_MODULE, 1, CARL9170FW_NAME, | ||
1040 | &ar->udev->dev, GFP_KERNEL, ar, carl9170_usb_firmware_step2); | ||
1041 | } | ||
1042 | |||
1043 | static void carl9170_usb_disconnect(struct usb_interface *intf) | ||
1044 | { | ||
1045 | struct ar9170 *ar = usb_get_intfdata(intf); | ||
1046 | struct usb_device *udev; | ||
1047 | |||
1048 | if (WARN_ON(!ar)) | ||
1049 | return; | ||
1050 | |||
1051 | udev = ar->udev; | ||
1052 | wait_for_completion(&ar->fw_load_wait); | ||
1053 | |||
1054 | if (IS_INITIALIZED(ar)) { | ||
1055 | carl9170_reboot(ar); | ||
1056 | carl9170_usb_stop(ar); | ||
1057 | } | ||
1058 | |||
1059 | carl9170_usb_cancel_urbs(ar); | ||
1060 | carl9170_unregister(ar); | ||
1061 | |||
1062 | usb_set_intfdata(intf, NULL); | ||
1063 | |||
1064 | carl9170_release_firmware(ar); | ||
1065 | carl9170_free(ar); | ||
1066 | usb_put_dev(udev); | ||
1067 | } | ||
1068 | |||
1069 | #ifdef CONFIG_PM | ||
1070 | static int carl9170_usb_suspend(struct usb_interface *intf, | ||
1071 | pm_message_t message) | ||
1072 | { | ||
1073 | struct ar9170 *ar = usb_get_intfdata(intf); | ||
1074 | |||
1075 | if (!ar) | ||
1076 | return -ENODEV; | ||
1077 | |||
1078 | carl9170_usb_cancel_urbs(ar); | ||
1079 | |||
1080 | /* | ||
1081 | * firmware automatically reboots for usb suspend. | ||
1082 | */ | ||
1083 | |||
1084 | return 0; | ||
1085 | } | ||
1086 | |||
1087 | static int carl9170_usb_resume(struct usb_interface *intf) | ||
1088 | { | ||
1089 | struct ar9170 *ar = usb_get_intfdata(intf); | ||
1090 | int err; | ||
1091 | |||
1092 | if (!ar) | ||
1093 | return -ENODEV; | ||
1094 | |||
1095 | usb_unpoison_anchored_urbs(&ar->rx_anch); | ||
1096 | |||
1097 | err = carl9170_usb_init_device(ar); | ||
1098 | if (err) | ||
1099 | goto err_unrx; | ||
1100 | |||
1101 | err = carl9170_usb_open(ar); | ||
1102 | if (err) | ||
1103 | goto err_unrx; | ||
1104 | |||
1105 | return 0; | ||
1106 | |||
1107 | err_unrx: | ||
1108 | carl9170_usb_cancel_urbs(ar); | ||
1109 | |||
1110 | return err; | ||
1111 | } | ||
1112 | #endif /* CONFIG_PM */ | ||
1113 | |||
1114 | static struct usb_driver carl9170_driver = { | ||
1115 | .name = KBUILD_MODNAME, | ||
1116 | .probe = carl9170_usb_probe, | ||
1117 | .disconnect = carl9170_usb_disconnect, | ||
1118 | .id_table = carl9170_usb_ids, | ||
1119 | .soft_unbind = 1, | ||
1120 | #ifdef CONFIG_PM | ||
1121 | .suspend = carl9170_usb_suspend, | ||
1122 | .resume = carl9170_usb_resume, | ||
1123 | #endif /* CONFIG_PM */ | ||
1124 | }; | ||
1125 | |||
1126 | static int __init carl9170_usb_init(void) | ||
1127 | { | ||
1128 | return usb_register(&carl9170_driver); | ||
1129 | } | ||
1130 | |||
1131 | static void __exit carl9170_usb_exit(void) | ||
1132 | { | ||
1133 | usb_deregister(&carl9170_driver); | ||
1134 | } | ||
1135 | |||
1136 | module_init(carl9170_usb_init); | ||
1137 | module_exit(carl9170_usb_exit); | ||