| author | Emmanuel Grumbach <emmanuel.grumbach@intel.com> | 2011-07-11 10:39:46 -0400 |
|---|---|---|
| committer | Wey-Yi Guy <wey-yi.w.guy@intel.com> | 2011-07-16 10:38:59 -0400 |
| commit | 253a634ccd1b291282cd0cade219bd90eb0371eb (patch) | |
| tree | ecb708eb830aaf1404e7ca7137784d8163c26b60 /drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c | |
| parent | ab697a9f1e73ba817955e15bd899a8a0627f9fd6 (diff) | |
iwlagn: move tx transport functions to iwl-trans-tx-pcie.c
There are still a few functions here and there that should be put in the
transport layer, mainly the functions related to the reclaim flow.
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c')
-rw-r--r-- | drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c | 808 |
1 file changed, 808 insertions(+), 0 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
new file mode 100644
index 000000000000..f3b531b34475
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
@@ -0,0 +1,808 @@
/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <net/mac80211.h>

#include "iwl-agn.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-trans-int-pcie.h"

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

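	/*
	 * HBUS_TARG_WRPTR takes the TFD index in its low byte and the Tx
	 * queue number shifted to bit 8, so a single 32-bit write tells
	 * the hardware which queue's write pointer moved, and to where.
	 */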
	if (priv->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(priv, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		/* if we're trying to save power */
		if (test_bit(STATUS_POWER_PMI, &priv->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(priv,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(priv, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(priv, HBUS_TARG_WRPTR,
					   txq->q.write_ptr | (txq_id << 8));

			/*
			 * else not in power-save mode,
			 * uCode will never sleep when we're
			 * trying to tx (during RFKILL, we're not trying to tx).
			 */
		} else
			iwl_write32(priv, HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}
	txq->need_update = 0;
}

static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

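	/*
	 * Each TB carries a 36-bit DMA address: the low 32 bits live in
	 * 'lo', bits 35:32 in the low nibble of 'hi_n_len'.  The shift is
	 * split into two 16-bit steps so the expression stays well-defined
	 * even when dma_addr_t is only 32 bits wide.
	 */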
	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

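	/*
	 * 'hi_n_len' packs the 12-bit TB length into bits 15:4 and address
	 * bits 35:32 into bits 3:0.
	 */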
	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
			     struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite a serious situation */
		return;
	}

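	/*
	 * TB 0 is the command buffer itself; its DMA mapping is tracked in
	 * 'meta' and is always bidirectional.  The remaining TBs point at
	 * payload chunks mapped in the direction the caller passed in.
	 */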
	/* Unmap tx_cmd */
	if (num_tbs)
		dma_unmap_single(priv->bus.dev,
				 dma_unmap_addr(meta, mapping),
				 dma_unmap_len(meta, len),
				 DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(priv->bus.dev, iwl_tfd_tb_get_addr(tfd, i),
				 iwl_tfd_tb_get_len(tfd, i), dma_dir);
}

/**
 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @priv: driver private data
 * @txq: tx queue
 * @index: the index of the TFD to be freed
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq,
			 int index)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index],
			 DMA_TO_DEVICE);

	/* free SKB */
	if (txq->txb) {
		struct sk_buff *skb;

		skb = txq->txb[index].skb;

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->txb[index].skb = NULL;
		}
	}
}

int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Error can not send more than %d chunks\n",
			IWL_NUM_OF_TBS);
		return -EINVAL;
	}

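	/* The hardware can only address 36 bits of DMA memory. */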
	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(priv, "Unaligned address = %llx\n",
			(unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. The driver keeps a minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For Tx queues, there are low mark and high mark limits. If, after queuing
 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on a 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 ***************************************************/

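/*
 * Note that free space is computed against n_window rather than n_bd, so
 * for queues whose window is smaller than the circular buffer the value
 * returned is capped at n_window minus the 2-entry reserve described above.
 */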
int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}

/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
		   int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

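	/*
	 * low_mark/high_mark are the flow-control watermarks from the
	 * theory-of-operation comment above: a queue is stopped when free
	 * space drops below the low mark and restarted once reclaim brings
	 * it back over the high mark.
	 */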
	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the uCode command structure
 *
 * The function returns a value < 0 to indicate that the operation
 * failed. On success, it returns the index (>= 0) of the command in
 * the command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	u32 idx;
	u16 copy_size, cmd_size;
	bool is_ct_kill = false;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	if (test_bit(STATUS_FW_ERROR, &priv->status)) {
		IWL_WARN(priv, "fw recovery, no hcmd send\n");
		return -EIO;
	}

	if ((priv->ucode_owner == IWL_OWNERSHIP_TM) &&
	    !(cmd->flags & CMD_ON_DEMAND)) {
		IWL_DEBUG_HC(priv, "tm own the uCode, no regular hcmd send\n");
		return -EIO;
	}

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

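	/*
	 * At this point copy_size is the number of bytes that will be
	 * copied into the command buffer itself, while cmd_size is the
	 * size of the full logical command including NOCOPY fragments.
	 */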
	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
		IWL_WARN(priv, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(priv) ? "RF" : "CT");
		return -EIO;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&priv->hcmd_lock, flags);

		IWL_ERR(priv, "No space in command queue\n");
		is_ct_kill = iwl_check_for_ct_kill(priv);
		if (!is_ct_kill) {
			IWL_ERR(priv, "Restarting adapter due to queue full\n");
			iwlagn_fw_error(priv, false);
		}
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
					    INDEX_TO_SEQ(q->write_ptr));

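	/*
	 * The sequence field encodes the command queue and the write index;
	 * iwl_tx_cmd_complete() later uses SEQ_TO_QUEUE/SEQ_TO_INDEX to
	 * route the firmware's response back to this slot.
	 */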
	/* and copy the data that needs to be copied */

	cmd_dest = &out_cmd->cmd.payload[0];
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
		     "%d bytes at %d[%d]:%d\n",
		     get_cmd_string(out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd,
		     le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
		     q->write_ptr, idx, priv->cmd_queue);

	phys_addr = dma_map_single(priv->bus.dev, &out_cmd->hdr, copy_size,
				   DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(priv->bus.dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

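	/*
	 * Second pass: map the NOCOPY fragments straight out of the
	 * caller's buffers; each one gets its own TB after the copied
	 * header block attached above.
	 */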
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(priv->bus.dev, (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(priv->bus.dev, phys_addr)) {
			iwlagn_unmap_tfd(priv, out_meta,
					 &txq->tfds[q->write_ptr],
					 DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(priv, cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(priv, txq);

 out:
	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return idx;
}

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances the 'R' index, all entries between the old and new 'R'
 * index need to be reclaimed. As a result, some free space forms. If there
 * is enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(priv, "%s: Read index for DMA queue txq id (%d), "
			"index %d is out of range [0-%d] %d %d.\n", __func__,
			txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

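	/*
	 * Only one command completion is expected per reclaim, so freeing
	 * more than one entry between read_ptr and the completed index
	 * means the queue accounting is broken; force a firmware error
	 * to recover.
	 */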
	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
				q->write_ptr, q->read_ptr);
			iwlagn_fw_error(priv, false);
		}

	}
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it, the callback
 * will be executed. The attached skb (if present) will only be freed
 * if the callback returns 1.
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	unsigned long flags;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != priv->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, priv->cmd_queue, sequence,
		 priv->txq[priv->cmd_queue].q.read_ptr,
		 priv->txq[priv->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(priv, pkt, 32);
		return;
	}

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];

	iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(priv, cmd, pkt);

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	iwl_hcmd_queue_reclaim(priv, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(cmd->hdr.cmd));
		wake_up_interruptible(&priv->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
}

const char *get_cmd_string(u8 cmd)
{
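	/* IWL_CMD(x) expands to "case x: return #x". */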
	switch (cmd) {
		IWL_CMD(REPLY_ALIVE);
		IWL_CMD(REPLY_ERROR);
		IWL_CMD(REPLY_RXON);
		IWL_CMD(REPLY_RXON_ASSOC);
		IWL_CMD(REPLY_QOS_PARAM);
		IWL_CMD(REPLY_RXON_TIMING);
		IWL_CMD(REPLY_ADD_STA);
		IWL_CMD(REPLY_REMOVE_STA);
		IWL_CMD(REPLY_REMOVE_ALL_STA);
		IWL_CMD(REPLY_TXFIFO_FLUSH);
		IWL_CMD(REPLY_WEPKEY);
		IWL_CMD(REPLY_TX);
		IWL_CMD(REPLY_LEDS_CMD);
		IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
		IWL_CMD(COEX_PRIORITY_TABLE_CMD);
		IWL_CMD(COEX_MEDIUM_NOTIFICATION);
		IWL_CMD(COEX_EVENT_CMD);
		IWL_CMD(REPLY_QUIET_CMD);
		IWL_CMD(REPLY_CHANNEL_SWITCH);
		IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
		IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
		IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
		IWL_CMD(POWER_TABLE_CMD);
		IWL_CMD(PM_SLEEP_NOTIFICATION);
		IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
		IWL_CMD(REPLY_SCAN_CMD);
		IWL_CMD(REPLY_SCAN_ABORT_CMD);
		IWL_CMD(SCAN_START_NOTIFICATION);
		IWL_CMD(SCAN_RESULTS_NOTIFICATION);
		IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
		IWL_CMD(BEACON_NOTIFICATION);
		IWL_CMD(REPLY_TX_BEACON);
		IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
		IWL_CMD(QUIET_NOTIFICATION);
		IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
		IWL_CMD(MEASURE_ABORT_NOTIFICATION);
		IWL_CMD(REPLY_BT_CONFIG);
		IWL_CMD(REPLY_STATISTICS_CMD);
		IWL_CMD(STATISTICS_NOTIFICATION);
		IWL_CMD(REPLY_CARD_STATE_CMD);
		IWL_CMD(CARD_STATE_NOTIFICATION);
		IWL_CMD(MISSED_BEACONS_NOTIFICATION);
		IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
		IWL_CMD(SENSITIVITY_CMD);
		IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
		IWL_CMD(REPLY_RX_PHY_CMD);
		IWL_CMD(REPLY_RX_MPDU_CMD);
		IWL_CMD(REPLY_RX);
		IWL_CMD(REPLY_COMPRESSED_BA);
		IWL_CMD(CALIBRATION_CFG_CMD);
		IWL_CMD(CALIBRATION_RES_NOTIFICATION);
		IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION);
		IWL_CMD(REPLY_TX_POWER_DBM_CMD);
		IWL_CMD(TEMPERATURE_NOTIFICATION);
		IWL_CMD(TX_ANT_CONFIGURATION_CMD);
		IWL_CMD(REPLY_BT_COEX_PROFILE_NOTIF);
		IWL_CMD(REPLY_BT_COEX_PRIO_TABLE);
		IWL_CMD(REPLY_BT_COEX_PROT_ENV);
		IWL_CMD(REPLY_WIPAN_PARAMS);
		IWL_CMD(REPLY_WIPAN_RXON);
		IWL_CMD(REPLY_WIPAN_RXON_TIMING);
		IWL_CMD(REPLY_WIPAN_RXON_ASSOC);
		IWL_CMD(REPLY_WIPAN_QOS_PARAM);
		IWL_CMD(REPLY_WIPAN_WEPKEY);
		IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH);
		IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION);
		IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE);
	default:
		return "UNKNOWN";
	}
}

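/* Give a synchronous command up to two seconds (in jiffies) to complete. */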
#define HOST_COMPLETE_TIMEOUT (2 * HZ)

static void iwl_generic_cmd_callback(struct iwl_priv *priv,
				     struct iwl_device_cmd *cmd,
				     struct iwl_rx_packet *pkt)
{
	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
		IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
			get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		return;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	switch (cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
				  get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
		break;
	default:
		IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
			     get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
	}
#endif
}

static int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	int ret;

	/* An asynchronous command cannot expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	/* Assign a generic callback if one is not provided */
	if (!cmd->callback)
		cmd->callback = iwl_generic_cmd_callback;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return -EBUSY;

	ret = iwl_enqueue_hcmd(priv, cmd);
	if (ret < 0) {
		IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(cmd->id), ret);
		return ret;
	}
	return 0;
}

static int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	int cmd_idx;
	int ret;

	lockdep_assert_held(&priv->mutex);

	/* A synchronous command cannot have a callback set. */
	if (WARN_ON(cmd->callback))
		return -EINVAL;

	IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
		       get_cmd_string(cmd->id));

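	/*
	 * STATUS_HCMD_ACTIVE is set here and cleared by iwl_tx_cmd_complete()
	 * once the response arrives; the wait below blocks on that bit.
	 */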
	set_bit(STATUS_HCMD_ACTIVE, &priv->status);
	IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
		       get_cmd_string(cmd->id));

	cmd_idx = iwl_enqueue_hcmd(priv, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
			get_cmd_string(cmd->id), ret);
		return ret;
	}

	ret = wait_event_interruptible_timeout(priv->wait_command_queue,
			!test_bit(STATUS_HCMD_ACTIVE, &priv->status),
			HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
			IWL_ERR(priv,
				"Error sending %s: time out after %dms.\n",
				get_cmd_string(cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
			IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command "
				       "%s\n", get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
		IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
			get_cmd_string(cmd->id));
		ret = -ECANCELED;
		goto fail;
	}
	if (test_bit(STATUS_FW_ERROR, &priv->status)) {
		IWL_ERR(priv, "Command %s failed: FW Error\n",
			get_cmd_string(cmd->id));
		ret = -EIO;
		goto fail;
	}
	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
		IWL_ERR(priv, "Error: Response NULL in '%s'\n",
			get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
							~CMD_WANT_SKB;
	}
fail:
	if (cmd->reply_page) {
		iwl_free_pages(priv, cmd->reply_page);
		cmd->reply_page = 0;
	}

	return ret;
}

int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	if (cmd->flags & CMD_ASYNC)
		return iwl_send_cmd_async(priv, cmd);

	return iwl_send_cmd_sync(priv, cmd);
}

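/*
 * Convenience wrapper for the common case of a command with a single,
 * copied data fragment.
 */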
int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u32 flags, u16 len,
		     const void *data)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = { len, },
		.data = { data, },
		.flags = flags,
	};

	return iwl_send_cmd(priv, &cmd);
}