Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-tx.c')
 -rw-r--r--  drivers/net/wireless/iwlwifi/iwl-tx.c | 364 +-
 1 file changed, 266 insertions(+), 98 deletions(-)

diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index e69597ea43e2..686e176b5ebd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -32,6 +32,7 @@
 #include <linux/slab.h>
 #include <net/mac80211.h>
 #include "iwl-eeprom.h"
+#include "iwl-agn.h"
 #include "iwl-dev.h"
 #include "iwl-core.h"
 #include "iwl-sta.h"
@@ -85,6 +86,158 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
         txq->need_update = 0;
 }
 
+static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+{
+        struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+        dma_addr_t addr = get_unaligned_le32(&tb->lo);
+        if (sizeof(dma_addr_t) > sizeof(u32))
+                addr |=
+                ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
+
+        return addr;
+}
+
+static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+{
+        struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+        return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
+static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
+                                  dma_addr_t addr, u16 len)
+{
+        struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+        u16 hi_n_len = len << 4;
+
+        put_unaligned_le32(addr, &tb->lo);
+        if (sizeof(dma_addr_t) > sizeof(u32))
+                hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+
+        tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+        tfd->num_tbs = idx + 1;
+}
+
+static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
+{
+        return tfd->num_tbs & 0x1f;
+}
+
+static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
+                             struct iwl_tfd *tfd)
+{
+        struct pci_dev *dev = priv->pci_dev;
+        int i;
+        int num_tbs;
+
+        /* Sanity check on number of chunks */
+        num_tbs = iwl_tfd_get_num_tbs(tfd);
+
+        if (num_tbs >= IWL_NUM_OF_TBS) {
+                IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
+                /* @todo issue fatal error, it is quite serious situation */
+                return;
+        }
+
+        /* Unmap tx_cmd */
+        if (num_tbs)
+                pci_unmap_single(dev,
+                                 dma_unmap_addr(meta, mapping),
+                                 dma_unmap_len(meta, len),
+                                 PCI_DMA_BIDIRECTIONAL);
+
+        /* Unmap chunks, if any. */
+        for (i = 1; i < num_tbs; i++)
+                pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
+                                 iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
+}
+
+/**
+ * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * @priv - driver private data
+ * @txq - tx queue
+ *
+ * Does NOT advance any TFD circular buffer read/write indexes
+ * Does NOT free the TFD itself (which is within circular buffer)
+ */
+void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+{
+        struct iwl_tfd *tfd_tmp = txq->tfds;
+        int index = txq->q.read_ptr;
+
+        iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index]);
+
+        /* free SKB */
+        if (txq->txb) {
+                struct sk_buff *skb;
+
+                skb = txq->txb[txq->q.read_ptr].skb;
+
+                /* can be called from irqs-disabled context */
+                if (skb) {
+                        dev_kfree_skb_any(skb);
+                        txq->txb[txq->q.read_ptr].skb = NULL;
+                }
+        }
+}
+
+int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+                                 struct iwl_tx_queue *txq,
+                                 dma_addr_t addr, u16 len,
+                                 u8 reset)
+{
+        struct iwl_queue *q;
+        struct iwl_tfd *tfd, *tfd_tmp;
+        u32 num_tbs;
+
+        q = &txq->q;
+        tfd_tmp = txq->tfds;
+        tfd = &tfd_tmp[q->write_ptr];
+
+        if (reset)
+                memset(tfd, 0, sizeof(*tfd));
+
+        num_tbs = iwl_tfd_get_num_tbs(tfd);
+
+        /* Each TFD can point to a maximum 20 Tx buffers */
+        if (num_tbs >= IWL_NUM_OF_TBS) {
+                IWL_ERR(priv, "Error can not send more than %d chunks\n",
+                        IWL_NUM_OF_TBS);
+                return -EINVAL;
+        }
+
+        if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
+                return -EINVAL;
+
+        if (unlikely(addr & ~IWL_TX_DMA_MASK))
+                IWL_ERR(priv, "Unaligned address = %llx\n",
+                        (unsigned long long)addr);
+
+        iwl_tfd_set_tb(tfd, num_tbs, addr, len);
+
+        return 0;
+}
+
+/*
+ * Tell nic where to find circular buffer of Tx Frame Descriptors for
+ * given Tx queue, and enable the DMA channel used for that queue.
+ *
+ * supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
+ * channels supported in hardware.
+ */
+static int iwlagn_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+{
+        int txq_id = txq->q.id;
+
+        /* Circular buffer (TFD queue in DRAM) physical base address */
+        iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
+                           txq->q.dma_addr >> 8);
+
+        return 0;
+}
+
 /**
  * iwl_tx_queue_unmap -  Unmap any remaining DMA mappings and free skb's
  */
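The helpers added above implement the TFD's packed transmit-buffer (TB) entries: each entry stores a DMA address of up to 36 bits and a 12-bit length, with the low 32 address bits in tb->lo and the top 4 address bits sharing the 16-bit hi_n_len word with the length (len << 4). The stand-alone sketch below illustrates that round trip; it uses plain C99 integer types and invented toy_* names instead of the kernel's le16/get_unaligned helpers, so it is an illustration of the packing scheme, not driver code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Toy TB slot: 32 low address bits, plus a 16-bit word holding
 * (len << 4) | high-4-address-bits, mirroring the layout used by
 * iwl_tfd_set_tb()/iwl_tfd_tb_get_addr()/iwl_tfd_tb_get_len(). */
struct toy_tb {
        uint32_t lo;
        uint16_t hi_n_len;
};

static void toy_set_tb(struct toy_tb *tb, uint64_t addr, uint16_t len)
{
        tb->lo = (uint32_t)addr;                /* address bits 0..31 */
        tb->hi_n_len = (uint16_t)(len << 4);    /* length in bits 4..15 */
        tb->hi_n_len |= (addr >> 32) & 0xF;     /* address bits 32..35 */
}

static uint64_t toy_get_addr(const struct toy_tb *tb)
{
        return (uint64_t)tb->lo | ((uint64_t)(tb->hi_n_len & 0xF) << 32);
}

static uint16_t toy_get_len(const struct toy_tb *tb)
{
        return tb->hi_n_len >> 4;
}

int main(void)
{
        struct toy_tb tb;

        toy_set_tb(&tb, (0xFULL << 32) | 0x12345678, 0x7FF);
        assert(toy_get_addr(&tb) == ((0xFULL << 32) | 0x12345678));
        assert(toy_get_len(&tb) == 0x7FF);
        printf("addr=%#llx len=%#x\n",
               (unsigned long long)toy_get_addr(&tb),
               (unsigned)toy_get_len(&tb));
        return 0;
}

The kernel version writes the high nibble as ((addr >> 16) >> 16) rather than addr >> 32 so the expression stays well-defined when dma_addr_t is only 32 bits wide; the sketch sidesteps that by using uint64_t throughout.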
@@ -97,7 +250,7 @@ void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
                 return;
 
         while (q->write_ptr != q->read_ptr) {
-                priv->cfg->ops->lib->txq_free_tfd(priv, txq);
+                iwlagn_txq_free_tfd(priv, txq);
                 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
         }
 }
@@ -154,7 +307,7 @@ void iwl_cmd_queue_unmap(struct iwl_priv *priv)
                 return;
 
         while (q->read_ptr != q->write_ptr) {
-                i = get_cmd_index(q, q->read_ptr, 0);
+                i = get_cmd_index(q, q->read_ptr);
 
                 if (txq->meta[i].flags & CMD_MAPPED) {
                         pci_unmap_single(priv->pci_dev,
@@ -166,15 +319,6 @@ void iwl_cmd_queue_unmap(struct iwl_priv *priv)
 
                 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
         }
-
-        i = q->n_window;
-        if (txq->meta[i].flags & CMD_MAPPED) {
-                pci_unmap_single(priv->pci_dev,
-                                 dma_unmap_addr(&txq->meta[i], mapping),
-                                 dma_unmap_len(&txq->meta[i], len),
-                                 PCI_DMA_BIDIRECTIONAL);
-                txq->meta[i].flags = 0;
-        }
 }
 
 /**
@@ -194,7 +338,7 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
         iwl_cmd_queue_unmap(priv);
 
         /* De-alloc array of command/tx buffers */
-        for (i = 0; i <= TFD_CMD_SLOTS; i++)
+        for (i = 0; i < TFD_CMD_SLOTS; i++)
                 kfree(txq->cmd[i]);
 
         /* De-alloc circular buffer of TFDs */
@@ -334,33 +478,17 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 {
         int i, len;
         int ret;
-        int actual_slots = slots_num;
-
-        /*
-         * Alloc buffer array for commands (Tx or other types of commands).
-         * For the command queue (#4/#9), allocate command space + one big
-         * command for scan, since scan command is very huge; the system will
-         * not have two scans at the same time, so only one is needed.
-         * For normal Tx queues (all other queues), no super-size command
-         * space is needed.
-         */
-        if (txq_id == priv->cmd_queue)
-                actual_slots++;
 
-        txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
+        txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * slots_num,
                             GFP_KERNEL);
-        txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
+        txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * slots_num,
                            GFP_KERNEL);
 
         if (!txq->meta || !txq->cmd)
                 goto out_free_arrays;
 
         len = sizeof(struct iwl_device_cmd);
-        for (i = 0; i < actual_slots; i++) {
-                /* only happens for cmd queue */
-                if (i == slots_num)
-                        len = IWL_MAX_CMD_SIZE;
-
+        for (i = 0; i < slots_num; i++) {
                 txq->cmd[i] = kmalloc(len, GFP_KERNEL);
                 if (!txq->cmd[i])
                         goto err;
@@ -391,11 +519,11 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
                 return ret;
 
         /* Tell device where to find queue */
-        priv->cfg->ops->lib->txq_init(priv, txq);
+        iwlagn_tx_queue_init(priv, txq);
 
         return 0;
 err:
-        for (i = 0; i < actual_slots; i++)
+        for (i = 0; i < slots_num; i++)
                 kfree(txq->cmd[i]);
 out_free_arrays:
         kfree(txq->meta);
@@ -420,7 +548,7 @@ void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
         iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
 
         /* Tell device where to find queue */
-        priv->cfg->ops->lib->txq_init(priv, txq);
+        iwlagn_tx_queue_init(priv, txq);
 }
 
 /*************** HOST COMMAND QUEUE FUNCTIONS *****/
@@ -443,23 +571,49 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
         dma_addr_t phys_addr;
         unsigned long flags;
         u32 idx;
-        u16 fix_size;
+        u16 copy_size, cmd_size;
         bool is_ct_kill = false;
+        bool had_nocopy = false;
+        int i;
+        u8 *cmd_dest;
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+        const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
+        int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
+        int trace_idx;
+#endif
+
+        if (test_bit(STATUS_FW_ERROR, &priv->status)) {
+                IWL_WARN(priv, "fw recovery, no hcmd send\n");
+                return -EIO;
+        }
 
-        fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
+        copy_size = sizeof(out_cmd->hdr);
+        cmd_size = sizeof(out_cmd->hdr);
+
+        /* need one for the header if the first is NOCOPY */
+        BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);
+
+        for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+                if (!cmd->len[i])
+                        continue;
+                if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
+                        had_nocopy = true;
+                } else {
+                        /* NOCOPY must not be followed by normal! */
+                        if (WARN_ON(had_nocopy))
+                                return -EINVAL;
+                        copy_size += cmd->len[i];
+                }
+                cmd_size += cmd->len[i];
+        }
 
         /*
          * If any of the command structures end up being larger than
-         * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
-         * we will need to increase the size of the TFD entries
-         * Also, check to see if command buffer should not exceed the size
-         * of device_cmd and max_cmd_size.
+         * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
+         * allocated into separate TFDs, then we will need to
+         * increase the size of the buffers.
          */
-        if (WARN_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
-                    !(cmd->flags & CMD_SIZE_HUGE)))
-                return -EINVAL;
-
-        if (WARN_ON(fix_size > IWL_MAX_CMD_SIZE))
+        if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
                 return -EINVAL;
 
         if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
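The sizing loop introduced in this hunk distinguishes two totals: copy_size, the header plus every fragment that will be memcpy'd into the pre-allocated command buffer, and cmd_size, the header plus all fragments including IWL_HCMD_DFL_NOCOPY ones, which are later DMA-mapped in place rather than copied. A NOCOPY fragment is only allowed at the tail of the command, hence the WARN_ON when a copied fragment follows one. The stand-alone sketch below mirrors that accounting; the toy_* names, the header size and the two-fragment limit are invented for the example.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define TOY_MAX_TFDS   2      /* stands in for IWL_MAX_CMD_TFDS */
#define TOY_DFL_NOCOPY 0x1    /* stands in for IWL_HCMD_DFL_NOCOPY */
#define TOY_HDR_SIZE   4      /* stands in for sizeof(out_cmd->hdr) */

struct toy_host_cmd {
        const void *data[TOY_MAX_TFDS];
        size_t len[TOY_MAX_TFDS];
        unsigned int dataflags[TOY_MAX_TFDS];
};

/* Returns 0 on success and fills copy_size/cmd_size, mirroring the sizing
 * loop in iwl_enqueue_hcmd(): copied fragments grow both totals, NOCOPY
 * fragments only grow cmd_size and must come last. */
static int toy_size_cmd(const struct toy_host_cmd *cmd,
                        size_t *copy_size, size_t *cmd_size)
{
        bool had_nocopy = false;
        int i;

        *copy_size = TOY_HDR_SIZE;
        *cmd_size = TOY_HDR_SIZE;

        for (i = 0; i < TOY_MAX_TFDS; i++) {
                if (!cmd->len[i])
                        continue;
                if (cmd->dataflags[i] & TOY_DFL_NOCOPY) {
                        had_nocopy = true;
                } else {
                        /* a copied chunk may not follow a NOCOPY one */
                        if (had_nocopy)
                                return -1;
                        *copy_size += cmd->len[i];
                }
                *cmd_size += cmd->len[i];
        }
        return 0;
}

int main(void)
{
        static const char small[] = "cmd body";
        static const char big[4096] = { 0 };   /* e.g. a large scan payload */
        struct toy_host_cmd cmd = {
                .data      = { small, big },
                .len       = { sizeof(small), sizeof(big) },
                .dataflags = { 0, TOY_DFL_NOCOPY },
        };
        size_t copy_size, cmd_size;

        if (!toy_size_cmd(&cmd, &copy_size, &cmd_size))
                printf("copy_size=%zu cmd_size=%zu\n", copy_size, cmd_size);
        return 0;
}

This accounting is what lets the rest of the patch retire the "huge" command machinery (CMD_SIZE_HUGE, SEQ_HUGE_FRAME and the extra oversized command slot): a large payload such as a scan command can now travel as a NOCOPY fragment in its own TFD chunk instead of needing a dedicated oversized buffer.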
@@ -468,14 +622,6 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
                 return -EIO;
         }
 
-        /*
-         * As we only have a single huge buffer, check that the command
-         * is synchronous (otherwise buffers could end up being reused).
-         */
-
-        if (WARN_ON((cmd->flags & CMD_ASYNC) && (cmd->flags & CMD_SIZE_HUGE)))
-                return -EINVAL;
-
         spin_lock_irqsave(&priv->hcmd_lock, flags);
 
         if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
@@ -490,7 +636,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
                 return -ENOSPC;
         }
 
-        idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
+        idx = get_cmd_index(q, q->write_ptr);
         out_cmd = txq->cmd[idx];
         out_meta = &txq->meta[idx];
 
@@ -505,57 +651,84 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
         if (cmd->flags & CMD_ASYNC)
                 out_meta->callback = cmd->callback;
 
-        out_cmd->hdr.cmd = cmd->id;
-        memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
-
-        /* At this point, the out_cmd now has all of the incoming cmd
-         * information */
+        /* set up the header */
 
+        out_cmd->hdr.cmd = cmd->id;
         out_cmd->hdr.flags = 0;
         out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
                                             INDEX_TO_SEQ(q->write_ptr));
-        if (cmd->flags & CMD_SIZE_HUGE)
-                out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
+
+        /* and copy the data that needs to be copied */
 
-#ifdef CONFIG_IWLWIFI_DEBUG
-        switch (out_cmd->hdr.cmd) {
-        case REPLY_TX_LINK_QUALITY_CMD:
-        case SENSITIVITY_CMD:
-                IWL_DEBUG_HC_DUMP(priv, "Sending command %s (#%x), seq: 0x%04X, "
-                                "%d bytes at %d[%d]:%d\n",
-                                get_cmd_string(out_cmd->hdr.cmd),
-                                out_cmd->hdr.cmd,
-                                le16_to_cpu(out_cmd->hdr.sequence), fix_size,
-                                q->write_ptr, idx, priv->cmd_queue);
-                break;
-        default:
-                IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
-                        "%d bytes at %d[%d]:%d\n",
-                        get_cmd_string(out_cmd->hdr.cmd),
-                        out_cmd->hdr.cmd,
-                        le16_to_cpu(out_cmd->hdr.sequence), fix_size,
-                        q->write_ptr, idx, priv->cmd_queue);
+        cmd_dest = &out_cmd->cmd.payload[0];
+        for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+                if (!cmd->len[i])
+                        continue;
+                if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
+                        break;
+                memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
+                cmd_dest += cmd->len[i];
         }
-#endif
+
+        IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
+                        "%d bytes at %d[%d]:%d\n",
+                        get_cmd_string(out_cmd->hdr.cmd),
+                        out_cmd->hdr.cmd,
+                        le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
+                        q->write_ptr, idx, priv->cmd_queue);
+
         phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
-                                   fix_size, PCI_DMA_BIDIRECTIONAL);
+                                   copy_size, PCI_DMA_BIDIRECTIONAL);
         if (unlikely(pci_dma_mapping_error(priv->pci_dev, phys_addr))) {
                 idx = -ENOMEM;
                 goto out;
         }
 
         dma_unmap_addr_set(out_meta, mapping, phys_addr);
-        dma_unmap_len_set(out_meta, len, fix_size);
+        dma_unmap_len_set(out_meta, len, copy_size);
+
+        iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, copy_size, 1);
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+        trace_bufs[0] = &out_cmd->hdr;
+        trace_lens[0] = copy_size;
+        trace_idx = 1;
+#endif
+
+        for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+                if (!cmd->len[i])
+                        continue;
+                if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
+                        continue;
+                phys_addr = pci_map_single(priv->pci_dev, (void *)cmd->data[i],
+                                           cmd->len[i], PCI_DMA_TODEVICE);
+                if (pci_dma_mapping_error(priv->pci_dev, phys_addr)) {
+                        iwlagn_unmap_tfd(priv, out_meta,
+                                         &txq->tfds[q->write_ptr]);
+                        idx = -ENOMEM;
+                        goto out;
+                }
+
+                iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
+                                             cmd->len[i], 0);
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+                trace_bufs[trace_idx] = cmd->data[i];
+                trace_lens[trace_idx] = cmd->len[i];
+                trace_idx++;
+#endif
+        }
 
         out_meta->flags = cmd->flags | CMD_MAPPED;
 
         txq->need_update = 1;
 
-        trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags);
-
-        priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
-                                                   phys_addr, fix_size, 1,
-                                                   U32_PAD(cmd->len));
+        /* check that tracing gets all possible blocks */
+        BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+        trace_iwlwifi_dev_hcmd(priv, cmd->flags,
+                               trace_bufs[0], trace_lens[0],
+                               trace_bufs[1], trace_lens[1],
+                               trace_bufs[2], trace_lens[2]);
+#endif
 
         /* Increment and update queue's write index */
         q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
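On the mapping side, chunk 0 of the TFD now carries the command header plus any copied payload and is mapped PCI_DMA_BIDIRECTIONAL (the device writes the response back into it), while each NOCOPY fragment gets its own chunk mapped PCI_DMA_TODEVICE; if a later mapping fails, iwlagn_unmap_tfd() tears down what was already attached. As a usage illustration, a caller could hand a large buffer to the command path roughly as below. This is a hypothetical sketch meant to be read in the driver's context, not compiled on its own: only the id/len[]/data[]/dataflags[] fields and IWL_HCMD_DFL_NOCOPY follow what the diff shows, while the helper name toy_send_big_cmd and the assumption that the usual iwl_send_cmd_sync() wrapper is used are mine.

/* Hypothetical caller: send a fixed header plus a large, separately
 * allocated payload without copying the payload into the command slot. */
static int toy_send_big_cmd(struct iwl_priv *priv, u8 cmd_id,
                            const void *hdr, u16 hdr_len,
                            const void *payload, u16 payload_len)
{
        struct iwl_host_cmd cmd = {
                .id = cmd_id,
                .len = { hdr_len, payload_len },
                .data = { hdr, payload },
                /* fragment 1 is DMA-mapped in place instead of memcpy'd */
                .dataflags = { 0, IWL_HCMD_DFL_NOCOPY },
        };

        return iwl_send_cmd_sync(priv, &cmd);
}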
@@ -573,8 +746,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
  * need to be reclaimed. As result, some free space forms. If there is
  * enough free space (> low mark), wake the stack that feeds us.
  */
-static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
-                                   int idx, int cmd_idx)
+static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
 {
         struct iwl_tx_queue *txq = &priv->txq[txq_id];
         struct iwl_queue *q = &txq->q;
@@ -614,7 +786,6 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
         int txq_id = SEQ_TO_QUEUE(sequence);
         int index = SEQ_TO_INDEX(sequence);
         int cmd_index;
-        bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
         struct iwl_device_cmd *cmd;
         struct iwl_cmd_meta *meta;
         struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
@@ -632,14 +803,11 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
                 return;
         }
 
-        cmd_index = get_cmd_index(&txq->q, index, huge);
+        cmd_index = get_cmd_index(&txq->q, index);
         cmd = txq->cmd[cmd_index];
         meta = &txq->meta[cmd_index];
 
-        pci_unmap_single(priv->pci_dev,
-                         dma_unmap_addr(meta, mapping),
-                         dma_unmap_len(meta, len),
-                         PCI_DMA_BIDIRECTIONAL);
+        iwlagn_unmap_tfd(priv, meta, &txq->tfds[index]);
 
         /* Input error checking is done when commands are added to queue. */
         if (meta->flags & CMD_WANT_SKB) {
@@ -650,7 +818,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 
         spin_lock_irqsave(&priv->hcmd_lock, flags);
 
-        iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
+        iwl_hcmd_queue_reclaim(priv, txq_id, index);
 
         if (!(meta->flags & CMD_ASYNC)) {
                 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
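With the huge-command slot gone, completion handling becomes simpler as well: the response's sequence field carries only the queue and the ring index (no SEQ_HUGE_FRAME bit), and get_cmd_index() only has to fold that index into the command window. The stand-alone sketch below shows the idea with an invented bit layout and window size; the driver's own QUEUE_TO_SEQ/SEQ_TO_INDEX macros and n_window value define the real encoding.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative sequence layout: queue in bits 8..12, index in bits 0..7.
 * The real driver uses its own QUEUE_TO_SEQ()/SEQ_TO_INDEX() macros. */
#define TOY_SEQ(queue, index)  ((uint16_t)((((queue) & 0x1F) << 8) | ((index) & 0xFF)))
#define TOY_SEQ_TO_QUEUE(seq)  (((seq) >> 8) & 0x1F)
#define TOY_SEQ_TO_INDEX(seq)  ((seq) & 0xFF)

#define TOY_CMD_SLOTS 32   /* command buffers (the "window") */

/* Map a ring index to a command slot; with the huge-command special case
 * removed, this is all that is left for get_cmd_index() to do. */
static unsigned int toy_cmd_index(unsigned int ring_index)
{
        return ring_index & (TOY_CMD_SLOTS - 1);
}

int main(void)
{
        uint16_t seq = TOY_SEQ(9, 200);   /* e.g. cmd queue 9, write index 200 */

        assert(TOY_SEQ_TO_QUEUE(seq) == 9);
        assert(TOY_SEQ_TO_INDEX(seq) == 200);
        printf("queue=%u ring_index=%u slot=%u\n",
               (unsigned)TOY_SEQ_TO_QUEUE(seq),
               (unsigned)TOY_SEQ_TO_INDEX(seq),
               toy_cmd_index(TOY_SEQ_TO_INDEX(seq)));
        return 0;
}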