author		Johannes Berg <johannes.berg@intel.com>	2011-05-13 14:57:40 -0400
committer	Wey-Yi Guy <wey-yi.w.guy@intel.com>	2011-05-13 15:00:41 -0400
commit		4ce7cc2b09553a91d4aea014c39674685715173a (patch)
tree		0a2e4b8ae8852a6404c479a7b605ae4b0af6b72d /drivers/net/wireless/iwlwifi/iwl-tx.c
parent		4c42db0f04e55d48f0ea9f424144a5211b7a155c (diff)
iwlagn: support multiple TBs per command
The current "huge" command handling is a bit confusing, and very limited since only one command may be huge at a time. Additionally, we often copy data around quite pointlessly since we could instead map the existing scan buffer, for example, and use it directly.

This patch makes that possible. The first change is that multiple buffers may be given to each command (this change was prepared earlier, so call sites don't need to change). Each of those buffers can be mapped and attached to a TB in the TFD, and the command header can use a TB (the first one) in the TFD as well.

Doing this allows getting rid of huge commands in favour of mapping existing buffers. The beacon transmission is also optimised to not copy the SKB at all but to use multiple TBs.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
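To illustrate the interface this enables, here is a minimal sketch of a call site (hypothetical: the scan variables are illustrative, and only the data[]/len[]/dataflags[] fields and IWL_HCMD_DFL_NOCOPY are taken from the patch) that copies a small fixed part into the command buffer while mapping a large payload directly as its own TB:

	struct iwl_host_cmd cmd = {
		.id = REPLY_SCAN_CMD,
		/* first chunk is copied into the command buffer as usual */
		.len = { sizeof(struct iwl_scan_cmd), scan_data_len },
		.data = { scan_cmd, scan_data },
		/* second chunk is DMA-mapped in place as a separate TB */
		.dataflags = { 0, IWL_HCMD_DFL_NOCOPY },
	};

	ret = iwl_send_cmd(priv, &cmd);

Note that iwl_enqueue_hcmd() rejects (with a WARN_ON) any command in which a NOCOPY chunk is followed by a copied chunk, since all copied data must sit contiguously at the start of the command buffer.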
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-tx.c')
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-tx.c	231
1 file changed, 124 insertions(+), 107 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index a47558f0ee3d..2f6b38cfcc13 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -125,25 +125,13 @@ static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
 	return tfd->num_tbs & 0x1f;
 }
 
-/**
- * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
- * @priv - driver private data
- * @txq - tx queue
- *
- * Does NOT advance any TFD circular buffer read/write indexes
- * Does NOT free the TFD itself (which is within circular buffer)
- */
-void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
+			     struct iwl_tfd *tfd)
 {
-	struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
-	struct iwl_tfd *tfd;
 	struct pci_dev *dev = priv->pci_dev;
-	int index = txq->q.read_ptr;
 	int i;
 	int num_tbs;
 
-	tfd = &tfd_tmp[index];
-
 	/* Sanity check on number of chunks */
 	num_tbs = iwl_tfd_get_num_tbs(tfd);
 
@@ -156,14 +144,30 @@ void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 	/* Unmap tx_cmd */
 	if (num_tbs)
 		pci_unmap_single(dev,
-				 dma_unmap_addr(&txq->meta[index], mapping),
-				 dma_unmap_len(&txq->meta[index], len),
+				 dma_unmap_addr(meta, mapping),
+				 dma_unmap_len(meta, len),
 				 PCI_DMA_BIDIRECTIONAL);
 
 	/* Unmap chunks, if any. */
 	for (i = 1; i < num_tbs; i++)
 		pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
 				iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
+}
+
+/**
+ * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * @priv - driver private data
+ * @txq - tx queue
+ *
+ * Does NOT advance any TFD circular buffer read/write indexes
+ * Does NOT free the TFD itself (which is within circular buffer)
+ */
+void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+{
+	struct iwl_tfd *tfd_tmp = txq->tfds;
+	int index = txq->q.read_ptr;
+
+	iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index]);
 
 	/* free SKB */
 	if (txq->txb) {
@@ -189,7 +193,7 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
 	u32 num_tbs;
 
 	q = &txq->q;
-	tfd_tmp = (struct iwl_tfd *)txq->tfds;
+	tfd_tmp = txq->tfds;
 	tfd = &tfd_tmp[q->write_ptr];
 
 	if (reset)
@@ -303,7 +307,7 @@ void iwl_cmd_queue_unmap(struct iwl_priv *priv)
 		return;
 
 	while (q->read_ptr != q->write_ptr) {
-		i = get_cmd_index(q, q->read_ptr, 0);
+		i = get_cmd_index(q, q->read_ptr);
 
 		if (txq->meta[i].flags & CMD_MAPPED) {
 			pci_unmap_single(priv->pci_dev,
@@ -315,15 +319,6 @@ void iwl_cmd_queue_unmap(struct iwl_priv *priv)
 
 		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
 	}
-
-	i = q->n_window;
-	if (txq->meta[i].flags & CMD_MAPPED) {
-		pci_unmap_single(priv->pci_dev,
-				 dma_unmap_addr(&txq->meta[i], mapping),
-				 dma_unmap_len(&txq->meta[i], len),
-				 PCI_DMA_BIDIRECTIONAL);
-		txq->meta[i].flags = 0;
-	}
 }
 
 /**
@@ -343,7 +338,7 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
 	iwl_cmd_queue_unmap(priv);
 
 	/* De-alloc array of command/tx buffers */
-	for (i = 0; i <= TFD_CMD_SLOTS; i++)
+	for (i = 0; i < TFD_CMD_SLOTS; i++)
 		kfree(txq->cmd[i]);
 
 	/* De-alloc circular buffer of TFDs */
@@ -483,33 +478,17 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 {
 	int i, len;
 	int ret;
-	int actual_slots = slots_num;
-
-	/*
-	 * Alloc buffer array for commands (Tx or other types of commands).
-	 * For the command queue (#4/#9), allocate command space + one big
-	 * command for scan, since scan command is very huge; the system will
-	 * not have two scans at the same time, so only one is needed.
-	 * For normal Tx queues (all other queues), no super-size command
-	 * space is needed.
-	 */
-	if (txq_id == priv->cmd_queue)
-		actual_slots++;
 
-	txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
+	txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * slots_num,
 			    GFP_KERNEL);
-	txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
+	txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * slots_num,
 			   GFP_KERNEL);
 
 	if (!txq->meta || !txq->cmd)
 		goto out_free_arrays;
 
 	len = sizeof(struct iwl_device_cmd);
-	for (i = 0; i < actual_slots; i++) {
-		/* only happens for cmd queue */
-		if (i == slots_num)
-			len = IWL_MAX_CMD_SIZE;
-
+	for (i = 0; i < slots_num; i++) {
 		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
 		if (!txq->cmd[i])
 			goto err;
@@ -544,7 +523,7 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 
 	return 0;
 err:
-	for (i = 0; i < actual_slots; i++)
+	for (i = 0; i < slots_num; i++)
 		kfree(txq->cmd[i]);
 out_free_arrays:
 	kfree(txq->meta);
@@ -592,23 +571,44 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	dma_addr_t phys_addr;
 	unsigned long flags;
 	u32 idx;
-	u16 fix_size;
+	u16 copy_size, cmd_size;
 	bool is_ct_kill = false;
+	bool had_nocopy = false;
+	int i;
+	u8 *cmd_dest;
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
+	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
+	int trace_idx;
+#endif
 
-	fix_size = (u16)(cmd->len[0] + sizeof(out_cmd->hdr));
+	copy_size = sizeof(out_cmd->hdr);
+	cmd_size = sizeof(out_cmd->hdr);
+
+	/* need one for the header if the first is NOCOPY */
+	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);
+
+	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+		if (!cmd->len[i])
+			continue;
+		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
+			had_nocopy = true;
+		} else {
+			/* NOCOPY must not be followed by normal! */
+			if (WARN_ON(had_nocopy))
+				return -EINVAL;
+			copy_size += cmd->len[i];
+		}
+		cmd_size += cmd->len[i];
+	}
 
 	/*
 	 * If any of the command structures end up being larger than
-	 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
-	 * we will need to increase the size of the TFD entries
-	 * Also, check to see if command buffer should not exceed the size
-	 * of device_cmd and max_cmd_size.
+	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
+	 * allocated into separate TFDs, then we will need to
+	 * increase the size of the buffers.
 	 */
-	if (WARN_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
-		    !(cmd->flags & CMD_SIZE_HUGE)))
-		return -EINVAL;
-
-	if (WARN_ON(fix_size > IWL_MAX_CMD_SIZE))
+	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
 		return -EINVAL;
 
 	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
@@ -617,14 +617,6 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 		return -EIO;
 	}
 
-	/*
-	 * As we only have a single huge buffer, check that the command
-	 * is synchronous (otherwise buffers could end up being reused).
-	 */
-
-	if (WARN_ON((cmd->flags & CMD_ASYNC) && (cmd->flags & CMD_SIZE_HUGE)))
-		return -EINVAL;
-
 	spin_lock_irqsave(&priv->hcmd_lock, flags);
 
 	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
@@ -639,7 +631,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 		return -ENOSPC;
 	}
 
-	idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
+	idx = get_cmd_index(q, q->write_ptr);
 	out_cmd = txq->cmd[idx];
 	out_meta = &txq->meta[idx];
 
@@ -654,55 +646,84 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	if (cmd->flags & CMD_ASYNC)
 		out_meta->callback = cmd->callback;
 
-	out_cmd->hdr.cmd = cmd->id;
-	memcpy(&out_cmd->cmd.payload, cmd->data[0], cmd->len[0]);
-
-	/* At this point, the out_cmd now has all of the incoming cmd
-	 * information */
+	/* set up the header */
 
+	out_cmd->hdr.cmd = cmd->id;
 	out_cmd->hdr.flags = 0;
 	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
 					INDEX_TO_SEQ(q->write_ptr));
-	if (cmd->flags & CMD_SIZE_HUGE)
-		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
+
+	/* and copy the data that needs to be copied */
 
-#ifdef CONFIG_IWLWIFI_DEBUG
-	switch (out_cmd->hdr.cmd) {
-	case REPLY_TX_LINK_QUALITY_CMD:
-	case SENSITIVITY_CMD:
-		IWL_DEBUG_HC_DUMP(priv, "Sending command %s (#%x), seq: 0x%04X, "
-				"%d bytes at %d[%d]:%d\n",
-				get_cmd_string(out_cmd->hdr.cmd),
-				out_cmd->hdr.cmd,
-				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
-				q->write_ptr, idx, priv->cmd_queue);
-		break;
-	default:
-		IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
-				"%d bytes at %d[%d]:%d\n",
-				get_cmd_string(out_cmd->hdr.cmd),
-				out_cmd->hdr.cmd,
-				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
-				q->write_ptr, idx, priv->cmd_queue);
+	cmd_dest = &out_cmd->cmd.payload[0];
+	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+		if (!cmd->len[i])
+			continue;
+		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
+			break;
+		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
+		cmd_dest += cmd->len[i];
 	}
-#endif
+
+	IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
+		     "%d bytes at %d[%d]:%d\n",
+		     get_cmd_string(out_cmd->hdr.cmd),
+		     out_cmd->hdr.cmd,
+		     le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
+		     q->write_ptr, idx, priv->cmd_queue);
+
 	phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
-				   fix_size, PCI_DMA_BIDIRECTIONAL);
+				   copy_size, PCI_DMA_BIDIRECTIONAL);
 	if (unlikely(pci_dma_mapping_error(priv->pci_dev, phys_addr))) {
 		idx = -ENOMEM;
 		goto out;
 	}
 
 	dma_unmap_addr_set(out_meta, mapping, phys_addr);
-	dma_unmap_len_set(out_meta, len, fix_size);
+	dma_unmap_len_set(out_meta, len, copy_size);
+
+	iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, copy_size, 1);
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+	trace_bufs[0] = &out_cmd->hdr;
+	trace_lens[0] = copy_size;
+	trace_idx = 1;
+#endif
+
+	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+		if (!cmd->len[i])
+			continue;
+		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
+			continue;
+		phys_addr = pci_map_single(priv->pci_dev, (void *)cmd->data[i],
+					   cmd->len[i], PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(priv->pci_dev, phys_addr)) {
+			iwlagn_unmap_tfd(priv, out_meta,
+					 &txq->tfds[q->write_ptr]);
+			idx = -ENOMEM;
+			goto out;
+		}
+
+		iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
+					     cmd->len[i], 0);
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+		trace_bufs[trace_idx] = cmd->data[i];
+		trace_lens[trace_idx] = cmd->len[i];
+		trace_idx++;
+#endif
+	}
 
 	out_meta->flags = cmd->flags | CMD_MAPPED;
 
 	txq->need_update = 1;
 
-	trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags);
-
-	iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, fix_size, 1);
+	/* check that tracing gets all possible blocks */
+	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+	trace_iwlwifi_dev_hcmd(priv, cmd->flags,
+			       trace_bufs[0], trace_lens[0],
+			       trace_bufs[1], trace_lens[1],
+			       trace_bufs[2], trace_lens[2]);
+#endif
 
 	/* Increment and update queue's write index */
 	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
@@ -761,7 +782,6 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 	int txq_id = SEQ_TO_QUEUE(sequence);
 	int index = SEQ_TO_INDEX(sequence);
 	int cmd_index;
-	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
 	struct iwl_device_cmd *cmd;
 	struct iwl_cmd_meta *meta;
 	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
@@ -779,14 +799,11 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 		return;
 	}
 
-	cmd_index = get_cmd_index(&txq->q, index, huge);
+	cmd_index = get_cmd_index(&txq->q, index);
 	cmd = txq->cmd[cmd_index];
 	meta = &txq->meta[cmd_index];
 
-	pci_unmap_single(priv->pci_dev,
-			 dma_unmap_addr(meta, mapping),
-			 dma_unmap_len(meta, len),
-			 PCI_DMA_BIDIRECTIONAL);
+	iwlagn_unmap_tfd(priv, meta, &txq->tfds[index]);
 
 	/* Input error checking is done when commands are added to queue. */
 	if (meta->flags & CMD_WANT_SKB) {