Diffstat (limited to 'drivers/net/wireless/iwlegacy/iwl-tx.c')
 -rw-r--r--   drivers/net/wireless/iwlegacy/iwl-tx.c   52
 1 file changed, 24 insertions, 28 deletions
diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c
index a227773cb384..4fff995c6f3e 100644
--- a/drivers/net/wireless/iwlegacy/iwl-tx.c
+++ b/drivers/net/wireless/iwlegacy/iwl-tx.c
@@ -146,33 +146,32 @@ void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv)
 {
         struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
         struct iwl_queue *q = &txq->q;
-        bool huge = false;
         int i;
 
         if (q->n_bd == 0)
                 return;
 
         while (q->read_ptr != q->write_ptr) {
-                /* we have no way to tell if it is a huge cmd ATM */
                 i = iwl_legacy_get_cmd_index(q, q->read_ptr, 0);
 
-                if (txq->meta[i].flags & CMD_SIZE_HUGE)
-                        huge = true;
-                else
+                if (txq->meta[i].flags & CMD_MAPPED) {
                         pci_unmap_single(priv->pci_dev,
                                 dma_unmap_addr(&txq->meta[i], mapping),
                                 dma_unmap_len(&txq->meta[i], len),
                                 PCI_DMA_BIDIRECTIONAL);
+                        txq->meta[i].flags = 0;
+                }
 
                 q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
         }
 
-        if (huge) {
-                i = q->n_window;
+        i = q->n_window;
+        if (txq->meta[i].flags & CMD_MAPPED) {
                 pci_unmap_single(priv->pci_dev,
                         dma_unmap_addr(&txq->meta[i], mapping),
                         dma_unmap_len(&txq->meta[i], len),
                         PCI_DMA_BIDIRECTIONAL);
+                txq->meta[i].flags = 0;
         }
 }
 EXPORT_SYMBOL(iwl_legacy_cmd_queue_unmap);
@@ -467,29 +466,27 @@ int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
                 return -EIO;
         }
 
+        spin_lock_irqsave(&priv->hcmd_lock, flags);
+
         if (iwl_legacy_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
-                IWL_ERR(priv, "No space in command queue\n");
-                IWL_ERR(priv, "Restarting adapter due to queue full\n");
+                spin_unlock_irqrestore(&priv->hcmd_lock, flags);
+
+                IWL_ERR(priv, "Restarting adapter due to command queue full\n");
                 queue_work(priv->workqueue, &priv->restart);
                 return -ENOSPC;
         }
 
-        spin_lock_irqsave(&priv->hcmd_lock, flags);
-
-        /* If this is a huge cmd, mark the huge flag also on the meta.flags
-         * of the _original_ cmd. This is used for DMA mapping clean up.
-         */
-        if (cmd->flags & CMD_SIZE_HUGE) {
-                idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
-                txq->meta[idx].flags = CMD_SIZE_HUGE;
-        }
-
         idx = iwl_legacy_get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
         out_cmd = txq->cmd[idx];
         out_meta = &txq->meta[idx];
 
+        if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
+                spin_unlock_irqrestore(&priv->hcmd_lock, flags);
+                return -ENOSPC;
+        }
+
         memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
-        out_meta->flags = cmd->flags;
+        out_meta->flags = cmd->flags | CMD_MAPPED;
         if (cmd->flags & CMD_WANT_SKB)
                 out_meta->source = cmd;
         if (cmd->flags & CMD_ASYNC)
@@ -610,6 +607,7 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
         struct iwl_device_cmd *cmd;
         struct iwl_cmd_meta *meta;
         struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
+        unsigned long flags;
 
         /* If a Tx command is being handled and it isn't in the actual
          * command queue then there a command routing bug has been introduced
@@ -623,14 +621,6 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
                 return;
         }
 
-        /* If this is a huge cmd, clear the huge flag on the meta.flags
-         * of the _original_ cmd. So that iwl_legacy_cmd_queue_free won't unmap
-         * the DMA buffer for the scan (huge) command.
-         */
-        if (huge) {
-                cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, 0);
-                txq->meta[cmd_index].flags = 0;
-        }
         cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, huge);
         cmd = txq->cmd[cmd_index];
         meta = &txq->meta[cmd_index];
@@ -647,6 +637,8 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
         } else if (meta->callback)
                 meta->callback(priv, cmd, pkt);
 
+        spin_lock_irqsave(&priv->hcmd_lock, flags);
+
         iwl_legacy_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
 
         if (!(meta->flags & CMD_ASYNC)) {
@@ -655,6 +647,10 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
                        iwl_legacy_get_cmd_string(cmd->hdr.cmd));
                 wake_up_interruptible(&priv->wait_command_queue);
         }
+
+        /* Mark as unmapped */
         meta->flags = 0;
+
+        spin_unlock_irqrestore(&priv->hcmd_lock, flags);
 }
 EXPORT_SYMBOL(iwl_legacy_tx_cmd_complete);
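
Taken together, the hunks above drop the CMD_SIZE_HUGE / "huge" bookkeeping in favour of a per-entry CMD_MAPPED flag, and widen the hcmd_lock critical sections: iwl_legacy_enqueue_hcmd() now checks queue space, picks the slot and sets CMD_MAPPED while holding the lock, iwl_legacy_tx_cmd_complete() reclaims the slot and clears the flag under the same lock, and iwl_legacy_cmd_queue_unmap() only unmaps entries whose flag is actually set. The fragment below is a minimal user-space sketch of that mark-under-lock / clear-under-lock pattern, not driver code; all identifiers (slot_meta, cmd_queue_lock, SLOT_MAPPED, enqueue_cmd, ...) are hypothetical and only the bookkeeping idea mirrors the patch.

/* Simplified, self-contained sketch of CMD_MAPPED-style slot tracking.
 * Build with: gcc -std=c99 -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

#define SLOT_MAPPED 0x1U          /* plays the role of CMD_MAPPED */
#define QUEUE_SIZE  4

struct slot_meta {
        unsigned int flags;        /* SLOT_MAPPED set while a buffer is "mapped" */
};

static struct slot_meta queue[QUEUE_SIZE];
static pthread_mutex_t cmd_queue_lock = PTHREAD_MUTEX_INITIALIZER;

/* Enqueue path: claim a slot and mark it mapped under the lock,
 * analogous to iwl_legacy_enqueue_hcmd() setting CMD_MAPPED. */
static int enqueue_cmd(int idx)
{
        int ret = 0;

        pthread_mutex_lock(&cmd_queue_lock);
        if (queue[idx].flags & SLOT_MAPPED)
                ret = -1;                  /* slot still in flight */
        else
                queue[idx].flags |= SLOT_MAPPED;
        pthread_mutex_unlock(&cmd_queue_lock);
        return ret;
}

/* Completion path: clear the flag under the same lock,
 * analogous to iwl_legacy_tx_cmd_complete(). */
static void complete_cmd(int idx)
{
        pthread_mutex_lock(&cmd_queue_lock);
        queue[idx].flags = 0;              /* mark as unmapped */
        pthread_mutex_unlock(&cmd_queue_lock);
}

/* Teardown path: only touch slots that are really marked mapped,
 * analogous to the reworked iwl_legacy_cmd_queue_unmap(). */
static void unmap_all(void)
{
        for (int i = 0; i < QUEUE_SIZE; i++) {
                if (queue[i].flags & SLOT_MAPPED) {
                        /* the real driver calls pci_unmap_single() here */
                        queue[i].flags = 0;
                }
        }
}

int main(void)
{
        enqueue_cmd(0);
        complete_cmd(0);                   /* slot 0 cleared on completion */
        enqueue_cmd(1);                    /* slot 1 left "mapped" */
        unmap_all();                       /* only slot 1 is unmapped here */
        printf("slot 1 flags after unmap_all: %u\n", queue[1].flags);
        return 0;
}

The benefit of this arrangement, and of the patch, is that the teardown path no longer has to guess which entries hold a DMA mapping (the old "huge" heuristic); each slot carries its own authoritative flag, updated only inside the lock that serializes enqueue and completion.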
