author		Stanislaw Gruszka <sgruszka@redhat.com>	2011-03-31 11:36:26 -0400
committer	John W. Linville <linville@tuxdriver.com>	2011-04-07 15:34:11 -0400
commit		3598e1774c94e55c71b585340e7dc4538f310e3f (patch)
tree		09f7d500b76090f3cdb66bf2162d63d222696898 /drivers
parent		08b8099c128d601fd675b212ef8b10397706b633 (diff)
iwlwifi: fix enqueue hcmd race conditions
We mark a command as huge by using meta->flags from another (non-huge) command,
but those flags can be overridden when a non-huge command is enqueued, which
can lead to:
WARNING: at lib/dma-debug.c:696 dma_debug_device_change+0x1a3/0x1f0()
DMA-API: device driver has pending DMA allocations while released from device [count=1]
To fix this, introduce an additional CMD_MAPPED flag to mark a command as
mapped, and serialize iwl_enqueue_hcmd() with iwl_tx_cmd_complete() using
hcmd_lock. The serialization also fixes other possible race conditions,
because q->read_ptr and q->write_ptr are modified and used in parallel.
On the way, fix whitespace.
Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
Acked-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-dev.h	 1
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-tx.c	62
2 files changed, 34 insertions(+), 29 deletions(-)
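The heart of the problem and the fix can be sketched outside the driver. What follows is a minimal userspace analogue, not iwlwifi code: a pthread mutex stands in for priv->hcmd_lock, a flag word for txq->meta[i].flags, and every demo_* name is hypothetical. The enqueue path claims a slot only if no DMA mapping is still live in it, and the completion path releases the slot under the same lock, so the two paths can no longer overwrite each other's view of the flags.

/*
 * Illustrative userspace analogue of the race fix (NOT driver code).
 * All names (demo_*, META_MAPPED, hcmd_lock) are hypothetical stand-ins.
 */
#include <pthread.h>
#include <stdio.h>

#define META_MAPPED (1 << 3)	/* stands in for CMD_MAPPED */

static unsigned int meta_flags;	/* stands in for txq->meta[i].flags */
static pthread_mutex_t hcmd_lock = PTHREAD_MUTEX_INITIALIZER;

/* Enqueue path: claim the slot, refusing to reuse one still mapped. */
static int demo_enqueue(void)
{
	int ret = 0;

	pthread_mutex_lock(&hcmd_lock);
	if (meta_flags & META_MAPPED)
		ret = -1;		/* slot still owned by completion */
	else
		meta_flags = META_MAPPED;	/* mark "DMA mapping live" */
	pthread_mutex_unlock(&hcmd_lock);
	return ret;
}

/* Completion path: release the slot under the same lock. */
static void demo_complete(void)
{
	pthread_mutex_lock(&hcmd_lock);
	if (meta_flags & META_MAPPED)
		meta_flags = 0;		/* "unmap" and mark the slot free */
	pthread_mutex_unlock(&hcmd_lock);
}

int main(void)
{
	if (demo_enqueue() == 0)
		printf("slot claimed\n");
	if (demo_enqueue() != 0)
		printf("busy slot rejected, as the patch's WARN_ON does\n");
	demo_complete();
	return 0;
}

Compile with cc -pthread demo.c if you want to run the sketch.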
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index a5d438d91821..746587546a4f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -309,6 +309,7 @@ enum {
 	CMD_SIZE_HUGE = (1 << 0),
 	CMD_ASYNC = (1 << 1),
 	CMD_WANT_SKB = (1 << 2),
+	CMD_MAPPED = (1 << 3),
 };
 
 #define DEF_CMD_PAYLOAD_SIZE 320
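The new bit is an ownership marker rather than a size marker: it is set when the DMA mapping is created, tested before any unmap, and cleared only once the buffer is unmapped. A minimal sketch of that lifecycle, again with hypothetical demo_* names rather than the driver's real helpers:

#include <assert.h>
#include <stddef.h>

enum { DEMO_MAPPED = 1 << 3 };	/* hypothetical stand-in for CMD_MAPPED */

struct demo_meta {
	unsigned int flags;
	void *dma_handle;	/* stands in for the dma_unmap_addr/len state */
};

/* Map: record the mapping and set the ownership bit together. */
static void demo_map(struct demo_meta *m, void *handle)
{
	m->dma_handle = handle;
	m->flags |= DEMO_MAPPED;
}

/* Unmap: touch the mapping only if the bit says it is live, then clear it,
 * so cleanup paths can sweep every slot without double-unmapping. */
static void demo_unmap(struct demo_meta *m)
{
	if (!(m->flags & DEMO_MAPPED))
		return;
	m->dma_handle = NULL;	/* stands in for pci_unmap_single() */
	m->flags = 0;
}

int main(void)
{
	struct demo_meta m = { 0, NULL };
	int buf;

	demo_map(&m, &buf);
	demo_unmap(&m);
	demo_unmap(&m);		/* second pass is a harmless no-op */
	assert(m.flags == 0);
	return 0;
}

This is exactly the shape of the CMD_MAPPED checks that iwl_cmd_queue_unmap() gains below.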
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 277c9175dcf6..39a4180ee854 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -149,32 +149,31 @@ void iwl_cmd_queue_unmap(struct iwl_priv *priv)
 	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
 	struct iwl_queue *q = &txq->q;
 	int i;
-	bool huge = false;
 
 	if (q->n_bd == 0)
 		return;
 
 	while (q->read_ptr != q->write_ptr) {
-		/* we have no way to tell if it is a huge cmd ATM */
 		i = get_cmd_index(q, q->read_ptr, 0);
 
-		if (txq->meta[i].flags & CMD_SIZE_HUGE)
-			huge = true;
-		else
+		if (txq->meta[i].flags & CMD_MAPPED) {
 			pci_unmap_single(priv->pci_dev,
 					 dma_unmap_addr(&txq->meta[i], mapping),
 					 dma_unmap_len(&txq->meta[i], len),
 					 PCI_DMA_BIDIRECTIONAL);
+			txq->meta[i].flags = 0;
+		}
 
 		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
 	}
 
-	if (huge) {
-		i = q->n_window;
+	i = q->n_window;
+	if (txq->meta[i].flags & CMD_MAPPED) {
 		pci_unmap_single(priv->pci_dev,
 				 dma_unmap_addr(&txq->meta[i], mapping),
 				 dma_unmap_len(&txq->meta[i], len),
 				 PCI_DMA_BIDIRECTIONAL);
+		txq->meta[i].flags = 0;
 	}
 }
 
@@ -463,7 +462,11 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 		return -EIO;
 	}
 
+	spin_lock_irqsave(&priv->hcmd_lock, flags);
+
 	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+		spin_unlock_irqrestore(&priv->hcmd_lock, flags);
+
 		IWL_ERR(priv, "No space in command queue\n");
 		if (priv->cfg->ops->lib->tt_ops.ct_kill_check) {
 			is_ct_kill =
@@ -476,22 +479,17 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 		return -ENOSPC;
 	}
 
-	spin_lock_irqsave(&priv->hcmd_lock, flags);
-
-	/* If this is a huge cmd, mark the huge flag also on the meta.flags
-	 * of the _original_ cmd. This is used for DMA mapping clean up.
-	 */
-	if (cmd->flags & CMD_SIZE_HUGE) {
-		idx = get_cmd_index(q, q->write_ptr, 0);
-		txq->meta[idx].flags = CMD_SIZE_HUGE;
-	}
-
 	idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
 	out_cmd = txq->cmd[idx];
 	out_meta = &txq->meta[idx];
 
+	if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
+		spin_unlock_irqrestore(&priv->hcmd_lock, flags);
+		return -ENOSPC;
+	}
+
 	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
-	out_meta->flags = cmd->flags;
+	out_meta->flags = cmd->flags | CMD_MAPPED;
 	if (cmd->flags & CMD_WANT_SKB)
 		out_meta->source = cmd;
 	if (cmd->flags & CMD_ASYNC)
@@ -609,6 +607,10 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 	struct iwl_device_cmd *cmd;
 	struct iwl_cmd_meta *meta;
 	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
+	unsigned long flags;
+	void (*callback) (struct iwl_priv *priv, struct iwl_device_cmd *cmd,
+			  struct iwl_rx_packet *pkt);
+
 
 	/* If a Tx command is being handled and it isn't in the actual
 	 * command queue then there a command routing bug has been introduced
@@ -622,14 +624,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 		return;
 	}
 
-	/* If this is a huge cmd, clear the huge flag on the meta.flags
-	 * of the _original_ cmd. So that iwl_cmd_queue_free won't unmap
-	 * the DMA buffer for the scan (huge) command.
-	 */
-	if (huge) {
-		cmd_index = get_cmd_index(&txq->q, index, 0);
-		txq->meta[cmd_index].flags = 0;
-	}
+	spin_lock_irqsave(&priv->hcmd_lock, flags);
+
 	cmd_index = get_cmd_index(&txq->q, index, huge);
 	cmd = txq->cmd[cmd_index];
 	meta = &txq->meta[cmd_index];
@@ -639,12 +635,13 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 			 dma_unmap_len(meta, len),
 			 PCI_DMA_BIDIRECTIONAL);
 
+	callback = NULL;
 	/* Input error checking is done when commands are added to queue. */
 	if (meta->flags & CMD_WANT_SKB) {
 		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
 		rxb->page = NULL;
-	} else if (meta->callback)
-		meta->callback(priv, cmd, pkt);
+	} else
+		callback = meta->callback;
 
 	iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
 
@@ -654,5 +651,12 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 			       get_cmd_string(cmd->hdr.cmd));
 		wake_up_interruptible(&priv->wait_command_queue);
 	}
+
+	/* Mark as unmapped */
 	meta->flags = 0;
+
+	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
+
+	if (callback)
+		callback(priv, cmd, pkt);
 }
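One detail in the last hunk is worth calling out: meta->callback is no longer invoked while hcmd_lock is held. The completion path snapshots the callback pointer under the lock and calls it only after the unlock, so a callback that re-enters the command machinery (and therefore re-takes hcmd_lock) cannot self-deadlock. A minimal sketch of that pattern, with hypothetical demo_* names and a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

typedef void (*demo_cb)(void *arg);

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static demo_cb pending_cb;
static void *pending_arg;

static void demo_complete(void)
{
	demo_cb callback;
	void *arg;

	pthread_mutex_lock(&demo_lock);
	/* Snapshot the callback while the state is protected... */
	callback = pending_cb;
	arg = pending_arg;
	pending_cb = NULL;	/* state fully settled before unlock */
	pthread_mutex_unlock(&demo_lock);

	/* ...but invoke it only after dropping the lock, so it may
	 * safely re-take demo_lock (e.g. to enqueue another command). */
	if (callback)
		callback(arg);
}

static void demo_print(void *arg)
{
	printf("callback ran outside the lock: %s\n", (const char *)arg);
}

int main(void)
{
	/* Single-threaded demo, so the setup needs no locking. */
	pending_cb = demo_print;
	pending_arg = "ok";
	demo_complete();
	return 0;
}

The same snapshot-then-unlock shape explains why the patch adds the local callback variable instead of calling meta->callback in place.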