| author | Emmanuel Grumbach <emmanuel.grumbach@intel.com> | 2011-10-14 15:54:47 -0400 |
|---|---|---|
| committer | John W. Linville <linville@tuxdriver.com> | 2011-11-02 15:23:10 -0400 |
| commit | cda4ee3f2e6907e89baf7a12e02e02fa208c0625 | |
| tree | aeaedda0e811a1d83cbf00de9cb6673f4ca696f7 | |
| parent | c125d5e846894043361c0c89c1140be8fd6600b7 | |
iwlagn: fix the race in the unmapping of the HCMD
As Stanislaw pointed out, my patch
iwlagn: fix a race in the unmapping of the TFDs
solved only part of the problem. The race still exists for TFDs of
the host commands. Fix that too.
Reported-by: Stanislaw Gruszka <sgruszka@redhat.com>
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
| -rw-r--r-- | drivers/net/wireless/iwlwifi/iwl-trans-pcie.c | 12 |
1 file changed, 8 insertions, 4 deletions
```diff
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index 8e8c75c997ee..da3411057afc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -407,6 +407,7 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
 	struct iwl_queue *q = &txq->q;
 	enum dma_data_direction dma_dir;
 	unsigned long flags;
+	spinlock_t *lock;
 
 	if (!q->n_bd)
 		return;
@@ -414,19 +415,22 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
 	/* In the command queue, all the TBs are mapped as BIDI
 	 * so unmap them as such.
 	 */
-	if (txq_id == trans->shrd->cmd_queue)
+	if (txq_id == trans->shrd->cmd_queue) {
 		dma_dir = DMA_BIDIRECTIONAL;
-	else
+		lock = &trans->hcmd_lock;
+	} else {
 		dma_dir = DMA_TO_DEVICE;
+		lock = &trans->shrd->sta_lock;
+	}
 
-	spin_lock_irqsave(&trans->shrd->sta_lock, flags);
+	spin_lock_irqsave(lock, flags);
 	while (q->write_ptr != q->read_ptr) {
 		/* The read_ptr needs to bound by q->n_window */
 		iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
 				    dma_dir);
 		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
 	}
-	spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
+	spin_unlock_irqrestore(lock, flags);
 }
 
 /**
```
