author     Johannes Berg <johannes.berg@intel.com>  2012-11-04 03:29:17 -0500
committer  Johannes Berg <johannes.berg@intel.com>  2012-11-05 10:08:58 -0500
commit     7c34158231b2eda8dcbd297be2bb1559e69cb433
tree       550e6e356d67681dea769e035f2801fe2dc2cb76
parent     8f7b8db6e0557c8437adf9371e020cd89a7e85dc
iwlwifi: handle DMA mapping failures
The RX replenish code doesn't handle DMA mapping failures, so when
a mapping does fail the unchecked (invalid) DMA address is used
anyway. This was reported by Shuah Khan, who hit the DMA mapping
framework warning "device driver failed to check map error".
Cc: stable@vger.kernel.org
Reported-by: Shuah Khan <shuah.khan@hp.com>
Reviewed-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
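
For reference, the check the framework warning refers to follows the
streaming DMA API pattern sketched below. This is a minimal sketch,
not code from the patch: the helper name rx_map_page and the -ENOMEM
return value are illustrative.

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    /*
     * Every address returned by dma_map_page() must be tested with
     * dma_mapping_error() before it is handed to the device.
     * Illustrative helper, not part of the patch.
     */
    static int rx_map_page(struct device *dev, struct page *page,
                           unsigned int order, dma_addr_t *addr)
    {
            *addr = dma_map_page(dev, page, 0,
                                 PAGE_SIZE << order, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, *addr)) {
                    /* Unwind: never hand an unchecked mapping to hardware. */
                    __free_pages(page, order);
                    return -ENOMEM;
            }
            return 0;
    }

On failure the patch takes the same approach in both RX paths: free
the page, clear rxb->page, and put the buffer back on the rx_used
list so a later replenish can retry, rather than leaving an invalid
address for the hardware to DMA into.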
 drivers/net/wireless/iwlwifi/pcie/rx.c | 23 +++++++++++++++++++++--
 1 file changed, 21 insertions(+), 2 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 17c8e5d82681..bb69f8f90b3b 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -321,6 +321,14 @@ static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
                         dma_map_page(trans->dev, page, 0,
                                      PAGE_SIZE << trans_pcie->rx_page_order,
                                      DMA_FROM_DEVICE);
+                if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+                        rxb->page = NULL;
+                        spin_lock_irqsave(&rxq->lock, flags);
+                        list_add(&rxb->list, &rxq->rx_used);
+                        spin_unlock_irqrestore(&rxq->lock, flags);
+                        __free_pages(page, trans_pcie->rx_page_order);
+                        return;
+                }
                 /* dma address must be no more than 36 bits */
                 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
                 /* and also 256 byte aligned! */
@@ -488,8 +496,19 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
                         dma_map_page(trans->dev, rxb->page, 0,
                                      PAGE_SIZE << trans_pcie->rx_page_order,
                                      DMA_FROM_DEVICE);
-                list_add_tail(&rxb->list, &rxq->rx_free);
-                rxq->free_count++;
+                if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+                        /*
+                         * free the page(s) as well to not break
+                         * the invariant that the items on the used
+                         * list have no page(s)
+                         */
+                        __free_pages(rxb->page, trans_pcie->rx_page_order);
+                        rxb->page = NULL;
+                        list_add_tail(&rxb->list, &rxq->rx_used);
+                } else {
+                        list_add_tail(&rxb->list, &rxq->rx_free);
+                        rxq->free_count++;
+                }
         } else
                 list_add_tail(&rxb->list, &rxq->rx_used);
         spin_unlock_irqrestore(&rxq->lock, flags);