author		Adrian Hunter <adrian.hunter@intel.com>		2011-12-16 04:01:38 -0500
committer	Vinod Koul <vinod.koul@linux.intel.com>		2011-12-23 11:20:57 -0500
commit		1fded07513ea57b5ee128958ff119e05588b7227 (patch)
tree		b25d48ee587c5c3a4e8703de89204d51dd459c5b /drivers/dma
parent		0ef7e206d6a5a5de0cd84ed4925a4f688c62e732 (diff)
dmaengine: intel_mid_dma: locking and freeing fixes
Two issues are fixed:

1. DMA descriptors are reused, so when freeing the lli structures that are
   linked to them, the pointer must be nulled.
2. midc_scan_descriptors() must be called with the channel lock held.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
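The first fix is easiest to see in isolation. Below is a minimal userspace
sketch, not driver code: fake_desc and complete_desc() are invented names,
and plain malloc()/free() stand in for the pci_pool_* calls the driver
actually uses. It shows why a recycled descriptor must drop its stale lli
pointer once the list has been freed.

/* Sketch only: models a descriptor object that is reused between transfers. */
#include <stdio.h>
#include <stdlib.h>

struct fake_desc {
	void *lli;			/* stands in for desc->lli */
};

static void complete_desc(struct fake_desc *d)
{
	if (d->lli != NULL) {
		free(d->lli);		/* the driver frees into its pci_pool here */
		d->lli = NULL;		/* the fix: clear the stale pointer */
	}
	/* the descriptor then goes back on the free list and may be reused */
}

int main(void)
{
	struct fake_desc d = { .lli = malloc(64) };

	complete_desc(&d);		/* first completion frees the list */
	complete_desc(&d);		/* reuse: without the NULL this would free stale memory */
	printf("lli after reuse: %p\n", d.lli);
	return 0;
}

Without the assignment to NULL, the second completion of the recycled
descriptor would hand an already-freed pointer back to the allocator; with
it, the guard simply skips the free.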
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/intel_mid_dma.c	7
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 01929ed6659d..d4b961677e02 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -280,7 +280,8 @@ static void midc_dostart(struct intel_mid_dma_chan *midc,
  * callbacks but must be called with the lock held.
  */
 static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
 		struct intel_mid_dma_desc *desc)
+		__releases(&midc->lock) __acquires(&midc->lock)
 {
 	struct dma_async_tx_descriptor *txd = &desc->txd;
 	dma_async_tx_callback callback_txd = NULL;
@@ -311,6 +312,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
 		pci_pool_free(desc->lli_pool, desc->lli,
 				desc->lli_phys);
 		pci_pool_destroy(desc->lli_pool);
+		desc->lli = NULL;
 	}
 	list_move(&desc->desc_node, &midc->free_list);
 	midc->busy = false;
@@ -490,7 +492,9 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
 
 	ret = dma_async_is_complete(cookie, last_complete, last_used);
 	if (ret != DMA_SUCCESS) {
+		spin_lock_bh(&midc->lock);
 		midc_scan_descriptors(to_middma_device(chan->device), midc);
+		spin_unlock_bh(&midc->lock);
 
 		last_complete = midc->completed;
 		last_used = chan->cookie;
@@ -566,6 +570,7 @@ static int intel_mid_dma_device_control(struct dma_chan *chan,
 		pci_pool_free(desc->lli_pool, desc->lli,
 				desc->lli_phys);
 		pci_pool_destroy(desc->lli_pool);
+		desc->lli = NULL;
 	}
 	list_move(&desc->desc_node, &midc->free_list);
 }
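The second fix follows the usual "caller holds the lock" contract. The
sketch below is a userspace analogue only, with a pthread mutex standing in
for the channel spinlock and invented names (fake_chan, scan_descriptors,
tx_status); it mirrors how intel_mid_dma_tx_status() now wraps the scan in
spin_lock_bh()/spin_unlock_bh() instead of calling it bare.

#include <pthread.h>
#include <stdio.h>

struct fake_chan {
	pthread_mutex_t lock;		/* stands in for midc->lock */
	int completed;
};

/* Must be entered with chan->lock held, like midc_scan_descriptors(). */
static void scan_descriptors(struct fake_chan *chan)
{
	chan->completed++;		/* walk and retire descriptors under the lock */
}

static int tx_status(struct fake_chan *chan)
{
	int completed;

	pthread_mutex_lock(&chan->lock);	/* analogue of spin_lock_bh() */
	scan_descriptors(chan);
	completed = chan->completed;
	pthread_mutex_unlock(&chan->lock);	/* analogue of spin_unlock_bh() */
	return completed;
}

int main(void)
{
	struct fake_chan chan = { .lock = PTHREAD_MUTEX_INITIALIZER, .completed = 0 };

	printf("completed = %d\n", tx_status(&chan));
	return 0;
}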