about summary refs log tree commit diff stats
path: root/drivers/dma
diff options
context:
space:
mode:
authorAndy Shevchenko <andriy.shevchenko@linux.intel.com>2013-01-18 07:14:15 -0500
committerVinod Koul <vinod.koul@intel.com>2013-01-20 23:49:21 -0500
commit77bcc497c60ec62dbb84abc809a6e218d53409e9 (patch)
tree6d0f491d55b316010f352934a23b4ecd5d01e118 /drivers/dma
parent5be10f349bc0a2f3dd2ab6417ffe29746403984c (diff)
dw_dmac: move soft LLP code from tasklet to dwc_scan_descriptors
The proper place for the main logic of the soft LLP mode is dwc_scan_descriptors. This prevents the transfer from being unexpectedly aborted when the user calls dwc_tx_status. Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com> Acked-by: Viresh Kumar <viresh.kumar@linaro.org> Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r--drivers/dma/dw_dmac.c43
1 file changed, 21 insertions, 22 deletions
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 635a4a5d31ae..dc3b9558a25c 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -400,6 +400,20 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
400 if (status_xfer & dwc->mask) { 400 if (status_xfer & dwc->mask) {
401 /* Everything we've submitted is done */ 401 /* Everything we've submitted is done */
402 dma_writel(dw, CLEAR.XFER, dwc->mask); 402 dma_writel(dw, CLEAR.XFER, dwc->mask);
403
404 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
405 if (dwc->tx_node_active != dwc->tx_list) {
406 desc = to_dw_desc(dwc->tx_node_active);
407
408 /* Submit next block */
409 dwc_do_single_block(dwc, desc);
410 spin_unlock_irqrestore(&dwc->lock, flags);
411
412 return;
413 }
414 /* We are done here */
415 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
416 }
403 spin_unlock_irqrestore(&dwc->lock, flags); 417 spin_unlock_irqrestore(&dwc->lock, flags);
404 418
405 dwc_complete_all(dw, dwc); 419 dwc_complete_all(dw, dwc);
@@ -411,6 +425,12 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
411 return; 425 return;
412 } 426 }
413 427
428 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
429 dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
430 spin_unlock_irqrestore(&dwc->lock, flags);
431 return;
432 }
433
414 dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__, 434 dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
415 (unsigned long long)llp); 435 (unsigned long long)llp);
416 436
@@ -596,29 +616,8 @@ static void dw_dma_tasklet(unsigned long data)
596 dwc_handle_cyclic(dw, dwc, status_err, status_xfer); 616 dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
597 else if (status_err & (1 << i)) 617 else if (status_err & (1 << i))
598 dwc_handle_error(dw, dwc); 618 dwc_handle_error(dw, dwc);
599 else if (status_xfer & (1 << i)) { 619 else if (status_xfer & (1 << i))
600 unsigned long flags;
601
602 spin_lock_irqsave(&dwc->lock, flags);
603 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
604 if (dwc->tx_node_active != dwc->tx_list) {
605 struct dw_desc *desc =
606 to_dw_desc(dwc->tx_node_active);
607
608 dma_writel(dw, CLEAR.XFER, dwc->mask);
609
610 dwc_do_single_block(dwc, desc);
611
612 spin_unlock_irqrestore(&dwc->lock, flags);
613 continue;
614 }
615 /* we are done here */
616 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
617 }
618 spin_unlock_irqrestore(&dwc->lock, flags);
619
620 dwc_scan_descriptors(dw, dwc); 620 dwc_scan_descriptors(dw, dwc);
621 }
622 } 621 }
623 622
624 /* 623 /*