about summary refs log tree commit diff stats
path: root/drivers/dma
diff options
context:
space:
mode:
authorMans Rullgard <mans@mansr.com>2016-01-11 08:04:29 -0500
committerVinod Koul <vinod.koul@intel.com>2016-01-14 00:49:42 -0500
commit2895b2cad6e7a95104cf396e5330054453382ae1 (patch)
treecd28330654a9c75992511b80b42e6fe96e70deca /drivers/dma
parentdf3bb8a0e619d501cd13334c3e0586edcdcbc716 (diff)
dmaengine: dw: fix cyclic transfer callbacks
Cyclic transfer callbacks rely on block completion interrupts which were disabled in commit ff7b05f29fd4 ("dmaengine/dw_dmac: Don't handle block interrupts"). This re-enables block interrupts so the cyclic callbacks can work. Other transfer types are not affected as they set the INT_EN bit only on the last block.

Fixes: ff7b05f29fd4 ("dmaengine/dw_dmac: Don't handle block interrupts")
Signed-off-by: Mans Rullgard <mans@mansr.com>
Reviewed-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Cc: <stable@vger.kernel.org>
Diffstat (limited to 'drivers/dma')
-rw-r--r--drivers/dma/dw/core.c21
1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index af2b92f8501e..b92662722404 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -156,6 +156,7 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
156 156
157 /* Enable interrupts */ 157 /* Enable interrupts */
158 channel_set_bit(dw, MASK.XFER, dwc->mask); 158 channel_set_bit(dw, MASK.XFER, dwc->mask);
159 channel_set_bit(dw, MASK.BLOCK, dwc->mask);
159 channel_set_bit(dw, MASK.ERROR, dwc->mask); 160 channel_set_bit(dw, MASK.ERROR, dwc->mask);
160 161
161 dwc->initialized = true; 162 dwc->initialized = true;
@@ -536,16 +537,17 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
536 537
537/* Called with dwc->lock held and all DMAC interrupts disabled */ 538/* Called with dwc->lock held and all DMAC interrupts disabled */
538static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, 539static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
539 u32 status_err, u32 status_xfer) 540 u32 status_block, u32 status_err, u32 status_xfer)
540{ 541{
541 unsigned long flags; 542 unsigned long flags;
542 543
543 if (dwc->mask) { 544 if (status_block & dwc->mask) {
544 void (*callback)(void *param); 545 void (*callback)(void *param);
545 void *callback_param; 546 void *callback_param;
546 547
547 dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n", 548 dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
548 channel_readl(dwc, LLP)); 549 channel_readl(dwc, LLP));
550 dma_writel(dw, CLEAR.BLOCK, dwc->mask);
549 551
550 callback = dwc->cdesc->period_callback; 552 callback = dwc->cdesc->period_callback;
551 callback_param = dwc->cdesc->period_callback_param; 553 callback_param = dwc->cdesc->period_callback_param;
@@ -577,6 +579,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
577 channel_writel(dwc, CTL_LO, 0); 579 channel_writel(dwc, CTL_LO, 0);
578 channel_writel(dwc, CTL_HI, 0); 580 channel_writel(dwc, CTL_HI, 0);
579 581
582 dma_writel(dw, CLEAR.BLOCK, dwc->mask);
580 dma_writel(dw, CLEAR.ERROR, dwc->mask); 583 dma_writel(dw, CLEAR.ERROR, dwc->mask);
581 dma_writel(dw, CLEAR.XFER, dwc->mask); 584 dma_writel(dw, CLEAR.XFER, dwc->mask);
582 585
@@ -593,10 +596,12 @@ static void dw_dma_tasklet(unsigned long data)
593{ 596{
594 struct dw_dma *dw = (struct dw_dma *)data; 597 struct dw_dma *dw = (struct dw_dma *)data;
595 struct dw_dma_chan *dwc; 598 struct dw_dma_chan *dwc;
599 u32 status_block;
596 u32 status_xfer; 600 u32 status_xfer;
597 u32 status_err; 601 u32 status_err;
598 int i; 602 int i;
599 603
604 status_block = dma_readl(dw, RAW.BLOCK);
600 status_xfer = dma_readl(dw, RAW.XFER); 605 status_xfer = dma_readl(dw, RAW.XFER);
601 status_err = dma_readl(dw, RAW.ERROR); 606 status_err = dma_readl(dw, RAW.ERROR);
602 607
@@ -605,7 +610,8 @@ static void dw_dma_tasklet(unsigned long data)
605 for (i = 0; i < dw->dma.chancnt; i++) { 610 for (i = 0; i < dw->dma.chancnt; i++) {
606 dwc = &dw->chan[i]; 611 dwc = &dw->chan[i];
607 if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) 612 if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
608 dwc_handle_cyclic(dw, dwc, status_err, status_xfer); 613 dwc_handle_cyclic(dw, dwc, status_block, status_err,
614 status_xfer);
609 else if (status_err & (1 << i)) 615 else if (status_err & (1 << i))
610 dwc_handle_error(dw, dwc); 616 dwc_handle_error(dw, dwc);
611 else if (status_xfer & (1 << i)) 617 else if (status_xfer & (1 << i))
@@ -616,6 +622,7 @@ static void dw_dma_tasklet(unsigned long data)
616 * Re-enable interrupts. 622 * Re-enable interrupts.
617 */ 623 */
618 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); 624 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
625 channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
619 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); 626 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
620} 627}
621 628
@@ -635,6 +642,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
635 * softirq handler. 642 * softirq handler.
636 */ 643 */
637 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); 644 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
645 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
638 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); 646 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
639 647
640 status = dma_readl(dw, STATUS_INT); 648 status = dma_readl(dw, STATUS_INT);
@@ -645,6 +653,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
645 653
646 /* Try to recover */ 654 /* Try to recover */
647 channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1); 655 channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
656 channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
648 channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1); 657 channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
649 channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1); 658 channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
650 channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1); 659 channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
@@ -1111,6 +1120,7 @@ static void dw_dma_off(struct dw_dma *dw)
1111 dma_writel(dw, CFG, 0); 1120 dma_writel(dw, CFG, 0);
1112 1121
1113 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); 1122 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
1123 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
1114 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); 1124 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
1115 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); 1125 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
1116 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); 1126 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
@@ -1216,6 +1226,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
1216 1226
1217 /* Disable interrupts */ 1227 /* Disable interrupts */
1218 channel_clear_bit(dw, MASK.XFER, dwc->mask); 1228 channel_clear_bit(dw, MASK.XFER, dwc->mask);
1229 channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
1219 channel_clear_bit(dw, MASK.ERROR, dwc->mask); 1230 channel_clear_bit(dw, MASK.ERROR, dwc->mask);
1220 1231
1221 spin_unlock_irqrestore(&dwc->lock, flags); 1232 spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1458,6 +1469,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
1458 1469
1459 dwc_chan_disable(dw, dwc); 1470 dwc_chan_disable(dw, dwc);
1460 1471
1472 dma_writel(dw, CLEAR.BLOCK, dwc->mask);
1461 dma_writel(dw, CLEAR.ERROR, dwc->mask); 1473 dma_writel(dw, CLEAR.ERROR, dwc->mask);
1462 dma_writel(dw, CLEAR.XFER, dwc->mask); 1474 dma_writel(dw, CLEAR.XFER, dwc->mask);
1463 1475
@@ -1546,9 +1558,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
1546 /* Force dma off, just in case */ 1558 /* Force dma off, just in case */
1547 dw_dma_off(dw); 1559 dw_dma_off(dw);
1548 1560
1549 /* Disable BLOCK interrupts as well */
1550 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
1551
1552 /* Create a pool of consistent memory blocks for hardware descriptors */ 1561 /* Create a pool of consistent memory blocks for hardware descriptors */
1553 dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev, 1562 dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
1554 sizeof(struct dw_desc), 4, 0); 1563 sizeof(struct dw_desc), 4, 0);