author		Viresh Kumar <viresh.kumar@st.com>	2012-02-01 05:42:23 -0500
committer	Vinod Koul <vinod.koul@linux.intel.com>	2012-02-22 07:45:38 -0500
commit		ff7b05f29fd4db810021d905e2dad95ab4fe2984 (patch)
tree		db633023e17d42a9e352ca77cbe16cdd04f69567 /drivers/dma
parent		6c618c9de5f2b4b43c30c6203869620bec6ed929 (diff)
dmaengine/dw_dmac: Don't handle block interrupts
Block interrupts are raised on completion of every LLI, which is far too
many interrupts. They are simply not required for the current
functioning of dw_dmac, so don't handle them at all.

Signed-off-by: Viresh Kumar <viresh.kumar@st.com>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
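As an aside, the arithmetic behind the change can be sketched in a few
lines of plain C (hypothetical types, illustration only, not driver
code): a transfer programmed as a chain of LLIs raises one BLOCK
interrupt per LLI plus a single XFER interrupt when the final LLI
completes, so servicing XFER alone cuts the per-transfer interrupt
count from N + 1 to 1.

	/* Illustration only: count interrupts for an N-element LLI chain. */
	#include <stdio.h>

	struct lli {
		const struct lli *next;	/* NULL terminates the chain */
	};

	/* BLOCK unmasked: one interrupt per LLI, plus the final XFER. */
	static unsigned int irqs_with_block(const struct lli *p)
	{
		unsigned int n = 0;

		for (; p; p = p->next)
			n++;
		return n + 1;
	}

	int main(void)
	{
		struct lli c = { NULL }, b = { &c }, a = { &b };

		printf("BLOCK unmasked: %u interrupts\n", irqs_with_block(&a));
		printf("XFER only:      1 interrupt\n");
		return 0;
	}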
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/dw_dmac.c	36
1 file changed, 6 insertions(+), 30 deletions(-)
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index f3aecb3c0343..5d7b199208d9 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -192,7 +192,6 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
 
 	/* Enable interrupts */
 	channel_set_bit(dw, MASK.XFER, dwc->mask);
-	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
 	channel_set_bit(dw, MASK.ERROR, dwc->mask);
 
 	dwc->initialized = true;
@@ -329,12 +328,6 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dwc->lock, flags);
-	/*
-	 * Clear block interrupt flag before scanning so that we don't
-	 * miss any, and read LLP before RAW_XFER to ensure it is
-	 * valid if we decide to scan the list.
-	 */
-	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
 	llp = channel_readl(dwc, LLP);
 	status_xfer = dma_readl(dw, RAW.XFER);
 
@@ -470,17 +463,16 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
 
 /* called with dwc->lock held and all DMAC interrupts disabled */
 static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
-		u32 status_block, u32 status_err, u32 status_xfer)
+		u32 status_err, u32 status_xfer)
 {
 	unsigned long flags;
 
-	if (status_block & dwc->mask) {
+	if (dwc->mask) {
 		void (*callback)(void *param);
 		void *callback_param;
 
 		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
 				channel_readl(dwc, LLP));
-		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
 
 		callback = dwc->cdesc->period_callback;
 		callback_param = dwc->cdesc->period_callback_param;
@@ -520,7 +512,6 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
 		channel_writel(dwc, CTL_LO, 0);
 		channel_writel(dwc, CTL_HI, 0);
 
-		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
 		dma_writel(dw, CLEAR.ERROR, dwc->mask);
 		dma_writel(dw, CLEAR.XFER, dwc->mask);
 
@@ -537,36 +528,29 @@ static void dw_dma_tasklet(unsigned long data)
 {
 	struct dw_dma *dw = (struct dw_dma *)data;
 	struct dw_dma_chan *dwc;
-	u32 status_block;
 	u32 status_xfer;
 	u32 status_err;
 	int i;
 
-	status_block = dma_readl(dw, RAW.BLOCK);
 	status_xfer = dma_readl(dw, RAW.XFER);
 	status_err = dma_readl(dw, RAW.ERROR);
 
-	dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n",
-			status_block, status_err);
+	dev_vdbg(dw->dma.dev, "tasklet: status_err=%x\n", status_err);
 
 	for (i = 0; i < dw->dma.chancnt; i++) {
 		dwc = &dw->chan[i];
 		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
-			dwc_handle_cyclic(dw, dwc, status_block, status_err,
-					status_xfer);
+			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
 		else if (status_err & (1 << i))
 			dwc_handle_error(dw, dwc);
-		else if ((status_block | status_xfer) & (1 << i))
+		else if (status_xfer & (1 << i))
 			dwc_scan_descriptors(dw, dwc);
 	}
 
 	/*
-	 * Re-enable interrupts. Block Complete interrupts are only
-	 * enabled if the INT_EN bit in the descriptor is set. This
-	 * will trigger a scan before the whole list is done.
+	 * Re-enable interrupts.
 	 */
 	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
-	channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
 	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
 }
 
@@ -583,7 +567,6 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
 	 * softirq handler.
 	 */
 	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
-	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
 	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
 
 	status = dma_readl(dw, STATUS_INT);
@@ -594,7 +577,6 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
 
 	/* Try to recover */
 	channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
-	channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
 	channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
 	channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
 	channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
@@ -1068,7 +1050,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 
 	/* Disable interrupts */
 	channel_clear_bit(dw, MASK.XFER, dwc->mask);
-	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
 	channel_clear_bit(dw, MASK.ERROR, dwc->mask);
 
 	spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1120,7 +1101,6 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
 		return -EBUSY;
 	}
 
-	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
 	dma_writel(dw, CLEAR.ERROR, dwc->mask);
 	dma_writel(dw, CLEAR.XFER, dwc->mask);
 
@@ -1322,7 +1302,6 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
 	while (dma_readl(dw, CH_EN) & dwc->mask)
 		cpu_relax();
 
-	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
 	dma_writel(dw, CLEAR.ERROR, dwc->mask);
 	dma_writel(dw, CLEAR.XFER, dwc->mask);
 
@@ -1347,7 +1326,6 @@ static void dw_dma_off(struct dw_dma *dw)
 	dma_writel(dw, CFG, 0);
 
 	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
-	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
 	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
 	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
 	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
@@ -1449,13 +1427,11 @@ static int __init dw_probe(struct platform_device *pdev)
 
 	/* Clear/disable all interrupts on all channels. */
 	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
-	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
 	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
 	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
 	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
 
 	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
-	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
 	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
 	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
 	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
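
For reference, the channel_set_bit()/channel_clear_bit() helpers used
throughout this patch are defined in dw_dmac_regs.h; paraphrased from
memory (a sketch, not part of this diff), they rely on the write-enable
byte that the DW_ahb_dmac MASK.* registers carry above the per-channel
mask bits, so a single write can (un)mask one channel without a
read-modify-write of the others:

	/* Paraphrased sketch: bits 7:0 carry the channel bits, bits 15:8
	 * the matching write enables, so only the channels named in
	 * (mask << 8) are touched by the write. */
	#define channel_set_bit(dw, reg, mask) \
		dma_writel(dw, reg, ((mask) << 8) | (mask))
	#define channel_clear_bit(dw, reg, mask) \
		dma_writel(dw, reg, ((mask) << 8) | 0)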