Diffstat (limited to 'drivers')
-rw-r--r--  drivers/dma/at_hdmac.c       | 10
-rw-r--r--  drivers/dma/coh901318.c      | 42
-rw-r--r--  drivers/dma/dmaengine.c      |  2
-rw-r--r--  drivers/dma/dw_dmac.c        | 10
-rw-r--r--  drivers/dma/fsldma.c         | 13
-rw-r--r--  drivers/dma/ipu/ipu_idmac.c  | 21
-rw-r--r--  drivers/dma/shdma.c          | 12
-rw-r--r--  drivers/dma/timb_dma.c       |  9
-rw-r--r--  drivers/dma/txx9dmac.c       | 10
-rw-r--r--  drivers/mmc/host/atmel-mci.c |  2
-rw-r--r--  drivers/serial/sh-sci.c      |  2
-rw-r--r--  drivers/video/mx3fb.c        |  3
12 files changed, 98 insertions(+), 38 deletions(-)
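
In short, this patch turns the DMA_SLAVE-specific device_terminate_all() callback into a generic device_control() hook that takes an enum dma_ctrl_cmd and returns an int, so one entry point can carry DMA_TERMINATE_ALL as well as commands such as DMA_PAUSE/DMA_RESUME (implemented here only by coh901318); drivers reject commands they do not understand with a negative errno. The sketch below shows how a client would drive the new hook. It is illustrative only: the wrapper names are hypothetical, and only the device_control(chan, ...) calls themselves are taken from the hunks that follow.

/*
 * Illustrative sketch, not part of this patch: how a slave DMA client
 * calls the new generic hook.  The wrapper function names are made up.
 */
#include <linux/dmaengine.h>

static void example_dma_stop(struct dma_chan *chan)
{
	/* Abort every pending and running descriptor on the channel */
	chan->device->device_control(chan, DMA_TERMINATE_ALL);
}

static int example_dma_pause(struct dma_chan *chan)
{
	/*
	 * Only some drivers implement DMA_PAUSE (coh901318 in this patch);
	 * the others return an error (-ENXIO in most of the drivers below),
	 * which the caller must be prepared to handle.
	 */
	return chan->device->device_control(chan, DMA_PAUSE);
}

The in-kernel users updated by this patch (atmel-mci, sh-sci, mx3fb) call the hook in exactly this way, as the final three hunks show.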
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index efc1a61ca231..f9143cf9e50a 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -759,13 +759,17 @@ err_desc_get:
 	return NULL;
 }
 
-static void atc_terminate_all(struct dma_chan *chan)
+static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
 {
 	struct at_dma_chan *atchan = to_at_dma_chan(chan);
 	struct at_dma *atdma = to_at_dma(chan->device);
 	struct at_desc *desc, *_desc;
 	LIST_HEAD(list);
 
+	/* Only supports DMA_TERMINATE_ALL */
+	if (cmd != DMA_TERMINATE_ALL)
+		return -ENXIO;
+
 	/*
 	 * This is only called when something went wrong elsewhere, so
 	 * we don't really care about the data. Just disable the
@@ -789,6 +793,8 @@ static void atc_terminate_all(struct dma_chan *chan)
 	/* Flush all pending and queued descriptors */
 	list_for_each_entry_safe(desc, _desc, &list, desc_node)
 		atc_chain_complete(atchan, desc);
+
+	return 0;
 }
 
 /**
@@ -1091,7 +1097,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
 
 	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
 		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
-		atdma->dma_common.device_terminate_all = atc_terminate_all;
+		atdma->dma_common.device_control = atc_control;
 	}
 
 	dma_writel(atdma, EN, AT_DMA_ENABLE);
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index f636c4a87c7f..53c54e034aa3 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -506,10 +506,11 @@ u32 coh901318_get_bytes_left(struct dma_chan *chan)
 EXPORT_SYMBOL(coh901318_get_bytes_left);
 
 
-/* Stops a transfer without losing data. Enables power save.
-   Use this function in conjunction with coh901318_continue(..)
-*/
-void coh901318_stop(struct dma_chan *chan)
+/*
+ * Pauses a transfer without losing data. Enables power save.
+ * Use this function in conjunction with coh901318_resume.
+ */
+static void coh901318_pause(struct dma_chan *chan)
 {
 	u32 val;
 	unsigned long flags;
@@ -550,12 +551,11 @@ void coh901318_stop(struct dma_chan *chan)
 
 	spin_unlock_irqrestore(&cohc->lock, flags);
 }
-EXPORT_SYMBOL(coh901318_stop);
 
-/* Continues a transfer that has been stopped via 300_dma_stop(..).
+/* Resumes a transfer that has been stopped via 300_dma_stop(..).
    Power save is handled.
 */
-void coh901318_continue(struct dma_chan *chan)
+static void coh901318_resume(struct dma_chan *chan)
 {
 	u32 val;
 	unsigned long flags;
@@ -581,7 +581,6 @@ void coh901318_continue(struct dma_chan *chan)
 
 	spin_unlock_irqrestore(&cohc->lock, flags);
 }
-EXPORT_SYMBOL(coh901318_continue);
 
 bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
 {
@@ -945,7 +944,7 @@ coh901318_free_chan_resources(struct dma_chan *chan)
 
 	spin_unlock_irqrestore(&cohc->lock, flags);
 
-	chan->device->device_terminate_all(chan);
+	chan->device->device_control(chan, DMA_TERMINATE_ALL);
 }
 
 
@@ -1179,16 +1178,29 @@ coh901318_issue_pending(struct dma_chan *chan)
 	spin_unlock_irqrestore(&cohc->lock, flags);
 }
 
-static void
-coh901318_terminate_all(struct dma_chan *chan)
+static int
+coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
 {
 	unsigned long flags;
 	struct coh901318_chan *cohc = to_coh901318_chan(chan);
 	struct coh901318_desc *cohd;
 	void __iomem *virtbase = cohc->base->virtbase;
 
-	coh901318_stop(chan);
+	if (cmd == DMA_PAUSE) {
+		coh901318_pause(chan);
+		return 0;
+	}
+
+	if (cmd == DMA_RESUME) {
+		coh901318_resume(chan);
+		return 0;
+	}
+
+	if (cmd != DMA_TERMINATE_ALL)
+		return -ENXIO;
 
+	/* The remainder of this function terminates the transfer */
+	coh901318_pause(chan);
 	spin_lock_irqsave(&cohc->lock, flags);
 
 	/* Clear any pending BE or TC interrupt */
@@ -1227,6 +1239,8 @@ coh901318_terminate_all(struct dma_chan *chan)
 	cohc->busy = 0;
 
 	spin_unlock_irqrestore(&cohc->lock, flags);
+
+	return 0;
 }
 void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
 			 struct coh901318_base *base)
@@ -1344,7 +1358,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
 	base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
 	base->dma_slave.device_is_tx_complete = coh901318_is_tx_complete;
 	base->dma_slave.device_issue_pending = coh901318_issue_pending;
-	base->dma_slave.device_terminate_all = coh901318_terminate_all;
+	base->dma_slave.device_control = coh901318_control;
 	base->dma_slave.dev = &pdev->dev;
 
 	err = dma_async_device_register(&base->dma_slave);
@@ -1364,7 +1378,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
 	base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
 	base->dma_memcpy.device_is_tx_complete = coh901318_is_tx_complete;
 	base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
-	base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
+	base->dma_memcpy.device_control = coh901318_control;
 	base->dma_memcpy.dev = &pdev->dev;
 	/*
 	 * This controller can only access address at even 32bit boundaries,
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 87399cafce37..ffc4ee9c5e21 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -694,7 +694,7 @@ int dma_async_device_register(struct dma_device *device)
 	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
 		!device->device_prep_slave_sg);
 	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
-		!device->device_terminate_all);
+		!device->device_control);
 
 	BUG_ON(!device->device_alloc_chan_resources);
 	BUG_ON(!device->device_free_chan_resources);
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index d28369f7afd2..8a6b85f61176 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -781,13 +781,17 @@ err_desc_get:
 	return NULL;
 }
 
-static void dwc_terminate_all(struct dma_chan *chan)
+static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 	struct dw_dma *dw = to_dw_dma(chan->device);
 	struct dw_desc *desc, *_desc;
 	LIST_HEAD(list);
 
+	/* Only supports DMA_TERMINATE_ALL */
+	if (cmd != DMA_TERMINATE_ALL)
+		return -ENXIO;
+
 	/*
 	 * This is only called when something went wrong elsewhere, so
 	 * we don't really care about the data. Just disable the
@@ -810,6 +814,8 @@ static void dwc_terminate_all(struct dma_chan *chan)
 	/* Flush all pending and queued descriptors */
 	list_for_each_entry_safe(desc, _desc, &list, desc_node)
 		dwc_descriptor_complete(dwc, desc);
+
+	return 0;
 }
 
 static enum dma_status
@@ -1338,7 +1344,7 @@ static int __init dw_probe(struct platform_device *pdev)
 	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
 
 	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
-	dw->dma.device_terminate_all = dwc_terminate_all;
+	dw->dma.device_control = dwc_control;
 
 	dw->dma.device_is_tx_complete = dwc_is_tx_complete;
 	dw->dma.device_issue_pending = dwc_issue_pending;
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index bbb4be5a3ff4..714fc46e7695 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -774,13 +774,18 @@ fail:
 	return NULL;
 }
 
-static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
+static int fsl_dma_device_control(struct dma_chan *dchan,
+				  enum dma_ctrl_cmd cmd)
 {
 	struct fsldma_chan *chan;
 	unsigned long flags;
 
+	/* Only supports DMA_TERMINATE_ALL */
+	if (cmd != DMA_TERMINATE_ALL)
+		return -ENXIO;
+
 	if (!dchan)
-		return;
+		return -EINVAL;
 
 	chan = to_fsl_chan(dchan);
 
@@ -794,6 +799,8 @@ static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
 	fsldma_free_desc_list(chan, &chan->ld_running);
 
 	spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+	return 0;
 }
 
 /**
@@ -1332,7 +1339,7 @@ static int __devinit fsldma_of_probe(struct of_device *op,
 	fdev->common.device_is_tx_complete = fsl_dma_is_complete;
 	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
 	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
-	fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
+	fdev->common.device_control = fsl_dma_device_control;
 	fdev->common.dev = &op->dev;
 
 	dev_set_drvdata(&op->dev, fdev);
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 2a446397c884..39e7fb2a90e3 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1472,13 +1472,17 @@ static void idmac_issue_pending(struct dma_chan *chan)
 	 */
 }
 
-static void __idmac_terminate_all(struct dma_chan *chan)
+static int __idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
 {
 	struct idmac_channel *ichan = to_idmac_chan(chan);
 	struct idmac *idmac = to_idmac(chan->device);
 	unsigned long flags;
 	int i;
 
+	/* Only supports DMA_TERMINATE_ALL */
+	if (cmd != DMA_TERMINATE_ALL)
+		return -ENXIO;
+
 	ipu_disable_channel(idmac, ichan,
 			    ichan->status >= IPU_CHANNEL_ENABLED);
 
@@ -1505,17 +1509,22 @@ static void __idmac_terminate_all(struct dma_chan *chan)
 	tasklet_enable(&to_ipu(idmac)->tasklet);
 
 	ichan->status = IPU_CHANNEL_INITIALIZED;
+
+	return 0;
 }
 
-static void idmac_terminate_all(struct dma_chan *chan)
+static int idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
 {
 	struct idmac_channel *ichan = to_idmac_chan(chan);
+	int ret;
 
 	mutex_lock(&ichan->chan_mutex);
 
-	__idmac_terminate_all(chan);
+	ret = __idmac_control(chan, cmd);
 
 	mutex_unlock(&ichan->chan_mutex);
+
+	return ret;
 }
 
 #ifdef DEBUG
@@ -1607,7 +1616,7 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
 
 	mutex_lock(&ichan->chan_mutex);
 
-	__idmac_terminate_all(chan);
+	__idmac_control(chan, DMA_TERMINATE_ALL);
 
 	if (ichan->status > IPU_CHANNEL_FREE) {
 #ifdef DEBUG
@@ -1669,7 +1678,7 @@ static int __init ipu_idmac_init(struct ipu *ipu)
 
 	/* Compulsory for DMA_SLAVE fields */
 	dma->device_prep_slave_sg = idmac_prep_slave_sg;
-	dma->device_terminate_all = idmac_terminate_all;
+	dma->device_control = idmac_control;
 
 	INIT_LIST_HEAD(&dma->channels);
 	for (i = 0; i < IPU_CHANNELS_NUM; i++) {
@@ -1703,7 +1712,7 @@ static void __exit ipu_idmac_exit(struct ipu *ipu)
 	for (i = 0; i < IPU_CHANNELS_NUM; i++) {
 		struct idmac_channel *ichan = ipu->channel + i;
 
-		idmac_terminate_all(&ichan->dma_chan);
+		idmac_control(&ichan->dma_chan, DMA_TERMINATE_ALL);
 		idmac_prep_slave_sg(&ichan->dma_chan, NULL, 0, DMA_NONE, 0);
 	}
 
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 5d17e09cb625..ce28c1e22825 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -580,12 +580,16 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
 			direction, flags);
 }
 
-static void sh_dmae_terminate_all(struct dma_chan *chan)
+static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
 {
 	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
 
+	/* Only supports DMA_TERMINATE_ALL */
+	if (cmd != DMA_TERMINATE_ALL)
+		return -ENXIO;
+
 	if (!chan)
-		return;
+		return -EINVAL;
 
 	dmae_halt(sh_chan);
 
@@ -601,6 +605,8 @@ static void sh_dmae_terminate_all(struct dma_chan *chan)
 	spin_unlock_bh(&sh_chan->desc_lock);
 
 	sh_dmae_chan_ld_cleanup(sh_chan, true);
+
+	return 0;
 }
 
 static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
@@ -1029,7 +1035,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 
 	/* Compulsory for DMA_SLAVE fields */
 	shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
-	shdev->common.device_terminate_all = sh_dmae_terminate_all;
+	shdev->common.device_control = sh_dmae_control;
 
 	shdev->common.dev = &pdev->dev;
 	/* Default transfer size of 32 bytes requires 32-byte alignment */
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 145f1c23408f..7c06471ef863 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -613,7 +613,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
 	return &td_desc->txd;
 }
 
-static void td_terminate_all(struct dma_chan *chan)
+static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
 {
 	struct timb_dma_chan *td_chan =
 		container_of(chan, struct timb_dma_chan, chan);
@@ -621,6 +621,9 @@ static void td_terminate_all(struct dma_chan *chan)
 
 	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
 
+	if (cmd != DMA_TERMINATE_ALL)
+		return -ENXIO;
+
 	/* first the easy part, put the queue into the free list */
 	spin_lock_bh(&td_chan->lock);
 	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
@@ -630,6 +633,8 @@ static void td_terminate_all(struct dma_chan *chan)
 	/* now tear down the runnning */
 	__td_finish(td_chan);
 	spin_unlock_bh(&td_chan->lock);
+
+	return 0;
 }
 
 static void td_tasklet(unsigned long data)
@@ -743,7 +748,7 @@ static int __devinit td_probe(struct platform_device *pdev)
 	dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
 	dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
 	td->dma.device_prep_slave_sg = td_prep_slave_sg;
-	td->dma.device_terminate_all = td_terminate_all;
+	td->dma.device_control = td_control;
 
 	td->dma.dev = &pdev->dev;
 
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 3ebc61067e54..e528e15f44ab 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -938,12 +938,16 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	return &first->txd;
 }
 
-static void txx9dmac_terminate_all(struct dma_chan *chan)
+static int txx9dmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
 {
 	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
 	struct txx9dmac_desc *desc, *_desc;
 	LIST_HEAD(list);
 
+	/* Only supports DMA_TERMINATE_ALL */
+	if (cmd != DMA_TERMINATE_ALL)
+		return -EINVAL;
+
 	dev_vdbg(chan2dev(chan), "terminate_all\n");
 	spin_lock_bh(&dc->lock);
 
@@ -958,6 +962,8 @@ static void txx9dmac_terminate_all(struct dma_chan *chan)
 	/* Flush all pending and queued descriptors */
 	list_for_each_entry_safe(desc, _desc, &list, desc_node)
 		txx9dmac_descriptor_complete(dc, desc);
+
+	return 0;
 }
 
 static enum dma_status
@@ -1153,7 +1159,7 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev)
 	dc->dma.dev = &pdev->dev;
 	dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
 	dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
-	dc->dma.device_terminate_all = txx9dmac_terminate_all;
+	dc->dma.device_control = txx9dmac_control;
 	dc->dma.device_is_tx_complete = txx9dmac_is_tx_complete;
 	dc->dma.device_issue_pending = txx9dmac_issue_pending;
 	if (pdata && pdata->memcpy_chan == ch) {
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 8072128e933b..ae6d24ba4f08 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -578,7 +578,7 @@ static void atmci_stop_dma(struct atmel_mci *host)
 	struct dma_chan *chan = host->data_chan;
 
 	if (chan) {
-		chan->device->device_terminate_all(chan);
+		chan->device->device_control(chan, DMA_TERMINATE_ALL);
 		atmci_dma_cleanup(host);
 	} else {
 		/* Data transfer was stopped by the interrupt handler */
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index f7b9aff88f4a..690988237971 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -1087,7 +1087,7 @@ static void work_fn_rx(struct work_struct *work)
 	unsigned long flags;
 	int count;
 
-	chan->device->device_terminate_all(chan);
+	chan->device->device_control(chan, DMA_TERMINATE_ALL);
 	dev_dbg(port->dev, "Read %u bytes with cookie %d\n",
 		sh_desc->partial, sh_desc->cookie);
 
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index 772ba3f45e6f..3aa50bc276eb 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -387,7 +387,8 @@ static void sdc_disable_channel(struct mx3fb_info *mx3_fbi)
 
 	spin_unlock_irqrestore(&mx3fb->lock, flags);
 
-	mx3_fbi->txd->chan->device->device_terminate_all(mx3_fbi->txd->chan);
+	mx3_fbi->txd->chan->device->device_control(mx3_fbi->txd->chan,
+						   DMA_TERMINATE_ALL);
 	mx3_fbi->txd = NULL;
 	mx3_fbi->cookie = -EINVAL;
 }