Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/at_hdmac.c           |   2
-rw-r--r--  drivers/dma/coh901318.c          |   2
-rw-r--r--  drivers/dma/dmatest.c            |   6
-rw-r--r--  drivers/dma/dw_dmac.c            |   2
-rw-r--r--  drivers/dma/fsldma.c             |   2
-rw-r--r--  drivers/dma/intel_mid_dma.c      |   8
-rw-r--r--  drivers/dma/intel_mid_dma_regs.h |   4
-rw-r--r--  drivers/dma/ioat/dma.c           |   1
-rw-r--r--  drivers/dma/ioat/dma_v2.c        |   1
-rw-r--r--  drivers/dma/ioat/dma_v3.c        |   1
-rw-r--r--  drivers/dma/mpc512x_dma.c        |   2
-rw-r--r--  drivers/dma/shdma.c              | 231
-rw-r--r--  drivers/dma/shdma.h              |   3
-rw-r--r--  drivers/dma/ste_dma40.c          |   4
-rw-r--r--  drivers/dma/timb_dma.c           |   3
15 files changed, 200 insertions(+), 72 deletions(-)
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 3134003eec8b..36144f88d718 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -190,7 +190,7 @@ static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
 /**
  * atc_assign_cookie - compute and assign new cookie
  * @atchan: channel we work on
- * @desc: descriptor to asign cookie for
+ * @desc: descriptor to assign cookie for
  *
  * Called with atchan->lock held and bh disabled
  */
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index b5a318916d05..af8c0b5ed70f 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -529,7 +529,7 @@ static void coh901318_pause(struct dma_chan *chan)
 	val = readl(virtbase + COH901318_CX_CFG +
 		    COH901318_CX_CFG_SPACING * channel);
 
-	/* Stopping infinit transfer */
+	/* Stopping infinite transfer */
 	if ((val & COH901318_CX_CTRL_TC_ENABLE) == 0 &&
 	    (val & COH901318_CX_CFG_CH_ENABLE))
 		cohc->stopped = 1;
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index e0888cb538d4..b4f5c32b6a47 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -56,8 +56,8 @@ MODULE_PARM_DESC(pq_sources,
 
 static int timeout = 3000;
 module_param(timeout, uint, S_IRUGO);
-MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), \
- Pass -1 for infinite timeout");
+MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
+		 "Pass -1 for infinite timeout");
 
 /*
  * Initialization patterns. All bytes in the source buffer has bit 7
@@ -634,5 +634,5 @@ static void __exit dmatest_exit(void)
 }
 module_exit(dmatest_exit);
 
-MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>");
+MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
 MODULE_LICENSE("GPL v2");
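A side note on the dmatest string fix above: a backslash-newline continuation inside a string literal drags the next line's leading whitespace into the string, while adjacent string literals concatenate cleanly. A minimal standalone C sketch of the difference (the variable names are illustrative, not from the driver):

	#include <stdio.h>

	int main(void)
	{
		/* The continuation keeps the next line's indentation
		 * inside the literal. */
		const char *cont = "Transfer Timeout in msec (default: 3000), \
		Pass -1 for infinite timeout";

		/* Adjacent literals join with no stray whitespace. */
		const char *concat = "Transfer Timeout in msec (default: 3000), "
				     "Pass -1 for infinite timeout";

		printf("%s\n%s\n", cont, concat);
		return 0;
	}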
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index efd836dfb65a..4d180ca9a1d8 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1575,5 +1575,5 @@ module_exit(dw_exit);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
-MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>");
+MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
 MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 6b396759e7f5..8a781540590c 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1448,7 +1448,7 @@ static const struct of_device_id fsldma_of_ids[] = {
 	{}
 };
 
-static struct of_platform_driver fsldma_of_driver = {
+static struct platform_driver fsldma_of_driver = {
 	.driver = {
 		.name = "fsl-elo-dma",
 		.owner = THIS_MODULE,
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index f153adfcaceb..f653517ef744 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -911,8 +911,8 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
 
 /**
  * midc_handle_error - Handle DMA txn error
- * @mid: controller where error occured
- * @midc: chan where error occured
+ * @mid: controller where error occurred
+ * @midc: chan where error occurred
  *
  * Scan the descriptor for error
  */
@@ -1099,7 +1099,7 @@ static int mid_setup_dma(struct pci_dev *pdev)
 		dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE,
 					LNW_PERIPHRAL_MASK_SIZE);
 		if (dma->mask_reg == NULL) {
-			pr_err("ERR_MDMA:Cant map periphral intr space !!\n");
+			pr_err("ERR_MDMA:Can't map periphral intr space !!\n");
 			return -ENOMEM;
 		}
 	} else
@@ -1375,7 +1375,7 @@ int dma_resume(struct pci_dev *pci)
 	pci_restore_state(pci);
 	ret = pci_enable_device(pci);
 	if (ret) {
-		pr_err("MDMA: device cant be enabled for %x\n", pci->device);
+		pr_err("MDMA: device can't be enabled for %x\n", pci->device);
 		return ret;
 	}
 	device->state = RUNNING;
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h
index 709fecbdde79..aea5ee88ce03 100644
--- a/drivers/dma/intel_mid_dma_regs.h
+++ b/drivers/dma/intel_mid_dma_regs.h
@@ -174,8 +174,8 @@ union intel_mid_dma_cfg_hi {
  * @dma: dma device struture pointer
  * @busy: bool representing if ch is busy (active txn) or not
  * @in_use: bool representing if ch is in use or not
- * @raw_tfr: raw trf interrupt recieved
- * @raw_block: raw block interrupt recieved
+ * @raw_tfr: raw trf interrupt received
+ * @raw_block: raw block interrupt received
  */
 struct intel_mid_dma_chan {
 	struct dma_chan chan;
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index c9213ead4a26..a4d6cb0c0343 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -34,6 +34,7 @@
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/workqueue.h>
+#include <linux/prefetch.h>
 #include <linux/i7300_idle.h>
 #include "dma.h"
 #include "registers.h"
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 54d1a3c24e9c..5d65f8377971 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -34,6 +34,7 @@
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/workqueue.h>
+#include <linux/prefetch.h>
 #include <linux/i7300_idle.h>
 #include "dma.h"
 #include "dma_v2.h"
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index d0f499098479..d845dc4b7103 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -60,6 +60,7 @@
 #include <linux/gfp.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
+#include <linux/prefetch.h>
 #include "registers.h"
 #include "hw.h"
 #include "dma.h"
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 4f95d31f5a20..b9bae94f2015 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -328,7 +328,7 @@ static irqreturn_t mpc_dma_irq(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-/* proccess completed descriptors */
+/* process completed descriptors */
 static void mpc_dma_process_completed(struct mpc_dma *mdma)
 {
 	dma_cookie_t last_cookie = 0;
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 6451b581a70b..636e40925b16 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -48,7 +48,7 @@ enum sh_dmae_desc_status {
 
 /*
  * Used for write-side mutual exclusion for the global device list,
- * read-side synchronization by way of RCU.
+ * read-side synchronization by way of RCU, and per-controller data.
  */
 static DEFINE_SPINLOCK(sh_dmae_lock);
 static LIST_HEAD(sh_dmae_devices);
@@ -85,22 +85,35 @@ static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
  */
 static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
 {
-	unsigned short dmaor = dmaor_read(shdev);
+	unsigned short dmaor;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sh_dmae_lock, flags);
 
+	dmaor = dmaor_read(shdev);
 	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
+
+	spin_unlock_irqrestore(&sh_dmae_lock, flags);
 }
 
 static int sh_dmae_rst(struct sh_dmae_device *shdev)
 {
 	unsigned short dmaor;
+	unsigned long flags;
 
-	sh_dmae_ctl_stop(shdev);
-	dmaor = dmaor_read(shdev) | shdev->pdata->dmaor_init;
+	spin_lock_irqsave(&sh_dmae_lock, flags);
 
-	dmaor_write(shdev, dmaor);
-	if (dmaor_read(shdev) & (DMAOR_AE | DMAOR_NMIF)) {
-		pr_warning("dma-sh: Can't initialize DMAOR.\n");
-		return -EINVAL;
+	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
+
+	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
+
+	dmaor = dmaor_read(shdev);
+
+	spin_unlock_irqrestore(&sh_dmae_lock, flags);
+
+	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
+		dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
+		return -EIO;
 	}
 	return 0;
 }
@@ -184,7 +197,7 @@ static void dmae_init(struct sh_dmae_chan *sh_chan)
 
 static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
 {
-	/* When DMA was working, can not set data to CHCR */
+	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
 	if (dmae_is_busy(sh_chan))
 		return -EBUSY;
 
@@ -200,12 +213,17 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
 					     struct sh_dmae_device, common);
 	struct sh_dmae_pdata *pdata = shdev->pdata;
 	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
-	u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16);
+	u16 __iomem *addr = shdev->dmars;
 	int shift = chan_pdata->dmars_bit;
 
 	if (dmae_is_busy(sh_chan))
 		return -EBUSY;
 
+	/* in the case of a missing DMARS resource use first memory window */
+	if (!addr)
+		addr = (u16 __iomem *)shdev->chan_reg;
+	addr += chan_pdata->dmars / sizeof(u16);
+
 	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
 		     addr);
 
@@ -374,7 +392,12 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
 	LIST_HEAD(list);
 	int descs = sh_chan->descs_allocated;
 
+	/* Protect against ISR */
+	spin_lock_irq(&sh_chan->desc_lock);
 	dmae_halt(sh_chan);
+	spin_unlock_irq(&sh_chan->desc_lock);
+
+	/* Now no new interrupts will occur */
 
 	/* Prepared and not submitted descriptors can still be on the queue */
 	if (!list_empty(&sh_chan->ld_queue))
@@ -384,6 +407,7 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
 		/* The caller is holding dma_list_mutex */
 		struct sh_dmae_slave *param = chan->private;
 		clear_bit(param->slave_id, sh_dmae_slave_used);
+		chan->private = NULL;
 	}
 
 	spin_lock_bh(&sh_chan->desc_lock);
@@ -563,8 +587,6 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
 	if (!chan || !len)
 		return NULL;
 
-	chan->private = NULL;
-
 	sh_chan = to_sh_chan(chan);
 
 	sg_init_table(&sg, 1);
@@ -620,9 +642,9 @@ static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	if (!chan)
 		return -EINVAL;
 
+	spin_lock_bh(&sh_chan->desc_lock);
 	dmae_halt(sh_chan);
 
-	spin_lock_bh(&sh_chan->desc_lock);
 	if (!list_empty(&sh_chan->ld_queue)) {
 		/* Record partial transfer */
 		struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
@@ -716,6 +738,14 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
 			list_move(&desc->node, &sh_chan->ld_free);
 		}
 	}
+
+	if (all && !callback)
+		/*
+		 * Terminating and the loop completed normally: forgive
+		 * uncompleted cookies
+		 */
+		sh_chan->completed_cookie = sh_chan->common.cookie;
+
 	spin_unlock_bh(&sh_chan->desc_lock);
 
 	if (callback)
@@ -733,10 +763,6 @@ static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
 {
 	while (__ld_cleanup(sh_chan, all))
 		;
-
-	if (all)
-		/* Terminating - forgive uncompleted cookies */
-		sh_chan->completed_cookie = sh_chan->common.cookie;
 }
 
 static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
@@ -782,8 +808,10 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
 
 	sh_dmae_chan_ld_cleanup(sh_chan, false);
 
-	last_used = chan->cookie;
+	/* First read completed cookie to avoid a skew */
 	last_complete = sh_chan->completed_cookie;
+	rmb();
+	last_used = chan->cookie;
 	BUG_ON(last_complete < 0);
 	dma_set_tx_state(txstate, last_complete, last_used, 0);
 
@@ -813,8 +841,12 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
 static irqreturn_t sh_dmae_interrupt(int irq, void *data)
 {
 	irqreturn_t ret = IRQ_NONE;
-	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
-	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
+	struct sh_dmae_chan *sh_chan = data;
+	u32 chcr;
+
+	spin_lock(&sh_chan->desc_lock);
+
+	chcr = sh_dmae_readl(sh_chan, CHCR);
 
 	if (chcr & CHCR_TE) {
 		/* DMA stop */
@@ -824,10 +856,13 @@ static irqreturn_t sh_dmae_interrupt(int irq, void *data)
 		tasklet_schedule(&sh_chan->tasklet);
 	}
 
+	spin_unlock(&sh_chan->desc_lock);
+
 	return ret;
 }
 
-static unsigned int sh_dmae_reset(struct sh_dmae_device *shdev)
+/* Called from error IRQ or NMI */
+static bool sh_dmae_reset(struct sh_dmae_device *shdev)
 {
 	unsigned int handled = 0;
 	int i;
@@ -839,22 +874,32 @@ static unsigned int sh_dmae_reset(struct sh_dmae_device *shdev)
 	for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
 		struct sh_dmae_chan *sh_chan = shdev->chan[i];
 		struct sh_desc *desc;
+		LIST_HEAD(dl);
 
 		if (!sh_chan)
 			continue;
 
+		spin_lock(&sh_chan->desc_lock);
+
 		/* Stop the channel */
 		dmae_halt(sh_chan);
 
+		list_splice_init(&sh_chan->ld_queue, &dl);
+
+		spin_unlock(&sh_chan->desc_lock);
+
 		/* Complete all */
-		list_for_each_entry(desc, &sh_chan->ld_queue, node) {
+		list_for_each_entry(desc, &dl, node) {
 			struct dma_async_tx_descriptor *tx = &desc->async_tx;
 			desc->mark = DESC_IDLE;
 			if (tx->callback)
 				tx->callback(tx->callback_param);
 		}
 
-		list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free);
+		spin_lock(&sh_chan->desc_lock);
+		list_splice(&dl, &sh_chan->ld_free);
+		spin_unlock(&sh_chan->desc_lock);
+
 		handled++;
 	}
 
@@ -865,7 +910,13 @@ static unsigned int sh_dmae_reset(struct sh_dmae_device *shdev)
 
 static irqreturn_t sh_dmae_err(int irq, void *data)
 {
-	return IRQ_RETVAL(sh_dmae_reset(data));
+	struct sh_dmae_device *shdev = data;
+
+	if (!(dmaor_read(shdev) & DMAOR_AE))
+		return IRQ_NONE;
+
+	sh_dmae_reset(data);
+	return IRQ_HANDLED;
 }
 
 static void dmae_do_tasklet(unsigned long data)
@@ -897,17 +948,11 @@ static void dmae_do_tasklet(unsigned long data)
 
 static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
 {
-	unsigned int handled;
-
 	/* Fast path out if NMIF is not asserted for this controller */
 	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
 		return false;
 
-	handled = sh_dmae_reset(shdev);
-	if (handled)
-		return true;
-
-	return false;
+	return sh_dmae_reset(shdev);
 }
 
 static int sh_dmae_nmi_handler(struct notifier_block *self,
@@ -977,9 +1022,6 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
 	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
 			(unsigned long)new_sh_chan);
 
-	/* Init the channel */
-	dmae_init(new_sh_chan);
-
 	spin_lock_init(&new_sh_chan->desc_lock);
 
 	/* Init descripter manage list */
@@ -1040,9 +1082,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 	struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
 	unsigned long irqflags = IRQF_DISABLED,
 		chan_flag[SH_DMAC_MAX_CHANNELS] = {};
-	unsigned long flags;
 	int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
-	int err, i, irq_cnt = 0, irqres = 0;
+	int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
 	struct sh_dmae_device *shdev;
 	struct resource *chan, *dmars, *errirq_res, *chanirq_res;
 
@@ -1051,7 +1092,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 		return -ENODEV;
 
 	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	/* DMARS area is optional, if absent, this controller cannot do slave DMA */
+	/* DMARS area is optional */
 	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 	/*
 	 * IRQ resources:
@@ -1106,11 +1147,11 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_get_sync(&pdev->dev);
 
-	spin_lock_irqsave(&sh_dmae_lock, flags);
+	spin_lock_irq(&sh_dmae_lock);
 	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
-	spin_unlock_irqrestore(&sh_dmae_lock, flags);
+	spin_unlock_irq(&sh_dmae_lock);
 
-	/* reset dma controller */
+	/* reset dma controller - only needed as a test */
 	err = sh_dmae_rst(shdev);
 	if (err)
 		goto rst_err;
@@ -1118,7 +1159,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 	INIT_LIST_HEAD(&shdev->common.channels);
 
 	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
-	if (dmars)
+	if (pdata->slave && pdata->slave_num)
 		dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);
 
 	shdev->common.device_alloc_chan_resources
@@ -1167,8 +1208,13 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
 		/* Special case - all multiplexed */
 		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
-			chan_irq[irq_cnt] = chanirq_res->start;
-			chan_flag[irq_cnt] = IRQF_SHARED;
+			if (irq_cnt < SH_DMAC_MAX_CHANNELS) {
+				chan_irq[irq_cnt] = chanirq_res->start;
+				chan_flag[irq_cnt] = IRQF_SHARED;
+			} else {
+				irq_cap = 1;
+				break;
+			}
 		}
 	} else {
 		do {
@@ -1182,22 +1228,32 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
1182 "Found IRQ %d for channel %d\n", 1228 "Found IRQ %d for channel %d\n",
1183 i, irq_cnt); 1229 i, irq_cnt);
1184 chan_irq[irq_cnt++] = i; 1230 chan_irq[irq_cnt++] = i;
1231
1232 if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
1233 break;
1234 }
1235
1236 if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {
1237 irq_cap = 1;
1238 break;
1185 } 1239 }
1186 chanirq_res = platform_get_resource(pdev, 1240 chanirq_res = platform_get_resource(pdev,
1187 IORESOURCE_IRQ, ++irqres); 1241 IORESOURCE_IRQ, ++irqres);
1188 } while (irq_cnt < pdata->channel_num && chanirq_res); 1242 } while (irq_cnt < pdata->channel_num && chanirq_res);
1189 } 1243 }
1190 1244
1191 if (irq_cnt < pdata->channel_num)
1192 goto eirqres;
1193
1194 /* Create DMA Channel */ 1245 /* Create DMA Channel */
1195 for (i = 0; i < pdata->channel_num; i++) { 1246 for (i = 0; i < irq_cnt; i++) {
1196 err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]); 1247 err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
1197 if (err) 1248 if (err)
1198 goto chan_probe_err; 1249 goto chan_probe_err;
1199 } 1250 }
1200 1251
1252 if (irq_cap)
1253 dev_notice(&pdev->dev, "Attempting to register %d DMA "
1254 "channels when a maximum of %d are supported.\n",
1255 pdata->channel_num, SH_DMAC_MAX_CHANNELS);
1256
1201 pm_runtime_put(&pdev->dev); 1257 pm_runtime_put(&pdev->dev);
1202 1258
1203 platform_set_drvdata(pdev, shdev); 1259 platform_set_drvdata(pdev, shdev);
@@ -1207,21 +1263,24 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 
 chan_probe_err:
 	sh_dmae_chan_remove(shdev);
-eirqres:
+
 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
 	free_irq(errirq, shdev);
 eirq_err:
 #endif
 rst_err:
-	spin_lock_irqsave(&sh_dmae_lock, flags);
+	spin_lock_irq(&sh_dmae_lock);
 	list_del_rcu(&shdev->node);
-	spin_unlock_irqrestore(&sh_dmae_lock, flags);
+	spin_unlock_irq(&sh_dmae_lock);
 
 	pm_runtime_put(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
 	if (dmars)
 		iounmap(shdev->dmars);
 emapdmars:
 	iounmap(shdev->chan_reg);
+	synchronize_rcu();
 emapchan:
 	kfree(shdev);
 ealloc:
@@ -1237,7 +1296,6 @@ static int __exit sh_dmae_remove(struct platform_device *pdev)
 {
 	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
 	struct resource *res;
-	unsigned long flags;
 	int errirq = platform_get_irq(pdev, 0);
 
 	dma_async_device_unregister(&shdev->common);
@@ -1245,9 +1303,9 @@ static int __exit sh_dmae_remove(struct platform_device *pdev)
 	if (errirq > 0)
 		free_irq(errirq, shdev);
 
-	spin_lock_irqsave(&sh_dmae_lock, flags);
+	spin_lock_irq(&sh_dmae_lock);
 	list_del_rcu(&shdev->node);
-	spin_unlock_irqrestore(&sh_dmae_lock, flags);
+	spin_unlock_irq(&sh_dmae_lock);
 
 	/* channel data remove */
 	sh_dmae_chan_remove(shdev);
@@ -1258,6 +1316,7 @@ static int __exit sh_dmae_remove(struct platform_device *pdev)
 		iounmap(shdev->dmars);
 	iounmap(shdev->chan_reg);
 
+	synchronize_rcu();
 	kfree(shdev);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1276,12 +1335,78 @@ static void sh_dmae_shutdown(struct platform_device *pdev)
 	sh_dmae_ctl_stop(shdev);
 }
 
+static int sh_dmae_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int sh_dmae_runtime_resume(struct device *dev)
+{
+	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+
+	return sh_dmae_rst(shdev);
+}
+
+#ifdef CONFIG_PM
+static int sh_dmae_suspend(struct device *dev)
+{
+	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+	int i;
+
+	for (i = 0; i < shdev->pdata->channel_num; i++) {
+		struct sh_dmae_chan *sh_chan = shdev->chan[i];
+		if (sh_chan->descs_allocated)
+			sh_chan->pm_error = pm_runtime_put_sync(dev);
+	}
+
+	return 0;
+}
+
+static int sh_dmae_resume(struct device *dev)
+{
+	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+	int i;
+
+	for (i = 0; i < shdev->pdata->channel_num; i++) {
+		struct sh_dmae_chan *sh_chan = shdev->chan[i];
+		struct sh_dmae_slave *param = sh_chan->common.private;
+
+		if (!sh_chan->descs_allocated)
+			continue;
+
+		if (!sh_chan->pm_error)
+			pm_runtime_get_sync(dev);
+
+		if (param) {
+			const struct sh_dmae_slave_config *cfg = param->config;
+			dmae_set_dmars(sh_chan, cfg->mid_rid);
+			dmae_set_chcr(sh_chan, cfg->chcr);
+		} else {
+			dmae_init(sh_chan);
+		}
+	}
+
+	return 0;
+}
+#else
+#define sh_dmae_suspend NULL
+#define sh_dmae_resume NULL
+#endif
+
+const struct dev_pm_ops sh_dmae_pm = {
+	.suspend		= sh_dmae_suspend,
+	.resume			= sh_dmae_resume,
+	.runtime_suspend	= sh_dmae_runtime_suspend,
+	.runtime_resume		= sh_dmae_runtime_resume,
+};
+
 static struct platform_driver sh_dmae_driver = {
 	.remove		= __exit_p(sh_dmae_remove),
 	.shutdown	= sh_dmae_shutdown,
 	.driver = {
 		.owner	= THIS_MODULE,
 		.name	= "sh-dma-engine",
+		.pm	= &sh_dmae_pm,
 	},
 };
 
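An aside on the sh_dmae_tx_status hunk above: it reads completed_cookie before chan->cookie and separates the two loads with rmb(), so a reader never observes a completion counter ahead of the submission counter. A rough userspace analogue of that reader-side ordering, using a C11 acquire load to play the role the kernel's rmb() plays here (all names below are illustrative, not the driver's; the writer side would pair this with release stores):

	#include <stdatomic.h>
	#include <stdio.h>

	static _Atomic int used_cookie;		/* advanced at submit time */
	static _Atomic int completed_cookie;	/* advanced at completion */

	static void report_status(void)
	{
		/* Read the completion counter first; the acquire load keeps
		 * the following load from being reordered before it. */
		int last_complete = atomic_load_explicit(&completed_cookie,
							 memory_order_acquire);
		int last_used = atomic_load_explicit(&used_cookie,
						     memory_order_relaxed);

		/* Reading in this order avoids a completed > used skew. */
		printf("complete=%d used=%d\n", last_complete, last_used);
	}

	int main(void)
	{
		atomic_store(&used_cookie, 5);
		atomic_store(&completed_cookie, 3);
		report_status();
		return 0;
	}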
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 52e4fb173805..5ae9fc512180 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -17,7 +17,7 @@
 #include <linux/interrupt.h>
 #include <linux/list.h>
 
-#define SH_DMAC_MAX_CHANNELS 6
+#define SH_DMAC_MAX_CHANNELS 20
 #define SH_DMA_SLAVE_NUMBER 256
 #define SH_DMA_TCR_MAX 0x00FFFFFF	/* 16MB */
 
@@ -37,6 +37,7 @@ struct sh_dmae_chan {
 	int id;				/* Raw id of this channel */
 	u32 __iomem *base;
 	char dev_id[16];		/* unique name per DMAC of channel */
+	int pm_error;
 };
 
 struct sh_dmae_device {
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 5d054bb908e5..8f222d4db7de 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -90,7 +90,7 @@ struct d40_lli_pool {
  * @lli_log: Same as above but for logical channels.
  * @lli_pool: The pool with two entries pre-allocated.
  * @lli_len: Number of llis of current descriptor.
- * @lli_current: Number of transfered llis.
+ * @lli_current: Number of transferred llis.
  * @lcla_alloc: Number of LCLA entries allocated.
  * @txd: DMA engine struct. Used for among other things for communication
  * during a transfer.
@@ -1214,7 +1214,7 @@ static void dma_tasklet(unsigned long data)
 		return;
 
 err:
-	/* Rescue manouver if receiving double interrupts */
+	/* Rescue manoeuvre if receiving double interrupts */
 	if (d40c->pending_tx > 0)
 		d40c->pending_tx--;
 	spin_unlock_irqrestore(&d40c->lock, flags);
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index d2c75feff7df..f69f90a61873 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -27,7 +27,6 @@
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
-#include <linux/mfd/core.h>
 #include <linux/slab.h>
 
 #include <linux/timb_dma.h>
@@ -685,7 +684,7 @@ static irqreturn_t td_irq(int irq, void *devid)
 
 static int __devinit td_probe(struct platform_device *pdev)
 {
-	struct timb_dma_platform_data *pdata = mfd_get_data(pdev);
+	struct timb_dma_platform_data *pdata = pdev->dev.platform_data;
 	struct timb_dma *td;
 	struct resource *iomem;
 	int irq;