Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig          5
-rw-r--r--  drivers/dma/amba-pl08x.c     1
-rw-r--r--  drivers/dma/at_hdmac.c       4
-rw-r--r--  drivers/dma/dmaengine.c     14
-rw-r--r--  drivers/dma/imx-dma.c        9
-rw-r--r--  drivers/dma/ioat/dma.c      16
-rw-r--r--  drivers/dma/ioat/dma.h       6
-rw-r--r--  drivers/dma/ioat/dma_v2.c   12
-rw-r--r--  drivers/dma/ioat/dma_v2.h    4
-rw-r--r--  drivers/dma/ioat/dma_v3.c   49
-rw-r--r--  drivers/dma/iop-adma.c       4
-rw-r--r--  drivers/dma/mxs-dma.c       10
-rw-r--r--  drivers/dma/pl330.c         25
-rw-r--r--  drivers/dma/ste_dma40.c    323
-rw-r--r--  drivers/dma/ste_dma40_ll.h   2
15 files changed, 308 insertions, 176 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index cf9da362d64f..ef378b5b17e4 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -91,11 +91,10 @@ config DW_DMAC
 
 config AT_HDMAC
 	tristate "Atmel AHB DMA support"
-	depends on ARCH_AT91SAM9RL || ARCH_AT91SAM9G45
+	depends on ARCH_AT91
 	select DMA_ENGINE
 	help
-	  Support the Atmel AHB DMA controller. This can be integrated in
-	  chips such as the Atmel AT91SAM9RL.
+	  Support the Atmel AHB DMA controller.
 
 config FSL_DMA
 	tristate "Freescale Elo and Elo Plus DMA support"
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index c301a8ec31aa..3d704abd7912 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1429,6 +1429,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 			 * signal
 			 */
 			release_phy_channel(plchan);
+			plchan->phychan_hold = 0;
 		}
 		/* Dequeue jobs and free LLIs */
 		if (plchan->at) {
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 7aa58d204892..445fdf811695 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -221,10 +221,6 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
 
 	vdbg_dump_regs(atchan);
 
-	/* clear any pending interrupt */
-	while (dma_readl(atdma, EBCISR))
-		cpu_relax();
-
 	channel_writel(atchan, SADDR, 0);
 	channel_writel(atchan, DADDR, 0);
 	channel_writel(atchan, CTRLA, 0);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 767bcc31b365..2397f6f451b1 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -332,6 +332,20 @@ struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 }
 EXPORT_SYMBOL(dma_find_channel);
 
+/*
+ * net_dma_find_channel - find a channel for net_dma
+ * net_dma has alignment requirements
+ */
+struct dma_chan *net_dma_find_channel(void)
+{
+	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
+	if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
+		return NULL;
+
+	return chan;
+}
+EXPORT_SYMBOL(net_dma_find_channel);
+
 /**
  * dma_issue_pending_all - flush all pending operations across all channels
  */
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index a45b5d2a5987..bb787d8e1529 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -571,11 +571,14 @@ static void imxdma_tasklet(unsigned long data)
 		if (desc->desc.callback)
 			desc->desc.callback(desc->desc.callback_param);
 
-	dma_cookie_complete(&desc->desc);
-
-	/* If we are dealing with a cyclic descriptor keep it on ld_active */
+	/* If we are dealing with a cyclic descriptor keep it on ld_active
+	 * and dont mark the descripor as complete.
+	 * Only in non-cyclic cases it would be marked as complete
+	 */
 	if (imxdma_chan_is_doing_cyclic(imxdmac))
 		goto out;
+	else
+		dma_cookie_complete(&desc->desc);
 
 	/* Free 2D slot if it was an interleaved transfer */
 	if (imxdmac->enabled_2d) {
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 31493d80e0e9..73b2b65cb1de 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -546,9 +546,9 @@ void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
 			   PCI_DMA_TODEVICE, flags, 0);
 }
 
-unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
+dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
 {
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 	u64 completion;
 
 	completion = *chan->completion;
@@ -569,7 +569,7 @@ unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
 }
 
 bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
-			   unsigned long *phys_complete)
+			   dma_addr_t *phys_complete)
 {
 	*phys_complete = ioat_get_current_completion(chan);
 	if (*phys_complete == chan->last_completion)
@@ -580,14 +580,14 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
 	return true;
 }
 
-static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
 {
 	struct ioat_chan_common *chan = &ioat->base;
 	struct list_head *_desc, *n;
 	struct dma_async_tx_descriptor *tx;
 
-	dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
-		__func__, phys_complete);
+	dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n",
+		__func__, (unsigned long long) phys_complete);
 	list_for_each_safe(_desc, n, &ioat->used_desc) {
 		struct ioat_desc_sw *desc;
 
@@ -652,7 +652,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
 static void ioat1_cleanup(struct ioat_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	prefetch(chan->completion);
 
@@ -698,7 +698,7 @@ static void ioat1_timer_event(unsigned long data)
 		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
 		spin_unlock_bh(&ioat->desc_lock);
 	} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-		unsigned long phys_complete;
+		dma_addr_t phys_complete;
 
 		spin_lock_bh(&ioat->desc_lock);
 		/* if we haven't made progress and we have already
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index c7888bccd974..5e8fe01ba69d 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -88,7 +88,7 @@ struct ioatdma_device {
 struct ioat_chan_common {
 	struct dma_chan common;
 	void __iomem *reg_base;
-	unsigned long last_completion;
+	dma_addr_t last_completion;
 	spinlock_t cleanup_lock;
 	unsigned long state;
 	#define IOAT_COMPLETION_PENDING 0
@@ -310,7 +310,7 @@ int __devinit ioat_dma_self_test(struct ioatdma_device *device);
 void __devexit ioat_dma_remove(struct ioatdma_device *device);
 struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
 					      void __iomem *iobase);
-unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
+dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan);
 void ioat_init_channel(struct ioatdma_device *device,
 		       struct ioat_chan_common *chan, int idx);
 enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
@@ -318,7 +318,7 @@ enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
 void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
 		    size_t len, struct ioat_dma_descriptor *hw);
 bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
-			   unsigned long *phys_complete);
+			   dma_addr_t *phys_complete);
 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
 void ioat_kobject_del(struct ioatdma_device *device);
 extern const struct sysfs_ops ioat_sysfs_ops;
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index e8e110ff3d96..86895760b598 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -128,7 +128,7 @@ static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
 	spin_unlock_bh(&ioat->prep_lock);
 }
 
-static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
 {
 	struct ioat_chan_common *chan = &ioat->base;
 	struct dma_async_tx_descriptor *tx;
@@ -179,7 +179,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
 static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	spin_lock_bh(&chan->cleanup_lock);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -260,7 +260,7 @@ int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
 static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	ioat2_quiesce(chan, 0);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -275,7 +275,7 @@ void ioat2_timer_event(unsigned long data)
 	struct ioat_chan_common *chan = &ioat->base;
 
 	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-		unsigned long phys_complete;
+		dma_addr_t phys_complete;
 		u64 status;
 
 		status = ioat_chansts(chan);
@@ -572,9 +572,9 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
 	 */
 	struct ioat_chan_common *chan = &ioat->base;
 	struct dma_chan *c = &chan->common;
-	const u16 curr_size = ioat2_ring_size(ioat);
+	const u32 curr_size = ioat2_ring_size(ioat);
 	const u16 active = ioat2_ring_active(ioat);
-	const u16 new_size = 1 << order;
+	const u32 new_size = 1 << order;
 	struct ioat_ring_ent **ring;
 	u16 i;
 
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index a2c413b2b8d8..be2a55b95c23 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -74,7 +74,7 @@ static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
 	return container_of(chan, struct ioat2_dma_chan, base);
 }
 
-static inline u16 ioat2_ring_size(struct ioat2_dma_chan *ioat)
+static inline u32 ioat2_ring_size(struct ioat2_dma_chan *ioat)
 {
 	return 1 << ioat->alloc_order;
 }
@@ -91,7 +91,7 @@ static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat)
 	return CIRC_CNT(ioat->head, ioat->issued, ioat2_ring_size(ioat));
 }
 
-static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat)
+static inline u32 ioat2_ring_space(struct ioat2_dma_chan *ioat)
 {
 	return ioat2_ring_size(ioat) - ioat2_ring_active(ioat);
 }
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 2c4476c0e405..f7f1dc62c15c 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -257,7 +257,7 @@ static bool desc_has_ext(struct ioat_ring_ent *desc)
  * The difference from the dma_v2.c __cleanup() is that this routine
  * handles extended descriptors and dma-unmapping raid operations.
  */
-static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
 {
 	struct ioat_chan_common *chan = &ioat->base;
 	struct ioat_ring_ent *desc;
@@ -314,7 +314,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
 static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	spin_lock_bh(&chan->cleanup_lock);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -333,7 +333,7 @@ static void ioat3_cleanup_event(unsigned long data)
 static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	ioat2_quiesce(chan, 0);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -348,7 +348,7 @@ static void ioat3_timer_event(unsigned long data)
 	struct ioat_chan_common *chan = &ioat->base;
 
 	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-		unsigned long phys_complete;
+		dma_addr_t phys_complete;
 		u64 status;
 
 		status = ioat_chansts(chan);
@@ -1149,6 +1149,44 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan)
 	return ioat2_reset_sync(chan, msecs_to_jiffies(200));
 }
 
+static bool is_jf_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool is_snb_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
+		return true;
+	default:
+		return false;
+	}
+}
+
 int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 {
 	struct pci_dev *pdev = device->pdev;
@@ -1169,6 +1207,9 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
 	dma->device_free_chan_resources = ioat2_free_chan_resources;
 
+	if (is_jf_ioat(pdev) || is_snb_ioat(pdev))
+		dma->copy_align = 6;
+
 	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
 	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
 
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index da6c4c2c066a..79e3eba29702 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1252,8 +1252,8 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
 	struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2];
 	/* address conversion buffers (dma_map / page_address) */
 	void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2];
-	dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST];
-	dma_addr_t pq_dest[2];
+	dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST+2];
+	dma_addr_t *pq_dest = &pq_src[IOP_ADMA_NUM_SRC_TEST];
 
 	int i;
 	struct dma_async_tx_descriptor *tx;
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index c81ef7e10e08..655d4ce6ed0d 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -201,10 +201,6 @@ static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
 
 static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(tx->chan);
-
-	mxs_dma_enable_chan(mxs_chan);
-
 	return dma_cookie_assign(tx);
 }
 
@@ -558,9 +554,9 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
 
 static void mxs_dma_issue_pending(struct dma_chan *chan)
 {
-	/*
-	 * Nothing to do. We only have a single descriptor.
-	 */
+	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+
+	mxs_dma_enable_chan(mxs_chan);
 }
 
 static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 282caf118be8..2ee6e23930ad 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2225,12 +2225,9 @@ static inline void free_desc_list(struct list_head *list)
 {
 	struct dma_pl330_dmac *pdmac;
 	struct dma_pl330_desc *desc;
-	struct dma_pl330_chan *pch;
+	struct dma_pl330_chan *pch = NULL;
 	unsigned long flags;
 
-	if (list_empty(list))
-		return;
-
 	/* Finish off the work list */
 	list_for_each_entry(desc, list, node) {
 		dma_async_tx_callback callback;
@@ -2247,6 +2244,10 @@ static inline void free_desc_list(struct list_head *list)
 		desc->pchan = NULL;
 	}
 
+	/* pch will be unset if list was empty */
+	if (!pch)
+		return;
+
 	pdmac = pch->dmac;
 
 	spin_lock_irqsave(&pdmac->pool_lock, flags);
@@ -2257,12 +2258,9 @@ static inline void free_desc_list(struct list_head *list)
 static inline void handle_cyclic_desc_list(struct list_head *list)
 {
 	struct dma_pl330_desc *desc;
-	struct dma_pl330_chan *pch;
+	struct dma_pl330_chan *pch = NULL;
 	unsigned long flags;
 
-	if (list_empty(list))
-		return;
-
 	list_for_each_entry(desc, list, node) {
 		dma_async_tx_callback callback;
 
@@ -2274,6 +2272,10 @@ static inline void handle_cyclic_desc_list(struct list_head *list)
 			callback(desc->txd.callback_param);
 	}
 
+	/* pch will be unset if list was empty */
+	if (!pch)
+		return;
+
 	spin_lock_irqsave(&pch->lock, flags);
 	list_splice_tail_init(list, &pch->work_list);
 	spin_unlock_irqrestore(&pch->lock, flags);
@@ -2926,8 +2928,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	INIT_LIST_HEAD(&pd->channels);
 
 	/* Initialize channel parameters */
-	num_chan = max(pdat ? pdat->nr_valid_peri : (u8)pi->pcfg.num_peri,
-			(u8)pi->pcfg.num_chan);
+	if (pdat)
+		num_chan = max_t(int, pdat->nr_valid_peri, pi->pcfg.num_chan);
+	else
+		num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan);
+
 	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
 
 	for (i = 0; i < num_chan; i++) {
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index bdd41d4bfa8d..2ed1ac3513f3 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -18,6 +18,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/err.h>
 #include <linux/amba/bus.h>
+#include <linux/regulator/consumer.h>
 
 #include <plat/ste_dma40.h>
 
@@ -69,6 +70,22 @@ enum d40_command {
 };
 
 /*
+ * enum d40_events - The different Event Enables for the event lines.
+ *
+ * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
+ * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
+ * @D40_SUSPEND_REQ_EVENTLINE: Requesting for suspending a event line.
+ * @D40_ROUND_EVENTLINE: Status check for event line.
+ */
+
+enum d40_events {
+	D40_DEACTIVATE_EVENTLINE = 0,
+	D40_ACTIVATE_EVENTLINE = 1,
+	D40_SUSPEND_REQ_EVENTLINE = 2,
+	D40_ROUND_EVENTLINE = 3
+};
+
+/*
  * These are the registers that has to be saved and later restored
  * when the DMA hw is powered off.
  * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
@@ -870,8 +887,8 @@ static void d40_save_restore_registers(struct d40_base *base, bool save)
 }
 #endif
 
-static int d40_channel_execute_command(struct d40_chan *d40c,
-				       enum d40_command command)
+static int __d40_execute_command_phy(struct d40_chan *d40c,
+				     enum d40_command command)
 {
 	u32 status;
 	int i;
@@ -880,6 +897,12 @@ static int d40_channel_execute_command(struct d40_chan *d40c,
 	unsigned long flags;
 	u32 wmask;
 
+	if (command == D40_DMA_STOP) {
+		ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
+		if (ret)
+			return ret;
+	}
+
 	spin_lock_irqsave(&d40c->base->execmd_lock, flags);
 
 	if (d40c->phy_chan->num % 2 == 0)
@@ -973,67 +996,109 @@ static void d40_term_all(struct d40_chan *d40c)
 	}
 
 	d40c->pending_tx = 0;
-	d40c->busy = false;
 }
 
-static void __d40_config_set_event(struct d40_chan *d40c, bool enable,
-				   u32 event, int reg)
+static void __d40_config_set_event(struct d40_chan *d40c,
+				   enum d40_events event_type, u32 event,
+				   int reg)
 {
 	void __iomem *addr = chan_base(d40c) + reg;
 	int tries;
+	u32 status;
+
+	switch (event_type) {
+
+	case D40_DEACTIVATE_EVENTLINE:
 
-	if (!enable) {
-		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
-		       | ~D40_EVENTLINE_MASK(event), addr);
-		return;
-	}
+		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
+		       | ~D40_EVENTLINE_MASK(event), addr);
+		break;
+
+	case D40_SUSPEND_REQ_EVENTLINE:
+		status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
+			  D40_EVENTLINE_POS(event);
+
+		if (status == D40_DEACTIVATE_EVENTLINE ||
+		    status == D40_SUSPEND_REQ_EVENTLINE)
+			break;
 
+		writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
+		       | ~D40_EVENTLINE_MASK(event), addr);
+
+		for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {
+
+			status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
+				  D40_EVENTLINE_POS(event);
+
+			cpu_relax();
+			/*
+			 * Reduce the number of bus accesses while
+			 * waiting for the DMA to suspend.
+			 */
+			udelay(3);
+
+			if (status == D40_DEACTIVATE_EVENTLINE)
+				break;
+		}
+
+		if (tries == D40_SUSPEND_MAX_IT) {
+			chan_err(d40c,
+				"unable to stop the event_line chl %d (log: %d)"
+				"status %x\n", d40c->phy_chan->num,
+				d40c->log_num, status);
+		}
+		break;
+
+	case D40_ACTIVATE_EVENTLINE:
 	/*
 	 * The hardware sometimes doesn't register the enable when src and dst
 	 * event lines are active on the same logical channel. Retry to ensure
 	 * it does. Usually only one retry is sufficient.
 	 */
 	tries = 100;
 	while (--tries) {
-		writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
-			| ~D40_EVENTLINE_MASK(event), addr);
+		writel((D40_ACTIVATE_EVENTLINE <<
+			D40_EVENTLINE_POS(event)) |
+			~D40_EVENTLINE_MASK(event), addr);
 
 		if (readl(addr) & D40_EVENTLINE_MASK(event))
 			break;
 	}
 
 	if (tries != 99)
 		dev_dbg(chan2dev(d40c),
 			"[%s] workaround enable S%cLNK (%d tries)\n",
 			__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
 			100 - tries);
 
 	WARN_ON(!tries);
-}
-
-static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&d40c->phy_chan->lock, flags);
-
+		break;
+
+	case D40_ROUND_EVENTLINE:
+		BUG();
+		break;
+
+	}
+}
+
+static void d40_config_set_event(struct d40_chan *d40c,
+				 enum d40_events event_type)
+{
 	/* Enable event line connected to device (or memcpy) */
 	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
 	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
 		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
 
-		__d40_config_set_event(d40c, do_enable, event,
+		__d40_config_set_event(d40c, event_type, event,
 				       D40_CHAN_REG_SSLNK);
 	}
 
 	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
 		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
 
-		__d40_config_set_event(d40c, do_enable, event,
+		__d40_config_set_event(d40c, event_type, event,
 				       D40_CHAN_REG_SDLNK);
 	}
-
-	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
 }
 
 static u32 d40_chan_has_events(struct d40_chan *d40c)
@@ -1047,6 +1112,64 @@ static u32 d40_chan_has_events(struct d40_chan *d40c)
 	return val;
 }
 
+static int
+__d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
+{
+	unsigned long flags;
+	int ret = 0;
+	u32 active_status;
+	void __iomem *active_reg;
+
+	if (d40c->phy_chan->num % 2 == 0)
+		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
+	else
+		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
+
+
+	spin_lock_irqsave(&d40c->phy_chan->lock, flags);
+
+	switch (command) {
+	case D40_DMA_STOP:
+	case D40_DMA_SUSPEND_REQ:
+
+		active_status = (readl(active_reg) &
+				 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
+				 D40_CHAN_POS(d40c->phy_chan->num);
+
+		if (active_status == D40_DMA_RUN)
+			d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
+		else
+			d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);
+
+		if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
+			ret = __d40_execute_command_phy(d40c, command);
+
+		break;
+
+	case D40_DMA_RUN:
+
+		d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
+		ret = __d40_execute_command_phy(d40c, command);
+		break;
+
+	case D40_DMA_SUSPENDED:
+		BUG();
+		break;
+	}
+
+	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
+	return ret;
+}
+
+static int d40_channel_execute_command(struct d40_chan *d40c,
+				       enum d40_command command)
+{
+	if (chan_is_logical(d40c))
+		return __d40_execute_command_log(d40c, command);
+	else
+		return __d40_execute_command_phy(d40c, command);
+}
+
 static u32 d40_get_prmo(struct d40_chan *d40c)
 {
 	static const unsigned int phy_map[] = {
@@ -1149,15 +1272,7 @@ static int d40_pause(struct d40_chan *d40c)
 	spin_lock_irqsave(&d40c->lock, flags);
 
 	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
-	if (res == 0) {
-		if (chan_is_logical(d40c)) {
-			d40_config_set_event(d40c, false);
-			/* Resume the other logical channels if any */
-			if (d40_chan_has_events(d40c))
-				res = d40_channel_execute_command(d40c,
-								  D40_DMA_RUN);
-		}
-	}
+
 	pm_runtime_mark_last_busy(d40c->base->dev);
 	pm_runtime_put_autosuspend(d40c->base->dev);
 	spin_unlock_irqrestore(&d40c->lock, flags);
@@ -1174,45 +1289,17 @@ static int d40_resume(struct d40_chan *d40c)
 
 	spin_lock_irqsave(&d40c->lock, flags);
 	pm_runtime_get_sync(d40c->base->dev);
-	if (d40c->base->rev == 0)
-		if (chan_is_logical(d40c)) {
-			res = d40_channel_execute_command(d40c,
-							  D40_DMA_SUSPEND_REQ);
-			goto no_suspend;
-		}
 
 	/* If bytes left to transfer or linked tx resume job */
-	if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
-
-		if (chan_is_logical(d40c))
-			d40_config_set_event(d40c, true);
-
+	if (d40_residue(d40c) || d40_tx_is_linked(d40c))
 		res = d40_channel_execute_command(d40c, D40_DMA_RUN);
-	}
 
-no_suspend:
 	pm_runtime_mark_last_busy(d40c->base->dev);
 	pm_runtime_put_autosuspend(d40c->base->dev);
 	spin_unlock_irqrestore(&d40c->lock, flags);
 	return res;
 }
 
-static int d40_terminate_all(struct d40_chan *chan)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	ret = d40_pause(chan);
-	if (!ret && chan_is_physical(chan))
-		ret = d40_channel_execute_command(chan, D40_DMA_STOP);
-
-	spin_lock_irqsave(&chan->lock, flags);
-	d40_term_all(chan);
-	spin_unlock_irqrestore(&chan->lock, flags);
-
-	return ret;
-}
-
 static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct d40_chan *d40c = container_of(tx->chan,
@@ -1232,20 +1319,6 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
 
 static int d40_start(struct d40_chan *d40c)
 {
-	if (d40c->base->rev == 0) {
-		int err;
-
-		if (chan_is_logical(d40c)) {
-			err = d40_channel_execute_command(d40c,
-							  D40_DMA_SUSPEND_REQ);
-			if (err)
-				return err;
-		}
-	}
-
-	if (chan_is_logical(d40c))
-		d40_config_set_event(d40c, true);
-
 	return d40_channel_execute_command(d40c, D40_DMA_RUN);
 }
 
@@ -1258,10 +1331,10 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
 	d40d = d40_first_queued(d40c);
 
 	if (d40d != NULL) {
-		if (!d40c->busy)
+		if (!d40c->busy) {
 			d40c->busy = true;
-
-		pm_runtime_get_sync(d40c->base->dev);
+			pm_runtime_get_sync(d40c->base->dev);
+		}
 
 		/* Remove from queue */
 		d40_desc_remove(d40d);
@@ -1388,8 +1461,8 @@ static void dma_tasklet(unsigned long data)
 
 		return;
 
- err:
-	/* Rescue manoeuvre if receiving double interrupts */
+err:
+	/* Rescue manouver if receiving double interrupts */
 	if (d40c->pending_tx > 0)
 		d40c->pending_tx--;
 	spin_unlock_irqrestore(&d40c->lock, flags);
@@ -1770,7 +1843,6 @@ static int d40_config_memcpy(struct d40_chan *d40c)
 	return 0;
 }
 
-
 static int d40_free_dma(struct d40_chan *d40c)
 {
 
@@ -1806,43 +1878,18 @@ static int d40_free_dma(struct d40_chan *d40c)
 	}
 
 	pm_runtime_get_sync(d40c->base->dev);
-	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
 	if (res) {
-		chan_err(d40c, "suspend failed\n");
+		chan_err(d40c, "stop failed\n");
 		goto out;
 	}
 
-	if (chan_is_logical(d40c)) {
-		/* Release logical channel, deactivate the event line */
-
-		d40_config_set_event(d40c, false);
-		d40c->base->lookup_log_chans[d40c->log_num] = NULL;
-
-		/*
-		 * Check if there are more logical allocation
-		 * on this phy channel.
-		 */
-		if (!d40_alloc_mask_free(phy, is_src, event)) {
-			/* Resume the other logical channels if any */
-			if (d40_chan_has_events(d40c)) {
-				res = d40_channel_execute_command(d40c,
-								  D40_DMA_RUN);
-				if (res)
-					chan_err(d40c,
-						"Executing RUN command\n");
-			}
-			goto out;
-		}
-	} else {
-		(void) d40_alloc_mask_free(phy, is_src, 0);
-	}
-
-	/* Release physical channel */
-	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
-	if (res) {
-		chan_err(d40c, "Failed to stop channel\n");
-		goto out;
-	}
+	d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);
+
+	if (chan_is_logical(d40c))
+		d40c->base->lookup_log_chans[d40c->log_num] = NULL;
+	else
+		d40c->base->lookup_phy_chans[phy->num] = NULL;
 
 	if (d40c->busy) {
 		pm_runtime_mark_last_busy(d40c->base->dev);
@@ -1852,7 +1899,6 @@ static int d40_free_dma(struct d40_chan *d40c)
 	d40c->busy = false;
 	d40c->phy_chan = NULL;
 	d40c->configured = false;
-	d40c->base->lookup_phy_chans[phy->num] = NULL;
 out:
 
 	pm_runtime_mark_last_busy(d40c->base->dev);
@@ -2070,7 +2116,7 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
 	if (sg_next(&sg_src[sg_len - 1]) == sg_src)
 		desc->cyclic = true;
 
-	if (direction != DMA_NONE) {
+	if (direction != DMA_TRANS_NONE) {
 		dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
 
 		if (direction == DMA_DEV_TO_MEM)
@@ -2371,6 +2417,31 @@ static void d40_issue_pending(struct dma_chan *chan)
 	spin_unlock_irqrestore(&d40c->lock, flags);
 }
 
+static void d40_terminate_all(struct dma_chan *chan)
+{
+	unsigned long flags;
+	struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
+	int ret;
+
+	spin_lock_irqsave(&d40c->lock, flags);
+
+	pm_runtime_get_sync(d40c->base->dev);
+	ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
+	if (ret)
+		chan_err(d40c, "Failed to stop channel\n");
+
+	d40_term_all(d40c);
+	pm_runtime_mark_last_busy(d40c->base->dev);
+	pm_runtime_put_autosuspend(d40c->base->dev);
+	if (d40c->busy) {
+		pm_runtime_mark_last_busy(d40c->base->dev);
+		pm_runtime_put_autosuspend(d40c->base->dev);
+	}
+	d40c->busy = false;
+
+	spin_unlock_irqrestore(&d40c->lock, flags);
+}
+
 static int
 dma40_config_to_halfchannel(struct d40_chan *d40c,
 			    struct stedma40_half_channel_info *info,
@@ -2551,7 +2622,8 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 
 	switch (cmd) {
 	case DMA_TERMINATE_ALL:
-		return d40_terminate_all(d40c);
+		d40_terminate_all(chan);
+		return 0;
 	case DMA_PAUSE:
 		return d40_pause(d40c);
 	case DMA_RESUME:
@@ -2908,6 +2980,12 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
 		 rev, res->start);
 
+	if (rev < 2) {
+		d40_err(&pdev->dev, "hardware revision: %d is not supported",
+			rev);
+		goto failure;
+	}
+
 	plat_data = pdev->dev.platform_data;
 
 	/* Count the number of logical channels in use */
@@ -2998,6 +3076,7 @@ failure:
 
 	if (base) {
 		kfree(base->lcla_pool.alloc_map);
+		kfree(base->reg_val_backup_chan);
 		kfree(base->lookup_log_chans);
 		kfree(base->lookup_phy_chans);
 		kfree(base->phy_res);
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 8d3d490968a3..51e8e5396e9b 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -62,8 +62,6 @@
 #define D40_SREG_ELEM_LOG_LIDX_MASK	(0xFF << D40_SREG_ELEM_LOG_LIDX_POS)
 
 /* Link register */
-#define D40_DEACTIVATE_EVENTLINE	0x0
-#define D40_ACTIVATE_EVENTLINE		0x1
 #define D40_EVENTLINE_POS(i)		(2 * i)
 #define D40_EVENTLINE_MASK(i)		(0x3 << D40_EVENTLINE_POS(i))
 