-rw-r--r--  Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt | 10
-rw-r--r--  Documentation/dmaengine/client.txt | 59
-rw-r--r--  Documentation/dmaengine/provider.txt | 20
-rw-r--r--  drivers/dca/dca-core.c | 3
-rw-r--r--  drivers/dma/acpi-dma.c | 5
-rw-r--r--  drivers/dma/at_xdmac.c | 20
-rw-r--r--  drivers/dma/dma-axi-dmac.c | 8
-rw-r--r--  drivers/dma/dmaengine.c | 178
-rw-r--r--  drivers/dma/dw/platform.c | 7
-rw-r--r--  drivers/dma/edma.c | 10
-rw-r--r--  drivers/dma/fsl-edma.c | 85
-rw-r--r--  drivers/dma/hsu/hsu.c | 17
-rw-r--r--  drivers/dma/hsu/hsu.h | 1
-rw-r--r--  drivers/dma/idma64.c | 22
-rw-r--r--  drivers/dma/idma64.h | 3
-rw-r--r--  drivers/dma/img-mdc-dma.c | 78
-rw-r--r--  drivers/dma/ioat/dca.c | 2
-rw-r--r--  drivers/dma/ioat/dma.h | 34
-rw-r--r--  drivers/dma/ioat/registers.h | 16
-rw-r--r--  drivers/dma/omap-dma.c | 82
-rw-r--r--  drivers/dma/pxa_dma.c | 1
-rw-r--r--  drivers/dma/sh/Kconfig | 6
-rw-r--r--  drivers/dma/sh/Makefile | 1
-rw-r--r--  drivers/dma/sh/rcar-hpbdma.c | 669
-rw-r--r--  drivers/dma/sh/usb-dmac.c | 4
-rw-r--r--  drivers/dma/virt-dma.c | 46
-rw-r--r--  drivers/dma/virt-dma.h | 25
-rw-r--r--  include/linux/dca.h | 5
-rw-r--r--  include/linux/dmaengine.h | 145
-rw-r--r--  include/linux/omap-dma.h | 6
-rw-r--r--  include/linux/platform_data/dma-rcar-hpbdma.h | 103
-rw-r--r--  include/linux/platform_data/edma.h | 7
-rw-r--r--  sound/core/pcm_dmaengine.c | 9
33 files changed, 635 insertions(+), 1052 deletions(-)
diff --git a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
index 040f365954cc..e7780a186a36 100644
--- a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
@@ -1,7 +1,13 @@
1* Renesas USB DMA Controller Device Tree bindings 1* Renesas USB DMA Controller Device Tree bindings
2 2
3Required Properties: 3Required Properties:
4- compatible: must contain "renesas,usb-dmac" 4-compatible: "renesas,<soctype>-usb-dmac", "renesas,usb-dmac" as fallback.
5 Examples with soctypes are:
6 - "renesas,r8a7790-usb-dmac" (R-Car H2)
7 - "renesas,r8a7791-usb-dmac" (R-Car M2-W)
8 - "renesas,r8a7793-usb-dmac" (R-Car M2-N)
9 - "renesas,r8a7794-usb-dmac" (R-Car E2)
10 - "renesas,r8a7795-usb-dmac" (R-Car H3)
5- reg: base address and length of the registers block for the DMAC 11- reg: base address and length of the registers block for the DMAC
6- interrupts: interrupt specifiers for the DMAC, one for each entry in 12- interrupts: interrupt specifiers for the DMAC, one for each entry in
7 interrupt-names. 13 interrupt-names.
@@ -15,7 +21,7 @@ Required Properties:
15Example: R8A7790 (R-Car H2) USB-DMACs 21Example: R8A7790 (R-Car H2) USB-DMACs
16 22
17 usb_dmac0: dma-controller@e65a0000 { 23 usb_dmac0: dma-controller@e65a0000 {
18 compatible = "renesas,usb-dmac"; 24 compatible = "renesas,r8a7790-usb-dmac", "renesas,usb-dmac";
19 reg = <0 0xe65a0000 0 0x100>; 25 reg = <0 0xe65a0000 0 0x100>;
20 interrupts = <0 109 IRQ_TYPE_LEVEL_HIGH 26 interrupts = <0 109 IRQ_TYPE_LEVEL_HIGH
21 0 109 IRQ_TYPE_LEVEL_HIGH>; 27 0 109 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/Documentation/dmaengine/client.txt b/Documentation/dmaengine/client.txt
index 11fb87ff6cd0..9e33189745f0 100644
--- a/Documentation/dmaengine/client.txt
+++ b/Documentation/dmaengine/client.txt
@@ -22,25 +22,14 @@ The slave DMA usage consists of following steps:
22 Channel allocation is slightly different in the slave DMA context, 22 Channel allocation is slightly different in the slave DMA context,
23 client drivers typically need a channel from a particular DMA 23 client drivers typically need a channel from a particular DMA
24 controller only and even in some cases a specific channel is desired. 24 controller only and even in some cases a specific channel is desired.
25 To request a channel dma_request_channel() API is used. 25 To request a channel dma_request_chan() API is used.
26 26
27 Interface: 27 Interface:
28 struct dma_chan *dma_request_channel(dma_cap_mask_t mask, 28 struct dma_chan *dma_request_chan(struct device *dev, const char *name);
29 dma_filter_fn filter_fn,
30 void *filter_param);
31 where dma_filter_fn is defined as:
32 typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
33 29
34 The 'filter_fn' parameter is optional, but highly recommended for 30 Which will find and return the 'name' DMA channel associated with the 'dev'
35 slave and cyclic channels as they typically need to obtain a specific 31 device. The association is done via DT, ACPI or board file based
36 DMA channel. 32 dma_slave_map matching table.
37
38 When the optional 'filter_fn' parameter is NULL, dma_request_channel()
39 simply returns the first channel that satisfies the capability mask.
40
41 Otherwise, the 'filter_fn' routine will be called once for each free
42 channel which has a capability in 'mask'. 'filter_fn' is expected to
43 return 'true' when the desired DMA channel is found.
44 33
45 A channel allocated via this interface is exclusive to the caller, 34 A channel allocated via this interface is exclusive to the caller,
46 until dma_release_channel() is called. 35 until dma_release_channel() is called.
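
As an aside on the interface documented in this hunk: a minimal client-side sketch of the dma_request_chan() flow (illustrative only, not taken from this series; the "rx" channel name and the surrounding driver context are assumptions):

	struct dma_chan *chan;

	chan = dma_request_chan(dev, "rx");	/* matched via DT, ACPI or dma_slave_map */
	if (IS_ERR(chan)) {
		if (PTR_ERR(chan) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* provider not probed yet, retry later */
		return PTR_ERR(chan);		/* no channel/mapping for this device */
	}

	/* ... dmaengine_slave_config(), prep/submit descriptors, issue pending ... */

	dma_release_channel(chan);		/* channel stays exclusive until released */
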
@@ -128,7 +117,7 @@ The slave DMA usage consists of following steps:
128 transaction. 117 transaction.
129 118
130 For cyclic DMA, a callback function may wish to terminate the 119 For cyclic DMA, a callback function may wish to terminate the
131 DMA via dmaengine_terminate_all(). 120 DMA via dmaengine_terminate_async().
132 121
133 Therefore, it is important that DMA engine drivers drop any 122 Therefore, it is important that DMA engine drivers drop any
134 locks before calling the callback function which may cause a 123 locks before calling the callback function which may cause a
@@ -166,12 +155,29 @@ The slave DMA usage consists of following steps:
166 155
167Further APIs: 156Further APIs:
168 157
1691. int dmaengine_terminate_all(struct dma_chan *chan) 1581. int dmaengine_terminate_sync(struct dma_chan *chan)
159 int dmaengine_terminate_async(struct dma_chan *chan)
160 int dmaengine_terminate_all(struct dma_chan *chan) /* DEPRECATED */
170 161
171 This causes all activity for the DMA channel to be stopped, and may 162 This causes all activity for the DMA channel to be stopped, and may
172 discard data in the DMA FIFO which hasn't been fully transferred. 163 discard data in the DMA FIFO which hasn't been fully transferred.
173 No callback functions will be called for any incomplete transfers. 164 No callback functions will be called for any incomplete transfers.
174 165
166 Two variants of this function are available.
167
168 dmaengine_terminate_async() might not wait until the DMA has been fully
169 stopped or until any running complete callbacks have finished. But it is
170 possible to call dmaengine_terminate_async() from atomic context or from
171 within a complete callback. dmaengine_synchronize() must be called before it
172 is safe to free the memory accessed by the DMA transfer or free resources
173 accessed from within the complete callback.
174
175 dmaengine_terminate_sync() will wait for the transfer and any running
176 complete callbacks to finish before it returns. But the function must not be
177 called from atomic context or from within a complete callback.
178
179 dmaengine_terminate_all() is deprecated and should not be used in new code.
180
1752. int dmaengine_pause(struct dma_chan *chan) 1812. int dmaengine_pause(struct dma_chan *chan)
176 182
177 This pauses activity on the DMA channel without data loss. 183 This pauses activity on the DMA channel without data loss.
@@ -197,3 +203,20 @@ Further APIs:
197 a running DMA channel. It is recommended that DMA engine users 203 a running DMA channel. It is recommended that DMA engine users
198 pause or stop (via dmaengine_terminate_all()) the channel before 204 pause or stop (via dmaengine_terminate_all()) the channel before
199 using this API. 205 using this API.
206
2075. void dmaengine_synchronize(struct dma_chan *chan)
208
209 Synchronize the termination of the DMA channel to the current context.
210
211 This function should be used after dmaengine_terminate_async() to synchronize
212 the termination of the DMA channel to the current context. The function will
213 wait for the transfer and any running complete callbacks to finish before it
214 returns.
215
216 If dmaengine_terminate_async() is used to stop the DMA channel this function
217 must be called before it is safe to free memory accessed by previously
218 submitted descriptors or to free any resources accessed within the complete
219 callback of previously submitted descriptors.
220
221 The behavior of this function is undefined if dma_async_issue_pending() has
222 been called between dmaengine_terminate_async() and this function.
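
To make the terminate/synchronize pairing described above concrete, a short sketch (an illustration under assumed names, not part of the patch): chan, dev, buf, dma_handle and size stand for the client's own channel and DMA buffer.

	/* From atomic context, e.g. inside a complete callback or IRQ handler: */
	dmaengine_terminate_async(chan);

	/* Later, back in process context, before the DMA'd memory may be freed: */
	dmaengine_synchronize(chan);
	dma_free_coherent(dev, size, buf, dma_handle);

	/* When already in process context, the combined form does both steps: */
	dmaengine_terminate_sync(chan);
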
diff --git a/Documentation/dmaengine/provider.txt b/Documentation/dmaengine/provider.txt
index 67d4ce4df109..122b7f4876bb 100644
--- a/Documentation/dmaengine/provider.txt
+++ b/Documentation/dmaengine/provider.txt
@@ -327,8 +327,24 @@ supported.
327 327
328 * device_terminate_all 328 * device_terminate_all
329 - Aborts all the pending and ongoing transfers on the channel 329 - Aborts all the pending and ongoing transfers on the channel
330 - This command should operate synchronously on the channel, 330 - For aborted transfers the complete callback should not be called
331 terminating right away all the channels 331 - Can be called from atomic context or from within a complete
332 callback of a descriptor. Must not sleep. Drivers must be able
333 to handle this correctly.
334 - Termination may be asynchronous. The driver does not have to
335 wait until the currently active transfer has completely stopped.
336 See device_synchronize.
337
338 * device_synchronize
339 - Must synchronize the termination of a channel to the current
340 context.
341 - Must make sure that memory for previously submitted
342 descriptors is no longer accessed by the DMA controller.
343 - Must make sure that all complete callbacks for previously
344 submitted descriptors have finished running and none are
345 scheduled to run.
346 - May sleep.
347
332 348
333Misc notes (stuff that should be documented, but don't really know 349Misc notes (stuff that should be documented, but don't really know
334where to put them) 350where to put them)
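
On the provider side, the new callback typically maps onto the virt-dma helpers; a sketch mirroring the dma-axi-dmac change later in this series (the foo_* names are placeholders, vchan_synchronize() comes from the virt-dma additions in this patch set):

	static void foo_dma_synchronize(struct dma_chan *c)
	{
		struct foo_dma_chan *chan = to_foo_dma_chan(c);

		/* Kill the vchan tasklet and wait for running callbacks to finish */
		vchan_synchronize(&chan->vchan);
	}

	...
	dma_dev->device_terminate_all = foo_dma_terminate_all;
	dma_dev->device_synchronize = foo_dma_synchronize;
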
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index 819dfda88236..7afbb28d6a0f 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -321,7 +321,8 @@ EXPORT_SYMBOL_GPL(dca_get_tag);
321 * @ops - pointer to struct of dca operation function pointers 321 * @ops - pointer to struct of dca operation function pointers
322 * @priv_size - size of extra mem to be added for provider's needs 322 * @priv_size - size of extra mem to be added for provider's needs
323 */ 323 */
324struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size) 324struct dca_provider *alloc_dca_provider(const struct dca_ops *ops,
325 int priv_size)
325{ 326{
326 struct dca_provider *dca; 327 struct dca_provider *dca;
327 int alloc_size; 328 int alloc_size;
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index 16d0daa058a5..eed6bda01790 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -15,6 +15,7 @@
15#include <linux/device.h> 15#include <linux/device.h>
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/kernel.h>
18#include <linux/list.h> 19#include <linux/list.h>
19#include <linux/mutex.h> 20#include <linux/mutex.h>
20#include <linux/slab.h> 21#include <linux/slab.h>
@@ -72,7 +73,9 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
72 si = (const struct acpi_csrt_shared_info *)&grp[1]; 73 si = (const struct acpi_csrt_shared_info *)&grp[1];
73 74
74 /* Match device by MMIO and IRQ */ 75 /* Match device by MMIO and IRQ */
75 if (si->mmio_base_low != mem || si->gsi_interrupt != irq) 76 if (si->mmio_base_low != lower_32_bits(mem) ||
77 si->mmio_base_high != upper_32_bits(mem) ||
78 si->gsi_interrupt != irq)
76 return 0; 79 return 0;
77 80
78 dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n", 81 dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n",
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index b5e132d4bae5..d0ae4613b87e 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -863,8 +863,12 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
863 * access. Hopefully we can access DDR through both ports (at least on 863 * access. Hopefully we can access DDR through both ports (at least on
864 * SAMA5D4x), so we can use the same interface for source and dest, 864 * SAMA5D4x), so we can use the same interface for source and dest,
865 * that solves the fact we don't know the direction. 865 * that solves the fact we don't know the direction.
866 * ERRATA: Even if useless for memory transfers, the PERID has to not
867 * match the one of another channel. If not, it could lead to spurious
868 * flag status.
866 */ 869 */
867 u32 chan_cc = AT_XDMAC_CC_DIF(0) 870 u32 chan_cc = AT_XDMAC_CC_PERID(0x3f)
871 | AT_XDMAC_CC_DIF(0)
868 | AT_XDMAC_CC_SIF(0) 872 | AT_XDMAC_CC_SIF(0)
869 | AT_XDMAC_CC_MBSIZE_SIXTEEN 873 | AT_XDMAC_CC_MBSIZE_SIXTEEN
870 | AT_XDMAC_CC_TYPE_MEM_TRAN; 874 | AT_XDMAC_CC_TYPE_MEM_TRAN;
@@ -1039,8 +1043,12 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
1039 * access DDR through both ports (at least on SAMA5D4x), so we can use 1043 * access DDR through both ports (at least on SAMA5D4x), so we can use
1040 * the same interface for source and dest, that solves the fact we 1044 * the same interface for source and dest, that solves the fact we
1041 * don't know the direction. 1045 * don't know the direction.
1046 * ERRATA: Even if useless for memory transfers, the PERID has to not
1047 * match the one of another channel. If not, it could lead to spurious
1048 * flag status.
1042 */ 1049 */
1043 u32 chan_cc = AT_XDMAC_CC_DAM_INCREMENTED_AM 1050 u32 chan_cc = AT_XDMAC_CC_PERID(0x3f)
1051 | AT_XDMAC_CC_DAM_INCREMENTED_AM
1044 | AT_XDMAC_CC_SAM_INCREMENTED_AM 1052 | AT_XDMAC_CC_SAM_INCREMENTED_AM
1045 | AT_XDMAC_CC_DIF(0) 1053 | AT_XDMAC_CC_DIF(0)
1046 | AT_XDMAC_CC_SIF(0) 1054 | AT_XDMAC_CC_SIF(0)
@@ -1140,8 +1148,12 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
1140 * access. Hopefully we can access DDR through both ports (at least on 1148 * access. Hopefully we can access DDR through both ports (at least on
1141 * SAMA5D4x), so we can use the same interface for source and dest, 1149 * SAMA5D4x), so we can use the same interface for source and dest,
1142 * that solves the fact we don't know the direction. 1150 * that solves the fact we don't know the direction.
1151 * ERRATA: Even if useless for memory transfers, the PERID has to not
1152 * match the one of another channel. If not, it could lead to spurious
1153 * flag status.
1143 */ 1154 */
1144 u32 chan_cc = AT_XDMAC_CC_DAM_UBS_AM 1155 u32 chan_cc = AT_XDMAC_CC_PERID(0x3f)
1156 | AT_XDMAC_CC_DAM_UBS_AM
1145 | AT_XDMAC_CC_SAM_INCREMENTED_AM 1157 | AT_XDMAC_CC_SAM_INCREMENTED_AM
1146 | AT_XDMAC_CC_DIF(0) 1158 | AT_XDMAC_CC_DIF(0)
1147 | AT_XDMAC_CC_SIF(0) 1159 | AT_XDMAC_CC_SIF(0)
@@ -1995,8 +2007,6 @@ static int at_xdmac_remove(struct platform_device *pdev)
1995 dma_async_device_unregister(&atxdmac->dma); 2007 dma_async_device_unregister(&atxdmac->dma);
1996 clk_disable_unprepare(atxdmac->clk); 2008 clk_disable_unprepare(atxdmac->clk);
1997 2009
1998 synchronize_irq(atxdmac->irq);
1999
2000 free_irq(atxdmac->irq, atxdmac->dma.dev); 2010 free_irq(atxdmac->irq, atxdmac->dma.dev);
2001 2011
2002 for (i = 0; i < atxdmac->dma.chancnt; i++) { 2012 for (i = 0; i < atxdmac->dma.chancnt; i++) {
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 5b2395e7e04d..c3468094393e 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -307,6 +307,13 @@ static int axi_dmac_terminate_all(struct dma_chan *c)
307 return 0; 307 return 0;
308} 308}
309 309
310static void axi_dmac_synchronize(struct dma_chan *c)
311{
312 struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
313
314 vchan_synchronize(&chan->vchan);
315}
316
310static void axi_dmac_issue_pending(struct dma_chan *c) 317static void axi_dmac_issue_pending(struct dma_chan *c)
311{ 318{
312 struct axi_dmac_chan *chan = to_axi_dmac_chan(c); 319 struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
@@ -613,6 +620,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
613 dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic; 620 dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
614 dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved; 621 dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
615 dma_dev->device_terminate_all = axi_dmac_terminate_all; 622 dma_dev->device_terminate_all = axi_dmac_terminate_all;
623 dma_dev->device_synchronize = axi_dmac_synchronize;
616 dma_dev->dev = &pdev->dev; 624 dma_dev->dev = &pdev->dev;
617 dma_dev->chancnt = 1; 625 dma_dev->chancnt = 1;
618 dma_dev->src_addr_widths = BIT(dmac->chan.src_width); 626 dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 3ecec1445adf..c50a247be2e0 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -43,6 +43,7 @@
43 43
44#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 44#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
45 45
46#include <linux/platform_device.h>
46#include <linux/dma-mapping.h> 47#include <linux/dma-mapping.h>
47#include <linux/init.h> 48#include <linux/init.h>
48#include <linux/module.h> 49#include <linux/module.h>
@@ -265,8 +266,11 @@ static void dma_chan_put(struct dma_chan *chan)
265 module_put(dma_chan_to_owner(chan)); 266 module_put(dma_chan_to_owner(chan));
266 267
267 /* This channel is not in use anymore, free it */ 268 /* This channel is not in use anymore, free it */
268 if (!chan->client_count && chan->device->device_free_chan_resources) 269 if (!chan->client_count && chan->device->device_free_chan_resources) {
270 /* Make sure all operations have completed */
271 dmaengine_synchronize(chan);
269 chan->device->device_free_chan_resources(chan); 272 chan->device->device_free_chan_resources(chan);
273 }
270 274
271 /* If the channel is used via a DMA request router, free the mapping */ 275 /* If the channel is used via a DMA request router, free the mapping */
272 if (chan->router && chan->router->route_free) { 276 if (chan->router && chan->router->route_free) {
@@ -493,6 +497,7 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
493 caps->dst_addr_widths = device->dst_addr_widths; 497 caps->dst_addr_widths = device->dst_addr_widths;
494 caps->directions = device->directions; 498 caps->directions = device->directions;
495 caps->residue_granularity = device->residue_granularity; 499 caps->residue_granularity = device->residue_granularity;
500 caps->descriptor_reuse = device->descriptor_reuse;
496 501
497 /* 502 /*
498 * Some devices implement only pause (e.g. to get residuum) but no 503 * Some devices implement only pause (e.g. to get residuum) but no
@@ -511,7 +516,7 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
511{ 516{
512 struct dma_chan *chan; 517 struct dma_chan *chan;
513 518
514 if (!__dma_device_satisfies_mask(dev, mask)) { 519 if (mask && !__dma_device_satisfies_mask(dev, mask)) {
515 pr_debug("%s: wrong capabilities\n", __func__); 520 pr_debug("%s: wrong capabilities\n", __func__);
516 return NULL; 521 return NULL;
517 } 522 }
@@ -542,6 +547,42 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
542 return NULL; 547 return NULL;
543} 548}
544 549
550static struct dma_chan *find_candidate(struct dma_device *device,
551 const dma_cap_mask_t *mask,
552 dma_filter_fn fn, void *fn_param)
553{
554 struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
555 int err;
556
557 if (chan) {
558 /* Found a suitable channel, try to grab, prep, and return it.
559 * We first set DMA_PRIVATE to disable balance_ref_count as this
560 * channel will not be published in the general-purpose
561 * allocator
562 */
563 dma_cap_set(DMA_PRIVATE, device->cap_mask);
564 device->privatecnt++;
565 err = dma_chan_get(chan);
566
567 if (err) {
568 if (err == -ENODEV) {
569 pr_debug("%s: %s module removed\n", __func__,
570 dma_chan_name(chan));
571 list_del_rcu(&device->global_node);
572 } else
573 pr_debug("%s: failed to get %s: (%d)\n",
574 __func__, dma_chan_name(chan), err);
575
576 if (--device->privatecnt == 0)
577 dma_cap_clear(DMA_PRIVATE, device->cap_mask);
578
579 chan = ERR_PTR(err);
580 }
581 }
582
583 return chan ? chan : ERR_PTR(-EPROBE_DEFER);
584}
585
545/** 586/**
546 * dma_get_slave_channel - try to get specific channel exclusively 587 * dma_get_slave_channel - try to get specific channel exclusively
547 * @chan: target channel 588 * @chan: target channel
@@ -580,7 +621,6 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
580{ 621{
581 dma_cap_mask_t mask; 622 dma_cap_mask_t mask;
582 struct dma_chan *chan; 623 struct dma_chan *chan;
583 int err;
584 624
585 dma_cap_zero(mask); 625 dma_cap_zero(mask);
586 dma_cap_set(DMA_SLAVE, mask); 626 dma_cap_set(DMA_SLAVE, mask);
@@ -588,23 +628,11 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
588 /* lock against __dma_request_channel */ 628 /* lock against __dma_request_channel */
589 mutex_lock(&dma_list_mutex); 629 mutex_lock(&dma_list_mutex);
590 630
591 chan = private_candidate(&mask, device, NULL, NULL); 631 chan = find_candidate(device, &mask, NULL, NULL);
592 if (chan) {
593 dma_cap_set(DMA_PRIVATE, device->cap_mask);
594 device->privatecnt++;
595 err = dma_chan_get(chan);
596 if (err) {
597 pr_debug("%s: failed to get %s: (%d)\n",
598 __func__, dma_chan_name(chan), err);
599 chan = NULL;
600 if (--device->privatecnt == 0)
601 dma_cap_clear(DMA_PRIVATE, device->cap_mask);
602 }
603 }
604 632
605 mutex_unlock(&dma_list_mutex); 633 mutex_unlock(&dma_list_mutex);
606 634
607 return chan; 635 return IS_ERR(chan) ? NULL : chan;
608} 636}
609EXPORT_SYMBOL_GPL(dma_get_any_slave_channel); 637EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
610 638
@@ -621,35 +649,15 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
621{ 649{
622 struct dma_device *device, *_d; 650 struct dma_device *device, *_d;
623 struct dma_chan *chan = NULL; 651 struct dma_chan *chan = NULL;
624 int err;
625 652
626 /* Find a channel */ 653 /* Find a channel */
627 mutex_lock(&dma_list_mutex); 654 mutex_lock(&dma_list_mutex);
628 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { 655 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
629 chan = private_candidate(mask, device, fn, fn_param); 656 chan = find_candidate(device, mask, fn, fn_param);
630 if (chan) { 657 if (!IS_ERR(chan))
631 /* Found a suitable channel, try to grab, prep, and 658 break;
632 * return it. We first set DMA_PRIVATE to disable
633 * balance_ref_count as this channel will not be
634 * published in the general-purpose allocator
635 */
636 dma_cap_set(DMA_PRIVATE, device->cap_mask);
637 device->privatecnt++;
638 err = dma_chan_get(chan);
639 659
640 if (err == -ENODEV) { 660 chan = NULL;
641 pr_debug("%s: %s module removed\n",
642 __func__, dma_chan_name(chan));
643 list_del_rcu(&device->global_node);
644 } else if (err)
645 pr_debug("%s: failed to get %s: (%d)\n",
646 __func__, dma_chan_name(chan), err);
647 else
648 break;
649 if (--device->privatecnt == 0)
650 dma_cap_clear(DMA_PRIVATE, device->cap_mask);
651 chan = NULL;
652 }
653 } 661 }
654 mutex_unlock(&dma_list_mutex); 662 mutex_unlock(&dma_list_mutex);
655 663
@@ -662,27 +670,73 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
662} 670}
663EXPORT_SYMBOL_GPL(__dma_request_channel); 671EXPORT_SYMBOL_GPL(__dma_request_channel);
664 672
673static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
674 const char *name,
675 struct device *dev)
676{
677 int i;
678
679 if (!device->filter.mapcnt)
680 return NULL;
681
682 for (i = 0; i < device->filter.mapcnt; i++) {
683 const struct dma_slave_map *map = &device->filter.map[i];
684
685 if (!strcmp(map->devname, dev_name(dev)) &&
686 !strcmp(map->slave, name))
687 return map;
688 }
689
690 return NULL;
691}
692
665/** 693/**
666 * dma_request_slave_channel_reason - try to allocate an exclusive slave channel 694 * dma_request_chan - try to allocate an exclusive slave channel
667 * @dev: pointer to client device structure 695 * @dev: pointer to client device structure
668 * @name: slave channel name 696 * @name: slave channel name
669 * 697 *
670 * Returns pointer to appropriate DMA channel on success or an error pointer. 698 * Returns pointer to appropriate DMA channel on success or an error pointer.
671 */ 699 */
672struct dma_chan *dma_request_slave_channel_reason(struct device *dev, 700struct dma_chan *dma_request_chan(struct device *dev, const char *name)
673 const char *name)
674{ 701{
702 struct dma_device *d, *_d;
703 struct dma_chan *chan = NULL;
704
675 /* If device-tree is present get slave info from here */ 705 /* If device-tree is present get slave info from here */
676 if (dev->of_node) 706 if (dev->of_node)
677 return of_dma_request_slave_channel(dev->of_node, name); 707 chan = of_dma_request_slave_channel(dev->of_node, name);
678 708
679 /* If device was enumerated by ACPI get slave info from here */ 709 /* If device was enumerated by ACPI get slave info from here */
680 if (ACPI_HANDLE(dev)) 710 if (has_acpi_companion(dev) && !chan)
681 return acpi_dma_request_slave_chan_by_name(dev, name); 711 chan = acpi_dma_request_slave_chan_by_name(dev, name);
712
713 if (chan) {
714 /* Valid channel found or requester need to be deferred */
715 if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
716 return chan;
717 }
682 718
683 return ERR_PTR(-ENODEV); 719 /* Try to find the channel via the DMA filter map(s) */
720 mutex_lock(&dma_list_mutex);
721 list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
722 dma_cap_mask_t mask;
723 const struct dma_slave_map *map = dma_filter_match(d, name, dev);
724
725 if (!map)
726 continue;
727
728 dma_cap_zero(mask);
729 dma_cap_set(DMA_SLAVE, mask);
730
731 chan = find_candidate(d, &mask, d->filter.fn, map->param);
732 if (!IS_ERR(chan))
733 break;
734 }
735 mutex_unlock(&dma_list_mutex);
736
737 return chan ? chan : ERR_PTR(-EPROBE_DEFER);
684} 738}
685EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason); 739EXPORT_SYMBOL_GPL(dma_request_chan);
686 740
687/** 741/**
688 * dma_request_slave_channel - try to allocate an exclusive slave channel 742 * dma_request_slave_channel - try to allocate an exclusive slave channel
@@ -694,17 +748,35 @@ EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
694struct dma_chan *dma_request_slave_channel(struct device *dev, 748struct dma_chan *dma_request_slave_channel(struct device *dev,
695 const char *name) 749 const char *name)
696{ 750{
697 struct dma_chan *ch = dma_request_slave_channel_reason(dev, name); 751 struct dma_chan *ch = dma_request_chan(dev, name);
698 if (IS_ERR(ch)) 752 if (IS_ERR(ch))
699 return NULL; 753 return NULL;
700 754
701 dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
702 ch->device->privatecnt++;
703
704 return ch; 755 return ch;
705} 756}
706EXPORT_SYMBOL_GPL(dma_request_slave_channel); 757EXPORT_SYMBOL_GPL(dma_request_slave_channel);
707 758
759/**
760 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
761 * @mask: capabilities that the channel must satisfy
762 *
763 * Returns pointer to appropriate DMA channel on success or an error pointer.
764 */
765struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
766{
767 struct dma_chan *chan;
768
769 if (!mask)
770 return ERR_PTR(-ENODEV);
771
772 chan = __dma_request_channel(mask, NULL, NULL);
773 if (!chan)
774 chan = ERR_PTR(-ENODEV);
775
776 return chan;
777}
778EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
779
708void dma_release_channel(struct dma_chan *chan) 780void dma_release_channel(struct dma_chan *chan)
709{ 781{
710 mutex_lock(&dma_list_mutex); 782 mutex_lock(&dma_list_mutex);
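
For platforms without DT or ACPI, the dma_filter_match() lookup above is fed by a dma_slave_map table; a sketch of the wiring (compare the edma.c hunk further down, which does exactly this; the foo_* names, request line and filter function are illustrative assumptions):

	static const struct dma_slave_map foo_slave_map[] = {
		{ .devname = "foo-uart.0", .slave = "rx", .param = (void *)FOO_UART0_RX },
		{ .devname = "foo-uart.0", .slave = "tx", .param = (void *)FOO_UART0_TX },
	};

	/* In the DMA controller driver, dd being its struct dma_device,
	 * before dma_async_device_register():
	 */
	dd->filter.map = foo_slave_map;
	dd->filter.mapcnt = ARRAY_SIZE(foo_slave_map);
	dd->filter.fn = foo_dma_filter_fn;

	/* A client probing as "foo-uart.0" then simply requests by name: */
	chan = dma_request_chan(&pdev->dev, "rx");
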
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 68a4815750b5..5a417bbdfbd7 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -103,18 +103,21 @@ dw_dma_parse_dt(struct platform_device *pdev)
103 struct device_node *np = pdev->dev.of_node; 103 struct device_node *np = pdev->dev.of_node;
104 struct dw_dma_platform_data *pdata; 104 struct dw_dma_platform_data *pdata;
105 u32 tmp, arr[DW_DMA_MAX_NR_MASTERS]; 105 u32 tmp, arr[DW_DMA_MAX_NR_MASTERS];
106 u32 nr_channels;
106 107
107 if (!np) { 108 if (!np) {
108 dev_err(&pdev->dev, "Missing DT data\n"); 109 dev_err(&pdev->dev, "Missing DT data\n");
109 return NULL; 110 return NULL;
110 } 111 }
111 112
113 if (of_property_read_u32(np, "dma-channels", &nr_channels))
114 return NULL;
115
112 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 116 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
113 if (!pdata) 117 if (!pdata)
114 return NULL; 118 return NULL;
115 119
116 if (of_property_read_u32(np, "dma-channels", &pdata->nr_channels)) 120 pdata->nr_channels = nr_channels;
117 return NULL;
118 121
119 if (of_property_read_bool(np, "is_private")) 122 if (of_property_read_bool(np, "is_private"))
120 pdata->is_private = true; 123 pdata->is_private = true;
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 6b03e4e84e6b..6b3e9d991010 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -2297,6 +2297,10 @@ static int edma_probe(struct platform_device *pdev)
2297 edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot); 2297 edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot);
2298 } 2298 }
2299 2299
2300 ecc->dma_slave.filter.map = info->slave_map;
2301 ecc->dma_slave.filter.mapcnt = info->slavecnt;
2302 ecc->dma_slave.filter.fn = edma_filter_fn;
2303
2300 ret = dma_async_device_register(&ecc->dma_slave); 2304 ret = dma_async_device_register(&ecc->dma_slave);
2301 if (ret) { 2305 if (ret) {
2302 dev_err(dev, "slave ddev registration failed (%d)\n", ret); 2306 dev_err(dev, "slave ddev registration failed (%d)\n", ret);
@@ -2404,7 +2408,13 @@ static struct platform_driver edma_driver = {
2404 }, 2408 },
2405}; 2409};
2406 2410
2411static int edma_tptc_probe(struct platform_device *pdev)
2412{
2413 return 0;
2414}
2415
2407static struct platform_driver edma_tptc_driver = { 2416static struct platform_driver edma_tptc_driver = {
2417 .probe = edma_tptc_probe,
2408 .driver = { 2418 .driver = {
2409 .name = "edma3-tptc", 2419 .name = "edma3-tptc",
2410 .of_match_table = edma_tptc_of_ids, 2420 .of_match_table = edma_tptc_of_ids,
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index 915eec3cc279..be2e62b87948 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -116,6 +116,10 @@
116 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ 116 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
117 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ 117 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
118 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) 118 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
119enum fsl_edma_pm_state {
120 RUNNING = 0,
121 SUSPENDED,
122};
119 123
120struct fsl_edma_hw_tcd { 124struct fsl_edma_hw_tcd {
121 __le32 saddr; 125 __le32 saddr;
@@ -147,6 +151,9 @@ struct fsl_edma_slave_config {
147struct fsl_edma_chan { 151struct fsl_edma_chan {
148 struct virt_dma_chan vchan; 152 struct virt_dma_chan vchan;
149 enum dma_status status; 153 enum dma_status status;
154 enum fsl_edma_pm_state pm_state;
155 bool idle;
156 u32 slave_id;
150 struct fsl_edma_engine *edma; 157 struct fsl_edma_engine *edma;
151 struct fsl_edma_desc *edesc; 158 struct fsl_edma_desc *edesc;
152 struct fsl_edma_slave_config fsc; 159 struct fsl_edma_slave_config fsc;
@@ -298,6 +305,7 @@ static int fsl_edma_terminate_all(struct dma_chan *chan)
298 spin_lock_irqsave(&fsl_chan->vchan.lock, flags); 305 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
299 fsl_edma_disable_request(fsl_chan); 306 fsl_edma_disable_request(fsl_chan);
300 fsl_chan->edesc = NULL; 307 fsl_chan->edesc = NULL;
308 fsl_chan->idle = true;
301 vchan_get_all_descriptors(&fsl_chan->vchan, &head); 309 vchan_get_all_descriptors(&fsl_chan->vchan, &head);
302 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); 310 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
303 vchan_dma_desc_free_list(&fsl_chan->vchan, &head); 311 vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
@@ -313,6 +321,7 @@ static int fsl_edma_pause(struct dma_chan *chan)
313 if (fsl_chan->edesc) { 321 if (fsl_chan->edesc) {
314 fsl_edma_disable_request(fsl_chan); 322 fsl_edma_disable_request(fsl_chan);
315 fsl_chan->status = DMA_PAUSED; 323 fsl_chan->status = DMA_PAUSED;
324 fsl_chan->idle = true;
316 } 325 }
317 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); 326 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
318 return 0; 327 return 0;
@@ -327,6 +336,7 @@ static int fsl_edma_resume(struct dma_chan *chan)
327 if (fsl_chan->edesc) { 336 if (fsl_chan->edesc) {
328 fsl_edma_enable_request(fsl_chan); 337 fsl_edma_enable_request(fsl_chan);
329 fsl_chan->status = DMA_IN_PROGRESS; 338 fsl_chan->status = DMA_IN_PROGRESS;
339 fsl_chan->idle = false;
330 } 340 }
331 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); 341 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
332 return 0; 342 return 0;
@@ -648,6 +658,7 @@ static void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
648 fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd); 658 fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
649 fsl_edma_enable_request(fsl_chan); 659 fsl_edma_enable_request(fsl_chan);
650 fsl_chan->status = DMA_IN_PROGRESS; 660 fsl_chan->status = DMA_IN_PROGRESS;
661 fsl_chan->idle = false;
651} 662}
652 663
653static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id) 664static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
@@ -676,6 +687,7 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
676 vchan_cookie_complete(&fsl_chan->edesc->vdesc); 687 vchan_cookie_complete(&fsl_chan->edesc->vdesc);
677 fsl_chan->edesc = NULL; 688 fsl_chan->edesc = NULL;
678 fsl_chan->status = DMA_COMPLETE; 689 fsl_chan->status = DMA_COMPLETE;
690 fsl_chan->idle = true;
679 } else { 691 } else {
680 vchan_cyclic_callback(&fsl_chan->edesc->vdesc); 692 vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
681 } 693 }
@@ -704,6 +716,7 @@ static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
704 edma_writeb(fsl_edma, EDMA_CERR_CERR(ch), 716 edma_writeb(fsl_edma, EDMA_CERR_CERR(ch),
705 fsl_edma->membase + EDMA_CERR); 717 fsl_edma->membase + EDMA_CERR);
706 fsl_edma->chans[ch].status = DMA_ERROR; 718 fsl_edma->chans[ch].status = DMA_ERROR;
719 fsl_edma->chans[ch].idle = true;
707 } 720 }
708 } 721 }
709 return IRQ_HANDLED; 722 return IRQ_HANDLED;
@@ -724,6 +737,12 @@ static void fsl_edma_issue_pending(struct dma_chan *chan)
724 737
725 spin_lock_irqsave(&fsl_chan->vchan.lock, flags); 738 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
726 739
740 if (unlikely(fsl_chan->pm_state != RUNNING)) {
741 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
742 /* cannot submit due to suspend */
743 return;
744 }
745
727 if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc) 746 if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
728 fsl_edma_xfer_desc(fsl_chan); 747 fsl_edma_xfer_desc(fsl_chan);
729 748
@@ -735,6 +754,7 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
735{ 754{
736 struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data; 755 struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
737 struct dma_chan *chan, *_chan; 756 struct dma_chan *chan, *_chan;
757 struct fsl_edma_chan *fsl_chan;
738 unsigned long chans_per_mux = fsl_edma->n_chans / DMAMUX_NR; 758 unsigned long chans_per_mux = fsl_edma->n_chans / DMAMUX_NR;
739 759
740 if (dma_spec->args_count != 2) 760 if (dma_spec->args_count != 2)
@@ -748,8 +768,10 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
748 chan = dma_get_slave_channel(chan); 768 chan = dma_get_slave_channel(chan);
749 if (chan) { 769 if (chan) {
750 chan->device->privatecnt++; 770 chan->device->privatecnt++;
751 fsl_edma_chan_mux(to_fsl_edma_chan(chan), 771 fsl_chan = to_fsl_edma_chan(chan);
752 dma_spec->args[1], true); 772 fsl_chan->slave_id = dma_spec->args[1];
773 fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id,
774 true);
753 mutex_unlock(&fsl_edma->fsl_edma_mutex); 775 mutex_unlock(&fsl_edma->fsl_edma_mutex);
754 return chan; 776 return chan;
755 } 777 }
@@ -888,7 +910,9 @@ static int fsl_edma_probe(struct platform_device *pdev)
888 struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i]; 910 struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
889 911
890 fsl_chan->edma = fsl_edma; 912 fsl_chan->edma = fsl_edma;
891 913 fsl_chan->pm_state = RUNNING;
914 fsl_chan->slave_id = 0;
915 fsl_chan->idle = true;
892 fsl_chan->vchan.desc_free = fsl_edma_free_desc; 916 fsl_chan->vchan.desc_free = fsl_edma_free_desc;
893 vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev); 917 vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
894 918
@@ -959,6 +983,60 @@ static int fsl_edma_remove(struct platform_device *pdev)
959 return 0; 983 return 0;
960} 984}
961 985
986static int fsl_edma_suspend_late(struct device *dev)
987{
988 struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
989 struct fsl_edma_chan *fsl_chan;
990 unsigned long flags;
991 int i;
992
993 for (i = 0; i < fsl_edma->n_chans; i++) {
994 fsl_chan = &fsl_edma->chans[i];
995 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
996 /* Make sure chan is idle or will force disable. */
997 if (unlikely(!fsl_chan->idle)) {
998 dev_warn(dev, "WARN: There is non-idle channel.");
999 fsl_edma_disable_request(fsl_chan);
1000 fsl_edma_chan_mux(fsl_chan, 0, false);
1001 }
1002
1003 fsl_chan->pm_state = SUSPENDED;
1004 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
1005 }
1006
1007 return 0;
1008}
1009
1010static int fsl_edma_resume_early(struct device *dev)
1011{
1012 struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
1013 struct fsl_edma_chan *fsl_chan;
1014 int i;
1015
1016 for (i = 0; i < fsl_edma->n_chans; i++) {
1017 fsl_chan = &fsl_edma->chans[i];
1018 fsl_chan->pm_state = RUNNING;
1019 edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
1020 if (fsl_chan->slave_id != 0)
1021 fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true);
1022 }
1023
1024 edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA,
1025 fsl_edma->membase + EDMA_CR);
1026
1027 return 0;
1028}
1029
1030/*
1031 * eDMA provides the service to others, so it should be suspend late
1032 * and resume early. When eDMA suspend, all of the clients should stop
1033 * the DMA data transmission and let the channel idle.
1034 */
1035static const struct dev_pm_ops fsl_edma_pm_ops = {
1036 .suspend_late = fsl_edma_suspend_late,
1037 .resume_early = fsl_edma_resume_early,
1038};
1039
962static const struct of_device_id fsl_edma_dt_ids[] = { 1040static const struct of_device_id fsl_edma_dt_ids[] = {
963 { .compatible = "fsl,vf610-edma", }, 1041 { .compatible = "fsl,vf610-edma", },
964 { /* sentinel */ } 1042 { /* sentinel */ }
@@ -969,6 +1047,7 @@ static struct platform_driver fsl_edma_driver = {
969 .driver = { 1047 .driver = {
970 .name = "fsl-edma", 1048 .name = "fsl-edma",
971 .of_match_table = fsl_edma_dt_ids, 1049 .of_match_table = fsl_edma_dt_ids,
1050 .pm = &fsl_edma_pm_ops,
972 }, 1051 },
973 .probe = fsl_edma_probe, 1052 .probe = fsl_edma_probe,
974 .remove = fsl_edma_remove, 1053 .remove = fsl_edma_remove,
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index 823ad728aecf..eef145edb936 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -228,6 +228,8 @@ static struct dma_async_tx_descriptor *hsu_dma_prep_slave_sg(
228 for_each_sg(sgl, sg, sg_len, i) { 228 for_each_sg(sgl, sg, sg_len, i) {
229 desc->sg[i].addr = sg_dma_address(sg); 229 desc->sg[i].addr = sg_dma_address(sg);
230 desc->sg[i].len = sg_dma_len(sg); 230 desc->sg[i].len = sg_dma_len(sg);
231
232 desc->length += sg_dma_len(sg);
231 } 233 }
232 234
233 desc->nents = sg_len; 235 desc->nents = sg_len;
@@ -249,21 +251,10 @@ static void hsu_dma_issue_pending(struct dma_chan *chan)
249 spin_unlock_irqrestore(&hsuc->vchan.lock, flags); 251 spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
250} 252}
251 253
252static size_t hsu_dma_desc_size(struct hsu_dma_desc *desc)
253{
254 size_t bytes = 0;
255 unsigned int i;
256
257 for (i = desc->active; i < desc->nents; i++)
258 bytes += desc->sg[i].len;
259
260 return bytes;
261}
262
263static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc) 254static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
264{ 255{
265 struct hsu_dma_desc *desc = hsuc->desc; 256 struct hsu_dma_desc *desc = hsuc->desc;
266 size_t bytes = hsu_dma_desc_size(desc); 257 size_t bytes = desc->length;
267 int i; 258 int i;
268 259
269 i = desc->active % HSU_DMA_CHAN_NR_DESC; 260 i = desc->active % HSU_DMA_CHAN_NR_DESC;
@@ -294,7 +285,7 @@ static enum dma_status hsu_dma_tx_status(struct dma_chan *chan,
294 dma_set_residue(state, bytes); 285 dma_set_residue(state, bytes);
295 status = hsuc->desc->status; 286 status = hsuc->desc->status;
296 } else if (vdesc) { 287 } else if (vdesc) {
297 bytes = hsu_dma_desc_size(to_hsu_dma_desc(vdesc)); 288 bytes = to_hsu_dma_desc(vdesc)->length;
298 dma_set_residue(state, bytes); 289 dma_set_residue(state, bytes);
299 } 290 }
300 spin_unlock_irqrestore(&hsuc->vchan.lock, flags); 291 spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
index f06579c6d548..578a8ee8cd05 100644
--- a/drivers/dma/hsu/hsu.h
+++ b/drivers/dma/hsu/hsu.h
@@ -65,6 +65,7 @@ struct hsu_dma_desc {
65 enum dma_transfer_direction direction; 65 enum dma_transfer_direction direction;
66 struct hsu_dma_sg *sg; 66 struct hsu_dma_sg *sg;
67 unsigned int nents; 67 unsigned int nents;
68 size_t length;
68 unsigned int active; 69 unsigned int active;
69 enum dma_status status; 70 enum dma_status status;
70}; 71};
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 7d56b47e4fcf..3cb7b2c78197 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -178,20 +178,12 @@ static irqreturn_t idma64_irq(int irq, void *dev)
178 if (!status) 178 if (!status)
179 return IRQ_NONE; 179 return IRQ_NONE;
180 180
181 /* Disable interrupts */
182 channel_clear_bit(idma64, MASK(XFER), idma64->all_chan_mask);
183 channel_clear_bit(idma64, MASK(ERROR), idma64->all_chan_mask);
184
185 status_xfer = dma_readl(idma64, RAW(XFER)); 181 status_xfer = dma_readl(idma64, RAW(XFER));
186 status_err = dma_readl(idma64, RAW(ERROR)); 182 status_err = dma_readl(idma64, RAW(ERROR));
187 183
188 for (i = 0; i < idma64->dma.chancnt; i++) 184 for (i = 0; i < idma64->dma.chancnt; i++)
189 idma64_chan_irq(idma64, i, status_err, status_xfer); 185 idma64_chan_irq(idma64, i, status_err, status_xfer);
190 186
191 /* Re-enable interrupts */
192 channel_set_bit(idma64, MASK(XFER), idma64->all_chan_mask);
193 channel_set_bit(idma64, MASK(ERROR), idma64->all_chan_mask);
194
195 return IRQ_HANDLED; 187 return IRQ_HANDLED;
196} 188}
197 189
@@ -239,7 +231,7 @@ static void idma64_vdesc_free(struct virt_dma_desc *vdesc)
239 idma64_desc_free(idma64c, to_idma64_desc(vdesc)); 231 idma64_desc_free(idma64c, to_idma64_desc(vdesc));
240} 232}
241 233
242static u64 idma64_hw_desc_fill(struct idma64_hw_desc *hw, 234static void idma64_hw_desc_fill(struct idma64_hw_desc *hw,
243 struct dma_slave_config *config, 235 struct dma_slave_config *config,
244 enum dma_transfer_direction direction, u64 llp) 236 enum dma_transfer_direction direction, u64 llp)
245{ 237{
@@ -276,26 +268,26 @@ static u64 idma64_hw_desc_fill(struct idma64_hw_desc *hw,
276 IDMA64C_CTLL_SRC_WIDTH(src_width); 268 IDMA64C_CTLL_SRC_WIDTH(src_width);
277 269
278 lli->llp = llp; 270 lli->llp = llp;
279 return hw->llp;
280} 271}
281 272
282static void idma64_desc_fill(struct idma64_chan *idma64c, 273static void idma64_desc_fill(struct idma64_chan *idma64c,
283 struct idma64_desc *desc) 274 struct idma64_desc *desc)
284{ 275{
285 struct dma_slave_config *config = &idma64c->config; 276 struct dma_slave_config *config = &idma64c->config;
286 struct idma64_hw_desc *hw = &desc->hw[desc->ndesc - 1]; 277 unsigned int i = desc->ndesc;
278 struct idma64_hw_desc *hw = &desc->hw[i - 1];
287 struct idma64_lli *lli = hw->lli; 279 struct idma64_lli *lli = hw->lli;
288 u64 llp = 0; 280 u64 llp = 0;
289 unsigned int i = desc->ndesc;
290 281
291 /* Fill the hardware descriptors and link them to a list */ 282 /* Fill the hardware descriptors and link them to a list */
292 do { 283 do {
293 hw = &desc->hw[--i]; 284 hw = &desc->hw[--i];
294 llp = idma64_hw_desc_fill(hw, config, desc->direction, llp); 285 idma64_hw_desc_fill(hw, config, desc->direction, llp);
286 llp = hw->llp;
295 desc->length += hw->len; 287 desc->length += hw->len;
296 } while (i); 288 } while (i);
297 289
298 /* Trigger interrupt after last block */ 290 /* Trigger an interrupt after the last block is transfered */
299 lli->ctllo |= IDMA64C_CTLL_INT_EN; 291 lli->ctllo |= IDMA64C_CTLL_INT_EN;
300} 292}
301 293
@@ -596,6 +588,8 @@ static int idma64_probe(struct idma64_chip *chip)
596 588
597 idma64->dma.dev = chip->dev; 589 idma64->dma.dev = chip->dev;
598 590
591 dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
592
599 ret = dma_async_device_register(&idma64->dma); 593 ret = dma_async_device_register(&idma64->dma);
600 if (ret) 594 if (ret)
601 return ret; 595 return ret;
diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h
index f6aeff0af8a5..8423f13ed0da 100644
--- a/drivers/dma/idma64.h
+++ b/drivers/dma/idma64.h
@@ -54,7 +54,8 @@
54#define IDMA64C_CTLL_LLP_S_EN (1 << 28) /* src block chain */ 54#define IDMA64C_CTLL_LLP_S_EN (1 << 28) /* src block chain */
55 55
56/* Bitfields in CTL_HI */ 56/* Bitfields in CTL_HI */
57#define IDMA64C_CTLH_BLOCK_TS(x) ((x) & ((1 << 17) - 1)) 57#define IDMA64C_CTLH_BLOCK_TS_MASK ((1 << 17) - 1)
58#define IDMA64C_CTLH_BLOCK_TS(x) ((x) & IDMA64C_CTLH_BLOCK_TS_MASK)
58#define IDMA64C_CTLH_DONE (1 << 17) 59#define IDMA64C_CTLH_DONE (1 << 17)
59 60
60/* Bitfields in CFG_LO */ 61/* Bitfields in CFG_LO */
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index 9ca56830cc63..a4c53be482cf 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -651,6 +651,48 @@ static enum dma_status mdc_tx_status(struct dma_chan *chan,
651 return ret; 651 return ret;
652} 652}
653 653
654static unsigned int mdc_get_new_events(struct mdc_chan *mchan)
655{
656 u32 val, processed, done1, done2;
657 unsigned int ret;
658
659 val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
660 processed = (val >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
661 MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
662 /*
663 * CMDS_DONE may have incremented between reading CMDS_PROCESSED
664 * and clearing INT_ACTIVE. Re-read CMDS_PROCESSED to ensure we
665 * didn't miss a command completion.
666 */
667 do {
668 val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
669
670 done1 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
671 MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
672
673 val &= ~((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK <<
674 MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) |
675 MDC_CMDS_PROCESSED_INT_ACTIVE);
676
677 val |= done1 << MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT;
678
679 mdc_chan_writel(mchan, val, MDC_CMDS_PROCESSED);
680
681 val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
682
683 done2 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
684 MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
685 } while (done1 != done2);
686
687 if (done1 >= processed)
688 ret = done1 - processed;
689 else
690 ret = ((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK + 1) -
691 processed) + done1;
692
693 return ret;
694}
695
654static int mdc_terminate_all(struct dma_chan *chan) 696static int mdc_terminate_all(struct dma_chan *chan)
655{ 697{
656 struct mdc_chan *mchan = to_mdc_chan(chan); 698 struct mdc_chan *mchan = to_mdc_chan(chan);
@@ -667,6 +709,8 @@ static int mdc_terminate_all(struct dma_chan *chan)
667 mchan->desc = NULL; 709 mchan->desc = NULL;
668 vchan_get_all_descriptors(&mchan->vc, &head); 710 vchan_get_all_descriptors(&mchan->vc, &head);
669 711
712 mdc_get_new_events(mchan);
713
670 spin_unlock_irqrestore(&mchan->vc.lock, flags); 714 spin_unlock_irqrestore(&mchan->vc.lock, flags);
671 715
672 if (mdesc) 716 if (mdesc)
@@ -703,35 +747,17 @@ static irqreturn_t mdc_chan_irq(int irq, void *dev_id)
703{ 747{
704 struct mdc_chan *mchan = (struct mdc_chan *)dev_id; 748 struct mdc_chan *mchan = (struct mdc_chan *)dev_id;
705 struct mdc_tx_desc *mdesc; 749 struct mdc_tx_desc *mdesc;
706 u32 val, processed, done1, done2; 750 unsigned int i, new_events;
707 unsigned int i;
708 751
709 spin_lock(&mchan->vc.lock); 752 spin_lock(&mchan->vc.lock);
710 753
711 val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
712 processed = (val >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
713 MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
714 /*
715 * CMDS_DONE may have incremented between reading CMDS_PROCESSED
716 * and clearing INT_ACTIVE. Re-read CMDS_PROCESSED to ensure we
717 * didn't miss a command completion.
718 */
719 do {
720 val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
721 done1 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
722 MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
723 val &= ~((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK <<
724 MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) |
725 MDC_CMDS_PROCESSED_INT_ACTIVE);
726 val |= done1 << MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT;
727 mdc_chan_writel(mchan, val, MDC_CMDS_PROCESSED);
728 val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
729 done2 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
730 MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
731 } while (done1 != done2);
732
733 dev_dbg(mdma2dev(mchan->mdma), "IRQ on channel %d\n", mchan->chan_nr); 754 dev_dbg(mdma2dev(mchan->mdma), "IRQ on channel %d\n", mchan->chan_nr);
734 755
756 new_events = mdc_get_new_events(mchan);
757
758 if (!new_events)
759 goto out;
760
735 mdesc = mchan->desc; 761 mdesc = mchan->desc;
736 if (!mdesc) { 762 if (!mdesc) {
737 dev_warn(mdma2dev(mchan->mdma), 763 dev_warn(mdma2dev(mchan->mdma),
@@ -740,8 +766,7 @@ static irqreturn_t mdc_chan_irq(int irq, void *dev_id)
740 goto out; 766 goto out;
741 } 767 }
742 768
743 for (i = processed; i != done1; 769 for (i = 0; i < new_events; i++) {
744 i = (i + 1) % (MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK + 1)) {
745 /* 770 /*
746 * The first interrupt in a transfer indicates that the 771 * The first interrupt in a transfer indicates that the
747 * command list has been loaded, not that a command has 772 * command list has been loaded, not that a command has
@@ -979,7 +1004,6 @@ static int mdc_dma_remove(struct platform_device *pdev)
979 vc.chan.device_node) { 1004 vc.chan.device_node) {
980 list_del(&mchan->vc.chan.device_node); 1005 list_del(&mchan->vc.chan.device_node);
981 1006
982 synchronize_irq(mchan->irq);
983 devm_free_irq(&pdev->dev, mchan->irq, mchan); 1007 devm_free_irq(&pdev->dev, mchan->irq, mchan);
984 1008
985 tasklet_kill(&mchan->vc.task); 1009 tasklet_kill(&mchan->vc.task);
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 2cb7c308d5c7..0b9b6b07db9e 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -224,7 +224,7 @@ static u8 ioat_dca_get_tag(struct dca_provider *dca,
224 return tag; 224 return tag;
225} 225}
226 226
227static struct dca_ops ioat_dca_ops = { 227static const struct dca_ops ioat_dca_ops = {
228 .add_requester = ioat_dca_add_requester, 228 .add_requester = ioat_dca_add_requester,
229 .remove_requester = ioat_dca_remove_requester, 229 .remove_requester = ioat_dca_remove_requester,
230 .get_tag = ioat_dca_get_tag, 230 .get_tag = ioat_dca_get_tag,
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 8f4e607d5817..b8f48074789f 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -235,43 +235,11 @@ ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index)
235 return ioat_dma->idx[index]; 235 return ioat_dma->idx[index];
236} 236}
237 237
238static inline u64 ioat_chansts_32(struct ioatdma_chan *ioat_chan)
239{
240 u8 ver = ioat_chan->ioat_dma->version;
241 u64 status;
242 u32 status_lo;
243
244 /* We need to read the low address first as this causes the
245 * chipset to latch the upper bits for the subsequent read
246 */
247 status_lo = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver));
248 status = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver));
249 status <<= 32;
250 status |= status_lo;
251
252 return status;
253}
254
255#if BITS_PER_LONG == 64
256
257static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan) 238static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan)
258{ 239{
259 u8 ver = ioat_chan->ioat_dma->version; 240 return readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET);
260 u64 status;
261
262 /* With IOAT v3.3 the status register is 64bit. */
263 if (ver >= IOAT_VER_3_3)
264 status = readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET(ver));
265 else
266 status = ioat_chansts_32(ioat_chan);
267
268 return status;
269} 241}
270 242
271#else
272#define ioat_chansts ioat_chansts_32
273#endif
274
275static inline u64 ioat_chansts_to_addr(u64 status) 243static inline u64 ioat_chansts_to_addr(u64 status)
276{ 244{
277 return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; 245 return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index 909352f74c89..4994a3623aee 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -99,19 +99,9 @@
99#define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */ 99#define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */
100#define IOAT_DMA_COMP_V2 0x0002 /* Compatibility with DMA version 2 */ 100#define IOAT_DMA_COMP_V2 0x0002 /* Compatibility with DMA version 2 */
101 101
102 102/* IOAT1 define left for i7300_idle driver to not fail compiling */
103#define IOAT1_CHANSTS_OFFSET 0x04 /* 64-bit Channel Status Register */ 103#define IOAT1_CHANSTS_OFFSET 0x04
104#define IOAT2_CHANSTS_OFFSET 0x08 /* 64-bit Channel Status Register */ 104#define IOAT_CHANSTS_OFFSET 0x08 /* 64-bit Channel Status Register */
105#define IOAT_CHANSTS_OFFSET(ver) ((ver) < IOAT_VER_2_0 \
106 ? IOAT1_CHANSTS_OFFSET : IOAT2_CHANSTS_OFFSET)
107#define IOAT1_CHANSTS_OFFSET_LOW 0x04
108#define IOAT2_CHANSTS_OFFSET_LOW 0x08
109#define IOAT_CHANSTS_OFFSET_LOW(ver) ((ver) < IOAT_VER_2_0 \
110 ? IOAT1_CHANSTS_OFFSET_LOW : IOAT2_CHANSTS_OFFSET_LOW)
111#define IOAT1_CHANSTS_OFFSET_HIGH 0x08
112#define IOAT2_CHANSTS_OFFSET_HIGH 0x0C
113#define IOAT_CHANSTS_OFFSET_HIGH(ver) ((ver) < IOAT_VER_2_0 \
114 ? IOAT1_CHANSTS_OFFSET_HIGH : IOAT2_CHANSTS_OFFSET_HIGH)
115#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR (~0x3fULL) 105#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR (~0x3fULL)
116#define IOAT_CHANSTS_SOFT_ERR 0x10ULL 106#define IOAT_CHANSTS_SOFT_ERR 0x10ULL
117#define IOAT_CHANSTS_UNAFFILIATED_ERR 0x8ULL 107#define IOAT_CHANSTS_UNAFFILIATED_ERR 0x8ULL
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 1dfc71c90123..9794b073d7d7 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -28,8 +28,6 @@
28struct omap_dmadev { 28struct omap_dmadev {
29 struct dma_device ddev; 29 struct dma_device ddev;
30 spinlock_t lock; 30 spinlock_t lock;
31 struct tasklet_struct task;
32 struct list_head pending;
33 void __iomem *base; 31 void __iomem *base;
34 const struct omap_dma_reg *reg_map; 32 const struct omap_dma_reg *reg_map;
35 struct omap_system_dma_plat_info *plat; 33 struct omap_system_dma_plat_info *plat;
@@ -42,7 +40,6 @@ struct omap_dmadev {
42 40
43struct omap_chan { 41struct omap_chan {
44 struct virt_dma_chan vc; 42 struct virt_dma_chan vc;
45 struct list_head node;
46 void __iomem *channel_base; 43 void __iomem *channel_base;
47 const struct omap_dma_reg *reg_map; 44 const struct omap_dma_reg *reg_map;
48 uint32_t ccr; 45 uint32_t ccr;
@@ -454,33 +451,6 @@ static void omap_dma_callback(int ch, u16 status, void *data)
454 spin_unlock_irqrestore(&c->vc.lock, flags); 451 spin_unlock_irqrestore(&c->vc.lock, flags);
455} 452}
456 453
457/*
458 * This callback schedules all pending channels. We could be more
459 * clever here by postponing allocation of the real DMA channels to
460 * this point, and freeing them when our virtual channel becomes idle.
461 *
462 * We would then need to deal with 'all channels in-use'
463 */
464static void omap_dma_sched(unsigned long data)
465{
466 struct omap_dmadev *d = (struct omap_dmadev *)data;
467 LIST_HEAD(head);
468
469 spin_lock_irq(&d->lock);
470 list_splice_tail_init(&d->pending, &head);
471 spin_unlock_irq(&d->lock);
472
473 while (!list_empty(&head)) {
474 struct omap_chan *c = list_first_entry(&head,
475 struct omap_chan, node);
476
477 spin_lock_irq(&c->vc.lock);
478 list_del_init(&c->node);
479 omap_dma_start_desc(c);
480 spin_unlock_irq(&c->vc.lock);
481 }
482}
483
484static irqreturn_t omap_dma_irq(int irq, void *devid) 454static irqreturn_t omap_dma_irq(int irq, void *devid)
485{ 455{
486 struct omap_dmadev *od = devid; 456 struct omap_dmadev *od = devid;
@@ -703,8 +673,14 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
703 struct omap_chan *c = to_omap_dma_chan(chan); 673 struct omap_chan *c = to_omap_dma_chan(chan);
704 struct virt_dma_desc *vd; 674 struct virt_dma_desc *vd;
705 enum dma_status ret; 675 enum dma_status ret;
676 uint32_t ccr;
706 unsigned long flags; 677 unsigned long flags;
707 678
679 ccr = omap_dma_chan_read(c, CCR);
680 /* The channel is no longer active, handle the completion right away */
681 if (!(ccr & CCR_ENABLE))
682 omap_dma_callback(c->dma_ch, 0, c);
683
708 ret = dma_cookie_status(chan, cookie, txstate); 684 ret = dma_cookie_status(chan, cookie, txstate);
709 if (ret == DMA_COMPLETE || !txstate) 685 if (ret == DMA_COMPLETE || !txstate)
710 return ret; 686 return ret;
@@ -719,7 +695,7 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
719 695
720 if (d->dir == DMA_MEM_TO_DEV) 696 if (d->dir == DMA_MEM_TO_DEV)
721 pos = omap_dma_get_src_pos(c); 697 pos = omap_dma_get_src_pos(c);
722 else if (d->dir == DMA_DEV_TO_MEM) 698 else if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM)
723 pos = omap_dma_get_dst_pos(c); 699 pos = omap_dma_get_dst_pos(c);
724 else 700 else
725 pos = 0; 701 pos = 0;
@@ -739,22 +715,8 @@ static void omap_dma_issue_pending(struct dma_chan *chan)
739 unsigned long flags; 715 unsigned long flags;
740 716
741 spin_lock_irqsave(&c->vc.lock, flags); 717 spin_lock_irqsave(&c->vc.lock, flags);
742 if (vchan_issue_pending(&c->vc) && !c->desc) { 718 if (vchan_issue_pending(&c->vc) && !c->desc)
743 /* 719 omap_dma_start_desc(c);
744 * c->cyclic is used only by audio and in this case the DMA need
745 * to be started without delay.
746 */
747 if (!c->cyclic) {
748 struct omap_dmadev *d = to_omap_dma_dev(chan->device);
749 spin_lock(&d->lock);
750 if (list_empty(&c->node))
751 list_add_tail(&c->node, &d->pending);
752 spin_unlock(&d->lock);
753 tasklet_schedule(&d->task);
754 } else {
755 omap_dma_start_desc(c);
756 }
757 }
758 spin_unlock_irqrestore(&c->vc.lock, flags); 720 spin_unlock_irqrestore(&c->vc.lock, flags);
759} 721}
760 722
@@ -768,7 +730,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
768 struct scatterlist *sgent; 730 struct scatterlist *sgent;
769 struct omap_desc *d; 731 struct omap_desc *d;
770 dma_addr_t dev_addr; 732 dma_addr_t dev_addr;
771 unsigned i, j = 0, es, en, frame_bytes; 733 unsigned i, es, en, frame_bytes;
772 u32 burst; 734 u32 burst;
773 735
774 if (dir == DMA_DEV_TO_MEM) { 736 if (dir == DMA_DEV_TO_MEM) {
@@ -845,13 +807,12 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
845 en = burst; 807 en = burst;
846 frame_bytes = es_bytes[es] * en; 808 frame_bytes = es_bytes[es] * en;
847 for_each_sg(sgl, sgent, sglen, i) { 809 for_each_sg(sgl, sgent, sglen, i) {
848 d->sg[j].addr = sg_dma_address(sgent); 810 d->sg[i].addr = sg_dma_address(sgent);
849 d->sg[j].en = en; 811 d->sg[i].en = en;
850 d->sg[j].fn = sg_dma_len(sgent) / frame_bytes; 812 d->sg[i].fn = sg_dma_len(sgent) / frame_bytes;
851 j++;
852 } 813 }
853 814
854 d->sglen = j; 815 d->sglen = sglen;
855 816
856 return vchan_tx_prep(&c->vc, &d->vd, tx_flags); 817 return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
857} 818}
@@ -1018,17 +979,11 @@ static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config
1018static int omap_dma_terminate_all(struct dma_chan *chan) 979static int omap_dma_terminate_all(struct dma_chan *chan)
1019{ 980{
1020 struct omap_chan *c = to_omap_dma_chan(chan); 981 struct omap_chan *c = to_omap_dma_chan(chan);
1021 struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
1022 unsigned long flags; 982 unsigned long flags;
1023 LIST_HEAD(head); 983 LIST_HEAD(head);
1024 984
1025 spin_lock_irqsave(&c->vc.lock, flags); 985 spin_lock_irqsave(&c->vc.lock, flags);
1026 986
1027 /* Prevent this channel being scheduled */
1028 spin_lock(&d->lock);
1029 list_del_init(&c->node);
1030 spin_unlock(&d->lock);
1031
1032 /* 987 /*
1033 * Stop DMA activity: we assume the callback will not be called 988 * Stop DMA activity: we assume the callback will not be called
1034 * after omap_dma_stop() returns (even if it does, it will see 989 * after omap_dma_stop() returns (even if it does, it will see
@@ -1102,14 +1057,12 @@ static int omap_dma_chan_init(struct omap_dmadev *od)
1102 c->reg_map = od->reg_map; 1057 c->reg_map = od->reg_map;
1103 c->vc.desc_free = omap_dma_desc_free; 1058 c->vc.desc_free = omap_dma_desc_free;
1104 vchan_init(&c->vc, &od->ddev); 1059 vchan_init(&c->vc, &od->ddev);
1105 INIT_LIST_HEAD(&c->node);
1106 1060
1107 return 0; 1061 return 0;
1108} 1062}
1109 1063
1110static void omap_dma_free(struct omap_dmadev *od) 1064static void omap_dma_free(struct omap_dmadev *od)
1111{ 1065{
1112 tasklet_kill(&od->task);
1113 while (!list_empty(&od->ddev.channels)) { 1066 while (!list_empty(&od->ddev.channels)) {
1114 struct omap_chan *c = list_first_entry(&od->ddev.channels, 1067 struct omap_chan *c = list_first_entry(&od->ddev.channels,
1115 struct omap_chan, vc.chan.device_node); 1068 struct omap_chan, vc.chan.device_node);
@@ -1165,12 +1118,9 @@ static int omap_dma_probe(struct platform_device *pdev)
1165 od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; 1118 od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1166 od->ddev.dev = &pdev->dev; 1119 od->ddev.dev = &pdev->dev;
1167 INIT_LIST_HEAD(&od->ddev.channels); 1120 INIT_LIST_HEAD(&od->ddev.channels);
1168 INIT_LIST_HEAD(&od->pending);
1169 spin_lock_init(&od->lock); 1121 spin_lock_init(&od->lock);
1170 spin_lock_init(&od->irq_lock); 1122 spin_lock_init(&od->irq_lock);
1171 1123
1172 tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);
1173
1174 od->dma_requests = OMAP_SDMA_REQUESTS; 1124 od->dma_requests = OMAP_SDMA_REQUESTS;
1175 if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node, 1125 if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
1176 "dma-requests", 1126 "dma-requests",
@@ -1203,6 +1153,10 @@ static int omap_dma_probe(struct platform_device *pdev)
1203 return rc; 1153 return rc;
1204 } 1154 }
1205 1155
1156 od->ddev.filter.map = od->plat->slave_map;
1157 od->ddev.filter.mapcnt = od->plat->slavecnt;
1158 od->ddev.filter.fn = omap_dma_filter_fn;
1159
1206 rc = dma_async_device_register(&od->ddev); 1160 rc = dma_async_device_register(&od->ddev);
1207 if (rc) { 1161 if (rc) {
1208 pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n", 1162 pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index fc4156afa070..f2a0310ae771 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -1414,6 +1414,7 @@ static int pxad_probe(struct platform_device *op)
1414 pdev->slave.dst_addr_widths = widths; 1414 pdev->slave.dst_addr_widths = widths;
1415 pdev->slave.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); 1415 pdev->slave.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
1416 pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; 1416 pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
1417 pdev->slave.descriptor_reuse = true;
1417 1418
1418 pdev->slave.dev = &op->dev; 1419 pdev->slave.dev = &op->dev;
1419 ret = pxad_init_dmadev(op, pdev, dma_channels); 1420 ret = pxad_init_dmadev(op, pdev, dma_channels);
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 9fda65af841e..f32c430eb16c 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -47,12 +47,6 @@ config RCAR_DMAC
47 This driver supports the general purpose DMA controller found in the 47 This driver supports the general purpose DMA controller found in the
48 Renesas R-Car second generation SoCs. 48 Renesas R-Car second generation SoCs.
49 49
50config RCAR_HPB_DMAE
51 tristate "Renesas R-Car HPB DMAC support"
52 depends on SH_DMAE_BASE
53 help
54 Enable support for the Renesas R-Car series DMA controllers.
55
56config RENESAS_USB_DMAC 50config RENESAS_USB_DMAC
57 tristate "Renesas USB-DMA Controller" 51 tristate "Renesas USB-DMA Controller"
58 depends on ARCH_SHMOBILE || COMPILE_TEST 52 depends on ARCH_SHMOBILE || COMPILE_TEST
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index 0133e4658196..f1e2fd64f279 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -14,6 +14,5 @@ shdma-objs := $(shdma-y)
14obj-$(CONFIG_SH_DMAE) += shdma.o 14obj-$(CONFIG_SH_DMAE) += shdma.o
15 15
16obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o 16obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o
17obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
18obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o 17obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o
19obj-$(CONFIG_SUDMAC) += sudmac.o 18obj-$(CONFIG_SUDMAC) += sudmac.o
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c
deleted file mode 100644
index 749f26ecd3b3..000000000000
--- a/drivers/dma/sh/rcar-hpbdma.c
+++ /dev/null
@@ -1,669 +0,0 @@
1/*
2 * Copyright (C) 2011-2013 Renesas Electronics Corporation
3 * Copyright (C) 2013 Cogent Embedded, Inc.
4 *
5 * This file is based on the drivers/dma/sh/shdma.c
6 *
7 * Renesas SuperH DMA Engine support
8 *
9 * This is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * - DMA of SuperH does not have Hardware DMA chain mode.
15 * - max DMA size is 16MB.
16 *
17 */
18
19#include <linux/dmaengine.h>
20#include <linux/delay.h>
21#include <linux/err.h>
22#include <linux/init.h>
23#include <linux/interrupt.h>
24#include <linux/module.h>
25#include <linux/platform_data/dma-rcar-hpbdma.h>
26#include <linux/platform_device.h>
27#include <linux/pm_runtime.h>
28#include <linux/shdma-base.h>
29#include <linux/slab.h>
30
31/* DMA channel registers */
32#define HPB_DMAE_DSAR0 0x00
33#define HPB_DMAE_DDAR0 0x04
34#define HPB_DMAE_DTCR0 0x08
35#define HPB_DMAE_DSAR1 0x0C
36#define HPB_DMAE_DDAR1 0x10
37#define HPB_DMAE_DTCR1 0x14
38#define HPB_DMAE_DSASR 0x18
39#define HPB_DMAE_DDASR 0x1C
40#define HPB_DMAE_DTCSR 0x20
41#define HPB_DMAE_DPTR 0x24
42#define HPB_DMAE_DCR 0x28
43#define HPB_DMAE_DCMDR 0x2C
44#define HPB_DMAE_DSTPR 0x30
45#define HPB_DMAE_DSTSR 0x34
46#define HPB_DMAE_DDBGR 0x38
47#define HPB_DMAE_DDBGR2 0x3C
48#define HPB_DMAE_CHAN(n) (0x40 * (n))
49
50/* DMA command register (DCMDR) bits */
51#define HPB_DMAE_DCMDR_BDOUT BIT(7)
52#define HPB_DMAE_DCMDR_DQSPD BIT(6)
53#define HPB_DMAE_DCMDR_DQSPC BIT(5)
54#define HPB_DMAE_DCMDR_DMSPD BIT(4)
55#define HPB_DMAE_DCMDR_DMSPC BIT(3)
56#define HPB_DMAE_DCMDR_DQEND BIT(2)
57#define HPB_DMAE_DCMDR_DNXT BIT(1)
58#define HPB_DMAE_DCMDR_DMEN BIT(0)
59
60/* DMA forced stop register (DSTPR) bits */
61#define HPB_DMAE_DSTPR_DMSTP BIT(0)
62
63/* DMA status register (DSTSR) bits */
64#define HPB_DMAE_DSTSR_DQSTS BIT(2)
65#define HPB_DMAE_DSTSR_DMSTS BIT(0)
66
67/* DMA common registers */
68#define HPB_DMAE_DTIMR 0x00
69#define HPB_DMAE_DINTSR0 0x0C
70#define HPB_DMAE_DINTSR1 0x10
71#define HPB_DMAE_DINTCR0 0x14
72#define HPB_DMAE_DINTCR1 0x18
73#define HPB_DMAE_DINTMR0 0x1C
74#define HPB_DMAE_DINTMR1 0x20
75#define HPB_DMAE_DACTSR0 0x24
76#define HPB_DMAE_DACTSR1 0x28
77#define HPB_DMAE_HSRSTR(n) (0x40 + (n) * 4)
78#define HPB_DMAE_HPB_DMASPR(n) (0x140 + (n) * 4)
79#define HPB_DMAE_HPB_DMLVLR0 0x160
80#define HPB_DMAE_HPB_DMLVLR1 0x164
81#define HPB_DMAE_HPB_DMSHPT0 0x168
82#define HPB_DMAE_HPB_DMSHPT1 0x16C
83
84#define HPB_DMA_SLAVE_NUMBER 256
85#define HPB_DMA_TCR_MAX 0x01000000 /* 16 MiB */
86
87struct hpb_dmae_chan {
88 struct shdma_chan shdma_chan;
89 int xfer_mode; /* DMA transfer mode */
90#define XFER_SINGLE 1
91#define XFER_DOUBLE 2
92 unsigned plane_idx; /* current DMA information set */
93 bool first_desc; /* first/next transfer */
94 int xmit_shift; /* log_2(bytes_per_xfer) */
95 void __iomem *base;
96 const struct hpb_dmae_slave_config *cfg;
97 char dev_id[16]; /* unique name per DMAC of channel */
98 dma_addr_t slave_addr;
99};
100
101struct hpb_dmae_device {
102 struct shdma_dev shdma_dev;
103 spinlock_t reg_lock; /* comm_reg operation lock */
104 struct hpb_dmae_pdata *pdata;
105 void __iomem *chan_reg;
106 void __iomem *comm_reg;
107 void __iomem *reset_reg;
108 void __iomem *mode_reg;
109};
110
111struct hpb_dmae_regs {
112 u32 sar; /* SAR / source address */
113 u32 dar; /* DAR / destination address */
114 u32 tcr; /* TCR / transfer count */
115};
116
117struct hpb_desc {
118 struct shdma_desc shdma_desc;
119 struct hpb_dmae_regs hw;
120 unsigned plane_idx;
121};
122
123#define to_chan(schan) container_of(schan, struct hpb_dmae_chan, shdma_chan)
124#define to_desc(sdesc) container_of(sdesc, struct hpb_desc, shdma_desc)
125#define to_dev(sc) container_of(sc->shdma_chan.dma_chan.device, \
126 struct hpb_dmae_device, shdma_dev.dma_dev)
127
128static void ch_reg_write(struct hpb_dmae_chan *hpb_dc, u32 data, u32 reg)
129{
130 iowrite32(data, hpb_dc->base + reg);
131}
132
133static u32 ch_reg_read(struct hpb_dmae_chan *hpb_dc, u32 reg)
134{
135 return ioread32(hpb_dc->base + reg);
136}
137
138static void dcmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
139{
140 iowrite32(data, hpbdev->chan_reg + HPB_DMAE_DCMDR);
141}
142
143static void hsrstr_write(struct hpb_dmae_device *hpbdev, u32 ch)
144{
145 iowrite32(0x1, hpbdev->comm_reg + HPB_DMAE_HSRSTR(ch));
146}
147
148static u32 dintsr_read(struct hpb_dmae_device *hpbdev, u32 ch)
149{
150 u32 v;
151
152 if (ch < 32)
153 v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR0) >> ch;
154 else
155 v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR1) >> (ch - 32);
156 return v & 0x1;
157}
158
159static void dintcr_write(struct hpb_dmae_device *hpbdev, u32 ch)
160{
161 if (ch < 32)
162 iowrite32((0x1 << ch), hpbdev->comm_reg + HPB_DMAE_DINTCR0);
163 else
164 iowrite32((0x1 << (ch - 32)),
165 hpbdev->comm_reg + HPB_DMAE_DINTCR1);
166}
167
168static void asyncmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
169{
170 iowrite32(data, hpbdev->mode_reg);
171}
172
173static u32 asyncmdr_read(struct hpb_dmae_device *hpbdev)
174{
175 return ioread32(hpbdev->mode_reg);
176}
177
178static void hpb_dmae_enable_int(struct hpb_dmae_device *hpbdev, u32 ch)
179{
180 u32 intreg;
181
182 spin_lock_irq(&hpbdev->reg_lock);
183 if (ch < 32) {
184 intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR0);
185 iowrite32(BIT(ch) | intreg,
186 hpbdev->comm_reg + HPB_DMAE_DINTMR0);
187 } else {
188 intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR1);
189 iowrite32(BIT(ch - 32) | intreg,
190 hpbdev->comm_reg + HPB_DMAE_DINTMR1);
191 }
192 spin_unlock_irq(&hpbdev->reg_lock);
193}
194
195static void hpb_dmae_async_reset(struct hpb_dmae_device *hpbdev, u32 data)
196{
197 u32 rstr;
198 int timeout = 10000; /* 100 ms */
199
200 spin_lock(&hpbdev->reg_lock);
201 rstr = ioread32(hpbdev->reset_reg);
202 rstr |= data;
203 iowrite32(rstr, hpbdev->reset_reg);
204 do {
205 rstr = ioread32(hpbdev->reset_reg);
206 if ((rstr & data) == data)
207 break;
208 udelay(10);
209 } while (timeout--);
210
211 if (timeout < 0)
212 dev_err(hpbdev->shdma_dev.dma_dev.dev,
213 "%s timeout\n", __func__);
214
215 rstr &= ~data;
216 iowrite32(rstr, hpbdev->reset_reg);
217 spin_unlock(&hpbdev->reg_lock);
218}
219
220static void hpb_dmae_set_async_mode(struct hpb_dmae_device *hpbdev,
221 u32 mask, u32 data)
222{
223 u32 mode;
224
225 spin_lock_irq(&hpbdev->reg_lock);
226 mode = asyncmdr_read(hpbdev);
227 mode &= ~mask;
228 mode |= data;
229 asyncmdr_write(hpbdev, mode);
230 spin_unlock_irq(&hpbdev->reg_lock);
231}
232
233static void hpb_dmae_ctl_stop(struct hpb_dmae_device *hpbdev)
234{
235 dcmdr_write(hpbdev, HPB_DMAE_DCMDR_DQSPD);
236}
237
238static void hpb_dmae_reset(struct hpb_dmae_device *hpbdev)
239{
240 u32 ch;
241
242 for (ch = 0; ch < hpbdev->pdata->num_hw_channels; ch++)
243 hsrstr_write(hpbdev, ch);
244}
245
246static unsigned int calc_xmit_shift(struct hpb_dmae_chan *hpb_chan)
247{
248 struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
249 struct hpb_dmae_pdata *pdata = hpbdev->pdata;
250 int width = ch_reg_read(hpb_chan, HPB_DMAE_DCR);
251 int i;
252
253 switch (width & (HPB_DMAE_DCR_SPDS_MASK | HPB_DMAE_DCR_DPDS_MASK)) {
254 case HPB_DMAE_DCR_SPDS_8BIT | HPB_DMAE_DCR_DPDS_8BIT:
255 default:
256 i = XMIT_SZ_8BIT;
257 break;
258 case HPB_DMAE_DCR_SPDS_16BIT | HPB_DMAE_DCR_DPDS_16BIT:
259 i = XMIT_SZ_16BIT;
260 break;
261 case HPB_DMAE_DCR_SPDS_32BIT | HPB_DMAE_DCR_DPDS_32BIT:
262 i = XMIT_SZ_32BIT;
263 break;
264 }
265 return pdata->ts_shift[i];
266}
267
268static void hpb_dmae_set_reg(struct hpb_dmae_chan *hpb_chan,
269 struct hpb_dmae_regs *hw, unsigned plane)
270{
271 ch_reg_write(hpb_chan, hw->sar,
272 plane ? HPB_DMAE_DSAR1 : HPB_DMAE_DSAR0);
273 ch_reg_write(hpb_chan, hw->dar,
274 plane ? HPB_DMAE_DDAR1 : HPB_DMAE_DDAR0);
275 ch_reg_write(hpb_chan, hw->tcr >> hpb_chan->xmit_shift,
276 plane ? HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);
277}
278
279static void hpb_dmae_start(struct hpb_dmae_chan *hpb_chan, bool next)
280{
281 ch_reg_write(hpb_chan, (next ? HPB_DMAE_DCMDR_DNXT : 0) |
282 HPB_DMAE_DCMDR_DMEN, HPB_DMAE_DCMDR);
283}
284
285static void hpb_dmae_halt(struct shdma_chan *schan)
286{
287 struct hpb_dmae_chan *chan = to_chan(schan);
288
289 ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR);
290 ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR);
291
292 chan->plane_idx = 0;
293 chan->first_desc = true;
294}
295
296static const struct hpb_dmae_slave_config *
297hpb_dmae_find_slave(struct hpb_dmae_chan *hpb_chan, int slave_id)
298{
299 struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
300 struct hpb_dmae_pdata *pdata = hpbdev->pdata;
301 int i;
302
303 if (slave_id >= HPB_DMA_SLAVE_NUMBER)
304 return NULL;
305
306 for (i = 0; i < pdata->num_slaves; i++)
307 if (pdata->slaves[i].id == slave_id)
308 return pdata->slaves + i;
309
310 return NULL;
311}
312
313static void hpb_dmae_start_xfer(struct shdma_chan *schan,
314 struct shdma_desc *sdesc)
315{
316 struct hpb_dmae_chan *chan = to_chan(schan);
317 struct hpb_dmae_device *hpbdev = to_dev(chan);
318 struct hpb_desc *desc = to_desc(sdesc);
319
320 if (chan->cfg->flags & HPB_DMAE_SET_ASYNC_RESET)
321 hpb_dmae_async_reset(hpbdev, chan->cfg->rstr);
322
323 desc->plane_idx = chan->plane_idx;
324 hpb_dmae_set_reg(chan, &desc->hw, chan->plane_idx);
325 hpb_dmae_start(chan, !chan->first_desc);
326
327 if (chan->xfer_mode == XFER_DOUBLE) {
328 chan->plane_idx ^= 1;
329 chan->first_desc = false;
330 }
331}
332
333static bool hpb_dmae_desc_completed(struct shdma_chan *schan,
334 struct shdma_desc *sdesc)
335{
336 /*
337 * This is correct since we always have at most single
338 * outstanding DMA transfer per channel, and by the time
339 * we get completion interrupt the transfer is completed.
340 * This will change if we ever use alternating DMA
341 * information sets and submit two descriptors at once.
342 */
343 return true;
344}
345
346static bool hpb_dmae_chan_irq(struct shdma_chan *schan, int irq)
347{
348 struct hpb_dmae_chan *chan = to_chan(schan);
349 struct hpb_dmae_device *hpbdev = to_dev(chan);
350 int ch = chan->cfg->dma_ch;
351
352 /* Check Complete DMA Transfer */
353 if (dintsr_read(hpbdev, ch)) {
354 /* Clear Interrupt status */
355 dintcr_write(hpbdev, ch);
356 return true;
357 }
358 return false;
359}
360
361static int hpb_dmae_desc_setup(struct shdma_chan *schan,
362 struct shdma_desc *sdesc,
363 dma_addr_t src, dma_addr_t dst, size_t *len)
364{
365 struct hpb_desc *desc = to_desc(sdesc);
366
367 if (*len > (size_t)HPB_DMA_TCR_MAX)
368 *len = (size_t)HPB_DMA_TCR_MAX;
369
370 desc->hw.sar = src;
371 desc->hw.dar = dst;
372 desc->hw.tcr = *len;
373
374 return 0;
375}
376
377static size_t hpb_dmae_get_partial(struct shdma_chan *schan,
378 struct shdma_desc *sdesc)
379{
380 struct hpb_desc *desc = to_desc(sdesc);
381 struct hpb_dmae_chan *chan = to_chan(schan);
382 u32 tcr = ch_reg_read(chan, desc->plane_idx ?
383 HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);
384
385 return (desc->hw.tcr - tcr) << chan->xmit_shift;
386}
387
388static bool hpb_dmae_channel_busy(struct shdma_chan *schan)
389{
390 struct hpb_dmae_chan *chan = to_chan(schan);
391 u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR);
392
393 if (chan->xfer_mode == XFER_DOUBLE)
394 return dstsr & HPB_DMAE_DSTSR_DQSTS;
395 else
396 return dstsr & HPB_DMAE_DSTSR_DMSTS;
397}
398
399static int
400hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
401 const struct hpb_dmae_slave_config *cfg)
402{
403 struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
404 struct hpb_dmae_pdata *pdata = hpbdev->pdata;
405 const struct hpb_dmae_channel *channel = pdata->channels;
406 int slave_id = cfg->id;
407 int i, err;
408
409 for (i = 0; i < pdata->num_channels; i++, channel++) {
410 if (channel->s_id == slave_id) {
411 struct device *dev = hpb_chan->shdma_chan.dev;
412
413 hpb_chan->base = hpbdev->chan_reg +
414 HPB_DMAE_CHAN(cfg->dma_ch);
415
416 dev_dbg(dev, "Detected Slave device\n");
417 dev_dbg(dev, " -- slave_id : 0x%x\n", slave_id);
418 dev_dbg(dev, " -- cfg->dma_ch : %d\n", cfg->dma_ch);
419 dev_dbg(dev, " -- channel->ch_irq: %d\n",
420 channel->ch_irq);
421 break;
422 }
423 }
424
425 err = shdma_request_irq(&hpb_chan->shdma_chan, channel->ch_irq,
426 IRQF_SHARED, hpb_chan->dev_id);
427 if (err) {
428 dev_err(hpb_chan->shdma_chan.dev,
429 "DMA channel request_irq %d failed with error %d\n",
430 channel->ch_irq, err);
431 return err;
432 }
433
434 hpb_chan->plane_idx = 0;
435 hpb_chan->first_desc = true;
436
437 if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) == 0) {
438 hpb_chan->xfer_mode = XFER_SINGLE;
439 } else if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) ==
440 (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) {
441 hpb_chan->xfer_mode = XFER_DOUBLE;
442 } else {
443 dev_err(hpb_chan->shdma_chan.dev, "DCR setting error");
444 return -EINVAL;
445 }
446
447 if (cfg->flags & HPB_DMAE_SET_ASYNC_MODE)
448 hpb_dmae_set_async_mode(hpbdev, cfg->mdm, cfg->mdr);
449 ch_reg_write(hpb_chan, cfg->dcr, HPB_DMAE_DCR);
450 ch_reg_write(hpb_chan, cfg->port, HPB_DMAE_DPTR);
451 hpb_chan->xmit_shift = calc_xmit_shift(hpb_chan);
452 hpb_dmae_enable_int(hpbdev, cfg->dma_ch);
453
454 return 0;
455}
456
457static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id,
458 dma_addr_t slave_addr, bool try)
459{
460 struct hpb_dmae_chan *chan = to_chan(schan);
461 const struct hpb_dmae_slave_config *sc =
462 hpb_dmae_find_slave(chan, slave_id);
463
464 if (!sc)
465 return -ENODEV;
466 if (try)
467 return 0;
468 chan->cfg = sc;
469 chan->slave_addr = slave_addr ? : sc->addr;
470 return hpb_dmae_alloc_chan_resources(chan, sc);
471}
472
473static void hpb_dmae_setup_xfer(struct shdma_chan *schan, int slave_id)
474{
475}
476
477static dma_addr_t hpb_dmae_slave_addr(struct shdma_chan *schan)
478{
479 struct hpb_dmae_chan *chan = to_chan(schan);
480
481 return chan->slave_addr;
482}
483
484static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i)
485{
486 return &((struct hpb_desc *)buf)[i].shdma_desc;
487}
488
489static const struct shdma_ops hpb_dmae_ops = {
490 .desc_completed = hpb_dmae_desc_completed,
491 .halt_channel = hpb_dmae_halt,
492 .channel_busy = hpb_dmae_channel_busy,
493 .slave_addr = hpb_dmae_slave_addr,
494 .desc_setup = hpb_dmae_desc_setup,
495 .set_slave = hpb_dmae_set_slave,
496 .setup_xfer = hpb_dmae_setup_xfer,
497 .start_xfer = hpb_dmae_start_xfer,
498 .embedded_desc = hpb_dmae_embedded_desc,
499 .chan_irq = hpb_dmae_chan_irq,
500 .get_partial = hpb_dmae_get_partial,
501};
502
503static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id)
504{
505 struct shdma_dev *sdev = &hpbdev->shdma_dev;
506 struct platform_device *pdev =
507 to_platform_device(hpbdev->shdma_dev.dma_dev.dev);
508 struct hpb_dmae_chan *new_hpb_chan;
509 struct shdma_chan *schan;
510
511 /* Alloc channel */
512 new_hpb_chan = devm_kzalloc(&pdev->dev,
513 sizeof(struct hpb_dmae_chan), GFP_KERNEL);
514 if (!new_hpb_chan) {
515 dev_err(hpbdev->shdma_dev.dma_dev.dev,
516 "No free memory for allocating DMA channels!\n");
517 return -ENOMEM;
518 }
519
520 schan = &new_hpb_chan->shdma_chan;
521 schan->max_xfer_len = HPB_DMA_TCR_MAX;
522
523 shdma_chan_probe(sdev, schan, id);
524
525 if (pdev->id >= 0)
526 snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
527 "hpb-dmae%d.%d", pdev->id, id);
528 else
529 snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
530 "hpb-dma.%d", id);
531
532 return 0;
533}
534
535static int hpb_dmae_probe(struct platform_device *pdev)
536{
537 const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
538 DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES;
539 struct hpb_dmae_pdata *pdata = pdev->dev.platform_data;
540 struct hpb_dmae_device *hpbdev;
541 struct dma_device *dma_dev;
542 struct resource *chan, *comm, *rest, *mode, *irq_res;
543 int err, i;
544
545 /* Get platform data */
546 if (!pdata || !pdata->num_channels)
547 return -ENODEV;
548
549 chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
550 comm = platform_get_resource(pdev, IORESOURCE_MEM, 1);
551 rest = platform_get_resource(pdev, IORESOURCE_MEM, 2);
552 mode = platform_get_resource(pdev, IORESOURCE_MEM, 3);
553
554 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
555 if (!irq_res)
556 return -ENODEV;
557
558 hpbdev = devm_kzalloc(&pdev->dev, sizeof(struct hpb_dmae_device),
559 GFP_KERNEL);
560 if (!hpbdev) {
561 dev_err(&pdev->dev, "Not enough memory\n");
562 return -ENOMEM;
563 }
564
565 hpbdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
566 if (IS_ERR(hpbdev->chan_reg))
567 return PTR_ERR(hpbdev->chan_reg);
568
569 hpbdev->comm_reg = devm_ioremap_resource(&pdev->dev, comm);
570 if (IS_ERR(hpbdev->comm_reg))
571 return PTR_ERR(hpbdev->comm_reg);
572
573 hpbdev->reset_reg = devm_ioremap_resource(&pdev->dev, rest);
574 if (IS_ERR(hpbdev->reset_reg))
575 return PTR_ERR(hpbdev->reset_reg);
576
577 hpbdev->mode_reg = devm_ioremap_resource(&pdev->dev, mode);
578 if (IS_ERR(hpbdev->mode_reg))
579 return PTR_ERR(hpbdev->mode_reg);
580
581 dma_dev = &hpbdev->shdma_dev.dma_dev;
582
583 spin_lock_init(&hpbdev->reg_lock);
584
585 /* Platform data */
586 hpbdev->pdata = pdata;
587
588 pm_runtime_enable(&pdev->dev);
589 err = pm_runtime_get_sync(&pdev->dev);
590 if (err < 0)
591 dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);
592
593 /* Reset DMA controller */
594 hpb_dmae_reset(hpbdev);
595
596 pm_runtime_put(&pdev->dev);
597
598 dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
599 dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
600 dma_dev->src_addr_widths = widths;
601 dma_dev->dst_addr_widths = widths;
602 dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
603 dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
604
605 hpbdev->shdma_dev.ops = &hpb_dmae_ops;
606 hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc);
607 err = shdma_init(&pdev->dev, &hpbdev->shdma_dev, pdata->num_channels);
608 if (err < 0)
609 goto error;
610
611 /* Create DMA channels */
612 for (i = 0; i < pdata->num_channels; i++)
613 hpb_dmae_chan_probe(hpbdev, i);
614
615 platform_set_drvdata(pdev, hpbdev);
616 err = dma_async_device_register(dma_dev);
617 if (!err)
618 return 0;
619
620 shdma_cleanup(&hpbdev->shdma_dev);
621error:
622 pm_runtime_disable(&pdev->dev);
623 return err;
624}
625
626static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
627{
628 struct shdma_chan *schan;
629 int i;
630
631 shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) {
632 BUG_ON(!schan);
633
634 shdma_chan_remove(schan);
635 }
636}
637
638static int hpb_dmae_remove(struct platform_device *pdev)
639{
640 struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);
641
642 dma_async_device_unregister(&hpbdev->shdma_dev.dma_dev);
643
644 pm_runtime_disable(&pdev->dev);
645
646 hpb_dmae_chan_remove(hpbdev);
647
648 return 0;
649}
650
651static void hpb_dmae_shutdown(struct platform_device *pdev)
652{
653 struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);
654 hpb_dmae_ctl_stop(hpbdev);
655}
656
657static struct platform_driver hpb_dmae_driver = {
658 .probe = hpb_dmae_probe,
659 .remove = hpb_dmae_remove,
660 .shutdown = hpb_dmae_shutdown,
661 .driver = {
662 .name = "hpb-dma-engine",
663 },
664};
665module_platform_driver(hpb_dmae_driver);
666
667MODULE_AUTHOR("Max Filippov <max.filippov@cogentembedded.com>");
668MODULE_DESCRIPTION("Renesas HPB DMA Engine driver");
669MODULE_LICENSE("GPL");
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index ebd8a5f398b0..16fb33006a17 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -448,7 +448,7 @@ usb_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
448static int usb_dmac_chan_terminate_all(struct dma_chan *chan) 448static int usb_dmac_chan_terminate_all(struct dma_chan *chan)
449{ 449{
450 struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan); 450 struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
451 struct usb_dmac_desc *desc; 451 struct usb_dmac_desc *desc, *_desc;
452 unsigned long flags; 452 unsigned long flags;
453 LIST_HEAD(head); 453 LIST_HEAD(head);
454 LIST_HEAD(list); 454 LIST_HEAD(list);
@@ -459,7 +459,7 @@ static int usb_dmac_chan_terminate_all(struct dma_chan *chan)
459 if (uchan->desc) 459 if (uchan->desc)
460 uchan->desc = NULL; 460 uchan->desc = NULL;
461 list_splice_init(&uchan->desc_got, &list); 461 list_splice_init(&uchan->desc_got, &list);
462 list_for_each_entry(desc, &list, node) 462 list_for_each_entry_safe(desc, _desc, &list, node)
463 list_move_tail(&desc->node, &uchan->desc_freed); 463 list_move_tail(&desc->node, &uchan->desc_freed);
464 spin_unlock_irqrestore(&uchan->vc.lock, flags); 464 spin_unlock_irqrestore(&uchan->vc.lock, flags);
465 vchan_dma_desc_free_list(&uchan->vc, &head); 465 vchan_dma_desc_free_list(&uchan->vc, &head);
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index 6f80432a3f0a..a35c211857dd 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
29 spin_lock_irqsave(&vc->lock, flags); 29 spin_lock_irqsave(&vc->lock, flags);
30 cookie = dma_cookie_assign(tx); 30 cookie = dma_cookie_assign(tx);
31 31
32 list_add_tail(&vd->node, &vc->desc_submitted); 32 list_move_tail(&vd->node, &vc->desc_submitted);
33 spin_unlock_irqrestore(&vc->lock, flags); 33 spin_unlock_irqrestore(&vc->lock, flags);
34 34
35 dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", 35 dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
@@ -39,6 +39,33 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
39} 39}
40EXPORT_SYMBOL_GPL(vchan_tx_submit); 40EXPORT_SYMBOL_GPL(vchan_tx_submit);
41 41
42/**
43 * vchan_tx_desc_free - free a reusable descriptor
44 * @tx: the transfer
45 *
46 * This function frees a previously allocated reusable descriptor. The only
 47 * other way is to clear the DMA_CTRL_REUSE flag and submit the transfer one
 48 * last time.
49 *
50 * Returns 0 upon success
51 */
52int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx)
53{
54 struct virt_dma_chan *vc = to_virt_chan(tx->chan);
55 struct virt_dma_desc *vd = to_virt_desc(tx);
56 unsigned long flags;
57
58 spin_lock_irqsave(&vc->lock, flags);
59 list_del(&vd->node);
60 spin_unlock_irqrestore(&vc->lock, flags);
61
62 dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n",
63 vc, vd, vd->tx.cookie);
64 vc->desc_free(vd);
65 return 0;
66}
67EXPORT_SYMBOL_GPL(vchan_tx_desc_free);
68
42struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc, 69struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
43 dma_cookie_t cookie) 70 dma_cookie_t cookie)
44{ 71{
@@ -83,8 +110,10 @@ static void vchan_complete(unsigned long arg)
83 cb_data = vd->tx.callback_param; 110 cb_data = vd->tx.callback_param;
84 111
85 list_del(&vd->node); 112 list_del(&vd->node);
86 113 if (dmaengine_desc_test_reuse(&vd->tx))
87 vc->desc_free(vd); 114 list_add(&vd->node, &vc->desc_allocated);
115 else
116 vc->desc_free(vd);
88 117
89 if (cb) 118 if (cb)
90 cb(cb_data); 119 cb(cb_data);
@@ -96,9 +125,13 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
96 while (!list_empty(head)) { 125 while (!list_empty(head)) {
97 struct virt_dma_desc *vd = list_first_entry(head, 126 struct virt_dma_desc *vd = list_first_entry(head,
98 struct virt_dma_desc, node); 127 struct virt_dma_desc, node);
99 list_del(&vd->node); 128 if (dmaengine_desc_test_reuse(&vd->tx)) {
100 dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); 129 list_move_tail(&vd->node, &vc->desc_allocated);
101 vc->desc_free(vd); 130 } else {
131 dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
132 list_del(&vd->node);
133 vc->desc_free(vd);
134 }
102 } 135 }
103} 136}
104EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); 137EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
@@ -108,6 +141,7 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
108 dma_cookie_init(&vc->chan); 141 dma_cookie_init(&vc->chan);
109 142
110 spin_lock_init(&vc->lock); 143 spin_lock_init(&vc->lock);
144 INIT_LIST_HEAD(&vc->desc_allocated);
111 INIT_LIST_HEAD(&vc->desc_submitted); 145 INIT_LIST_HEAD(&vc->desc_submitted);
112 INIT_LIST_HEAD(&vc->desc_issued); 146 INIT_LIST_HEAD(&vc->desc_issued);
113 INIT_LIST_HEAD(&vc->desc_completed); 147 INIT_LIST_HEAD(&vc->desc_completed);
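
The vchan_complete() and vchan_dma_desc_free_list() changes above park reusable descriptors on the new desc_allocated list instead of freeing them. A minimal client-side sketch of opting into that path follows; it is an illustration only, and the dev/chan/sgl/sg_len parameters and the foo_* name are assumptions, not code from this series.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int foo_queue_reusable_tx(struct device *dev, struct dma_chan *chan,
                                 struct scatterlist *sgl, unsigned int sg_len)
{
        struct dma_async_tx_descriptor *txd;
        dma_cookie_t cookie;

        txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
                                      DMA_PREP_INTERRUPT);
        if (!txd)
                return -ENOMEM;

        /* Ask the channel to keep the descriptor after completion; this only
         * succeeds when the provider supports descriptor reuse. */
        if (dmaengine_desc_set_reuse(txd))
                dev_dbg(dev, "descriptor reuse not supported, one-shot only\n");

        cookie = dmaengine_submit(txd);
        dma_async_issue_pending(chan);

        return dma_submit_error(cookie) ? -EIO : 0;
}

A reused descriptor is eventually released through the vd->tx.desc_free hook that vchan_tx_prep() now installs, or by clearing the reuse flag and submitting it one last time, as the vchan_tx_desc_free() comment above describes.
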
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 2fa47745a41f..d9731ca5e262 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -29,6 +29,7 @@ struct virt_dma_chan {
29 spinlock_t lock; 29 spinlock_t lock;
30 30
31 /* protected by vc.lock */ 31 /* protected by vc.lock */
32 struct list_head desc_allocated;
32 struct list_head desc_submitted; 33 struct list_head desc_submitted;
33 struct list_head desc_issued; 34 struct list_head desc_issued;
34 struct list_head desc_completed; 35 struct list_head desc_completed;
@@ -55,10 +56,17 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan
55 struct virt_dma_desc *vd, unsigned long tx_flags) 56 struct virt_dma_desc *vd, unsigned long tx_flags)
56{ 57{
57 extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *); 58 extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
59 extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);
60 unsigned long flags;
58 61
59 dma_async_tx_descriptor_init(&vd->tx, &vc->chan); 62 dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
60 vd->tx.flags = tx_flags; 63 vd->tx.flags = tx_flags;
61 vd->tx.tx_submit = vchan_tx_submit; 64 vd->tx.tx_submit = vchan_tx_submit;
65 vd->tx.desc_free = vchan_tx_desc_free;
66
67 spin_lock_irqsave(&vc->lock, flags);
68 list_add_tail(&vd->node, &vc->desc_allocated);
69 spin_unlock_irqrestore(&vc->lock, flags);
62 70
63 return &vd->tx; 71 return &vd->tx;
64} 72}
@@ -134,6 +142,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
134static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, 142static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
135 struct list_head *head) 143 struct list_head *head)
136{ 144{
145 list_splice_tail_init(&vc->desc_allocated, head);
137 list_splice_tail_init(&vc->desc_submitted, head); 146 list_splice_tail_init(&vc->desc_submitted, head);
138 list_splice_tail_init(&vc->desc_issued, head); 147 list_splice_tail_init(&vc->desc_issued, head);
139 list_splice_tail_init(&vc->desc_completed, head); 148 list_splice_tail_init(&vc->desc_completed, head);
@@ -141,14 +150,30 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
141 150
142static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) 151static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
143{ 152{
153 struct virt_dma_desc *vd;
144 unsigned long flags; 154 unsigned long flags;
145 LIST_HEAD(head); 155 LIST_HEAD(head);
146 156
147 spin_lock_irqsave(&vc->lock, flags); 157 spin_lock_irqsave(&vc->lock, flags);
148 vchan_get_all_descriptors(vc, &head); 158 vchan_get_all_descriptors(vc, &head);
159 list_for_each_entry(vd, &head, node)
160 dmaengine_desc_clear_reuse(&vd->tx);
149 spin_unlock_irqrestore(&vc->lock, flags); 161 spin_unlock_irqrestore(&vc->lock, flags);
150 162
151 vchan_dma_desc_free_list(vc, &head); 163 vchan_dma_desc_free_list(vc, &head);
152} 164}
153 165
166/**
167 * vchan_synchronize() - synchronize callback execution to the current context
168 * @vc: virtual channel to synchronize
169 *
170 * Makes sure that all scheduled or active callbacks have finished running. For
171 * proper operation the caller has to ensure that no new callbacks are scheduled
 172 * once this function has started.
173 */
174static inline void vchan_synchronize(struct virt_dma_chan *vc)
175{
176 tasklet_kill(&vc->task);
177}
178
154#endif 179#endif
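
vchan_synchronize() gives virt-dma based drivers a ready-made body for the new device_synchronize callback. A hedged sketch of the wiring follows; the foo_* names are placeholders and not part of this patch set.

static void foo_dma_synchronize(struct dma_chan *chan)
{
        struct virt_dma_chan *vc = to_virt_chan(chan);

        /* tasklet_kill() underneath: waits for a running vchan tasklet and
         * removes a scheduled one, so no complete callback can still run. */
        vchan_synchronize(vc);
}

The driver's probe routine would then point its dma_device's device_synchronize hook at this function, next to device_terminate_all.
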
diff --git a/include/linux/dca.h b/include/linux/dca.h
index d27a7a05718d..ad956c2e07a8 100644
--- a/include/linux/dca.h
+++ b/include/linux/dca.h
@@ -34,7 +34,7 @@ void dca_unregister_notify(struct notifier_block *nb);
34 34
35struct dca_provider { 35struct dca_provider {
36 struct list_head node; 36 struct list_head node;
37 struct dca_ops *ops; 37 const struct dca_ops *ops;
38 struct device *cd; 38 struct device *cd;
39 int id; 39 int id;
40}; 40};
@@ -53,7 +53,8 @@ struct dca_ops {
53 int (*dev_managed) (struct dca_provider *, struct device *); 53 int (*dev_managed) (struct dca_provider *, struct device *);
54}; 54};
55 55
56struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size); 56struct dca_provider *alloc_dca_provider(const struct dca_ops *ops,
57 int priv_size);
57void free_dca_provider(struct dca_provider *dca); 58void free_dca_provider(struct dca_provider *dca);
58int register_dca_provider(struct dca_provider *dca, struct device *dev); 59int register_dca_provider(struct dca_provider *dca, struct device *dev);
59void unregister_dca_provider(struct dca_provider *dca, struct device *dev); 60void unregister_dca_provider(struct dca_provider *dca, struct device *dev);
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index c47c68e535e8..16a1cad30c33 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -607,11 +607,38 @@ enum dmaengine_alignment {
607}; 607};
608 608
609/** 609/**
 610 * struct dma_slave_map - associates a slave device and its slave channel with
 611 * the parameter to be used by a filter function
612 * @devname: name of the device
613 * @slave: slave channel name
614 * @param: opaque parameter to pass to struct dma_filter.fn
615 */
616struct dma_slave_map {
617 const char *devname;
618 const char *slave;
619 void *param;
620};
621
622/**
623 * struct dma_filter - information for slave device/channel to filter_fn/param
624 * mapping
625 * @fn: filter function callback
 626 * @mapcnt: number of slave device/channel pairs in the map
627 * @map: array of channel to filter mapping data
628 */
629struct dma_filter {
630 dma_filter_fn fn;
631 int mapcnt;
632 const struct dma_slave_map *map;
633};
634
635/**
610 * struct dma_device - info on the entity supplying DMA services 636 * struct dma_device - info on the entity supplying DMA services
611 * @chancnt: how many DMA channels are supported 637 * @chancnt: how many DMA channels are supported
612 * @privatecnt: how many DMA channels are requested by dma_request_channel 638 * @privatecnt: how many DMA channels are requested by dma_request_channel
613 * @channels: the list of struct dma_chan 639 * @channels: the list of struct dma_chan
614 * @global_node: list_head for global dma_device_list 640 * @global_node: list_head for global dma_device_list
641 * @filter: information for device/slave to filter function/param mapping
615 * @cap_mask: one or more dma_capability flags 642 * @cap_mask: one or more dma_capability flags
616 * @max_xor: maximum number of xor sources, 0 if no capability 643 * @max_xor: maximum number of xor sources, 0 if no capability
617 * @max_pq: maximum number of PQ sources and PQ-continue capability 644 * @max_pq: maximum number of PQ sources and PQ-continue capability
@@ -654,11 +681,14 @@ enum dmaengine_alignment {
654 * paused. Returns 0 or an error code 681 * paused. Returns 0 or an error code
655 * @device_terminate_all: Aborts all transfers on a channel. Returns 0 682 * @device_terminate_all: Aborts all transfers on a channel. Returns 0
656 * or an error code 683 * or an error code
 684 * @device_synchronize: Synchronizes the termination of the transfers to the
685 * current context.
657 * @device_tx_status: poll for transaction completion, the optional 686 * @device_tx_status: poll for transaction completion, the optional
658 * txstate parameter can be supplied with a pointer to get a 687 * txstate parameter can be supplied with a pointer to get a
659 * struct with auxiliary transfer status information, otherwise the call 688 * struct with auxiliary transfer status information, otherwise the call
660 * will just return a simple status code 689 * will just return a simple status code
661 * @device_issue_pending: push pending transactions to hardware 690 * @device_issue_pending: push pending transactions to hardware
691 * @descriptor_reuse: a submitted transfer can be resubmitted after completion
662 */ 692 */
663struct dma_device { 693struct dma_device {
664 694
@@ -666,6 +696,7 @@ struct dma_device {
666 unsigned int privatecnt; 696 unsigned int privatecnt;
667 struct list_head channels; 697 struct list_head channels;
668 struct list_head global_node; 698 struct list_head global_node;
699 struct dma_filter filter;
669 dma_cap_mask_t cap_mask; 700 dma_cap_mask_t cap_mask;
670 unsigned short max_xor; 701 unsigned short max_xor;
671 unsigned short max_pq; 702 unsigned short max_pq;
@@ -681,6 +712,7 @@ struct dma_device {
681 u32 src_addr_widths; 712 u32 src_addr_widths;
682 u32 dst_addr_widths; 713 u32 dst_addr_widths;
683 u32 directions; 714 u32 directions;
715 bool descriptor_reuse;
684 enum dma_residue_granularity residue_granularity; 716 enum dma_residue_granularity residue_granularity;
685 717
686 int (*device_alloc_chan_resources)(struct dma_chan *chan); 718 int (*device_alloc_chan_resources)(struct dma_chan *chan);
@@ -737,6 +769,7 @@ struct dma_device {
737 int (*device_pause)(struct dma_chan *chan); 769 int (*device_pause)(struct dma_chan *chan);
738 int (*device_resume)(struct dma_chan *chan); 770 int (*device_resume)(struct dma_chan *chan);
739 int (*device_terminate_all)(struct dma_chan *chan); 771 int (*device_terminate_all)(struct dma_chan *chan);
772 void (*device_synchronize)(struct dma_chan *chan);
740 773
741 enum dma_status (*device_tx_status)(struct dma_chan *chan, 774 enum dma_status (*device_tx_status)(struct dma_chan *chan,
742 dma_cookie_t cookie, 775 dma_cookie_t cookie,
@@ -828,6 +861,13 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
828 src_sg, src_nents, flags); 861 src_sg, src_nents, flags);
829} 862}
830 863
864/**
865 * dmaengine_terminate_all() - Terminate all active DMA transfers
866 * @chan: The channel for which to terminate the transfers
867 *
 868 * This function is DEPRECATED; use either dmaengine_terminate_sync() or
869 * dmaengine_terminate_async() instead.
870 */
831static inline int dmaengine_terminate_all(struct dma_chan *chan) 871static inline int dmaengine_terminate_all(struct dma_chan *chan)
832{ 872{
833 if (chan->device->device_terminate_all) 873 if (chan->device->device_terminate_all)
@@ -836,6 +876,88 @@ static inline int dmaengine_terminate_all(struct dma_chan *chan)
836 return -ENOSYS; 876 return -ENOSYS;
837} 877}
838 878
879/**
880 * dmaengine_terminate_async() - Terminate all active DMA transfers
881 * @chan: The channel for which to terminate the transfers
882 *
883 * Calling this function will terminate all active and pending descriptors
884 * that have previously been submitted to the channel. It is not guaranteed
885 * though that the transfer for the active descriptor has stopped when the
 886 * function returns. Furthermore it is possible that the complete callback of a
887 * submitted transfer is still running when this function returns.
888 *
889 * dmaengine_synchronize() needs to be called before it is safe to free
890 * any memory that is accessed by previously submitted descriptors or before
891 * freeing any resources accessed from within the completion callback of any
 892 * previously submitted descriptors.
893 *
894 * This function can be called from atomic context as well as from within a
895 * complete callback of a descriptor submitted on the same channel.
896 *
 897 * If neither of the two conditions above applies, consider using
898 * dmaengine_terminate_sync() instead.
899 */
900static inline int dmaengine_terminate_async(struct dma_chan *chan)
901{
902 if (chan->device->device_terminate_all)
903 return chan->device->device_terminate_all(chan);
904
905 return -EINVAL;
906}
907
908/**
909 * dmaengine_synchronize() - Synchronize DMA channel termination
910 * @chan: The channel to synchronize
911 *
 912 * Synchronizes the DMA channel termination to the current context. When this
913 * function returns it is guaranteed that all transfers for previously issued
 914 * descriptors have stopped and it is safe to free the memory associated
915 * with them. Furthermore it is guaranteed that all complete callback functions
916 * for a previously submitted descriptor have finished running and it is safe to
917 * free resources accessed from within the complete callbacks.
918 *
919 * The behavior of this function is undefined if dma_async_issue_pending() has
920 * been called between dmaengine_terminate_async() and this function.
921 *
922 * This function must only be called from non-atomic context and must not be
923 * called from within a complete callback of a descriptor submitted on the same
924 * channel.
925 */
926static inline void dmaengine_synchronize(struct dma_chan *chan)
927{
928 might_sleep();
929
930 if (chan->device->device_synchronize)
931 chan->device->device_synchronize(chan);
932}
933
934/**
935 * dmaengine_terminate_sync() - Terminate all active DMA transfers
936 * @chan: The channel for which to terminate the transfers
937 *
938 * Calling this function will terminate all active and pending transfers
939 * that have previously been submitted to the channel. It is similar to
940 * dmaengine_terminate_async() but guarantees that the DMA transfer has actually
941 * stopped and that all complete callbacks have finished running when the
942 * function returns.
943 *
944 * This function must only be called from non-atomic context and must not be
945 * called from within a complete callback of a descriptor submitted on the same
946 * channel.
947 */
948static inline int dmaengine_terminate_sync(struct dma_chan *chan)
949{
950 int ret;
951
952 ret = dmaengine_terminate_async(chan);
953 if (ret)
954 return ret;
955
956 dmaengine_synchronize(chan);
957
958 return 0;
959}
960
839static inline int dmaengine_pause(struct dma_chan *chan) 961static inline int dmaengine_pause(struct dma_chan *chan)
840{ 962{
841 if (chan->device->device_pause) 963 if (chan->device->device_pause)
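
In client terms the kernel-doc above reduces to: terminate from whatever context you are in, then synchronize from sleepable context before the DMA memory or callback resources go away. A hedged sketch of that ordering; chan, dev, buf, buf_dma and buf_len stand in for whatever the client driver really uses.

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/slab.h>

static void foo_stop_from_irq(struct dma_chan *chan)
{
        /* Allowed in atomic context; the transfer may still be draining and
         * a complete callback may still be running when this returns. */
        dmaengine_terminate_async(chan);
}

static void foo_teardown(struct device *dev, struct dma_chan *chan,
                         void *buf, dma_addr_t buf_dma, size_t buf_len)
{
        /* Must not run inside a complete callback of this channel. */
        dmaengine_synchronize(chan);

        /* Now no descriptor touches the buffer and no callback is running. */
        dma_unmap_single(dev, buf_dma, buf_len, DMA_FROM_DEVICE);
        kfree(buf);
}

dmaengine_terminate_sync() is simply the two steps folded together for callers that are already allowed to sleep.
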
@@ -1140,9 +1262,11 @@ enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
1140void dma_issue_pending_all(void); 1262void dma_issue_pending_all(void);
1141struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, 1263struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
1142 dma_filter_fn fn, void *fn_param); 1264 dma_filter_fn fn, void *fn_param);
1143struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
1144 const char *name);
1145struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name); 1265struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
1266
1267struct dma_chan *dma_request_chan(struct device *dev, const char *name);
1268struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);
1269
1146void dma_release_channel(struct dma_chan *chan); 1270void dma_release_channel(struct dma_chan *chan);
1147int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps); 1271int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
1148#else 1272#else
@@ -1166,16 +1290,21 @@ static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
1166{ 1290{
1167 return NULL; 1291 return NULL;
1168} 1292}
1169static inline struct dma_chan *dma_request_slave_channel_reason(
1170 struct device *dev, const char *name)
1171{
1172 return ERR_PTR(-ENODEV);
1173}
1174static inline struct dma_chan *dma_request_slave_channel(struct device *dev, 1293static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
1175 const char *name) 1294 const char *name)
1176{ 1295{
1177 return NULL; 1296 return NULL;
1178} 1297}
1298static inline struct dma_chan *dma_request_chan(struct device *dev,
1299 const char *name)
1300{
1301 return ERR_PTR(-ENODEV);
1302}
1303static inline struct dma_chan *dma_request_chan_by_mask(
1304 const dma_cap_mask_t *mask)
1305{
1306 return ERR_PTR(-ENODEV);
1307}
1179static inline void dma_release_channel(struct dma_chan *chan) 1308static inline void dma_release_channel(struct dma_chan *chan)
1180{ 1309{
1181} 1310}
@@ -1186,6 +1315,8 @@ static inline int dma_get_slave_caps(struct dma_chan *chan,
1186} 1315}
1187#endif 1316#endif
1188 1317
1318#define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name)
1319
1189static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx) 1320static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
1190{ 1321{
1191 struct dma_slave_caps caps; 1322 struct dma_slave_caps caps;
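
dma_request_chan() keeps the ERR_PTR semantics of the old _reason variant, which is what lets a caller tell probe deferral apart from a genuinely missing channel. A hedged sketch of a typical probe-time user; the "rx" channel name and the PIO fallback are assumptions made for illustration.

#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static struct dma_chan *foo_get_rx_chan(struct platform_device *pdev)
{
        struct dma_chan *chan;

        chan = dma_request_chan(&pdev->dev, "rx");
        if (IS_ERR(chan)) {
                if (PTR_ERR(chan) == -EPROBE_DEFER)
                        return chan;    /* propagate the deferral */

                dev_info(&pdev->dev, "no DMA channel, falling back to PIO\n");
                return NULL;            /* driver carries on without DMA */
        }

        return chan;
}

A channel obtained this way is still released with dma_release_channel(), exactly as before.
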
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h
index 88fa8af2b937..1d99b61adc65 100644
--- a/include/linux/omap-dma.h
+++ b/include/linux/omap-dma.h
@@ -267,6 +267,9 @@ struct omap_dma_reg {
267 u8 type; 267 u8 type;
268}; 268};
269 269
270#define SDMA_FILTER_PARAM(hw_req) ((int[]) { (hw_req) })
271struct dma_slave_map;
272
270/* System DMA platform data structure */ 273/* System DMA platform data structure */
271struct omap_system_dma_plat_info { 274struct omap_system_dma_plat_info {
272 const struct omap_dma_reg *reg_map; 275 const struct omap_dma_reg *reg_map;
@@ -278,6 +281,9 @@ struct omap_system_dma_plat_info {
278 void (*clear_dma)(int lch); 281 void (*clear_dma)(int lch);
279 void (*dma_write)(u32 val, int reg, int lch); 282 void (*dma_write)(u32 val, int reg, int lch);
280 u32 (*dma_read)(int reg, int lch); 283 u32 (*dma_read)(int reg, int lch);
284
285 const struct dma_slave_map *slave_map;
286 int slavecnt;
281}; 287};
282 288
283#ifdef CONFIG_ARCH_OMAP2PLUS 289#ifdef CONFIG_ARCH_OMAP2PLUS
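
SDMA_FILTER_PARAM() packs a hardware request line into the anonymous int array that omap_dma_filter_fn() receives as its parameter, so platform code can describe its request routing as a plain table. A hedged sketch of such a table; the device names and request numbers are purely illustrative.

static const struct dma_slave_map foo_sdma_map[] = {
        { "omap_uart.0", "tx", SDMA_FILTER_PARAM(49) },
        { "omap_uart.0", "rx", SDMA_FILTER_PARAM(50) },
};

static struct omap_system_dma_plat_info foo_dma_plat_info = {
        /* existing fields such as reg_map omitted */
        .slave_map = foo_sdma_map,
        .slavecnt = ARRAY_SIZE(foo_sdma_map),
};

omap_dma_probe() then copies slave_map/slavecnt into od->ddev.filter, as the omap-dma.c hunk above shows.
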
diff --git a/include/linux/platform_data/dma-rcar-hpbdma.h b/include/linux/platform_data/dma-rcar-hpbdma.h
deleted file mode 100644
index 648b8ea61a22..000000000000
--- a/include/linux/platform_data/dma-rcar-hpbdma.h
+++ /dev/null
@@ -1,103 +0,0 @@
1/*
2 * Copyright (C) 2011-2013 Renesas Electronics Corporation
3 * Copyright (C) 2013 Cogent Embedded, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation.
8 */
9
10#ifndef __DMA_RCAR_HPBDMA_H
11#define __DMA_RCAR_HPBDMA_H
12
13#include <linux/bitops.h>
14#include <linux/types.h>
15
16/* Transmit sizes and respective register values */
17enum {
18 XMIT_SZ_8BIT = 0,
19 XMIT_SZ_16BIT = 1,
20 XMIT_SZ_32BIT = 2,
21 XMIT_SZ_MAX
22};
23
24/* DMA control register (DCR) bits */
25#define HPB_DMAE_DCR_DTAMD (1u << 26)
26#define HPB_DMAE_DCR_DTAC (1u << 25)
27#define HPB_DMAE_DCR_DTAU (1u << 24)
28#define HPB_DMAE_DCR_DTAU1 (1u << 23)
29#define HPB_DMAE_DCR_SWMD (1u << 22)
30#define HPB_DMAE_DCR_BTMD (1u << 21)
31#define HPB_DMAE_DCR_PKMD (1u << 20)
32#define HPB_DMAE_DCR_CT (1u << 18)
33#define HPB_DMAE_DCR_ACMD (1u << 17)
34#define HPB_DMAE_DCR_DIP (1u << 16)
35#define HPB_DMAE_DCR_SMDL (1u << 13)
36#define HPB_DMAE_DCR_SPDAM (1u << 12)
37#define HPB_DMAE_DCR_SDRMD_MASK (3u << 10)
38#define HPB_DMAE_DCR_SDRMD_MOD (0u << 10)
39#define HPB_DMAE_DCR_SDRMD_AUTO (1u << 10)
40#define HPB_DMAE_DCR_SDRMD_TIMER (2u << 10)
41#define HPB_DMAE_DCR_SPDS_MASK (3u << 8)
42#define HPB_DMAE_DCR_SPDS_8BIT (0u << 8)
43#define HPB_DMAE_DCR_SPDS_16BIT (1u << 8)
44#define HPB_DMAE_DCR_SPDS_32BIT (2u << 8)
45#define HPB_DMAE_DCR_DMDL (1u << 5)
46#define HPB_DMAE_DCR_DPDAM (1u << 4)
47#define HPB_DMAE_DCR_DDRMD_MASK (3u << 2)
48#define HPB_DMAE_DCR_DDRMD_MOD (0u << 2)
49#define HPB_DMAE_DCR_DDRMD_AUTO (1u << 2)
50#define HPB_DMAE_DCR_DDRMD_TIMER (2u << 2)
51#define HPB_DMAE_DCR_DPDS_MASK (3u << 0)
52#define HPB_DMAE_DCR_DPDS_8BIT (0u << 0)
53#define HPB_DMAE_DCR_DPDS_16BIT (1u << 0)
54#define HPB_DMAE_DCR_DPDS_32BIT (2u << 0)
55
56/* Asynchronous reset register (ASYNCRSTR) bits */
57#define HPB_DMAE_ASYNCRSTR_ASRST41 BIT(10)
58#define HPB_DMAE_ASYNCRSTR_ASRST40 BIT(9)
59#define HPB_DMAE_ASYNCRSTR_ASRST39 BIT(8)
60#define HPB_DMAE_ASYNCRSTR_ASRST27 BIT(7)
61#define HPB_DMAE_ASYNCRSTR_ASRST26 BIT(6)
62#define HPB_DMAE_ASYNCRSTR_ASRST25 BIT(5)
63#define HPB_DMAE_ASYNCRSTR_ASRST24 BIT(4)
64#define HPB_DMAE_ASYNCRSTR_ASRST23 BIT(3)
65#define HPB_DMAE_ASYNCRSTR_ASRST22 BIT(2)
66#define HPB_DMAE_ASYNCRSTR_ASRST21 BIT(1)
67#define HPB_DMAE_ASYNCRSTR_ASRST20 BIT(0)
68
69struct hpb_dmae_slave_config {
70 unsigned int id;
71 dma_addr_t addr;
72 u32 dcr;
73 u32 port;
74 u32 rstr;
75 u32 mdr;
76 u32 mdm;
77 u32 flags;
78#define HPB_DMAE_SET_ASYNC_RESET BIT(0)
79#define HPB_DMAE_SET_ASYNC_MODE BIT(1)
80 u32 dma_ch;
81};
82
83#define HPB_DMAE_CHANNEL(_irq, _s_id) \
84{ \
85 .ch_irq = _irq, \
86 .s_id = _s_id, \
87}
88
89struct hpb_dmae_channel {
90 unsigned int ch_irq;
91 unsigned int s_id;
92};
93
94struct hpb_dmae_pdata {
95 const struct hpb_dmae_slave_config *slaves;
96 int num_slaves;
97 const struct hpb_dmae_channel *channels;
98 int num_channels;
99 const unsigned int ts_shift[XMIT_SZ_MAX];
100 int num_hw_channels;
101};
102
103#endif
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h
index e2878baeb90e..105700e62ea1 100644
--- a/include/linux/platform_data/edma.h
+++ b/include/linux/platform_data/edma.h
@@ -53,12 +53,16 @@ enum dma_event_q {
53#define EDMA_CTLR(i) ((i) >> 16) 53#define EDMA_CTLR(i) ((i) >> 16)
54#define EDMA_CHAN_SLOT(i) ((i) & 0xffff) 54#define EDMA_CHAN_SLOT(i) ((i) & 0xffff)
55 55
56#define EDMA_FILTER_PARAM(ctlr, chan) ((int[]) { EDMA_CTLR_CHAN(ctlr, chan) })
57
56struct edma_rsv_info { 58struct edma_rsv_info {
57 59
58 const s16 (*rsv_chans)[2]; 60 const s16 (*rsv_chans)[2];
59 const s16 (*rsv_slots)[2]; 61 const s16 (*rsv_slots)[2];
60}; 62};
61 63
64struct dma_slave_map;
65
62/* platform_data for EDMA driver */ 66/* platform_data for EDMA driver */
63struct edma_soc_info { 67struct edma_soc_info {
64 /* 68 /*
@@ -76,6 +80,9 @@ struct edma_soc_info {
76 80
77 s8 (*queue_priority_mapping)[2]; 81 s8 (*queue_priority_mapping)[2];
78 const s16 (*xbar_chans)[2]; 82 const s16 (*xbar_chans)[2];
83
84 const struct dma_slave_map *slave_map;
85 int slavecnt;
79}; 86};
80 87
81#endif 88#endif
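
EDMA_FILTER_PARAM() plays the same role for eDMA, wrapping a controller/channel pair into the parameter consumed by the eDMA filter function. Another purely illustrative, hedged table; no real board file is being quoted here.

static const struct dma_slave_map foo_edma_map[] = {
        { "davinci-mcasp.0", "tx", EDMA_FILTER_PARAM(0, 2) },
        { "davinci-mcasp.0", "rx", EDMA_FILTER_PARAM(0, 3) },
};

static struct edma_soc_info foo_edma_info = {
        /* other edma_soc_info fields omitted */
        .slave_map = foo_edma_map,
        .slavecnt = ARRAY_SIZE(foo_edma_map),
};
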
diff --git a/sound/core/pcm_dmaengine.c b/sound/core/pcm_dmaengine.c
index fba365a78390..697c166acf05 100644
--- a/sound/core/pcm_dmaengine.c
+++ b/sound/core/pcm_dmaengine.c
@@ -202,13 +202,13 @@ int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
202 if (runtime->info & SNDRV_PCM_INFO_PAUSE) 202 if (runtime->info & SNDRV_PCM_INFO_PAUSE)
203 dmaengine_pause(prtd->dma_chan); 203 dmaengine_pause(prtd->dma_chan);
204 else 204 else
205 dmaengine_terminate_all(prtd->dma_chan); 205 dmaengine_terminate_async(prtd->dma_chan);
206 break; 206 break;
207 case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 207 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
208 dmaengine_pause(prtd->dma_chan); 208 dmaengine_pause(prtd->dma_chan);
209 break; 209 break;
210 case SNDRV_PCM_TRIGGER_STOP: 210 case SNDRV_PCM_TRIGGER_STOP:
211 dmaengine_terminate_all(prtd->dma_chan); 211 dmaengine_terminate_async(prtd->dma_chan);
212 break; 212 break;
213 default: 213 default:
214 return -EINVAL; 214 return -EINVAL;
@@ -346,6 +346,7 @@ int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream)
346{ 346{
347 struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); 347 struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
348 348
349 dmaengine_synchronize(prtd->dma_chan);
349 kfree(prtd); 350 kfree(prtd);
350 351
351 return 0; 352 return 0;
@@ -362,9 +363,11 @@ int snd_dmaengine_pcm_close_release_chan(struct snd_pcm_substream *substream)
362{ 363{
363 struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); 364 struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
364 365
366 dmaengine_synchronize(prtd->dma_chan);
365 dma_release_channel(prtd->dma_chan); 367 dma_release_channel(prtd->dma_chan);
368 kfree(prtd);
366 369
367 return snd_dmaengine_pcm_close(substream); 370 return 0;
368} 371}
369EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close_release_chan); 372EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close_release_chan);
370 373