author		Linus Torvalds <torvalds@linux-foundation.org>	2015-02-18 11:49:20 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-18 11:49:20 -0500
commit		ce1d3fde87d1a21f1ec1147dde32b2825dd3a276 (patch)
tree		6ffab43e47e3a22a76bf9bf4efeecdf1b90dcb6f
parent		928fce2f6d8152d897790c1a5bbeef5642f69e0e (diff)
parent		88987d2c7534a0269f567fb101e6d71a08f0f01d (diff)
Merge branch 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine updates from Vinod Koul:
 "This update brings:

   - the big cleanup by Maxime for device control and slave
     capabilities.  This makes the API much cleaner.

   - new IMG MDC driver by Andrew

   - new Renesas R-Car Gen2 DMA Controller driver by Laurent, along
     with a bunch of fixes on the rcar drivers

   - odd fixes and updates spread over the drivers"

* 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma: (130 commits)
  dmaengine: pl330: add DMA_PAUSE feature
  dmaengine: pl330: improve pl330_tx_status() function
  dmaengine: rcar-dmac: Disable channel 0 when using IOMMU
  dmaengine: rcar-dmac: Work around descriptor mode IOMMU errata
  dmaengine: rcar-dmac: Allocate hardware descriptors with DMAC device
  dmaengine: rcar-dmac: Fix oops due to unintialized list in error ISR
  dmaengine: rcar-dmac: Fix spinlock issues in interrupt
  dmaenegine: edma: fix sparse warnings
  dmaengine: rcar-dmac: Fix uninitialized variable usage
  dmaengine: shdmac: extend PM methods
  dmaengine: shdmac: use SET_RUNTIME_PM_OPS()
  dmaengine: pl330: fix bug that cause start the same descs in cyclic
  dmaengine: at_xdmac: allow muliple dwidths when doing slave transfers
  dmaengine: at_xdmac: simplify channel configuration stuff
  dmaengine: at_xdmac: introduce save_cc field
  dmaengine: at_xdmac: wait for in-progress transaction to complete after pausing a channel
  ioat: fail self-test if wait_for_completion times out
  dmaengine: dw: define DW_DMA_MAX_NR_MASTERS
  dmaengine: dw: amend description of dma_dev field
  dmatest: move src_off, dst_off, len inside loop
  ...
-rw-r--r-- Documentation/devicetree/bindings/dma/img-mdc-dma.txt | 57
-rw-r--r-- Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt | 3
-rw-r--r-- Documentation/devicetree/bindings/dma/snps-dma.txt | 2
-rw-r--r-- Documentation/dmaengine/provider.txt | 97
-rw-r--r-- MAINTAINERS | 1
-rw-r--r-- arch/arc/boot/dts/abilis_tb10x.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/spear13xx.dtsi | 4
-rw-r--r-- arch/avr32/mach-at32ap/at32ap700x.c | 2
-rw-r--r-- drivers/crypto/ux500/cryp/cryp_core.c | 4
-rw-r--r-- drivers/crypto/ux500/hash/hash_core.c | 2
-rw-r--r-- drivers/dma/Kconfig | 9
-rw-r--r-- drivers/dma/Makefile | 3
-rw-r--r-- drivers/dma/amba-pl08x.c | 156
-rw-r--r-- drivers/dma/at_hdmac.c | 130
-rw-r--r-- drivers/dma/at_hdmac_regs.h | 3
-rw-r--r-- drivers/dma/at_xdmac.c | 186
-rw-r--r-- drivers/dma/bcm2835-dma.c | 46
-rw-r--r-- drivers/dma/coh901318.c | 153
-rw-r--r-- drivers/dma/cppi41.c | 30
-rw-r--r-- drivers/dma/dma-jz4740.c | 20
-rw-r--r-- drivers/dma/dmaengine.c | 84
-rw-r--r-- drivers/dma/dmatest.c | 35
-rw-r--r-- drivers/dma/dw/core.c | 101
-rw-r--r-- drivers/dma/dw/platform.c | 4
-rw-r--r-- drivers/dma/dw/regs.h | 4
-rw-r--r-- drivers/dma/edma.c | 73
-rw-r--r-- drivers/dma/ep93xx_dma.c | 43
-rw-r--r-- drivers/dma/fsl-edma.c | 123
-rw-r--r-- drivers/dma/fsldma.c | 97
-rw-r--r-- drivers/dma/fsldma.h | 4
-rw-r--r-- drivers/dma/img-mdc-dma.c | 1011
-rw-r--r-- drivers/dma/imx-dma.c | 108
-rw-r--r-- drivers/dma/imx-sdma.c | 150
-rw-r--r-- drivers/dma/intel_mid_dma.c | 25
-rw-r--r-- drivers/dma/ioat/dma_v3.c | 25
-rw-r--r-- drivers/dma/ioat/hw.h | 5
-rw-r--r-- drivers/dma/ioat/pci.c | 5
-rw-r--r-- drivers/dma/ipu/ipu_idmac.c | 96
-rw-r--r-- drivers/dma/k3dma.c | 203
-rw-r--r-- drivers/dma/mmp_pdma.c | 109
-rw-r--r-- drivers/dma/mmp_tdma.c | 85
-rw-r--r-- drivers/dma/moxart-dma.c | 25
-rw-r--r-- drivers/dma/mpc512x_dma.c | 111
-rw-r--r-- drivers/dma/mv_xor.c | 9
-rw-r--r-- drivers/dma/mxs-dma.c | 65
-rw-r--r-- drivers/dma/nbpfaxi.c | 112
-rw-r--r-- drivers/dma/of-dma.c | 4
-rw-r--r-- drivers/dma/omap-dma.c | 69
-rw-r--r-- drivers/dma/pch_dma.c | 8
-rw-r--r-- drivers/dma/pl330.c | 230
-rw-r--r-- drivers/dma/qcom_bam_dma.c | 85
-rw-r--r-- drivers/dma/s3c24xx-dma.c | 73
-rw-r--r-- drivers/dma/sa11x0-dma.c | 157
-rw-r--r-- drivers/dma/sh/Kconfig | 14
-rw-r--r-- drivers/dma/sh/Makefile | 1
-rw-r--r-- drivers/dma/sh/rcar-dmac.c | 1770
-rw-r--r-- drivers/dma/sh/rcar-hpbdma.c | 6
-rw-r--r-- drivers/dma/sh/shdma-base.c | 72
-rw-r--r-- drivers/dma/sh/shdmac.c | 23
-rw-r--r-- drivers/dma/sirf-dma.c | 59
-rw-r--r-- drivers/dma/ste_dma40.c | 63
-rw-r--r-- drivers/dma/sun6i-dma.c | 160
-rw-r--r-- drivers/dma/tegra20-apb-dma.c | 42
-rw-r--r-- drivers/dma/timb_dma.c | 8
-rw-r--r-- drivers/dma/txx9dmac.c | 9
-rw-r--r-- drivers/dma/xilinx/xilinx_vdma.c | 29
-rw-r--r-- drivers/rapidio/devices/tsi721_dma.c | 8
-rw-r--r-- include/linux/dmaengine.h | 120
-rw-r--r-- include/linux/platform_data/dma-dw.h | 6
-rw-r--r-- include/linux/platform_data/dma-mmp_tdma.h | 7
-rw-r--r-- sound/soc/soc-generic-dmaengine-pcm.c | 2
71 files changed, 4736 insertions, 1911 deletions
diff --git a/Documentation/devicetree/bindings/dma/img-mdc-dma.txt b/Documentation/devicetree/bindings/dma/img-mdc-dma.txt
new file mode 100644
index 000000000000..28c1341db346
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/img-mdc-dma.txt
@@ -0,0 +1,57 @@
+* IMG Multi-threaded DMA Controller (MDC)
+
+Required properties:
+- compatible: Must be "img,pistachio-mdc-dma".
+- reg: Must contain the base address and length of the MDC registers.
+- interrupts: Must contain all the per-channel DMA interrupts.
+- clocks: Must contain an entry for each entry in clock-names.
+  See ../clock/clock-bindings.txt for details.
+- clock-names: Must include the following entries:
+  - sys: MDC system interface clock.
+- img,cr-periph: Must contain a phandle to the peripheral control syscon
+  node which contains the DMA request to channel mapping registers.
+- img,max-burst-multiplier: Must be the maximum supported burst size multiplier.
+  The maximum burst size is this value multiplied by the hardware-reported bus
+  width.
+- #dma-cells: Must be 3:
+  - The first cell is the peripheral's DMA request line.
+  - The second cell is a bitmap specifying to which channels the DMA request
+    line may be mapped (i.e. bit N set indicates channel N is usable).
+  - The third cell is the thread ID to be used by the channel.
+
+Optional properties:
+- dma-channels: Number of supported DMA channels, up to 32. If not specified
+  the number reported by the hardware is used.
+
+Example:
+
+mdc: dma-controller@18143000 {
+	compatible = "img,pistachio-mdc-dma";
+	reg = <0x18143000 0x1000>;
+	interrupts = <GIC_SHARED 27 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 28 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 29 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 30 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 31 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 32 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 33 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 34 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 35 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 36 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 37 IRQ_TYPE_LEVEL_HIGH>,
+		     <GIC_SHARED 38 IRQ_TYPE_LEVEL_HIGH>;
+	clocks = <&system_clk>;
+	clock-names = "sys";
+
+	img,max-burst-multiplier = <16>;
+	img,cr-periph = <&cr_periph>;
+
+	#dma-cells = <3>;
+};
+
+spi@18100f00 {
+	...
+	dmas = <&mdc 9 0xffffffff 0>, <&mdc 10 0xffffffff 0>;
+	dma-names = "tx", "rx";
+	...
+};
diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
index f7e21b1c2a05..09daeef1ff22 100644
--- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
@@ -5,9 +5,6 @@ controller instances named DMAC capable of serving multiple clients. Channels
 can be dedicated to specific clients or shared between a large number of
 clients.
 
-DMA clients are connected to the DMAC ports referenced by an 8-bit identifier
-called MID/RID.
-
 Each DMA client is connected to one dedicated port of the DMAC, identified by
 an 8-bit port number called the MID/RID. A DMA controller can thus serve up to
 256 clients in total. When the number of hardware channels is lower than the
diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt
index d58675ea1abf..c261598164a7 100644
--- a/Documentation/devicetree/bindings/dma/snps-dma.txt
+++ b/Documentation/devicetree/bindings/dma/snps-dma.txt
@@ -38,7 +38,7 @@ Example:
 		chan_allocation_order = <1>;
 		chan_priority = <1>;
 		block_size = <0xfff>;
-		data_width = <3 3 0 0>;
+		data_width = <3 3>;
 	};
 
 DMA clients connected to the Designware DMA controller must use the format
diff --git a/Documentation/dmaengine/provider.txt b/Documentation/dmaengine/provider.txt
index 766658ccf235..05d2280190f1 100644
--- a/Documentation/dmaengine/provider.txt
+++ b/Documentation/dmaengine/provider.txt
@@ -113,6 +113,31 @@ need to initialize a few fields in there:
   * channels: should be initialized as a list using the
     INIT_LIST_HEAD macro for example
 
+  * src_addr_widths:
+    - should contain a bitmask of the supported source transfer width
+
+  * dst_addr_widths:
+    - should contain a bitmask of the supported destination transfer
+      width
+
+  * directions:
+    - should contain a bitmask of the supported slave directions
+      (i.e. excluding mem2mem transfers)
+
+  * residue_granularity:
+    - Granularity of the transfer residue reported to dma_set_residue.
+    - This can be either:
+      + Descriptor
+        -> Your device doesn't support any kind of residue
+           reporting. The framework will only know that a particular
+           transaction descriptor is done.
+      + Segment
+        -> Your device is able to report which chunks have been
+           transferred
+      + Burst
+        -> Your device is able to report which burst have been
+           transferred
+
   * dev: should hold the pointer to the struct device associated
     to your current driver instance.
 
@@ -274,48 +299,36 @@ supported.
       account the current period.
   - This function can be called in an interrupt context.
 
-  * device_control
-    - Used by client drivers to control and configure the channel it
-      has a handle on.
-    - Called with a command and an argument
-      + The command is one of the values listed by the enum
-        dma_ctrl_cmd. The valid commands are:
-        + DMA_PAUSE
-          + Pauses a transfer on the channel
-          + This command should operate synchronously on the channel,
-            pausing right away the work of the given channel
-        + DMA_RESUME
-          + Restarts a transfer on the channel
-          + This command should operate synchronously on the channel,
-            resuming right away the work of the given channel
-        + DMA_TERMINATE_ALL
-          + Aborts all the pending and ongoing transfers on the
-            channel
-          + This command should operate synchronously on the channel,
-            terminating right away all the channels
-        + DMA_SLAVE_CONFIG
-          + Reconfigures the channel with passed configuration
-          + This command should NOT perform synchronously, or on any
-            currently queued transfers, but only on subsequent ones
-          + In this case, the function will receive a
-            dma_slave_config structure pointer as an argument, that
-            will detail which configuration to use.
-          + Even though that structure contains a direction field,
-            this field is deprecated in favor of the direction
-            argument given to the prep_* functions
-        + FSLDMA_EXTERNAL_START
-          + TODO: Why does that even exist?
-      + The argument is an opaque unsigned long. This actually is a
-        pointer to a struct dma_slave_config that should be used only
-        in the DMA_SLAVE_CONFIG.
-
-  * device_slave_caps
-    - Called through the framework by client drivers in order to have
-      an idea of what are the properties of the channel allocated to
-      them.
-    - Such properties are the buswidth, available directions, etc.
-    - Required for every generic layer doing DMA transfers, such as
-      ASoC.
+  * device_config
+    - Reconfigures the channel with the configuration given as
+      argument
+    - This command should NOT perform synchronously, or on any
+      currently queued transfers, but only on subsequent ones
+    - In this case, the function will receive a dma_slave_config
+      structure pointer as an argument, that will detail which
+      configuration to use.
+    - Even though that structure contains a direction field, this
+      field is deprecated in favor of the direction argument given to
+      the prep_* functions
+    - This call is mandatory for slave operations only. This should NOT be
+      set or expected to be set for memcpy operations.
+      If a driver support both, it should use this call for slave
+      operations only and not for memcpy ones.
+
+  * device_pause
+    - Pauses a transfer on the channel
+    - This command should operate synchronously on the channel,
+      pausing right away the work of the given channel
+
+  * device_resume
+    - Resumes a transfer on the channel
+    - This command should operate synchronously on the channel,
+      pausing right away the work of the given channel
+
+  * device_terminate_all
+    - Aborts all the pending and ongoing transfers on the channel
+    - This command should operate synchronously on the channel,
+      terminating right away all the channels
 
 Misc notes (stuff that should be documented, but don't really know
 where to put them)
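
The driver conversions below (at_hdmac, at_xdmac, bcm2835, ...) all follow the pattern the updated provider.txt describes. As a reading aid, here is a minimal provider-side sketch of that pattern; the foo_* names are hypothetical placeholders, not taken from any driver in this series.

/*
 * Minimal provider-side sketch of the split callbacks and capability
 * fields described above.  A real driver programs its hardware where
 * these stubs just return.
 */
#include <linux/bitops.h>
#include <linux/dmaengine.h>

#define FOO_DMA_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

static int foo_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
	return 0;	/* store @cfg; it only applies to subsequent transfers */
}

static int foo_pause(struct dma_chan *chan)
{
	return 0;	/* synchronously pause work on the channel */
}

static int foo_resume(struct dma_chan *chan)
{
	return 0;	/* synchronously resume work on the channel */
}

static int foo_terminate_all(struct dma_chan *chan)
{
	return 0;	/* abort all pending and ongoing transfers */
}

static void foo_setup_dma_device(struct dma_device *dd)
{
	/* One callback per operation replaces the old device_control() multiplexer */
	dd->device_config = foo_config;
	dd->device_pause = foo_pause;
	dd->device_resume = foo_resume;
	dd->device_terminate_all = foo_terminate_all;

	/* Slave capabilities become plain fields instead of device_slave_caps() */
	dd->src_addr_widths = FOO_DMA_BUSWIDTHS;
	dd->dst_addr_widths = FOO_DMA_BUSWIDTHS;
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
}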
diff --git a/MAINTAINERS b/MAINTAINERS
index 7ac95f8ba6ca..4f4915cbeab9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8503,6 +8503,7 @@ SYNOPSYS DESIGNWARE DMAC DRIVER
 M:	Viresh Kumar <viresh.linux@gmail.com>
 M:	Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 S:	Maintained
+F:	include/linux/dma/dw.h
 F:	include/linux/platform_data/dma-dw.h
 F:	drivers/dma/dw/
 
diff --git a/arch/arc/boot/dts/abilis_tb10x.dtsi b/arch/arc/boot/dts/abilis_tb10x.dtsi
index a098d7c05e96..cfb5052239a1 100644
--- a/arch/arc/boot/dts/abilis_tb10x.dtsi
+++ b/arch/arc/boot/dts/abilis_tb10x.dtsi
@@ -112,7 +112,7 @@
 			chan_allocation_order = <0>;
 			chan_priority = <1>;
 			block_size = <0x7ff>;
-			data_width = <2 0 0 0>;
+			data_width = <2>;
 			clocks = <&ahb_clk>;
 			clock-names = "hclk";
 		};
diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
index a6eb5436d26d..40accc87e3a2 100644
--- a/arch/arm/boot/dts/spear13xx.dtsi
+++ b/arch/arm/boot/dts/spear13xx.dtsi
@@ -117,7 +117,7 @@
 			chan_priority = <1>;
 			block_size = <0xfff>;
 			dma-masters = <2>;
-			data_width = <3 3 0 0>;
+			data_width = <3 3>;
 		};
 
 		dma@eb000000 {
@@ -133,7 +133,7 @@
 			chan_allocation_order = <1>;
 			chan_priority = <1>;
 			block_size = <0xfff>;
-			data_width = <3 3 0 0>;
+			data_width = <3 3>;
 		};
 
 		fsmc: flash@b0000000 {
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index cc92cdb9994c..1d8b147282cf 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -607,7 +607,7 @@ static struct dw_dma_platform_data dw_dmac0_data = {
 	.nr_channels	= 3,
 	.block_size	= 4095U,
 	.nr_masters	= 2,
-	.data_width	= { 2, 2, 0, 0 },
+	.data_width	= { 2, 2 },
 };
 
 static struct resource dw_dmac0_resource[] = {
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index d594ae962ed2..fded0a5cfcd7 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -606,12 +606,12 @@ static void cryp_dma_done(struct cryp_ctx *ctx)
 	dev_dbg(ctx->device->dev, "[%s]: ", __func__);
 
 	chan = ctx->device->dma.chan_mem2cryp;
-	dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
+	dmaengine_terminate_all(chan);
 	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src,
 		     ctx->device->dma.sg_src_len, DMA_TO_DEVICE);
 
 	chan = ctx->device->dma.chan_cryp2mem;
-	dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
+	dmaengine_terminate_all(chan);
 	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst,
 		     ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE);
 }
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 70a20871e998..187a8fd7eee7 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -202,7 +202,7 @@ static void hash_dma_done(struct hash_ctx *ctx)
 	struct dma_chan *chan;
 
 	chan = ctx->device->dma.chan_mem2hash;
-	dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
+	dmaengine_terminate_all(chan);
 	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
 		     ctx->device->dma.sg_len, DMA_TO_DEVICE);
 }
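
The two ux500 hunks above show the client-side half of the cleanup: consumers call the dedicated dmaengine_* helpers instead of dmaengine_device_control(). A minimal sketch of that usage follows; the foo_* names and the FIFO address parameter are hypothetical.

#include <linux/dmaengine.h>

/* Hypothetical client setup: routed to the provider's device_config() */
static int foo_client_setup(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,
	};

	return dmaengine_slave_config(chan, &cfg);
}

/* Hypothetical teardown: ends up in the provider's device_terminate_all() */
static void foo_client_teardown(struct dma_chan *chan)
{
	dmaengine_terminate_all(chan);
}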
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index faf30a4e642b..a874b6ec6650 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -416,6 +416,15 @@ config NBPFAXI_DMA
 	help
 	  Support for "Type-AXI" NBPF DMA IPs from Renesas
 
+config IMG_MDC_DMA
+	tristate "IMG MDC support"
+	depends on MIPS || COMPILE_TEST
+	depends on MFD_SYSCON
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the IMG multi-threaded DMA controller (MDC).
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 2022b5451377..f915f61ec574 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -19,7 +19,7 @@ obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
 obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
 obj-$(CONFIG_MX3_IPU) += ipu/
 obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
-obj-$(CONFIG_SH_DMAE_BASE) += sh/
+obj-$(CONFIG_RENESAS_DMA) += sh/
 obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
 obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
 obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
@@ -50,3 +50,4 @@ obj-y += xilinx/
 obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
 obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
 obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
+obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 1364d00881dd..4a5fd245014e 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1386,32 +1386,6 @@ static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
1386 return pl08x_cctl(cctl); 1386 return pl08x_cctl(cctl);
1387} 1387}
1388 1388
1389static int dma_set_runtime_config(struct dma_chan *chan,
1390 struct dma_slave_config *config)
1391{
1392 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1393 struct pl08x_driver_data *pl08x = plchan->host;
1394
1395 if (!plchan->slave)
1396 return -EINVAL;
1397
1398 /* Reject definitely invalid configurations */
1399 if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
1400 config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
1401 return -EINVAL;
1402
1403 if (config->device_fc && pl08x->vd->pl080s) {
1404 dev_err(&pl08x->adev->dev,
1405 "%s: PL080S does not support peripheral flow control\n",
1406 __func__);
1407 return -EINVAL;
1408 }
1409
1410 plchan->cfg = *config;
1411
1412 return 0;
1413}
1414
1415/* 1389/*
1416 * Slave transactions callback to the slave device to allow 1390 * Slave transactions callback to the slave device to allow
1417 * synchronization of slave DMA signals with the DMAC enable 1391 * synchronization of slave DMA signals with the DMAC enable
@@ -1693,20 +1667,71 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
1693 return vchan_tx_prep(&plchan->vc, &txd->vd, flags); 1667 return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
1694} 1668}
1695 1669
1696static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 1670static int pl08x_config(struct dma_chan *chan,
1697 unsigned long arg) 1671 struct dma_slave_config *config)
1672{
1673 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1674 struct pl08x_driver_data *pl08x = plchan->host;
1675
1676 if (!plchan->slave)
1677 return -EINVAL;
1678
1679 /* Reject definitely invalid configurations */
1680 if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
1681 config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
1682 return -EINVAL;
1683
1684 if (config->device_fc && pl08x->vd->pl080s) {
1685 dev_err(&pl08x->adev->dev,
1686 "%s: PL080S does not support peripheral flow control\n",
1687 __func__);
1688 return -EINVAL;
1689 }
1690
1691 plchan->cfg = *config;
1692
1693 return 0;
1694}
1695
1696static int pl08x_terminate_all(struct dma_chan *chan)
1698{ 1697{
1699 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1698 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1700 struct pl08x_driver_data *pl08x = plchan->host; 1699 struct pl08x_driver_data *pl08x = plchan->host;
1701 unsigned long flags; 1700 unsigned long flags;
1702 int ret = 0;
1703 1701
1704 /* Controls applicable to inactive channels */ 1702 spin_lock_irqsave(&plchan->vc.lock, flags);
1705 if (cmd == DMA_SLAVE_CONFIG) { 1703 if (!plchan->phychan && !plchan->at) {
1706 return dma_set_runtime_config(chan, 1704 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1707 (struct dma_slave_config *)arg); 1705 return 0;
1708 } 1706 }
1709 1707
1708 plchan->state = PL08X_CHAN_IDLE;
1709
1710 if (plchan->phychan) {
1711 /*
1712 * Mark physical channel as free and free any slave
1713 * signal
1714 */
1715 pl08x_phy_free(plchan);
1716 }
1717 /* Dequeue jobs and free LLIs */
1718 if (plchan->at) {
1719 pl08x_desc_free(&plchan->at->vd);
1720 plchan->at = NULL;
1721 }
1722 /* Dequeue jobs not yet fired as well */
1723 pl08x_free_txd_list(pl08x, plchan);
1724
1725 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1726
1727 return 0;
1728}
1729
1730static int pl08x_pause(struct dma_chan *chan)
1731{
1732 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1733 unsigned long flags;
1734
1710 /* 1735 /*
1711 * Anything succeeds on channels with no physical allocation and 1736 * Anything succeeds on channels with no physical allocation and
1712 * no queued transfers. 1737 * no queued transfers.
@@ -1717,42 +1742,35 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1717 return 0; 1742 return 0;
1718 } 1743 }
1719 1744
1720 switch (cmd) { 1745 pl08x_pause_phy_chan(plchan->phychan);
1721 case DMA_TERMINATE_ALL: 1746 plchan->state = PL08X_CHAN_PAUSED;
1722 plchan->state = PL08X_CHAN_IDLE;
1723 1747
1724 if (plchan->phychan) { 1748 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1725 /* 1749
1726 * Mark physical channel as free and free any slave 1750 return 0;
1727 * signal 1751}
1728 */ 1752
1729 pl08x_phy_free(plchan); 1753static int pl08x_resume(struct dma_chan *chan)
1730 } 1754{
1731 /* Dequeue jobs and free LLIs */ 1755 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1732 if (plchan->at) { 1756 unsigned long flags;
1733 pl08x_desc_free(&plchan->at->vd); 1757
1734 plchan->at = NULL; 1758 /*
1735 } 1759 * Anything succeeds on channels with no physical allocation and
1736 /* Dequeue jobs not yet fired as well */ 1760 * no queued transfers.
1737 pl08x_free_txd_list(pl08x, plchan); 1761 */
1738 break; 1762 spin_lock_irqsave(&plchan->vc.lock, flags);
1739 case DMA_PAUSE: 1763 if (!plchan->phychan && !plchan->at) {
1740 pl08x_pause_phy_chan(plchan->phychan); 1764 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1741 plchan->state = PL08X_CHAN_PAUSED; 1765 return 0;
1742 break;
1743 case DMA_RESUME:
1744 pl08x_resume_phy_chan(plchan->phychan);
1745 plchan->state = PL08X_CHAN_RUNNING;
1746 break;
1747 default:
1748 /* Unknown command */
1749 ret = -ENXIO;
1750 break;
1751 } 1766 }
1752 1767
1768 pl08x_resume_phy_chan(plchan->phychan);
1769 plchan->state = PL08X_CHAN_RUNNING;
1770
1753 spin_unlock_irqrestore(&plchan->vc.lock, flags); 1771 spin_unlock_irqrestore(&plchan->vc.lock, flags);
1754 1772
1755 return ret; 1773 return 0;
1756} 1774}
1757 1775
1758bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) 1776bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
@@ -2048,7 +2066,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
2048 pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; 2066 pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
2049 pl08x->memcpy.device_tx_status = pl08x_dma_tx_status; 2067 pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
2050 pl08x->memcpy.device_issue_pending = pl08x_issue_pending; 2068 pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
2051 pl08x->memcpy.device_control = pl08x_control; 2069 pl08x->memcpy.device_config = pl08x_config;
2070 pl08x->memcpy.device_pause = pl08x_pause;
2071 pl08x->memcpy.device_resume = pl08x_resume;
2072 pl08x->memcpy.device_terminate_all = pl08x_terminate_all;
2052 2073
2053 /* Initialize slave engine */ 2074 /* Initialize slave engine */
2054 dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask); 2075 dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
@@ -2061,7 +2082,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
2061 pl08x->slave.device_issue_pending = pl08x_issue_pending; 2082 pl08x->slave.device_issue_pending = pl08x_issue_pending;
2062 pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg; 2083 pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
2063 pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic; 2084 pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic;
2064 pl08x->slave.device_control = pl08x_control; 2085 pl08x->slave.device_config = pl08x_config;
2086 pl08x->slave.device_pause = pl08x_pause;
2087 pl08x->slave.device_resume = pl08x_resume;
2088 pl08x->slave.device_terminate_all = pl08x_terminate_all;
2065 2089
2066 /* Get the platform data */ 2090 /* Get the platform data */
2067 pl08x->pd = dev_get_platdata(&adev->dev); 2091 pl08x->pd = dev_get_platdata(&adev->dev);
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index ca9dd2613283..1e1a4c567542 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -42,6 +42,11 @@
42#define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO) 42#define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO)
43#define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \ 43#define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \
44 |ATC_DIF(AT_DMA_MEM_IF)) 44 |ATC_DIF(AT_DMA_MEM_IF))
45#define ATC_DMA_BUSWIDTHS\
46 (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
47 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
48 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
49 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
45 50
46/* 51/*
47 * Initial number of descriptors to allocate for each channel. This could 52 * Initial number of descriptors to allocate for each channel. This could
@@ -972,11 +977,13 @@ err_out:
972 return NULL; 977 return NULL;
973} 978}
974 979
975static int set_runtime_config(struct dma_chan *chan, 980static int atc_config(struct dma_chan *chan,
976 struct dma_slave_config *sconfig) 981 struct dma_slave_config *sconfig)
977{ 982{
978 struct at_dma_chan *atchan = to_at_dma_chan(chan); 983 struct at_dma_chan *atchan = to_at_dma_chan(chan);
979 984
985 dev_vdbg(chan2dev(chan), "%s\n", __func__);
986
980 /* Check if it is chan is configured for slave transfers */ 987 /* Check if it is chan is configured for slave transfers */
981 if (!chan->private) 988 if (!chan->private)
982 return -EINVAL; 989 return -EINVAL;
@@ -989,9 +996,28 @@ static int set_runtime_config(struct dma_chan *chan,
989 return 0; 996 return 0;
990} 997}
991 998
999static int atc_pause(struct dma_chan *chan)
1000{
1001 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1002 struct at_dma *atdma = to_at_dma(chan->device);
1003 int chan_id = atchan->chan_common.chan_id;
1004 unsigned long flags;
992 1005
993static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 1006 LIST_HEAD(list);
994 unsigned long arg) 1007
1008 dev_vdbg(chan2dev(chan), "%s\n", __func__);
1009
1010 spin_lock_irqsave(&atchan->lock, flags);
1011
1012 dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
1013 set_bit(ATC_IS_PAUSED, &atchan->status);
1014
1015 spin_unlock_irqrestore(&atchan->lock, flags);
1016
1017 return 0;
1018}
1019
1020static int atc_resume(struct dma_chan *chan)
995{ 1021{
996 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1022 struct at_dma_chan *atchan = to_at_dma_chan(chan);
997 struct at_dma *atdma = to_at_dma(chan->device); 1023 struct at_dma *atdma = to_at_dma(chan->device);
@@ -1000,60 +1026,61 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1000 1026
1001 LIST_HEAD(list); 1027 LIST_HEAD(list);
1002 1028
1003 dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd); 1029 dev_vdbg(chan2dev(chan), "%s\n", __func__);
1004 1030
1005 if (cmd == DMA_PAUSE) { 1031 if (!atc_chan_is_paused(atchan))
1006 spin_lock_irqsave(&atchan->lock, flags); 1032 return 0;
1007 1033
1008 dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id)); 1034 spin_lock_irqsave(&atchan->lock, flags);
1009 set_bit(ATC_IS_PAUSED, &atchan->status);
1010 1035
1011 spin_unlock_irqrestore(&atchan->lock, flags); 1036 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
1012 } else if (cmd == DMA_RESUME) { 1037 clear_bit(ATC_IS_PAUSED, &atchan->status);
1013 if (!atc_chan_is_paused(atchan))
1014 return 0;
1015 1038
1016 spin_lock_irqsave(&atchan->lock, flags); 1039 spin_unlock_irqrestore(&atchan->lock, flags);
1017 1040
1018 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id)); 1041 return 0;
1019 clear_bit(ATC_IS_PAUSED, &atchan->status); 1042}
1020 1043
1021 spin_unlock_irqrestore(&atchan->lock, flags); 1044static int atc_terminate_all(struct dma_chan *chan)
1022 } else if (cmd == DMA_TERMINATE_ALL) { 1045{
1023 struct at_desc *desc, *_desc; 1046 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1024 /* 1047 struct at_dma *atdma = to_at_dma(chan->device);
1025 * This is only called when something went wrong elsewhere, so 1048 int chan_id = atchan->chan_common.chan_id;
1026 * we don't really care about the data. Just disable the 1049 struct at_desc *desc, *_desc;
1027 * channel. We still have to poll the channel enable bit due 1050 unsigned long flags;
1028 * to AHB/HSB limitations.
1029 */
1030 spin_lock_irqsave(&atchan->lock, flags);
1031 1051
1032 /* disabling channel: must also remove suspend state */ 1052 LIST_HEAD(list);
1033 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
1034 1053
1035 /* confirm that this channel is disabled */ 1054 dev_vdbg(chan2dev(chan), "%s\n", __func__);
1036 while (dma_readl(atdma, CHSR) & atchan->mask)
1037 cpu_relax();
1038 1055
1039 /* active_list entries will end up before queued entries */ 1056 /*
1040 list_splice_init(&atchan->queue, &list); 1057 * This is only called when something went wrong elsewhere, so
1041 list_splice_init(&atchan->active_list, &list); 1058 * we don't really care about the data. Just disable the
1059 * channel. We still have to poll the channel enable bit due
1060 * to AHB/HSB limitations.
1061 */
1062 spin_lock_irqsave(&atchan->lock, flags);
1042 1063
1043 /* Flush all pending and queued descriptors */ 1064 /* disabling channel: must also remove suspend state */
1044 list_for_each_entry_safe(desc, _desc, &list, desc_node) 1065 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
1045 atc_chain_complete(atchan, desc);
1046 1066
1047 clear_bit(ATC_IS_PAUSED, &atchan->status); 1067 /* confirm that this channel is disabled */
1048 /* if channel dedicated to cyclic operations, free it */ 1068 while (dma_readl(atdma, CHSR) & atchan->mask)
1049 clear_bit(ATC_IS_CYCLIC, &atchan->status); 1069 cpu_relax();
1050 1070
1051 spin_unlock_irqrestore(&atchan->lock, flags); 1071 /* active_list entries will end up before queued entries */
1052 } else if (cmd == DMA_SLAVE_CONFIG) { 1072 list_splice_init(&atchan->queue, &list);
1053 return set_runtime_config(chan, (struct dma_slave_config *)arg); 1073 list_splice_init(&atchan->active_list, &list);
1054 } else { 1074
1055 return -ENXIO; 1075 /* Flush all pending and queued descriptors */
1056 } 1076 list_for_each_entry_safe(desc, _desc, &list, desc_node)
1077 atc_chain_complete(atchan, desc);
1078
1079 clear_bit(ATC_IS_PAUSED, &atchan->status);
1080 /* if channel dedicated to cyclic operations, free it */
1081 clear_bit(ATC_IS_CYCLIC, &atchan->status);
1082
1083 spin_unlock_irqrestore(&atchan->lock, flags);
1057 1084
1058 return 0; 1085 return 0;
1059} 1086}
@@ -1505,7 +1532,14 @@ static int __init at_dma_probe(struct platform_device *pdev)
1505 /* controller can do slave DMA: can trigger cyclic transfers */ 1532 /* controller can do slave DMA: can trigger cyclic transfers */
1506 dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask); 1533 dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
1507 atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic; 1534 atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
1508 atdma->dma_common.device_control = atc_control; 1535 atdma->dma_common.device_config = atc_config;
1536 atdma->dma_common.device_pause = atc_pause;
1537 atdma->dma_common.device_resume = atc_resume;
1538 atdma->dma_common.device_terminate_all = atc_terminate_all;
1539 atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS;
1540 atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS;
1541 atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1542 atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1509 } 1543 }
1510 1544
1511 dma_writel(atdma, EN, AT_DMA_ENABLE); 1545 dma_writel(atdma, EN, AT_DMA_ENABLE);
@@ -1622,7 +1656,7 @@ static void atc_suspend_cyclic(struct at_dma_chan *atchan)
1622 if (!atc_chan_is_paused(atchan)) { 1656 if (!atc_chan_is_paused(atchan)) {
1623 dev_warn(chan2dev(chan), 1657 dev_warn(chan2dev(chan),
1624 "cyclic channel not paused, should be done by channel user\n"); 1658 "cyclic channel not paused, should be done by channel user\n");
1625 atc_control(chan, DMA_PAUSE, 0); 1659 atc_pause(chan);
1626 } 1660 }
1627 1661
1628 /* now preserve additional data for cyclic operations */ 1662 /* now preserve additional data for cyclic operations */
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 2787aba60c6b..d6bba6c636c2 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -232,7 +232,8 @@ enum atc_status {
232 * @save_dscr: for cyclic operations, preserve next descriptor address in 232 * @save_dscr: for cyclic operations, preserve next descriptor address in
233 * the cyclic list on suspend/resume cycle 233 * the cyclic list on suspend/resume cycle
234 * @remain_desc: to save remain desc length 234 * @remain_desc: to save remain desc length
235 * @dma_sconfig: configuration for slave transfers, passed via DMA_SLAVE_CONFIG 235 * @dma_sconfig: configuration for slave transfers, passed via
236 * .device_config
236 * @lock: serializes enqueue/dequeue operations to descriptors lists 237 * @lock: serializes enqueue/dequeue operations to descriptors lists
237 * @active_list: list of descriptors dmaengine is being running on 238 * @active_list: list of descriptors dmaengine is being running on
238 * @queue: list of descriptors ready to be submitted to engine 239 * @queue: list of descriptors ready to be submitted to engine
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index b60d77a22df6..09e2825a547a 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -25,6 +25,7 @@
25#include <linux/dmapool.h> 25#include <linux/dmapool.h>
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/irq.h> 27#include <linux/irq.h>
28#include <linux/kernel.h>
28#include <linux/list.h> 29#include <linux/list.h>
29#include <linux/module.h> 30#include <linux/module.h>
30#include <linux/of_dma.h> 31#include <linux/of_dma.h>
@@ -174,6 +175,13 @@
174 175
175#define AT_XDMAC_MAX_CHAN 0x20 176#define AT_XDMAC_MAX_CHAN 0x20
176 177
178#define AT_XDMAC_DMA_BUSWIDTHS\
179 (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
180 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
181 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
182 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
183 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
184
177enum atc_status { 185enum atc_status {
178 AT_XDMAC_CHAN_IS_CYCLIC = 0, 186 AT_XDMAC_CHAN_IS_CYCLIC = 0,
179 AT_XDMAC_CHAN_IS_PAUSED, 187 AT_XDMAC_CHAN_IS_PAUSED,
@@ -184,15 +192,15 @@ struct at_xdmac_chan {
184 struct dma_chan chan; 192 struct dma_chan chan;
185 void __iomem *ch_regs; 193 void __iomem *ch_regs;
186 u32 mask; /* Channel Mask */ 194 u32 mask; /* Channel Mask */
187 u32 cfg[3]; /* Channel Configuration Register */ 195 u32 cfg[2]; /* Channel Configuration Register */
188 #define AT_XDMAC_CUR_CFG 0 /* Current channel conf */ 196 #define AT_XDMAC_DEV_TO_MEM_CFG 0 /* Predifined dev to mem channel conf */
189 #define AT_XDMAC_DEV_TO_MEM_CFG 1 /* Predifined dev to mem channel conf */ 197 #define AT_XDMAC_MEM_TO_DEV_CFG 1 /* Predifined mem to dev channel conf */
190 #define AT_XDMAC_MEM_TO_DEV_CFG 2 /* Predifined mem to dev channel conf */
191 u8 perid; /* Peripheral ID */ 198 u8 perid; /* Peripheral ID */
192 u8 perif; /* Peripheral Interface */ 199 u8 perif; /* Peripheral Interface */
193 u8 memif; /* Memory Interface */ 200 u8 memif; /* Memory Interface */
194 u32 per_src_addr; 201 u32 per_src_addr;
195 u32 per_dst_addr; 202 u32 per_dst_addr;
203 u32 save_cc;
196 u32 save_cim; 204 u32 save_cim;
197 u32 save_cnda; 205 u32 save_cnda;
198 u32 save_cndc; 206 u32 save_cndc;
@@ -344,20 +352,13 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
344 at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg); 352 at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);
345 353
346 /* 354 /*
347 * When doing memory to memory transfer we need to use the next 355 * When doing non cyclic transfer we need to use the next
348 * descriptor view 2 since some fields of the configuration register 356 * descriptor view 2 since some fields of the configuration register
349 * depend on transfer size and src/dest addresses. 357 * depend on transfer size and src/dest addresses.
350 */ 358 */
351 if (is_slave_direction(first->direction)) { 359 if (at_xdmac_chan_is_cyclic(atchan)) {
352 reg = AT_XDMAC_CNDC_NDVIEW_NDV1; 360 reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
353 if (first->direction == DMA_MEM_TO_DEV) 361 at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);
354 atchan->cfg[AT_XDMAC_CUR_CFG] =
355 atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
356 else
357 atchan->cfg[AT_XDMAC_CUR_CFG] =
358 atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
359 at_xdmac_chan_write(atchan, AT_XDMAC_CC,
360 atchan->cfg[AT_XDMAC_CUR_CFG]);
361 } else { 362 } else {
362 /* 363 /*
363 * No need to write AT_XDMAC_CC reg, it will be done when the 364 * No need to write AT_XDMAC_CC reg, it will be done when the
@@ -561,7 +562,6 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
561 struct at_xdmac_desc *first = NULL, *prev = NULL; 562 struct at_xdmac_desc *first = NULL, *prev = NULL;
562 struct scatterlist *sg; 563 struct scatterlist *sg;
563 int i; 564 int i;
564 u32 cfg;
565 unsigned int xfer_size = 0; 565 unsigned int xfer_size = 0;
566 566
567 if (!sgl) 567 if (!sgl)
@@ -583,7 +583,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
583 /* Prepare descriptors. */ 583 /* Prepare descriptors. */
584 for_each_sg(sgl, sg, sg_len, i) { 584 for_each_sg(sgl, sg, sg_len, i) {
585 struct at_xdmac_desc *desc = NULL; 585 struct at_xdmac_desc *desc = NULL;
586 u32 len, mem; 586 u32 len, mem, dwidth, fixed_dwidth;
587 587
588 len = sg_dma_len(sg); 588 len = sg_dma_len(sg);
589 mem = sg_dma_address(sg); 589 mem = sg_dma_address(sg);
@@ -608,17 +608,21 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
608 if (direction == DMA_DEV_TO_MEM) { 608 if (direction == DMA_DEV_TO_MEM) {
609 desc->lld.mbr_sa = atchan->per_src_addr; 609 desc->lld.mbr_sa = atchan->per_src_addr;
610 desc->lld.mbr_da = mem; 610 desc->lld.mbr_da = mem;
611 cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG]; 611 desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
612 } else { 612 } else {
613 desc->lld.mbr_sa = mem; 613 desc->lld.mbr_sa = mem;
614 desc->lld.mbr_da = atchan->per_dst_addr; 614 desc->lld.mbr_da = atchan->per_dst_addr;
615 cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG]; 615 desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
616 } 616 }
617 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1 /* next descriptor view */ 617 dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
618 | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */ 618 fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
619 | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */ 619 ? at_xdmac_get_dwidth(desc->lld.mbr_cfg)
620 | (i == sg_len - 1 ? 0 : AT_XDMAC_MBR_UBC_NDE) /* descriptor fetch */ 620 : AT_XDMAC_CC_DWIDTH_BYTE;
621 | len / (1 << at_xdmac_get_dwidth(cfg)); /* microblock length */ 621 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */
622 | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */
623 | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */
624 | (i == sg_len - 1 ? 0 : AT_XDMAC_MBR_UBC_NDE) /* descriptor fetch */
625 | (len >> fixed_dwidth); /* microblock length */
622 dev_dbg(chan2dev(chan), 626 dev_dbg(chan2dev(chan),
623 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n", 627 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
624 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); 628 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
@@ -882,7 +886,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
882 enum dma_status ret; 886 enum dma_status ret;
883 int residue; 887 int residue;
884 u32 cur_nda, mask, value; 888 u32 cur_nda, mask, value;
885 u8 dwidth = at_xdmac_get_dwidth(atchan->cfg[AT_XDMAC_CUR_CFG]); 889 u8 dwidth = 0;
886 890
887 ret = dma_cookie_status(chan, cookie, txstate); 891 ret = dma_cookie_status(chan, cookie, txstate);
888 if (ret == DMA_COMPLETE) 892 if (ret == DMA_COMPLETE)
@@ -912,7 +916,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
912 */ 916 */
913 mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC; 917 mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
914 value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM; 918 value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
915 if ((atchan->cfg[AT_XDMAC_CUR_CFG] & mask) == value) { 919 if ((desc->lld.mbr_cfg & mask) == value) {
916 at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask); 920 at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
917 while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS)) 921 while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
918 cpu_relax(); 922 cpu_relax();
@@ -926,6 +930,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
926 */ 930 */
927 descs_list = &desc->descs_list; 931 descs_list = &desc->descs_list;
928 list_for_each_entry_safe(desc, _desc, descs_list, desc_node) { 932 list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
933 dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
929 residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth; 934 residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
930 if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda) 935 if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
931 break; 936 break;
@@ -1107,58 +1112,80 @@ static void at_xdmac_issue_pending(struct dma_chan *chan)
1107 return; 1112 return;
1108} 1113}
1109 1114
1110static int at_xdmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 1115static int at_xdmac_device_config(struct dma_chan *chan,
1111 unsigned long arg) 1116 struct dma_slave_config *config)
1117{
1118 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1119 int ret;
1120
1121 dev_dbg(chan2dev(chan), "%s\n", __func__);
1122
1123 spin_lock_bh(&atchan->lock);
1124 ret = at_xdmac_set_slave_config(chan, config);
1125 spin_unlock_bh(&atchan->lock);
1126
1127 return ret;
1128}
1129
1130static int at_xdmac_device_pause(struct dma_chan *chan)
1112{ 1131{
1113 struct at_xdmac_desc *desc, *_desc;
1114 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); 1132 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1115 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); 1133 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1116 int ret = 0;
1117 1134
1118 dev_dbg(chan2dev(chan), "%s: cmd=%d\n", __func__, cmd); 1135 dev_dbg(chan2dev(chan), "%s\n", __func__);
1136
1137 if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
1138 return 0;
1119 1139
1120 spin_lock_bh(&atchan->lock); 1140 spin_lock_bh(&atchan->lock);
1141 at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
1142 while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
1143 & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
1144 cpu_relax();
1145 spin_unlock_bh(&atchan->lock);
1121 1146
1122 switch (cmd) { 1147 return 0;
1123 case DMA_PAUSE: 1148}
1124 at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
1125 set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
1126 break;
1127 1149
1128 case DMA_RESUME: 1150static int at_xdmac_device_resume(struct dma_chan *chan)
1129 if (!at_xdmac_chan_is_paused(atchan)) 1151{
1130 break; 1152 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1153 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1131 1154
1132 at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask); 1155 dev_dbg(chan2dev(chan), "%s\n", __func__);
1133 clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
1134 break;
1135 1156
1136 case DMA_TERMINATE_ALL: 1157 spin_lock_bh(&atchan->lock);
1137 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); 1158 if (!at_xdmac_chan_is_paused(atchan))
1138 while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask) 1159 return 0;
1139 cpu_relax(); 1160
1161 at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
1162 clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
1163 spin_unlock_bh(&atchan->lock);
1140 1164
1141 /* Cancel all pending transfers. */ 1165 return 0;
1142 list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) 1166}
1143 at_xdmac_remove_xfer(atchan, desc); 1167
1168static int at_xdmac_device_terminate_all(struct dma_chan *chan)
1169{
1170 struct at_xdmac_desc *desc, *_desc;
1171 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1172 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1144 1173
1145 clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status); 1174 dev_dbg(chan2dev(chan), "%s\n", __func__);
1146 break;
1147 1175
1148 case DMA_SLAVE_CONFIG: 1176 spin_lock_bh(&atchan->lock);
1149 ret = at_xdmac_set_slave_config(chan, 1177 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1150 (struct dma_slave_config *)arg); 1178 while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
1151 break; 1179 cpu_relax();
1152 1180
1153 default: 1181 /* Cancel all pending transfers. */
1154 dev_err(chan2dev(chan), 1182 list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
1155 "unmanaged or unknown dma control cmd: %d\n", cmd); 1183 at_xdmac_remove_xfer(atchan, desc);
1156 ret = -ENXIO;
1157 }
1158 1184
1185 clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
1159 spin_unlock_bh(&atchan->lock); 1186 spin_unlock_bh(&atchan->lock);
1160 1187
1161 return ret; 1188 return 0;
1162} 1189}
1163 1190
1164static int at_xdmac_alloc_chan_resources(struct dma_chan *chan) 1191static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
@@ -1217,27 +1244,6 @@ static void at_xdmac_free_chan_resources(struct dma_chan *chan)
1217 return; 1244 return;
1218} 1245}
1219 1246
1220#define AT_XDMAC_DMA_BUSWIDTHS\
1221 (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
1222 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
1223 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
1224 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
1225 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
1226
1227static int at_xdmac_device_slave_caps(struct dma_chan *dchan,
1228 struct dma_slave_caps *caps)
1229{
1230
1231 caps->src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
1232 caps->dstn_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
1233 caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1234 caps->cmd_pause = true;
1235 caps->cmd_terminate = true;
1236 caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1237
1238 return 0;
1239}
1240
1241#ifdef CONFIG_PM 1247#ifdef CONFIG_PM
1242static int atmel_xdmac_prepare(struct device *dev) 1248static int atmel_xdmac_prepare(struct device *dev)
1243{ 1249{
@@ -1268,9 +1274,10 @@ static int atmel_xdmac_suspend(struct device *dev)
1268 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { 1274 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
1269 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); 1275 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1270 1276
1277 atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC);
1271 if (at_xdmac_chan_is_cyclic(atchan)) { 1278 if (at_xdmac_chan_is_cyclic(atchan)) {
1272 if (!at_xdmac_chan_is_paused(atchan)) 1279 if (!at_xdmac_chan_is_paused(atchan))
1273 at_xdmac_control(chan, DMA_PAUSE, 0); 1280 at_xdmac_device_pause(chan);
1274 atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM); 1281 atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
1275 atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA); 1282 atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
1276 atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC); 1283 atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
@@ -1290,7 +1297,6 @@ static int atmel_xdmac_resume(struct device *dev)
1290 struct at_xdmac_chan *atchan; 1297 struct at_xdmac_chan *atchan;
1291 struct dma_chan *chan, *_chan; 1298 struct dma_chan *chan, *_chan;
1292 int i; 1299 int i;
1293 u32 cfg;
1294 1300
1295 clk_prepare_enable(atxdmac->clk); 1301 clk_prepare_enable(atxdmac->clk);
1296 1302
@@ -1305,8 +1311,7 @@ static int atmel_xdmac_resume(struct device *dev)
1305 at_xdmac_write(atxdmac, AT_XDMAC_GE, atxdmac->save_gs); 1311 at_xdmac_write(atxdmac, AT_XDMAC_GE, atxdmac->save_gs);
1306 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { 1312 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
1307 atchan = to_at_xdmac_chan(chan); 1313 atchan = to_at_xdmac_chan(chan);
1308 cfg = atchan->cfg[AT_XDMAC_CUR_CFG]; 1314 at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
1309 at_xdmac_chan_write(atchan, AT_XDMAC_CC, cfg);
1310 if (at_xdmac_chan_is_cyclic(atchan)) { 1315 if (at_xdmac_chan_is_cyclic(atchan)) {
1311 at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda); 1316 at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
1312 at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc); 1317 at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
@@ -1407,8 +1412,14 @@ static int at_xdmac_probe(struct platform_device *pdev)
1407 atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic; 1412 atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic;
1408 atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy; 1413 atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy;
1409 atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg; 1414 atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg;
1410 atxdmac->dma.device_control = at_xdmac_control; 1415 atxdmac->dma.device_config = at_xdmac_device_config;
1411 atxdmac->dma.device_slave_caps = at_xdmac_device_slave_caps; 1416 atxdmac->dma.device_pause = at_xdmac_device_pause;
1417 atxdmac->dma.device_resume = at_xdmac_device_resume;
1418 atxdmac->dma.device_terminate_all = at_xdmac_device_terminate_all;
1419 atxdmac->dma.src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
1420 atxdmac->dma.dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
1421 atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1422 atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1412 1423
1413 /* Disable all chans and interrupts. */ 1424 /* Disable all chans and interrupts. */
1414 at_xdmac_off(atxdmac); 1425 at_xdmac_off(atxdmac);
@@ -1507,7 +1518,6 @@ static struct platform_driver at_xdmac_driver = {
1507 .remove = at_xdmac_remove, 1518 .remove = at_xdmac_remove,
1508 .driver = { 1519 .driver = {
1509 .name = "at_xdmac", 1520 .name = "at_xdmac",
1510 .owner = THIS_MODULE,
1511 .of_match_table = of_match_ptr(atmel_xdmac_dt_ids), 1521 .of_match_table = of_match_ptr(atmel_xdmac_dt_ids),
1512 .pm = &atmel_xdmac_dev_pm_ops, 1522 .pm = &atmel_xdmac_dev_pm_ops,
1513 } 1523 }
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 918b7b3f766f..0723096fb50a 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -436,9 +436,11 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
436 return vchan_tx_prep(&c->vc, &d->vd, flags); 436 return vchan_tx_prep(&c->vc, &d->vd, flags);
437} 437}
438 438
439static int bcm2835_dma_slave_config(struct bcm2835_chan *c, 439static int bcm2835_dma_slave_config(struct dma_chan *chan,
440 struct dma_slave_config *cfg) 440 struct dma_slave_config *cfg)
441{ 441{
442 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
443
442 if ((cfg->direction == DMA_DEV_TO_MEM && 444 if ((cfg->direction == DMA_DEV_TO_MEM &&
443 cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) || 445 cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
444 (cfg->direction == DMA_MEM_TO_DEV && 446 (cfg->direction == DMA_MEM_TO_DEV &&
@@ -452,8 +454,9 @@ static int bcm2835_dma_slave_config(struct bcm2835_chan *c,
452 return 0; 454 return 0;
453} 455}
454 456
455static int bcm2835_dma_terminate_all(struct bcm2835_chan *c) 457static int bcm2835_dma_terminate_all(struct dma_chan *chan)
456{ 458{
459 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
457 struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device); 460 struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
458 unsigned long flags; 461 unsigned long flags;
459 int timeout = 10000; 462 int timeout = 10000;
@@ -495,24 +498,6 @@ static int bcm2835_dma_terminate_all(struct bcm2835_chan *c)
495 return 0; 498 return 0;
496} 499}
497 500
498static int bcm2835_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
499 unsigned long arg)
500{
501 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
502
503 switch (cmd) {
504 case DMA_SLAVE_CONFIG:
505 return bcm2835_dma_slave_config(c,
506 (struct dma_slave_config *)arg);
507
508 case DMA_TERMINATE_ALL:
509 return bcm2835_dma_terminate_all(c);
510
511 default:
512 return -ENXIO;
513 }
514}
515
516static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq) 501static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq)
517{ 502{
518 struct bcm2835_chan *c; 503 struct bcm2835_chan *c;
@@ -565,18 +550,6 @@ static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
565 return chan; 550 return chan;
566} 551}
567 552
568static int bcm2835_dma_device_slave_caps(struct dma_chan *dchan,
569 struct dma_slave_caps *caps)
570{
571 caps->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
572 caps->dstn_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
573 caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
574 caps->cmd_pause = false;
575 caps->cmd_terminate = true;
576
577 return 0;
578}
579
580static int bcm2835_dma_probe(struct platform_device *pdev) 553static int bcm2835_dma_probe(struct platform_device *pdev)
581{ 554{
582 struct bcm2835_dmadev *od; 555 struct bcm2835_dmadev *od;
@@ -615,9 +588,12 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
615 od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources; 588 od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
616 od->ddev.device_tx_status = bcm2835_dma_tx_status; 589 od->ddev.device_tx_status = bcm2835_dma_tx_status;
617 od->ddev.device_issue_pending = bcm2835_dma_issue_pending; 590 od->ddev.device_issue_pending = bcm2835_dma_issue_pending;
618 od->ddev.device_slave_caps = bcm2835_dma_device_slave_caps;
619 od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic; 591 od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic;
620 od->ddev.device_control = bcm2835_dma_control; 592 od->ddev.device_config = bcm2835_dma_slave_config;
593 od->ddev.device_terminate_all = bcm2835_dma_terminate_all;
594 od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
595 od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
596 od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
621 od->ddev.dev = &pdev->dev; 597 od->ddev.dev = &pdev->dev;
622 INIT_LIST_HEAD(&od->ddev.channels); 598 INIT_LIST_HEAD(&od->ddev.channels);
623 spin_lock_init(&od->lock); 599 spin_lock_init(&od->lock);
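
As in the bcm2835 conversion above, the split callbacks receive the generic struct dma_chan * rather than the driver's private channel type, so the driver recovers its own state with container_of(). A sketch of that pattern, with hypothetical foo_* names:

#include <linux/dmaengine.h>
#include <linux/kernel.h>

struct foo_chan {
	struct dma_chan chan;		/* embedded generic channel */
	struct dma_slave_config cfg;	/* remembered for the next prep call */
};

static inline struct foo_chan *to_foo_chan(struct dma_chan *c)
{
	return container_of(c, struct foo_chan, chan);
}

static int foo_slave_config(struct dma_chan *chan,
			    struct dma_slave_config *cfg)
{
	struct foo_chan *fc = to_foo_chan(chan);

	fc->cfg = *cfg;		/* applied when the next transfer is prepared */
	return 0;
}
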
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index e88588d8ecd3..fd22dd36985f 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1690,7 +1690,7 @@ static u32 coh901318_get_bytes_left(struct dma_chan *chan)
1690 * Pauses a transfer without losing data. Enables power save. 1690 * Pauses a transfer without losing data. Enables power save.
1691 * Use this function in conjunction with coh901318_resume. 1691 * Use this function in conjunction with coh901318_resume.
1692 */ 1692 */
1693static void coh901318_pause(struct dma_chan *chan) 1693static int coh901318_pause(struct dma_chan *chan)
1694{ 1694{
1695 u32 val; 1695 u32 val;
1696 unsigned long flags; 1696 unsigned long flags;
@@ -1730,12 +1730,13 @@ static void coh901318_pause(struct dma_chan *chan)
1730 enable_powersave(cohc); 1730 enable_powersave(cohc);
1731 1731
1732 spin_unlock_irqrestore(&cohc->lock, flags); 1732 spin_unlock_irqrestore(&cohc->lock, flags);
1733 return 0;
1733} 1734}
1734 1735
1735/* Resumes a transfer that has been stopped via 300_dma_stop(..). 1736/* Resumes a transfer that has been stopped via 300_dma_stop(..).
1736 Power save is handled. 1737 Power save is handled.
1737*/ 1738*/
1738static void coh901318_resume(struct dma_chan *chan) 1739static int coh901318_resume(struct dma_chan *chan)
1739{ 1740{
1740 u32 val; 1741 u32 val;
1741 unsigned long flags; 1742 unsigned long flags;
@@ -1760,6 +1761,7 @@ static void coh901318_resume(struct dma_chan *chan)
1760 } 1761 }
1761 1762
1762 spin_unlock_irqrestore(&cohc->lock, flags); 1763 spin_unlock_irqrestore(&cohc->lock, flags);
1764 return 0;
1763} 1765}
1764 1766
1765bool coh901318_filter_id(struct dma_chan *chan, void *chan_id) 1767bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
@@ -2114,6 +2116,57 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id)
2114 return IRQ_HANDLED; 2116 return IRQ_HANDLED;
2115} 2117}
2116 2118
2119static int coh901318_terminate_all(struct dma_chan *chan)
2120{
2121 unsigned long flags;
2122 struct coh901318_chan *cohc = to_coh901318_chan(chan);
2123 struct coh901318_desc *cohd;
2124 void __iomem *virtbase = cohc->base->virtbase;
2125
2126 /* The remainder of this function terminates the transfer */
2127 coh901318_pause(chan);
2128 spin_lock_irqsave(&cohc->lock, flags);
2129
2130 /* Clear any pending BE or TC interrupt */
2131 if (cohc->id < 32) {
2132 writel(1 << cohc->id, virtbase + COH901318_BE_INT_CLEAR1);
2133 writel(1 << cohc->id, virtbase + COH901318_TC_INT_CLEAR1);
2134 } else {
2135 writel(1 << (cohc->id - 32), virtbase +
2136 COH901318_BE_INT_CLEAR2);
2137 writel(1 << (cohc->id - 32), virtbase +
2138 COH901318_TC_INT_CLEAR2);
2139 }
2140
2141 enable_powersave(cohc);
2142
2143 while ((cohd = coh901318_first_active_get(cohc))) {
2144 /* release the lli allocation*/
2145 coh901318_lli_free(&cohc->base->pool, &cohd->lli);
2146
2147 /* return desc to free-list */
2148 coh901318_desc_remove(cohd);
2149 coh901318_desc_free(cohc, cohd);
2150 }
2151
2152 while ((cohd = coh901318_first_queued(cohc))) {
2153 /* release the lli allocation*/
2154 coh901318_lli_free(&cohc->base->pool, &cohd->lli);
2155
2156 /* return desc to free-list */
2157 coh901318_desc_remove(cohd);
2158 coh901318_desc_free(cohc, cohd);
2159 }
2160
2161
2162 cohc->nbr_active_done = 0;
2163 cohc->busy = 0;
2164
2165 spin_unlock_irqrestore(&cohc->lock, flags);
2166
2167 return 0;
2168}
2169
2117static int coh901318_alloc_chan_resources(struct dma_chan *chan) 2170static int coh901318_alloc_chan_resources(struct dma_chan *chan)
2118{ 2171{
2119 struct coh901318_chan *cohc = to_coh901318_chan(chan); 2172 struct coh901318_chan *cohc = to_coh901318_chan(chan);
@@ -2156,7 +2209,7 @@ coh901318_free_chan_resources(struct dma_chan *chan)
2156 2209
2157 spin_unlock_irqrestore(&cohc->lock, flags); 2210 spin_unlock_irqrestore(&cohc->lock, flags);
2158 2211
2159 dmaengine_terminate_all(chan); 2212 coh901318_terminate_all(chan);
2160} 2213}
2161 2214
2162 2215
@@ -2461,8 +2514,8 @@ static const struct burst_table burst_sizes[] = {
2461 }, 2514 },
2462}; 2515};
2463 2516
2464static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan, 2517static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
2465 struct dma_slave_config *config) 2518 struct dma_slave_config *config)
2466{ 2519{
2467 struct coh901318_chan *cohc = to_coh901318_chan(chan); 2520 struct coh901318_chan *cohc = to_coh901318_chan(chan);
2468 dma_addr_t addr; 2521 dma_addr_t addr;
@@ -2482,7 +2535,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
2482 maxburst = config->dst_maxburst; 2535 maxburst = config->dst_maxburst;
2483 } else { 2536 } else {
2484 dev_err(COHC_2_DEV(cohc), "illegal channel mode\n"); 2537 dev_err(COHC_2_DEV(cohc), "illegal channel mode\n");
2485 return; 2538 return -EINVAL;
2486 } 2539 }
2487 2540
2488 dev_dbg(COHC_2_DEV(cohc), "configure channel for %d byte transfers\n", 2541 dev_dbg(COHC_2_DEV(cohc), "configure channel for %d byte transfers\n",
@@ -2528,7 +2581,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
2528 default: 2581 default:
2529 dev_err(COHC_2_DEV(cohc), 2582 dev_err(COHC_2_DEV(cohc),
2530 "bad runtimeconfig: alien address width\n"); 2583 "bad runtimeconfig: alien address width\n");
2531 return; 2584 return -EINVAL;
2532 } 2585 }
2533 2586
2534 ctrl |= burst_sizes[i].reg; 2587 ctrl |= burst_sizes[i].reg;
@@ -2538,84 +2591,12 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
2538 2591
2539 cohc->addr = addr; 2592 cohc->addr = addr;
2540 cohc->ctrl = ctrl; 2593 cohc->ctrl = ctrl;
2541}
2542
2543static int
2544coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2545 unsigned long arg)
2546{
2547 unsigned long flags;
2548 struct coh901318_chan *cohc = to_coh901318_chan(chan);
2549 struct coh901318_desc *cohd;
2550 void __iomem *virtbase = cohc->base->virtbase;
2551
2552 if (cmd == DMA_SLAVE_CONFIG) {
2553 struct dma_slave_config *config =
2554 (struct dma_slave_config *) arg;
2555
2556 coh901318_dma_set_runtimeconfig(chan, config);
2557 return 0;
2558 }
2559
2560 if (cmd == DMA_PAUSE) {
2561 coh901318_pause(chan);
2562 return 0;
2563 }
2564
2565 if (cmd == DMA_RESUME) {
2566 coh901318_resume(chan);
2567 return 0;
2568 }
2569
2570 if (cmd != DMA_TERMINATE_ALL)
2571 return -ENXIO;
2572
2573 /* The remainder of this function terminates the transfer */
2574 coh901318_pause(chan);
2575 spin_lock_irqsave(&cohc->lock, flags);
2576
2577 /* Clear any pending BE or TC interrupt */
2578 if (cohc->id < 32) {
2579 writel(1 << cohc->id, virtbase + COH901318_BE_INT_CLEAR1);
2580 writel(1 << cohc->id, virtbase + COH901318_TC_INT_CLEAR1);
2581 } else {
2582 writel(1 << (cohc->id - 32), virtbase +
2583 COH901318_BE_INT_CLEAR2);
2584 writel(1 << (cohc->id - 32), virtbase +
2585 COH901318_TC_INT_CLEAR2);
2586 }
2587
2588 enable_powersave(cohc);
2589
2590 while ((cohd = coh901318_first_active_get(cohc))) {
2591 /* release the lli allocation*/
2592 coh901318_lli_free(&cohc->base->pool, &cohd->lli);
2593
2594 /* return desc to free-list */
2595 coh901318_desc_remove(cohd);
2596 coh901318_desc_free(cohc, cohd);
2597 }
2598
2599 while ((cohd = coh901318_first_queued(cohc))) {
2600 /* release the lli allocation*/
2601 coh901318_lli_free(&cohc->base->pool, &cohd->lli);
2602
2603 /* return desc to free-list */
2604 coh901318_desc_remove(cohd);
2605 coh901318_desc_free(cohc, cohd);
2606 }
2607
2608
2609 cohc->nbr_active_done = 0;
2610 cohc->busy = 0;
2611
2612 spin_unlock_irqrestore(&cohc->lock, flags);
2613 2594
2614 return 0; 2595 return 0;
2615} 2596}
2616 2597
2617void coh901318_base_init(struct dma_device *dma, const int *pick_chans, 2598static void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
2618 struct coh901318_base *base) 2599 struct coh901318_base *base)
2619{ 2600{
2620 int chans_i; 2601 int chans_i;
2621 int i = 0; 2602 int i = 0;
@@ -2717,7 +2698,10 @@ static int __init coh901318_probe(struct platform_device *pdev)
2717 base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg; 2698 base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
2718 base->dma_slave.device_tx_status = coh901318_tx_status; 2699 base->dma_slave.device_tx_status = coh901318_tx_status;
2719 base->dma_slave.device_issue_pending = coh901318_issue_pending; 2700 base->dma_slave.device_issue_pending = coh901318_issue_pending;
2720 base->dma_slave.device_control = coh901318_control; 2701 base->dma_slave.device_config = coh901318_dma_set_runtimeconfig;
2702 base->dma_slave.device_pause = coh901318_pause;
2703 base->dma_slave.device_resume = coh901318_resume;
2704 base->dma_slave.device_terminate_all = coh901318_terminate_all;
2721 base->dma_slave.dev = &pdev->dev; 2705 base->dma_slave.dev = &pdev->dev;
2722 2706
2723 err = dma_async_device_register(&base->dma_slave); 2707 err = dma_async_device_register(&base->dma_slave);
@@ -2737,7 +2721,10 @@ static int __init coh901318_probe(struct platform_device *pdev)
2737 base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy; 2721 base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
2738 base->dma_memcpy.device_tx_status = coh901318_tx_status; 2722 base->dma_memcpy.device_tx_status = coh901318_tx_status;
2739 base->dma_memcpy.device_issue_pending = coh901318_issue_pending; 2723 base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
2740 base->dma_memcpy.device_control = coh901318_control; 2724 base->dma_memcpy.device_config = coh901318_dma_set_runtimeconfig;
2725 base->dma_memcpy.device_pause = coh901318_pause;
2726 base->dma_memcpy.device_resume = coh901318_resume;
2727 base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
2741 base->dma_memcpy.dev = &pdev->dev; 2728 base->dma_memcpy.dev = &pdev->dev;
2742 /* 2729 /*
2743 * This controller can only access address at even 32bit boundaries, 2730 * This controller can only access address at even 32bit boundaries,
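
The new coh901318_terminate_all() above follows a shape many converted drivers share: pause the hardware first, then drop every active and queued descriptor under the channel lock, without running completion callbacks. Below is a condensed sketch of that shape; the foo_* types and helpers are hypothetical stand-ins, not this driver's code.

#include <linux/dmaengine.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo_desc {
	struct list_head node;
};

struct foo_chan {
	struct dma_chan chan;
	spinlock_t lock;
	struct list_head active;	/* descriptors handed to the hardware */
	struct list_head queued;	/* descriptors not yet started */
};

/* Stubs for the driver's own pause and descriptor-release helpers. */
static int foo_pause(struct dma_chan *chan) { return 0; }
static void foo_desc_free(struct foo_desc *d) { kfree(d); }

static int foo_terminate_all(struct dma_chan *chan)
{
	struct foo_chan *fc = container_of(chan, struct foo_chan, chan);
	struct foo_desc *d, *tmp;
	unsigned long flags;

	foo_pause(chan);			/* quiesce the hardware first */

	spin_lock_irqsave(&fc->lock, flags);
	list_for_each_entry_safe(d, tmp, &fc->active, node) {
		list_del(&d->node);
		foo_desc_free(d);		/* completion callbacks are not run */
	}
	list_for_each_entry_safe(d, tmp, &fc->queued, node) {
		list_del(&d->node);
		foo_desc_free(d);
	}
	spin_unlock_irqrestore(&fc->lock, flags);

	return 0;
}
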
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index b743adf56465..512cb8e2805e 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -525,12 +525,6 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
525 return &c->txd; 525 return &c->txd;
526} 526}
527 527
528static int cpp41_cfg_chan(struct cppi41_channel *c,
529 struct dma_slave_config *cfg)
530{
531 return 0;
532}
533
534static void cppi41_compute_td_desc(struct cppi41_desc *d) 528static void cppi41_compute_td_desc(struct cppi41_desc *d)
535{ 529{
536 d->pd0 = DESC_TYPE_TEARD << DESC_TYPE; 530 d->pd0 = DESC_TYPE_TEARD << DESC_TYPE;
@@ -647,28 +641,6 @@ static int cppi41_stop_chan(struct dma_chan *chan)
647 return 0; 641 return 0;
648} 642}
649 643
650static int cppi41_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
651 unsigned long arg)
652{
653 struct cppi41_channel *c = to_cpp41_chan(chan);
654 int ret;
655
656 switch (cmd) {
657 case DMA_SLAVE_CONFIG:
658 ret = cpp41_cfg_chan(c, (struct dma_slave_config *) arg);
659 break;
660
661 case DMA_TERMINATE_ALL:
662 ret = cppi41_stop_chan(chan);
663 break;
664
665 default:
666 ret = -ENXIO;
667 break;
668 }
669 return ret;
670}
671
672static void cleanup_chans(struct cppi41_dd *cdd) 644static void cleanup_chans(struct cppi41_dd *cdd)
673{ 645{
674 while (!list_empty(&cdd->ddev.channels)) { 646 while (!list_empty(&cdd->ddev.channels)) {
@@ -953,7 +925,7 @@ static int cppi41_dma_probe(struct platform_device *pdev)
953 cdd->ddev.device_tx_status = cppi41_dma_tx_status; 925 cdd->ddev.device_tx_status = cppi41_dma_tx_status;
954 cdd->ddev.device_issue_pending = cppi41_dma_issue_pending; 926 cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
955 cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg; 927 cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
956 cdd->ddev.device_control = cppi41_dma_control; 928 cdd->ddev.device_terminate_all = cppi41_stop_chan;
957 cdd->ddev.dev = dev; 929 cdd->ddev.dev = dev;
958 INIT_LIST_HEAD(&cdd->ddev.channels); 930 INIT_LIST_HEAD(&cdd->ddev.channels);
959 cpp41_dma_info.dma_cap = cdd->ddev.cap_mask; 931 cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index bdeafeefa5f6..4527a3ebeac4 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -210,7 +210,7 @@ static enum jz4740_dma_transfer_size jz4740_dma_maxburst(u32 maxburst)
210} 210}
211 211
212static int jz4740_dma_slave_config(struct dma_chan *c, 212static int jz4740_dma_slave_config(struct dma_chan *c,
213 const struct dma_slave_config *config) 213 struct dma_slave_config *config)
214{ 214{
215 struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); 215 struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
216 struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan); 216 struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
@@ -290,21 +290,6 @@ static int jz4740_dma_terminate_all(struct dma_chan *c)
290 return 0; 290 return 0;
291} 291}
292 292
293static int jz4740_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
294 unsigned long arg)
295{
296 struct dma_slave_config *config = (struct dma_slave_config *)arg;
297
298 switch (cmd) {
299 case DMA_SLAVE_CONFIG:
300 return jz4740_dma_slave_config(chan, config);
301 case DMA_TERMINATE_ALL:
302 return jz4740_dma_terminate_all(chan);
303 default:
304 return -ENOSYS;
305 }
306}
307
308static int jz4740_dma_start_transfer(struct jz4740_dmaengine_chan *chan) 293static int jz4740_dma_start_transfer(struct jz4740_dmaengine_chan *chan)
309{ 294{
310 struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan); 295 struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
@@ -561,7 +546,8 @@ static int jz4740_dma_probe(struct platform_device *pdev)
561 dd->device_issue_pending = jz4740_dma_issue_pending; 546 dd->device_issue_pending = jz4740_dma_issue_pending;
562 dd->device_prep_slave_sg = jz4740_dma_prep_slave_sg; 547 dd->device_prep_slave_sg = jz4740_dma_prep_slave_sg;
563 dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic; 548 dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic;
564 dd->device_control = jz4740_dma_control; 549 dd->device_config = jz4740_dma_slave_config;
550 dd->device_terminate_all = jz4740_dma_terminate_all;
565 dd->dev = &pdev->dev; 551 dd->dev = &pdev->dev;
566 INIT_LIST_HEAD(&dd->channels); 552 INIT_LIST_HEAD(&dd->channels);
567 553
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index e057935e3023..f15712f2fec6 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -222,31 +222,35 @@ static void balance_ref_count(struct dma_chan *chan)
222 */ 222 */
223static int dma_chan_get(struct dma_chan *chan) 223static int dma_chan_get(struct dma_chan *chan)
224{ 224{
225 int err = -ENODEV;
226 struct module *owner = dma_chan_to_owner(chan); 225 struct module *owner = dma_chan_to_owner(chan);
226 int ret;
227 227
228 /* The channel is already in use, update client count */
228 if (chan->client_count) { 229 if (chan->client_count) {
229 __module_get(owner); 230 __module_get(owner);
230 err = 0; 231 goto out;
231 } else if (try_module_get(owner)) 232 }
232 err = 0;
233 233
234 if (err == 0) 234 if (!try_module_get(owner))
235 chan->client_count++; 235 return -ENODEV;
236 236
237 /* allocate upon first client reference */ 237 /* allocate upon first client reference */
238 if (chan->client_count == 1 && err == 0) { 238 if (chan->device->device_alloc_chan_resources) {
239 int desc_cnt = chan->device->device_alloc_chan_resources(chan); 239 ret = chan->device->device_alloc_chan_resources(chan);
240 240 if (ret < 0)
241 if (desc_cnt < 0) { 241 goto err_out;
242 err = desc_cnt;
243 chan->client_count = 0;
244 module_put(owner);
245 } else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
246 balance_ref_count(chan);
247 } 242 }
248 243
249 return err; 244 if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
245 balance_ref_count(chan);
246
247out:
248 chan->client_count++;
249 return 0;
250
251err_out:
252 module_put(owner);
253 return ret;
250} 254}
251 255
252/** 256/**
@@ -257,11 +261,15 @@ static int dma_chan_get(struct dma_chan *chan)
257 */ 261 */
258static void dma_chan_put(struct dma_chan *chan) 262static void dma_chan_put(struct dma_chan *chan)
259{ 263{
264 /* This channel is not in use, bail out */
260 if (!chan->client_count) 265 if (!chan->client_count)
261 return; /* this channel failed alloc_chan_resources */ 266 return;
267
262 chan->client_count--; 268 chan->client_count--;
263 module_put(dma_chan_to_owner(chan)); 269 module_put(dma_chan_to_owner(chan));
264 if (chan->client_count == 0) 270
271 /* This channel is not in use anymore, free it */
272 if (!chan->client_count && chan->device->device_free_chan_resources)
265 chan->device->device_free_chan_resources(chan); 273 chan->device->device_free_chan_resources(chan);
266} 274}
267 275
@@ -471,6 +479,39 @@ static void dma_channel_rebalance(void)
471 } 479 }
472} 480}
473 481
482int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
483{
484 struct dma_device *device;
485
486 if (!chan || !caps)
487 return -EINVAL;
488
489 device = chan->device;
490
491 /* check if the channel supports slave transactions */
492 if (!test_bit(DMA_SLAVE, device->cap_mask.bits))
493 return -ENXIO;
494
495 /*
496 * Check whether it reports it uses the generic slave
497 * capabilities, if not, that means it doesn't support any
498 * kind of slave capabilities reporting.
499 */
500 if (!device->directions)
501 return -ENXIO;
502
503 caps->src_addr_widths = device->src_addr_widths;
504 caps->dst_addr_widths = device->dst_addr_widths;
505 caps->directions = device->directions;
506 caps->residue_granularity = device->residue_granularity;
507
508 caps->cmd_pause = !!device->device_pause;
509 caps->cmd_terminate = !!device->device_terminate_all;
510
511 return 0;
512}
513EXPORT_SYMBOL_GPL(dma_get_slave_caps);
514
474static struct dma_chan *private_candidate(const dma_cap_mask_t *mask, 515static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
475 struct dma_device *dev, 516 struct dma_device *dev,
476 dma_filter_fn fn, void *fn_param) 517 dma_filter_fn fn, void *fn_param)
@@ -811,17 +852,16 @@ int dma_async_device_register(struct dma_device *device)
811 !device->device_prep_dma_sg); 852 !device->device_prep_dma_sg);
812 BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) && 853 BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
813 !device->device_prep_dma_cyclic); 854 !device->device_prep_dma_cyclic);
814 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
815 !device->device_control);
816 BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && 855 BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
817 !device->device_prep_interleaved_dma); 856 !device->device_prep_interleaved_dma);
818 857
819 BUG_ON(!device->device_alloc_chan_resources);
820 BUG_ON(!device->device_free_chan_resources);
821 BUG_ON(!device->device_tx_status); 858 BUG_ON(!device->device_tx_status);
822 BUG_ON(!device->device_issue_pending); 859 BUG_ON(!device->device_issue_pending);
823 BUG_ON(!device->dev); 860 BUG_ON(!device->dev);
824 861
862 WARN(dma_has_cap(DMA_SLAVE, device->cap_mask) && !device->directions,
863 "this driver doesn't support generic slave capabilities reporting\n");
864
825 /* note: this only matters in the 865 /* note: this only matters in the
826 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case 866 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
827 */ 867 */
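
The new dma_get_slave_caps() export above gives client drivers one uniform way to discover what a channel supports before configuring it. A consumer-side sketch follows; the function name and the particular requirements it checks are illustrative only.

#include <linux/bitops.h>
#include <linux/dmaengine.h>

static int foo_client_check_chan(struct dma_chan *chan)
{
	struct dma_slave_caps caps;
	int ret;

	ret = dma_get_slave_caps(chan, &caps);
	if (ret)
		return ret;	/* provider does not report generic capabilities */

	/* This hypothetical client needs 4-byte device writes and pause support. */
	if (!(caps.dst_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)))
		return -EINVAL;
	if (!caps.cmd_pause)
		return -EINVAL;

	return 0;
}
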
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index a8d7809e2f4c..220ee49633e4 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -349,14 +349,14 @@ static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
349 unsigned long data) 349 unsigned long data)
350{ 350{
351 pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n", 351 pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
352 current->comm, n, err, src_off, dst_off, len, data); 352 current->comm, n, err, src_off, dst_off, len, data);
353} 353}
354 354
355#define verbose_result(err, n, src_off, dst_off, len, data) ({ \ 355#define verbose_result(err, n, src_off, dst_off, len, data) ({ \
356 if (verbose) \ 356 if (verbose) \
357 result(err, n, src_off, dst_off, len, data); \ 357 result(err, n, src_off, dst_off, len, data); \
358 else \ 358 else \
359 dbg_result(err, n, src_off, dst_off, len, data); \ 359 dbg_result(err, n, src_off, dst_off, len, data);\
360}) 360})
361 361
362static unsigned long long dmatest_persec(s64 runtime, unsigned int val) 362static unsigned long long dmatest_persec(s64 runtime, unsigned int val)
@@ -405,7 +405,6 @@ static int dmatest_func(void *data)
405 struct dmatest_params *params; 405 struct dmatest_params *params;
406 struct dma_chan *chan; 406 struct dma_chan *chan;
407 struct dma_device *dev; 407 struct dma_device *dev;
408 unsigned int src_off, dst_off, len;
409 unsigned int error_count; 408 unsigned int error_count;
410 unsigned int failed_tests = 0; 409 unsigned int failed_tests = 0;
411 unsigned int total_tests = 0; 410 unsigned int total_tests = 0;
@@ -484,6 +483,7 @@ static int dmatest_func(void *data)
484 struct dmaengine_unmap_data *um; 483 struct dmaengine_unmap_data *um;
485 dma_addr_t srcs[src_cnt]; 484 dma_addr_t srcs[src_cnt];
486 dma_addr_t *dsts; 485 dma_addr_t *dsts;
486 unsigned int src_off, dst_off, len;
487 u8 align = 0; 487 u8 align = 0;
488 488
489 total_tests++; 489 total_tests++;
@@ -502,15 +502,21 @@ static int dmatest_func(void *data)
502 break; 502 break;
503 } 503 }
504 504
505 if (params->noverify) { 505 if (params->noverify)
506 len = params->buf_size; 506 len = params->buf_size;
507 else
508 len = dmatest_random() % params->buf_size + 1;
509
510 len = (len >> align) << align;
511 if (!len)
512 len = 1 << align;
513
514 total_len += len;
515
516 if (params->noverify) {
507 src_off = 0; 517 src_off = 0;
508 dst_off = 0; 518 dst_off = 0;
509 } else { 519 } else {
510 len = dmatest_random() % params->buf_size + 1;
511 len = (len >> align) << align;
512 if (!len)
513 len = 1 << align;
514 src_off = dmatest_random() % (params->buf_size - len + 1); 520 src_off = dmatest_random() % (params->buf_size - len + 1);
515 dst_off = dmatest_random() % (params->buf_size - len + 1); 521 dst_off = dmatest_random() % (params->buf_size - len + 1);
516 522
@@ -523,11 +529,6 @@ static int dmatest_func(void *data)
523 params->buf_size); 529 params->buf_size);
524 } 530 }
525 531
526 len = (len >> align) << align;
527 if (!len)
528 len = 1 << align;
529 total_len += len;
530
531 um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt, 532 um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt,
532 GFP_KERNEL); 533 GFP_KERNEL);
533 if (!um) { 534 if (!um) {
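
The dmatest hunk above folds the duplicated length rounding into one place, before the offsets are chosen, so total_len is accumulated identically in the noverify and verify paths. The rounding itself just clears the low bits and enforces a minimum, roughly as in this illustrative helper:

#include <linux/types.h>

/* Round a requested length down to a 2^align boundary, never below one unit. */
static unsigned int round_len(unsigned int len, u8 align)
{
	len = (len >> align) << align;	/* clear the low 'align' bits */
	if (!len)
		len = 1 << align;	/* do not collapse to a zero-length transfer */
	return len;
}
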
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 5c062548957c..455b7a4f1e87 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -61,6 +61,13 @@
61 */ 61 */
62#define NR_DESCS_PER_CHANNEL 64 62#define NR_DESCS_PER_CHANNEL 64
63 63
64/* The set of bus widths supported by the DMA controller */
65#define DW_DMA_BUSWIDTHS \
66 BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
67 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
68 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
69 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
70
64/*----------------------------------------------------------------------*/ 71/*----------------------------------------------------------------------*/
65 72
66static struct device *chan2dev(struct dma_chan *chan) 73static struct device *chan2dev(struct dma_chan *chan)
@@ -955,8 +962,7 @@ static inline void convert_burst(u32 *maxburst)
955 *maxburst = 0; 962 *maxburst = 0;
956} 963}
957 964
958static int 965static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
959set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
960{ 966{
961 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 967 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
962 968
@@ -973,16 +979,25 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
973 return 0; 979 return 0;
974} 980}
975 981
976static inline void dwc_chan_pause(struct dw_dma_chan *dwc) 982static int dwc_pause(struct dma_chan *chan)
977{ 983{
978 u32 cfglo = channel_readl(dwc, CFG_LO); 984 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
979 unsigned int count = 20; /* timeout iterations */ 985 unsigned long flags;
986 unsigned int count = 20; /* timeout iterations */
987 u32 cfglo;
988
989 spin_lock_irqsave(&dwc->lock, flags);
980 990
991 cfglo = channel_readl(dwc, CFG_LO);
981 channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); 992 channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
982 while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) 993 while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
983 udelay(2); 994 udelay(2);
984 995
985 dwc->paused = true; 996 dwc->paused = true;
997
998 spin_unlock_irqrestore(&dwc->lock, flags);
999
1000 return 0;
986} 1001}
987 1002
988static inline void dwc_chan_resume(struct dw_dma_chan *dwc) 1003static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
@@ -994,53 +1009,48 @@ static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
994 dwc->paused = false; 1009 dwc->paused = false;
995} 1010}
996 1011
997static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 1012static int dwc_resume(struct dma_chan *chan)
998 unsigned long arg)
999{ 1013{
1000 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1014 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1001 struct dw_dma *dw = to_dw_dma(chan->device);
1002 struct dw_desc *desc, *_desc;
1003 unsigned long flags; 1015 unsigned long flags;
1004 LIST_HEAD(list);
1005 1016
1006 if (cmd == DMA_PAUSE) { 1017 if (!dwc->paused)
1007 spin_lock_irqsave(&dwc->lock, flags); 1018 return 0;
1008 1019
1009 dwc_chan_pause(dwc); 1020 spin_lock_irqsave(&dwc->lock, flags);
1010 1021
1011 spin_unlock_irqrestore(&dwc->lock, flags); 1022 dwc_chan_resume(dwc);
1012 } else if (cmd == DMA_RESUME) {
1013 if (!dwc->paused)
1014 return 0;
1015 1023
1016 spin_lock_irqsave(&dwc->lock, flags); 1024 spin_unlock_irqrestore(&dwc->lock, flags);
1017 1025
1018 dwc_chan_resume(dwc); 1026 return 0;
1027}
1019 1028
1020 spin_unlock_irqrestore(&dwc->lock, flags); 1029static int dwc_terminate_all(struct dma_chan *chan)
1021 } else if (cmd == DMA_TERMINATE_ALL) { 1030{
1022 spin_lock_irqsave(&dwc->lock, flags); 1031 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1032 struct dw_dma *dw = to_dw_dma(chan->device);
1033 struct dw_desc *desc, *_desc;
1034 unsigned long flags;
1035 LIST_HEAD(list);
1023 1036
1024 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); 1037 spin_lock_irqsave(&dwc->lock, flags);
1025 1038
1026 dwc_chan_disable(dw, dwc); 1039 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
1040
1041 dwc_chan_disable(dw, dwc);
1027 1042
1028 dwc_chan_resume(dwc); 1043 dwc_chan_resume(dwc);
1029 1044
1030 /* active_list entries will end up before queued entries */ 1045 /* active_list entries will end up before queued entries */
1031 list_splice_init(&dwc->queue, &list); 1046 list_splice_init(&dwc->queue, &list);
1032 list_splice_init(&dwc->active_list, &list); 1047 list_splice_init(&dwc->active_list, &list);
1033 1048
1034 spin_unlock_irqrestore(&dwc->lock, flags); 1049 spin_unlock_irqrestore(&dwc->lock, flags);
1035 1050
1036 /* Flush all pending and queued descriptors */ 1051 /* Flush all pending and queued descriptors */
1037 list_for_each_entry_safe(desc, _desc, &list, desc_node) 1052 list_for_each_entry_safe(desc, _desc, &list, desc_node)
1038 dwc_descriptor_complete(dwc, desc, false); 1053 dwc_descriptor_complete(dwc, desc, false);
1039 } else if (cmd == DMA_SLAVE_CONFIG) {
1040 return set_runtime_config(chan, (struct dma_slave_config *)arg);
1041 } else {
1042 return -ENXIO;
1043 }
1044 1054
1045 return 0; 1055 return 0;
1046} 1056}
@@ -1551,7 +1561,8 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
1551 } 1561 }
1552 } else { 1562 } else {
1553 dw->nr_masters = pdata->nr_masters; 1563 dw->nr_masters = pdata->nr_masters;
1554 memcpy(dw->data_width, pdata->data_width, 4); 1564 for (i = 0; i < dw->nr_masters; i++)
1565 dw->data_width[i] = pdata->data_width[i];
1555 } 1566 }
1556 1567
1557 /* Calculate all channel mask before DMA setup */ 1568 /* Calculate all channel mask before DMA setup */
@@ -1656,13 +1667,23 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
1656 dw->dma.device_free_chan_resources = dwc_free_chan_resources; 1667 dw->dma.device_free_chan_resources = dwc_free_chan_resources;
1657 1668
1658 dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; 1669 dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
1659
1660 dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; 1670 dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
1661 dw->dma.device_control = dwc_control; 1671
1672 dw->dma.device_config = dwc_config;
1673 dw->dma.device_pause = dwc_pause;
1674 dw->dma.device_resume = dwc_resume;
1675 dw->dma.device_terminate_all = dwc_terminate_all;
1662 1676
1663 dw->dma.device_tx_status = dwc_tx_status; 1677 dw->dma.device_tx_status = dwc_tx_status;
1664 dw->dma.device_issue_pending = dwc_issue_pending; 1678 dw->dma.device_issue_pending = dwc_issue_pending;
1665 1679
1680 /* DMA capabilities */
1681 dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS;
1682 dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS;
1683 dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
1684 BIT(DMA_MEM_TO_MEM);
1685 dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1686
1666 err = dma_async_device_register(&dw->dma); 1687 err = dma_async_device_register(&dw->dma);
1667 if (err) 1688 if (err)
1668 goto err_dma_register; 1689 goto err_dma_register;
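
dwc_pause() above wraps the old suspend-and-drain helper in the channel lock and keeps its bounded poll for FIFO-empty (20 tries, 2us apart) rather than waiting indefinitely. A generic sketch of that bounded-poll pattern; the register argument, readiness bit and 2us spacing are assumptions for illustration, not the dw_dmac layout.

#include <linux/delay.h>
#include <linux/io.h>

/* Poll @reg until @ready_bit is set, at most @tries times, 2us apart. */
static bool foo_poll_ready(void __iomem *reg, u32 ready_bit, unsigned int tries)
{
	while (tries--) {
		if (readl(reg) & ready_bit)
			return true;
		udelay(2);
	}
	return false;	/* the caller decides whether timing out is fatal */
}
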
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 32ea1aca7a0e..6565a361e7e5 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -100,7 +100,7 @@ dw_dma_parse_dt(struct platform_device *pdev)
100{ 100{
101 struct device_node *np = pdev->dev.of_node; 101 struct device_node *np = pdev->dev.of_node;
102 struct dw_dma_platform_data *pdata; 102 struct dw_dma_platform_data *pdata;
103 u32 tmp, arr[4]; 103 u32 tmp, arr[DW_DMA_MAX_NR_MASTERS];
104 104
105 if (!np) { 105 if (!np) {
106 dev_err(&pdev->dev, "Missing DT data\n"); 106 dev_err(&pdev->dev, "Missing DT data\n");
@@ -127,7 +127,7 @@ dw_dma_parse_dt(struct platform_device *pdev)
127 pdata->block_size = tmp; 127 pdata->block_size = tmp;
128 128
129 if (!of_property_read_u32(np, "dma-masters", &tmp)) { 129 if (!of_property_read_u32(np, "dma-masters", &tmp)) {
130 if (tmp > 4) 130 if (tmp > DW_DMA_MAX_NR_MASTERS)
131 return NULL; 131 return NULL;
132 132
133 pdata->nr_masters = tmp; 133 pdata->nr_masters = tmp;
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index 848e232f7cc7..241ff2b1402b 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -252,7 +252,7 @@ struct dw_dma_chan {
252 u8 src_master; 252 u8 src_master;
253 u8 dst_master; 253 u8 dst_master;
254 254
255 /* configuration passed via DMA_SLAVE_CONFIG */ 255 /* configuration passed via .device_config */
256 struct dma_slave_config dma_sconfig; 256 struct dma_slave_config dma_sconfig;
257}; 257};
258 258
@@ -285,7 +285,7 @@ struct dw_dma {
285 285
286 /* hardware configuration */ 286 /* hardware configuration */
287 unsigned char nr_masters; 287 unsigned char nr_masters;
288 unsigned char data_width[4]; 288 unsigned char data_width[DW_DMA_MAX_NR_MASTERS];
289}; 289};
290 290
291static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw) 291static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index b969206439b7..276157f22612 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -15,6 +15,7 @@
15 15
16#include <linux/dmaengine.h> 16#include <linux/dmaengine.h>
17#include <linux/dma-mapping.h> 17#include <linux/dma-mapping.h>
18#include <linux/edma.h>
18#include <linux/err.h> 19#include <linux/err.h>
19#include <linux/init.h> 20#include <linux/init.h>
20#include <linux/interrupt.h> 21#include <linux/interrupt.h>
@@ -244,8 +245,9 @@ static void edma_execute(struct edma_chan *echan)
244 } 245 }
245} 246}
246 247
247static int edma_terminate_all(struct edma_chan *echan) 248static int edma_terminate_all(struct dma_chan *chan)
248{ 249{
250 struct edma_chan *echan = to_edma_chan(chan);
249 unsigned long flags; 251 unsigned long flags;
250 LIST_HEAD(head); 252 LIST_HEAD(head);
251 253
@@ -273,9 +275,11 @@ static int edma_terminate_all(struct edma_chan *echan)
273 return 0; 275 return 0;
274} 276}
275 277
276static int edma_slave_config(struct edma_chan *echan, 278static int edma_slave_config(struct dma_chan *chan,
277 struct dma_slave_config *cfg) 279 struct dma_slave_config *cfg)
278{ 280{
281 struct edma_chan *echan = to_edma_chan(chan);
282
279 if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || 283 if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
280 cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) 284 cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
281 return -EINVAL; 285 return -EINVAL;
@@ -285,8 +289,10 @@ static int edma_slave_config(struct edma_chan *echan,
285 return 0; 289 return 0;
286} 290}
287 291
288static int edma_dma_pause(struct edma_chan *echan) 292static int edma_dma_pause(struct dma_chan *chan)
289{ 293{
294 struct edma_chan *echan = to_edma_chan(chan);
295
290 /* Pause/Resume only allowed with cyclic mode */ 296 /* Pause/Resume only allowed with cyclic mode */
291 if (!echan->edesc || !echan->edesc->cyclic) 297 if (!echan->edesc || !echan->edesc->cyclic)
292 return -EINVAL; 298 return -EINVAL;
@@ -295,8 +301,10 @@ static int edma_dma_pause(struct edma_chan *echan)
295 return 0; 301 return 0;
296} 302}
297 303
298static int edma_dma_resume(struct edma_chan *echan) 304static int edma_dma_resume(struct dma_chan *chan)
299{ 305{
306 struct edma_chan *echan = to_edma_chan(chan);
307
300 /* Pause/Resume only allowed with cyclic mode */ 308 /* Pause/Resume only allowed with cyclic mode */
301 if (!echan->edesc->cyclic) 309 if (!echan->edesc->cyclic)
302 return -EINVAL; 310 return -EINVAL;
@@ -305,36 +313,6 @@ static int edma_dma_resume(struct edma_chan *echan)
305 return 0; 313 return 0;
306} 314}
307 315
308static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
309 unsigned long arg)
310{
311 int ret = 0;
312 struct dma_slave_config *config;
313 struct edma_chan *echan = to_edma_chan(chan);
314
315 switch (cmd) {
316 case DMA_TERMINATE_ALL:
317 edma_terminate_all(echan);
318 break;
319 case DMA_SLAVE_CONFIG:
320 config = (struct dma_slave_config *)arg;
321 ret = edma_slave_config(echan, config);
322 break;
323 case DMA_PAUSE:
324 ret = edma_dma_pause(echan);
325 break;
326
327 case DMA_RESUME:
328 ret = edma_dma_resume(echan);
329 break;
330
331 default:
332 ret = -ENOSYS;
333 }
334
335 return ret;
336}
337
338/* 316/*
339 * A PaRAM set configuration abstraction used by other modes 317 * A PaRAM set configuration abstraction used by other modes
340 * @chan: Channel who's PaRAM set we're configuring 318 * @chan: Channel who's PaRAM set we're configuring
@@ -557,7 +535,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
557 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); 535 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
558} 536}
559 537
560struct dma_async_tx_descriptor *edma_prep_dma_memcpy( 538static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
561 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 539 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
562 size_t len, unsigned long tx_flags) 540 size_t len, unsigned long tx_flags)
563{ 541{
@@ -994,19 +972,6 @@ static void __init edma_chan_init(struct edma_cc *ecc,
994 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ 972 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
995 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) 973 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
996 974
997static int edma_dma_device_slave_caps(struct dma_chan *dchan,
998 struct dma_slave_caps *caps)
999{
1000 caps->src_addr_widths = EDMA_DMA_BUSWIDTHS;
1001 caps->dstn_addr_widths = EDMA_DMA_BUSWIDTHS;
1002 caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1003 caps->cmd_pause = true;
1004 caps->cmd_terminate = true;
1005 caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1006
1007 return 0;
1008}
1009
1010static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, 975static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
1011 struct device *dev) 976 struct device *dev)
1012{ 977{
@@ -1017,8 +982,16 @@ static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
1017 dma->device_free_chan_resources = edma_free_chan_resources; 982 dma->device_free_chan_resources = edma_free_chan_resources;
1018 dma->device_issue_pending = edma_issue_pending; 983 dma->device_issue_pending = edma_issue_pending;
1019 dma->device_tx_status = edma_tx_status; 984 dma->device_tx_status = edma_tx_status;
1020 dma->device_control = edma_control; 985 dma->device_config = edma_slave_config;
1021 dma->device_slave_caps = edma_dma_device_slave_caps; 986 dma->device_pause = edma_dma_pause;
987 dma->device_resume = edma_dma_resume;
988 dma->device_terminate_all = edma_terminate_all;
989
990 dma->src_addr_widths = EDMA_DMA_BUSWIDTHS;
991 dma->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
992 dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
993 dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
994
1022 dma->dev = dev; 995 dma->dev = dev;
1023 996
1024 /* 997 /*
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 7650470196c4..24e5290faa32 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -144,7 +144,7 @@ struct ep93xx_dma_desc {
144 * @queue: pending descriptors which are handled next 144 * @queue: pending descriptors which are handled next
145 * @free_list: list of free descriptors which can be used 145 * @free_list: list of free descriptors which can be used
146 * @runtime_addr: physical address currently used as dest/src (M2M only). This 146 * @runtime_addr: physical address currently used as dest/src (M2M only). This
147 * is set via %DMA_SLAVE_CONFIG before slave operation is 147 * is set via .device_config before slave operation is
148 * prepared 148 * prepared
149 * @runtime_ctrl: M2M runtime values for the control register. 149 * @runtime_ctrl: M2M runtime values for the control register.
150 * 150 *
@@ -1164,13 +1164,14 @@ fail:
1164 1164
1165/** 1165/**
1166 * ep93xx_dma_terminate_all - terminate all transactions 1166 * ep93xx_dma_terminate_all - terminate all transactions
1167 * @edmac: channel 1167 * @chan: channel
1168 * 1168 *
1169 * Stops all DMA transactions. All descriptors are put back to the 1169 * Stops all DMA transactions. All descriptors are put back to the
1170 * @edmac->free_list and callbacks are _not_ called. 1170 * @edmac->free_list and callbacks are _not_ called.
1171 */ 1171 */
1172static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac) 1172static int ep93xx_dma_terminate_all(struct dma_chan *chan)
1173{ 1173{
1174 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1174 struct ep93xx_dma_desc *desc, *_d; 1175 struct ep93xx_dma_desc *desc, *_d;
1175 unsigned long flags; 1176 unsigned long flags;
1176 LIST_HEAD(list); 1177 LIST_HEAD(list);
@@ -1194,9 +1195,10 @@ static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac)
1194 return 0; 1195 return 0;
1195} 1196}
1196 1197
1197static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac, 1198static int ep93xx_dma_slave_config(struct dma_chan *chan,
1198 struct dma_slave_config *config) 1199 struct dma_slave_config *config)
1199{ 1200{
1201 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1200 enum dma_slave_buswidth width; 1202 enum dma_slave_buswidth width;
1201 unsigned long flags; 1203 unsigned long flags;
1202 u32 addr, ctrl; 1204 u32 addr, ctrl;
@@ -1242,36 +1244,6 @@ static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
1242} 1244}
1243 1245
1244/** 1246/**
1245 * ep93xx_dma_control - manipulate all pending operations on a channel
1246 * @chan: channel
1247 * @cmd: control command to perform
1248 * @arg: optional argument
1249 *
1250 * Controls the channel. Function returns %0 in case of success or negative
1251 * error in case of failure.
1252 */
1253static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1254 unsigned long arg)
1255{
1256 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
1257 struct dma_slave_config *config;
1258
1259 switch (cmd) {
1260 case DMA_TERMINATE_ALL:
1261 return ep93xx_dma_terminate_all(edmac);
1262
1263 case DMA_SLAVE_CONFIG:
1264 config = (struct dma_slave_config *)arg;
1265 return ep93xx_dma_slave_config(edmac, config);
1266
1267 default:
1268 break;
1269 }
1270
1271 return -ENOSYS;
1272}
1273
1274/**
1275 * ep93xx_dma_tx_status - check if a transaction is completed 1247 * ep93xx_dma_tx_status - check if a transaction is completed
1276 * @chan: channel 1248 * @chan: channel
1277 * @cookie: transaction specific cookie 1249 * @cookie: transaction specific cookie
@@ -1352,7 +1324,8 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
1352 dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources; 1324 dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
1353 dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg; 1325 dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
1354 dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic; 1326 dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
1355 dma_dev->device_control = ep93xx_dma_control; 1327 dma_dev->device_config = ep93xx_dma_slave_config;
1328 dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
1356 dma_dev->device_issue_pending = ep93xx_dma_issue_pending; 1329 dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
1357 dma_dev->device_tx_status = ep93xx_dma_tx_status; 1330 dma_dev->device_tx_status = ep93xx_dma_tx_status;
1358 1331
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index e9ebb89e1711..09e2842d15ec 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -289,62 +289,69 @@ static void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
289 kfree(fsl_desc); 289 kfree(fsl_desc);
290} 290}
291 291
292static int fsl_edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 292static int fsl_edma_terminate_all(struct dma_chan *chan)
293 unsigned long arg)
294{ 293{
295 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); 294 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
296 struct dma_slave_config *cfg = (void *)arg;
297 unsigned long flags; 295 unsigned long flags;
298 LIST_HEAD(head); 296 LIST_HEAD(head);
299 297
300 switch (cmd) { 298 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
301 case DMA_TERMINATE_ALL: 299 fsl_edma_disable_request(fsl_chan);
302 spin_lock_irqsave(&fsl_chan->vchan.lock, flags); 300 fsl_chan->edesc = NULL;
301 vchan_get_all_descriptors(&fsl_chan->vchan, &head);
302 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
303 vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
304 return 0;
305}
306
307static int fsl_edma_pause(struct dma_chan *chan)
308{
309 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
310 unsigned long flags;
311
312 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
313 if (fsl_chan->edesc) {
303 fsl_edma_disable_request(fsl_chan); 314 fsl_edma_disable_request(fsl_chan);
304 fsl_chan->edesc = NULL; 315 fsl_chan->status = DMA_PAUSED;
305 vchan_get_all_descriptors(&fsl_chan->vchan, &head); 316 }
306 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); 317 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
307 vchan_dma_desc_free_list(&fsl_chan->vchan, &head); 318 return 0;
308 return 0; 319}
309
310 case DMA_SLAVE_CONFIG:
311 fsl_chan->fsc.dir = cfg->direction;
312 if (cfg->direction == DMA_DEV_TO_MEM) {
313 fsl_chan->fsc.dev_addr = cfg->src_addr;
314 fsl_chan->fsc.addr_width = cfg->src_addr_width;
315 fsl_chan->fsc.burst = cfg->src_maxburst;
316 fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width);
317 } else if (cfg->direction == DMA_MEM_TO_DEV) {
318 fsl_chan->fsc.dev_addr = cfg->dst_addr;
319 fsl_chan->fsc.addr_width = cfg->dst_addr_width;
320 fsl_chan->fsc.burst = cfg->dst_maxburst;
321 fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width);
322 } else {
323 return -EINVAL;
324 }
325 return 0;
326 320
327 case DMA_PAUSE: 321static int fsl_edma_resume(struct dma_chan *chan)
328 spin_lock_irqsave(&fsl_chan->vchan.lock, flags); 322{
329 if (fsl_chan->edesc) { 323 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
330 fsl_edma_disable_request(fsl_chan); 324 unsigned long flags;
331 fsl_chan->status = DMA_PAUSED;
332 }
333 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
334 return 0;
335
336 case DMA_RESUME:
337 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
338 if (fsl_chan->edesc) {
339 fsl_edma_enable_request(fsl_chan);
340 fsl_chan->status = DMA_IN_PROGRESS;
341 }
342 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
343 return 0;
344 325
345 default: 326 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
346 return -ENXIO; 327 if (fsl_chan->edesc) {
328 fsl_edma_enable_request(fsl_chan);
329 fsl_chan->status = DMA_IN_PROGRESS;
330 }
331 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
332 return 0;
333}
334
335static int fsl_edma_slave_config(struct dma_chan *chan,
336 struct dma_slave_config *cfg)
337{
338 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
339
340 fsl_chan->fsc.dir = cfg->direction;
341 if (cfg->direction == DMA_DEV_TO_MEM) {
342 fsl_chan->fsc.dev_addr = cfg->src_addr;
343 fsl_chan->fsc.addr_width = cfg->src_addr_width;
344 fsl_chan->fsc.burst = cfg->src_maxburst;
345 fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width);
346 } else if (cfg->direction == DMA_MEM_TO_DEV) {
347 fsl_chan->fsc.dev_addr = cfg->dst_addr;
348 fsl_chan->fsc.addr_width = cfg->dst_addr_width;
349 fsl_chan->fsc.burst = cfg->dst_maxburst;
350 fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width);
351 } else {
352 return -EINVAL;
347 } 353 }
354 return 0;
348} 355}
349 356
350static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan, 357static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
@@ -780,18 +787,6 @@ static void fsl_edma_free_chan_resources(struct dma_chan *chan)
780 fsl_chan->tcd_pool = NULL; 787 fsl_chan->tcd_pool = NULL;
781} 788}
782 789
783static int fsl_dma_device_slave_caps(struct dma_chan *dchan,
784 struct dma_slave_caps *caps)
785{
786 caps->src_addr_widths = FSL_EDMA_BUSWIDTHS;
787 caps->dstn_addr_widths = FSL_EDMA_BUSWIDTHS;
788 caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
789 caps->cmd_pause = true;
790 caps->cmd_terminate = true;
791
792 return 0;
793}
794
795static int 790static int
796fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma) 791fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
797{ 792{
@@ -917,9 +912,15 @@ static int fsl_edma_probe(struct platform_device *pdev)
917 fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status; 912 fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
918 fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg; 913 fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
919 fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic; 914 fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
920 fsl_edma->dma_dev.device_control = fsl_edma_control; 915 fsl_edma->dma_dev.device_config = fsl_edma_slave_config;
916 fsl_edma->dma_dev.device_pause = fsl_edma_pause;
917 fsl_edma->dma_dev.device_resume = fsl_edma_resume;
918 fsl_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
921 fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending; 919 fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;
922 fsl_edma->dma_dev.device_slave_caps = fsl_dma_device_slave_caps; 920
921 fsl_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
922 fsl_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
923 fsl_edma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
923 924
924 platform_set_drvdata(pdev, fsl_edma); 925 platform_set_drvdata(pdev, fsl_edma);
925 926
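
fsl_edma_pause() and fsl_edma_resume() above only touch the hardware when a descriptor is actually in flight, and they record the paused state that the driver's status reporting later returns. A stripped-down sketch of that guard; the foo_* names and the disable helper are hypothetical.

#include <linux/dmaengine.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>

struct foo_chan {
	struct dma_chan chan;
	spinlock_t lock;
	void *active_desc;		/* non-NULL while a transfer is in flight */
	enum dma_status status;
};

static void foo_hw_disable_request(struct foo_chan *fc) { /* mask the DMA request */ }

static int foo_pause(struct dma_chan *chan)
{
	struct foo_chan *fc = container_of(chan, struct foo_chan, chan);
	unsigned long flags;

	spin_lock_irqsave(&fc->lock, flags);
	if (fc->active_desc) {		/* nothing to do on an idle channel */
		foo_hw_disable_request(fc);
		fc->status = DMA_PAUSED;
	}
	spin_unlock_irqrestore(&fc->lock, flags);

	return 0;
}
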
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 38821cdf862b..300f821f1890 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -941,84 +941,56 @@ fail:
941 return NULL; 941 return NULL;
942} 942}
943 943
944/** 944static int fsl_dma_device_terminate_all(struct dma_chan *dchan)
945 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
946 * @chan: DMA channel
947 * @sgl: scatterlist to transfer to/from
948 * @sg_len: number of entries in @scatterlist
949 * @direction: DMA direction
950 * @flags: DMAEngine flags
951 * @context: transaction context (ignored)
952 *
953 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
954 * DMA_SLAVE API, this gets the device-specific information from the
955 * chan->private variable.
956 */
957static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
958 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
959 enum dma_transfer_direction direction, unsigned long flags,
960 void *context)
961{ 945{
962 /*
963 * This operation is not supported on the Freescale DMA controller
964 *
965 * However, we need to provide the function pointer to allow the
966 * device_control() method to work.
967 */
968 return NULL;
969}
970
971static int fsl_dma_device_control(struct dma_chan *dchan,
972 enum dma_ctrl_cmd cmd, unsigned long arg)
973{
974 struct dma_slave_config *config;
975 struct fsldma_chan *chan; 946 struct fsldma_chan *chan;
976 int size;
977 947
978 if (!dchan) 948 if (!dchan)
979 return -EINVAL; 949 return -EINVAL;
980 950
981 chan = to_fsl_chan(dchan); 951 chan = to_fsl_chan(dchan);
982 952
983 switch (cmd) { 953 spin_lock_bh(&chan->desc_lock);
984 case DMA_TERMINATE_ALL:
985 spin_lock_bh(&chan->desc_lock);
986
987 /* Halt the DMA engine */
988 dma_halt(chan);
989 954
990 /* Remove and free all of the descriptors in the LD queue */ 955 /* Halt the DMA engine */
991 fsldma_free_desc_list(chan, &chan->ld_pending); 956 dma_halt(chan);
992 fsldma_free_desc_list(chan, &chan->ld_running);
993 fsldma_free_desc_list(chan, &chan->ld_completed);
994 chan->idle = true;
995 957
996 spin_unlock_bh(&chan->desc_lock); 958 /* Remove and free all of the descriptors in the LD queue */
997 return 0; 959 fsldma_free_desc_list(chan, &chan->ld_pending);
960 fsldma_free_desc_list(chan, &chan->ld_running);
961 fsldma_free_desc_list(chan, &chan->ld_completed);
962 chan->idle = true;
998 963
999 case DMA_SLAVE_CONFIG: 964 spin_unlock_bh(&chan->desc_lock);
1000 config = (struct dma_slave_config *)arg; 965 return 0;
966}
1001 967
1002 /* make sure the channel supports setting burst size */ 968static int fsl_dma_device_config(struct dma_chan *dchan,
1003 if (!chan->set_request_count) 969 struct dma_slave_config *config)
1004 return -ENXIO; 970{
971 struct fsldma_chan *chan;
972 int size;
1005 973
1006 /* we set the controller burst size depending on direction */ 974 if (!dchan)
1007 if (config->direction == DMA_MEM_TO_DEV) 975 return -EINVAL;
1008 size = config->dst_addr_width * config->dst_maxburst;
1009 else
1010 size = config->src_addr_width * config->src_maxburst;
1011 976
1012 chan->set_request_count(chan, size); 977 chan = to_fsl_chan(dchan);
1013 return 0;
1014 978
1015 default: 979 /* make sure the channel supports setting burst size */
980 if (!chan->set_request_count)
1016 return -ENXIO; 981 return -ENXIO;
1017 }
1018 982
983 /* we set the controller burst size depending on direction */
984 if (config->direction == DMA_MEM_TO_DEV)
985 size = config->dst_addr_width * config->dst_maxburst;
986 else
987 size = config->src_addr_width * config->src_maxburst;
988
989 chan->set_request_count(chan, size);
1019 return 0; 990 return 0;
1020} 991}
1021 992
993
1022/** 994/**
1023 * fsl_dma_memcpy_issue_pending - Issue the DMA start command 995 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
1024 * @chan : Freescale DMA channel 996 * @chan : Freescale DMA channel
@@ -1395,10 +1367,15 @@ static int fsldma_of_probe(struct platform_device *op)
1395 fdev->common.device_prep_dma_sg = fsl_dma_prep_sg; 1367 fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
1396 fdev->common.device_tx_status = fsl_tx_status; 1368 fdev->common.device_tx_status = fsl_tx_status;
1397 fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; 1369 fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
1398 fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg; 1370 fdev->common.device_config = fsl_dma_device_config;
1399 fdev->common.device_control = fsl_dma_device_control; 1371 fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
1400 fdev->common.dev = &op->dev; 1372 fdev->common.dev = &op->dev;
1401 1373
1374 fdev->common.src_addr_widths = FSL_DMA_BUSWIDTHS;
1375 fdev->common.dst_addr_widths = FSL_DMA_BUSWIDTHS;
1376 fdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1377 fdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
1378
1402 dma_set_mask(&(op->dev), DMA_BIT_MASK(36)); 1379 dma_set_mask(&(op->dev), DMA_BIT_MASK(36));
1403 1380
1404 platform_set_drvdata(op, fdev); 1381 platform_set_drvdata(op, fdev);
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 239c20c84382..31bffccdcc75 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -83,6 +83,10 @@
83#define FSL_DMA_DGSR_EOSI 0x02 83#define FSL_DMA_DGSR_EOSI 0x02
84#define FSL_DMA_DGSR_EOLSI 0x01 84#define FSL_DMA_DGSR_EOLSI 0x01
85 85
86#define FSL_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
87 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
88 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
89 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
86typedef u64 __bitwise v64; 90typedef u64 __bitwise v64;
87typedef u32 __bitwise v32; 91typedef u32 __bitwise v32;
88 92
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
new file mode 100644
index 000000000000..ed045a9ad634
--- /dev/null
+++ b/drivers/dma/img-mdc-dma.c
@@ -0,0 +1,1011 @@
1/*
2 * IMG Multi-threaded DMA Controller (MDC)
3 *
4 * Copyright (C) 2009,2012,2013 Imagination Technologies Ltd.
5 * Copyright (C) 2014 Google, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 */
11
12#include <linux/clk.h>
13#include <linux/dma-mapping.h>
14#include <linux/dmaengine.h>
15#include <linux/dmapool.h>
16#include <linux/interrupt.h>
17#include <linux/io.h>
18#include <linux/irq.h>
19#include <linux/kernel.h>
20#include <linux/mfd/syscon.h>
21#include <linux/module.h>
22#include <linux/of.h>
23#include <linux/of_device.h>
24#include <linux/of_dma.h>
25#include <linux/platform_device.h>
26#include <linux/regmap.h>
27#include <linux/slab.h>
28#include <linux/spinlock.h>
29
30#include "dmaengine.h"
31#include "virt-dma.h"
32
33#define MDC_MAX_DMA_CHANNELS 32
34
35#define MDC_GENERAL_CONFIG 0x000
36#define MDC_GENERAL_CONFIG_LIST_IEN BIT(31)
37#define MDC_GENERAL_CONFIG_IEN BIT(29)
38#define MDC_GENERAL_CONFIG_LEVEL_INT BIT(28)
39#define MDC_GENERAL_CONFIG_INC_W BIT(12)
40#define MDC_GENERAL_CONFIG_INC_R BIT(8)
41#define MDC_GENERAL_CONFIG_PHYSICAL_W BIT(7)
42#define MDC_GENERAL_CONFIG_WIDTH_W_SHIFT 4
43#define MDC_GENERAL_CONFIG_WIDTH_W_MASK 0x7
44#define MDC_GENERAL_CONFIG_PHYSICAL_R BIT(3)
45#define MDC_GENERAL_CONFIG_WIDTH_R_SHIFT 0
46#define MDC_GENERAL_CONFIG_WIDTH_R_MASK 0x7
47
48#define MDC_READ_PORT_CONFIG 0x004
49#define MDC_READ_PORT_CONFIG_STHREAD_SHIFT 28
50#define MDC_READ_PORT_CONFIG_STHREAD_MASK 0xf
51#define MDC_READ_PORT_CONFIG_RTHREAD_SHIFT 24
52#define MDC_READ_PORT_CONFIG_RTHREAD_MASK 0xf
53#define MDC_READ_PORT_CONFIG_WTHREAD_SHIFT 16
54#define MDC_READ_PORT_CONFIG_WTHREAD_MASK 0xf
55#define MDC_READ_PORT_CONFIG_BURST_SIZE_SHIFT 4
56#define MDC_READ_PORT_CONFIG_BURST_SIZE_MASK 0xff
57#define MDC_READ_PORT_CONFIG_DREQ_ENABLE BIT(1)
58
59#define MDC_READ_ADDRESS 0x008
60
61#define MDC_WRITE_ADDRESS 0x00c
62
63#define MDC_TRANSFER_SIZE 0x010
64#define MDC_TRANSFER_SIZE_MASK 0xffffff
65
66#define MDC_LIST_NODE_ADDRESS 0x014
67
68#define MDC_CMDS_PROCESSED 0x018
69#define MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT 16
70#define MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK 0x3f
71#define MDC_CMDS_PROCESSED_INT_ACTIVE BIT(8)
72#define MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT 0
73#define MDC_CMDS_PROCESSED_CMDS_DONE_MASK 0x3f
74
75#define MDC_CONTROL_AND_STATUS 0x01c
76#define MDC_CONTROL_AND_STATUS_CANCEL BIT(20)
77#define MDC_CONTROL_AND_STATUS_LIST_EN BIT(4)
78#define MDC_CONTROL_AND_STATUS_EN BIT(0)
79
80#define MDC_ACTIVE_TRANSFER_SIZE 0x030
81
82#define MDC_GLOBAL_CONFIG_A 0x900
83#define MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_SHIFT 16
84#define MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_MASK 0xff
85#define MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT 8
86#define MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK 0xff
87#define MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_SHIFT 0
88#define MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_MASK 0xff
89
90struct mdc_hw_list_desc {
91 u32 gen_conf;
92 u32 readport_conf;
93 u32 read_addr;
94 u32 write_addr;
95 u32 xfer_size;
96 u32 node_addr;
97 u32 cmds_done;
98 u32 ctrl_status;
99 /*
100 * Not part of the list descriptor, but instead used by the CPU to
101 * traverse the list.
102 */
103 struct mdc_hw_list_desc *next_desc;
104};
105
106struct mdc_tx_desc {
107 struct mdc_chan *chan;
108 struct virt_dma_desc vd;
109 dma_addr_t list_phys;
110 struct mdc_hw_list_desc *list;
111 bool cyclic;
112 bool cmd_loaded;
113 unsigned int list_len;
114 unsigned int list_period_len;
115 size_t list_xfer_size;
116 unsigned int list_cmds_done;
117};
118
119struct mdc_chan {
120 struct mdc_dma *mdma;
121 struct virt_dma_chan vc;
122 struct dma_slave_config config;
123 struct mdc_tx_desc *desc;
124 int irq;
125 unsigned int periph;
126 unsigned int thread;
127 unsigned int chan_nr;
128};
129
130struct mdc_dma_soc_data {
131 void (*enable_chan)(struct mdc_chan *mchan);
132 void (*disable_chan)(struct mdc_chan *mchan);
133};
134
135struct mdc_dma {
136 struct dma_device dma_dev;
137 void __iomem *regs;
138 struct clk *clk;
139 struct dma_pool *desc_pool;
140 struct regmap *periph_regs;
141 spinlock_t lock;
142 unsigned int nr_threads;
143 unsigned int nr_channels;
144 unsigned int bus_width;
145 unsigned int max_burst_mult;
146 unsigned int max_xfer_size;
147 const struct mdc_dma_soc_data *soc;
148 struct mdc_chan channels[MDC_MAX_DMA_CHANNELS];
149};
150
151static inline u32 mdc_readl(struct mdc_dma *mdma, u32 reg)
152{
153 return readl(mdma->regs + reg);
154}
155
156static inline void mdc_writel(struct mdc_dma *mdma, u32 val, u32 reg)
157{
158 writel(val, mdma->regs + reg);
159}
160
161static inline u32 mdc_chan_readl(struct mdc_chan *mchan, u32 reg)
162{
163 return mdc_readl(mchan->mdma, mchan->chan_nr * 0x040 + reg);
164}
165
166static inline void mdc_chan_writel(struct mdc_chan *mchan, u32 val, u32 reg)
167{
168 mdc_writel(mchan->mdma, val, mchan->chan_nr * 0x040 + reg);
169}
170
171static inline struct mdc_chan *to_mdc_chan(struct dma_chan *c)
172{
173 return container_of(to_virt_chan(c), struct mdc_chan, vc);
174}
175
176static inline struct mdc_tx_desc *to_mdc_desc(struct dma_async_tx_descriptor *t)
177{
178 struct virt_dma_desc *vdesc = container_of(t, struct virt_dma_desc, tx);
179
180 return container_of(vdesc, struct mdc_tx_desc, vd);
181}
182
183static inline struct device *mdma2dev(struct mdc_dma *mdma)
184{
185 return mdma->dma_dev.dev;
186}
187
188static inline unsigned int to_mdc_width(unsigned int bytes)
189{
190 return ffs(bytes) - 1;
191}
192
193static inline void mdc_set_read_width(struct mdc_hw_list_desc *ldesc,
194 unsigned int bytes)
195{
196 ldesc->gen_conf |= to_mdc_width(bytes) <<
197 MDC_GENERAL_CONFIG_WIDTH_R_SHIFT;
198}
199
200static inline void mdc_set_write_width(struct mdc_hw_list_desc *ldesc,
201 unsigned int bytes)
202{
203 ldesc->gen_conf |= to_mdc_width(bytes) <<
204 MDC_GENERAL_CONFIG_WIDTH_W_SHIFT;
205}
206
207static void mdc_list_desc_config(struct mdc_chan *mchan,
208 struct mdc_hw_list_desc *ldesc,
209 enum dma_transfer_direction dir,
210 dma_addr_t src, dma_addr_t dst, size_t len)
211{
212 struct mdc_dma *mdma = mchan->mdma;
213 unsigned int max_burst, burst_size;
214
215 ldesc->gen_conf = MDC_GENERAL_CONFIG_IEN | MDC_GENERAL_CONFIG_LIST_IEN |
216 MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W |
217 MDC_GENERAL_CONFIG_PHYSICAL_R;
218 ldesc->readport_conf =
219 (mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
220 (mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
221 (mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
222 ldesc->read_addr = src;
223 ldesc->write_addr = dst;
224 ldesc->xfer_size = len - 1;
225 ldesc->node_addr = 0;
226 ldesc->cmds_done = 0;
227 ldesc->ctrl_status = MDC_CONTROL_AND_STATUS_LIST_EN |
228 MDC_CONTROL_AND_STATUS_EN;
229 ldesc->next_desc = NULL;
230
231 if (IS_ALIGNED(dst, mdma->bus_width) &&
232 IS_ALIGNED(src, mdma->bus_width))
233 max_burst = mdma->bus_width * mdma->max_burst_mult;
234 else
235 max_burst = mdma->bus_width * (mdma->max_burst_mult - 1);
236
237 if (dir == DMA_MEM_TO_DEV) {
238 ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R;
239 ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE;
240 mdc_set_read_width(ldesc, mdma->bus_width);
241 mdc_set_write_width(ldesc, mchan->config.dst_addr_width);
242 burst_size = min(max_burst, mchan->config.dst_maxburst *
243 mchan->config.dst_addr_width);
244 } else if (dir == DMA_DEV_TO_MEM) {
245 ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_W;
246 ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE;
247 mdc_set_read_width(ldesc, mchan->config.src_addr_width);
248 mdc_set_write_width(ldesc, mdma->bus_width);
249 burst_size = min(max_burst, mchan->config.src_maxburst *
250 mchan->config.src_addr_width);
251 } else {
252 ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R |
253 MDC_GENERAL_CONFIG_INC_W;
254 mdc_set_read_width(ldesc, mdma->bus_width);
255 mdc_set_write_width(ldesc, mdma->bus_width);
256 burst_size = max_burst;
257 }
258 ldesc->readport_conf |= (burst_size - 1) <<
259 MDC_READ_PORT_CONFIG_BURST_SIZE_SHIFT;
260}
261
262static void mdc_list_desc_free(struct mdc_tx_desc *mdesc)
263{
264 struct mdc_dma *mdma = mdesc->chan->mdma;
265 struct mdc_hw_list_desc *curr, *next;
266 dma_addr_t curr_phys, next_phys;
267
268 curr = mdesc->list;
269 curr_phys = mdesc->list_phys;
270 while (curr) {
271 next = curr->next_desc;
272 next_phys = curr->node_addr;
273 dma_pool_free(mdma->desc_pool, curr, curr_phys);
274 curr = next;
275 curr_phys = next_phys;
276 }
277}
278
279static void mdc_desc_free(struct virt_dma_desc *vd)
280{
281 struct mdc_tx_desc *mdesc = to_mdc_desc(&vd->tx);
282
283 mdc_list_desc_free(mdesc);
284 kfree(mdesc);
285}
286
287static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy(
288 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len,
289 unsigned long flags)
290{
291 struct mdc_chan *mchan = to_mdc_chan(chan);
292 struct mdc_dma *mdma = mchan->mdma;
293 struct mdc_tx_desc *mdesc;
294 struct mdc_hw_list_desc *curr, *prev = NULL;
295 dma_addr_t curr_phys, prev_phys;
296
297 if (!len)
298 return NULL;
299
300 mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
301 if (!mdesc)
302 return NULL;
303 mdesc->chan = mchan;
304 mdesc->list_xfer_size = len;
305
306 while (len > 0) {
307 size_t xfer_size;
308
309 curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT, &curr_phys);
310 if (!curr)
311 goto free_desc;
312
313 if (prev) {
314 prev->node_addr = curr_phys;
315 prev->next_desc = curr;
316 } else {
317 mdesc->list_phys = curr_phys;
318 mdesc->list = curr;
319 }
320
321 xfer_size = min_t(size_t, mdma->max_xfer_size, len);
322
323 mdc_list_desc_config(mchan, curr, DMA_MEM_TO_MEM, src, dest,
324 xfer_size);
325
326 prev = curr;
327 prev_phys = curr_phys;
328
329 mdesc->list_len++;
330 src += xfer_size;
331 dest += xfer_size;
332 len -= xfer_size;
333 }
334
335 return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);
336
337free_desc:
338 mdc_desc_free(&mdesc->vd);
339
340 return NULL;
341}
342
343static int mdc_check_slave_width(struct mdc_chan *mchan,
344 enum dma_transfer_direction dir)
345{
346 enum dma_slave_buswidth width;
347
348 if (dir == DMA_MEM_TO_DEV)
349 width = mchan->config.dst_addr_width;
350 else
351 width = mchan->config.src_addr_width;
352
353 switch (width) {
354 case DMA_SLAVE_BUSWIDTH_1_BYTE:
355 case DMA_SLAVE_BUSWIDTH_2_BYTES:
356 case DMA_SLAVE_BUSWIDTH_4_BYTES:
357 case DMA_SLAVE_BUSWIDTH_8_BYTES:
358 break;
359 default:
360 return -EINVAL;
361 }
362
363 if (width > mchan->mdma->bus_width)
364 return -EINVAL;
365
366 return 0;
367}
368
369static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
370 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
371 size_t period_len, enum dma_transfer_direction dir,
372 unsigned long flags)
373{
374 struct mdc_chan *mchan = to_mdc_chan(chan);
375 struct mdc_dma *mdma = mchan->mdma;
376 struct mdc_tx_desc *mdesc;
377 struct mdc_hw_list_desc *curr, *prev = NULL;
378 dma_addr_t curr_phys, prev_phys;
379
380 if (!buf_len && !period_len)
381 return NULL;
382
383 if (!is_slave_direction(dir))
384 return NULL;
385
386 if (mdc_check_slave_width(mchan, dir) < 0)
387 return NULL;
388
389 mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
390 if (!mdesc)
391 return NULL;
392 mdesc->chan = mchan;
393 mdesc->cyclic = true;
394 mdesc->list_xfer_size = buf_len;
395 mdesc->list_period_len = DIV_ROUND_UP(period_len,
396 mdma->max_xfer_size);
397
398 while (buf_len > 0) {
399 size_t remainder = min(period_len, buf_len);
400
401 while (remainder > 0) {
402 size_t xfer_size;
403
404 curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
405 &curr_phys);
406 if (!curr)
407 goto free_desc;
408
409 if (!prev) {
410 mdesc->list_phys = curr_phys;
411 mdesc->list = curr;
412 } else {
413 prev->node_addr = curr_phys;
414 prev->next_desc = curr;
415 }
416
417 xfer_size = min_t(size_t, mdma->max_xfer_size,
418 remainder);
419
420 if (dir == DMA_MEM_TO_DEV) {
421 mdc_list_desc_config(mchan, curr, dir,
422 buf_addr,
423 mchan->config.dst_addr,
424 xfer_size);
425 } else {
426 mdc_list_desc_config(mchan, curr, dir,
427 mchan->config.src_addr,
428 buf_addr,
429 xfer_size);
430 }
431
432 prev = curr;
433 prev_phys = curr_phys;
434
435 mdesc->list_len++;
436 buf_addr += xfer_size;
437 buf_len -= xfer_size;
438 remainder -= xfer_size;
439 }
440 }
441 prev->node_addr = mdesc->list_phys;
442
443 return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);
444
445free_desc:
446 mdc_desc_free(&mdesc->vd);
447
448 return NULL;
449}
450
451static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
452 struct dma_chan *chan, struct scatterlist *sgl,
453 unsigned int sg_len, enum dma_transfer_direction dir,
454 unsigned long flags, void *context)
455{
456 struct mdc_chan *mchan = to_mdc_chan(chan);
457 struct mdc_dma *mdma = mchan->mdma;
458 struct mdc_tx_desc *mdesc;
459 struct scatterlist *sg;
460 struct mdc_hw_list_desc *curr, *prev = NULL;
461 dma_addr_t curr_phys, prev_phys;
462 unsigned int i;
463
464 if (!sgl)
465 return NULL;
466
467 if (!is_slave_direction(dir))
468 return NULL;
469
470 if (mdc_check_slave_width(mchan, dir) < 0)
471 return NULL;
472
473 mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
474 if (!mdesc)
475 return NULL;
476 mdesc->chan = mchan;
477
478 for_each_sg(sgl, sg, sg_len, i) {
479 dma_addr_t buf = sg_dma_address(sg);
480 size_t buf_len = sg_dma_len(sg);
481
482 while (buf_len > 0) {
483 size_t xfer_size;
484
485 curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
486 &curr_phys);
487 if (!curr)
488 goto free_desc;
489
490 if (!prev) {
491 mdesc->list_phys = curr_phys;
492 mdesc->list = curr;
493 } else {
494 prev->node_addr = curr_phys;
495 prev->next_desc = curr;
496 }
497
498 xfer_size = min_t(size_t, mdma->max_xfer_size,
499 buf_len);
500
501 if (dir == DMA_MEM_TO_DEV) {
502 mdc_list_desc_config(mchan, curr, dir, buf,
503 mchan->config.dst_addr,
504 xfer_size);
505 } else {
506 mdc_list_desc_config(mchan, curr, dir,
507 mchan->config.src_addr,
508 buf, xfer_size);
509 }
510
511 prev = curr;
512 prev_phys = curr_phys;
513
514 mdesc->list_len++;
515 mdesc->list_xfer_size += xfer_size;
516 buf += xfer_size;
517 buf_len -= xfer_size;
518 }
519 }
520
521 return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);
522
523free_desc:
524 mdc_desc_free(&mdesc->vd);
525
526 return NULL;
527}
528
529static void mdc_issue_desc(struct mdc_chan *mchan)
530{
531 struct mdc_dma *mdma = mchan->mdma;
532 struct virt_dma_desc *vd;
533 struct mdc_tx_desc *mdesc;
534 u32 val;
535
536 vd = vchan_next_desc(&mchan->vc);
537 if (!vd)
538 return;
539
540 list_del(&vd->node);
541
542 mdesc = to_mdc_desc(&vd->tx);
543 mchan->desc = mdesc;
544
545 dev_dbg(mdma2dev(mdma), "Issuing descriptor on channel %d\n",
546 mchan->chan_nr);
547
548 mdma->soc->enable_chan(mchan);
549
550 val = mdc_chan_readl(mchan, MDC_GENERAL_CONFIG);
551 val |= MDC_GENERAL_CONFIG_LIST_IEN | MDC_GENERAL_CONFIG_IEN |
552 MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W |
553 MDC_GENERAL_CONFIG_PHYSICAL_R;
554 mdc_chan_writel(mchan, val, MDC_GENERAL_CONFIG);
555 val = (mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
556 (mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
557 (mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
558 mdc_chan_writel(mchan, val, MDC_READ_PORT_CONFIG);
559 mdc_chan_writel(mchan, mdesc->list_phys, MDC_LIST_NODE_ADDRESS);
560 val = mdc_chan_readl(mchan, MDC_CONTROL_AND_STATUS);
561 val |= MDC_CONTROL_AND_STATUS_LIST_EN;
562 mdc_chan_writel(mchan, val, MDC_CONTROL_AND_STATUS);
563}
564
565static void mdc_issue_pending(struct dma_chan *chan)
566{
567 struct mdc_chan *mchan = to_mdc_chan(chan);
568 unsigned long flags;
569
570 spin_lock_irqsave(&mchan->vc.lock, flags);
571 if (vchan_issue_pending(&mchan->vc) && !mchan->desc)
572 mdc_issue_desc(mchan);
573 spin_unlock_irqrestore(&mchan->vc.lock, flags);
574}
575
576static enum dma_status mdc_tx_status(struct dma_chan *chan,
577 dma_cookie_t cookie, struct dma_tx_state *txstate)
578{
579 struct mdc_chan *mchan = to_mdc_chan(chan);
580 struct mdc_tx_desc *mdesc;
581 struct virt_dma_desc *vd;
582 unsigned long flags;
583 size_t bytes = 0;
584 int ret;
585
586 ret = dma_cookie_status(chan, cookie, txstate);
587 if (ret == DMA_COMPLETE)
588 return ret;
589
590 if (!txstate)
591 return ret;
592
593 spin_lock_irqsave(&mchan->vc.lock, flags);
594 vd = vchan_find_desc(&mchan->vc, cookie);
595 if (vd) {
596 mdesc = to_mdc_desc(&vd->tx);
597 bytes = mdesc->list_xfer_size;
598 } else if (mchan->desc && mchan->desc->vd.tx.cookie == cookie) {
599 struct mdc_hw_list_desc *ldesc;
600 u32 val1, val2, done, processed, residue;
601 int i, cmds;
602
603 mdesc = mchan->desc;
604
605 /*
606 * Determine the number of commands that haven't been
607 * processed (handled by the IRQ handler) yet.
608 */
609 do {
610 val1 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) &
611 ~MDC_CMDS_PROCESSED_INT_ACTIVE;
612 residue = mdc_chan_readl(mchan,
613 MDC_ACTIVE_TRANSFER_SIZE);
614 val2 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) &
615 ~MDC_CMDS_PROCESSED_INT_ACTIVE;
616 } while (val1 != val2);
617
618 done = (val1 >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
619 MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
620 processed = (val1 >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
621 MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
622 cmds = (done - processed) %
623 (MDC_CMDS_PROCESSED_CMDS_DONE_MASK + 1);
624
625 /*
626 * If the command loaded event hasn't been processed yet, then
627 * the difference above includes an extra command.
628 */
629 if (!mdesc->cmd_loaded)
630 cmds--;
631 else
632 cmds += mdesc->list_cmds_done;
633
634 bytes = mdesc->list_xfer_size;
635 ldesc = mdesc->list;
636 for (i = 0; i < cmds; i++) {
637 bytes -= ldesc->xfer_size + 1;
638 ldesc = ldesc->next_desc;
639 }
640 if (ldesc) {
641 if (residue != MDC_TRANSFER_SIZE_MASK)
642 bytes -= ldesc->xfer_size - residue;
643 else
644 bytes -= ldesc->xfer_size + 1;
645 }
646 }
647 spin_unlock_irqrestore(&mchan->vc.lock, flags);
648
649 dma_set_residue(txstate, bytes);
650
651 return ret;
652}
653
654static int mdc_terminate_all(struct dma_chan *chan)
655{
656 struct mdc_chan *mchan = to_mdc_chan(chan);
657 struct mdc_tx_desc *mdesc;
658 unsigned long flags;
659 LIST_HEAD(head);
660
661 spin_lock_irqsave(&mchan->vc.lock, flags);
662
663 mdc_chan_writel(mchan, MDC_CONTROL_AND_STATUS_CANCEL,
664 MDC_CONTROL_AND_STATUS);
665
666 mdesc = mchan->desc;
667 mchan->desc = NULL;
668 vchan_get_all_descriptors(&mchan->vc, &head);
669
670 spin_unlock_irqrestore(&mchan->vc.lock, flags);
671
672 if (mdesc)
673 mdc_desc_free(&mdesc->vd);
674 vchan_dma_desc_free_list(&mchan->vc, &head);
675
676 return 0;
677}
678
679static int mdc_slave_config(struct dma_chan *chan,
680 struct dma_slave_config *config)
681{
682 struct mdc_chan *mchan = to_mdc_chan(chan);
683 unsigned long flags;
684
685 spin_lock_irqsave(&mchan->vc.lock, flags);
686 mchan->config = *config;
687 spin_unlock_irqrestore(&mchan->vc.lock, flags);
688
689 return 0;
690}
691
692static int mdc_alloc_chan_resources(struct dma_chan *chan)
693{
694 return 0;
695}
696
697static void mdc_free_chan_resources(struct dma_chan *chan)
698{
699 struct mdc_chan *mchan = to_mdc_chan(chan);
700 struct mdc_dma *mdma = mchan->mdma;
701
702 mdc_terminate_all(chan);
703
704 mdma->soc->disable_chan(mchan);
705}
706
707static irqreturn_t mdc_chan_irq(int irq, void *dev_id)
708{
709 struct mdc_chan *mchan = (struct mdc_chan *)dev_id;
710 struct mdc_tx_desc *mdesc;
711 u32 val, processed, done1, done2;
712 unsigned int i;
713
714 spin_lock(&mchan->vc.lock);
715
716 val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
717 processed = (val >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
718 MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
719 /*
720 * CMDS_DONE may have incremented between reading CMDS_PROCESSED
721 * and clearing INT_ACTIVE. Re-read CMDS_PROCESSED to ensure we
722 * didn't miss a command completion.
723 */
724 do {
725 val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
726 done1 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
727 MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
728 val &= ~((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK <<
729 MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) |
730 MDC_CMDS_PROCESSED_INT_ACTIVE);
731 val |= done1 << MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT;
732 mdc_chan_writel(mchan, val, MDC_CMDS_PROCESSED);
733 val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
734 done2 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
735 MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
736 } while (done1 != done2);
737
738 dev_dbg(mdma2dev(mchan->mdma), "IRQ on channel %d\n", mchan->chan_nr);
739
740 mdesc = mchan->desc;
741 if (!mdesc) {
742 dev_warn(mdma2dev(mchan->mdma),
743 "IRQ with no active descriptor on channel %d\n",
744 mchan->chan_nr);
745 goto out;
746 }
747
748 for (i = processed; i != done1;
749 i = (i + 1) % (MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK + 1)) {
750 /*
751 * The first interrupt in a transfer indicates that the
752 * command list has been loaded, not that a command has
753 * been completed.
754 */
755 if (!mdesc->cmd_loaded) {
756 mdesc->cmd_loaded = true;
757 continue;
758 }
759
760 mdesc->list_cmds_done++;
761 if (mdesc->cyclic) {
762 mdesc->list_cmds_done %= mdesc->list_len;
763 if (mdesc->list_cmds_done % mdesc->list_period_len == 0)
764 vchan_cyclic_callback(&mdesc->vd);
765 } else if (mdesc->list_cmds_done == mdesc->list_len) {
766 mchan->desc = NULL;
767 vchan_cookie_complete(&mdesc->vd);
768 mdc_issue_desc(mchan);
769 break;
770 }
771 }
772out:
773 spin_unlock(&mchan->vc.lock);
774
775 return IRQ_HANDLED;
776}
777
778static struct dma_chan *mdc_of_xlate(struct of_phandle_args *dma_spec,
779 struct of_dma *ofdma)
780{
781 struct mdc_dma *mdma = ofdma->of_dma_data;
782 struct dma_chan *chan;
783
784 if (dma_spec->args_count != 3)
785 return NULL;
786
787 list_for_each_entry(chan, &mdma->dma_dev.channels, device_node) {
788 struct mdc_chan *mchan = to_mdc_chan(chan);
789
790 if (!(dma_spec->args[1] & BIT(mchan->chan_nr)))
791 continue;
792 if (dma_get_slave_channel(chan)) {
793 mchan->periph = dma_spec->args[0];
794 mchan->thread = dma_spec->args[2];
795 return chan;
796 }
797 }
798
799 return NULL;
800}
801
802#define PISTACHIO_CR_PERIPH_DMA_ROUTE(ch) (0x120 + 0x4 * ((ch) / 4))
803#define PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(ch) (8 * ((ch) % 4))
804#define PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK 0x3f
805
806static void pistachio_mdc_enable_chan(struct mdc_chan *mchan)
807{
808 struct mdc_dma *mdma = mchan->mdma;
809
810 regmap_update_bits(mdma->periph_regs,
811 PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
812 PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK <<
813 PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
814 mchan->periph <<
815 PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr));
816}
817
818static void pistachio_mdc_disable_chan(struct mdc_chan *mchan)
819{
820 struct mdc_dma *mdma = mchan->mdma;
821
822 regmap_update_bits(mdma->periph_regs,
823 PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
824 PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK <<
825 PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
826 0);
827}
828
829static const struct mdc_dma_soc_data pistachio_mdc_data = {
830 .enable_chan = pistachio_mdc_enable_chan,
831 .disable_chan = pistachio_mdc_disable_chan,
832};
833
834static const struct of_device_id mdc_dma_of_match[] = {
835 { .compatible = "img,pistachio-mdc-dma", .data = &pistachio_mdc_data, },
836 { },
837};
838MODULE_DEVICE_TABLE(of, mdc_dma_of_match);
839
840static int mdc_dma_probe(struct platform_device *pdev)
841{
842 struct mdc_dma *mdma;
843 struct resource *res;
844 const struct of_device_id *match;
845 unsigned int i;
846 u32 val;
847 int ret;
848
849 mdma = devm_kzalloc(&pdev->dev, sizeof(*mdma), GFP_KERNEL);
850 if (!mdma)
851 return -ENOMEM;
852 platform_set_drvdata(pdev, mdma);
853
854 match = of_match_device(mdc_dma_of_match, &pdev->dev);
855 mdma->soc = match->data;
856
857 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
858 mdma->regs = devm_ioremap_resource(&pdev->dev, res);
859 if (IS_ERR(mdma->regs))
860 return PTR_ERR(mdma->regs);
861
862 mdma->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
863 "img,cr-periph");
864 if (IS_ERR(mdma->periph_regs))
865 return PTR_ERR(mdma->periph_regs);
866
867 mdma->clk = devm_clk_get(&pdev->dev, "sys");
868 if (IS_ERR(mdma->clk))
869 return PTR_ERR(mdma->clk);
870
871 ret = clk_prepare_enable(mdma->clk);
872 if (ret)
873 return ret;
874
875 dma_cap_zero(mdma->dma_dev.cap_mask);
876 dma_cap_set(DMA_SLAVE, mdma->dma_dev.cap_mask);
877 dma_cap_set(DMA_PRIVATE, mdma->dma_dev.cap_mask);
878 dma_cap_set(DMA_CYCLIC, mdma->dma_dev.cap_mask);
879 dma_cap_set(DMA_MEMCPY, mdma->dma_dev.cap_mask);
880
881 val = mdc_readl(mdma, MDC_GLOBAL_CONFIG_A);
882 mdma->nr_channels = (val >> MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT) &
883 MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK;
884 mdma->nr_threads =
885 1 << ((val >> MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_SHIFT) &
886 MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_MASK);
887 mdma->bus_width =
888 (1 << ((val >> MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_SHIFT) &
889 MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_MASK)) / 8;
890 /*
891 * Although transfer sizes of up to MDC_TRANSFER_SIZE_MASK + 1 bytes
892 * are supported, this makes it possible for the value reported in
893 * MDC_ACTIVE_TRANSFER_SIZE to be ambiguous - an active transfer size
894 * of MDC_TRANSFER_SIZE_MASK may indicate either that 0 bytes or
895 * MDC_TRANSFER_SIZE_MASK + 1 bytes are remaining. To eliminate this
896 * ambiguity, restrict transfer sizes to one bus-width less than the
897 * actual maximum.
898 */
899 mdma->max_xfer_size = MDC_TRANSFER_SIZE_MASK + 1 - mdma->bus_width;
900
901 of_property_read_u32(pdev->dev.of_node, "dma-channels",
902 &mdma->nr_channels);
903 ret = of_property_read_u32(pdev->dev.of_node,
904 "img,max-burst-multiplier",
905 &mdma->max_burst_mult);
906 if (ret)
907 goto disable_clk;
908
909 mdma->dma_dev.dev = &pdev->dev;
910 mdma->dma_dev.device_prep_slave_sg = mdc_prep_slave_sg;
911 mdma->dma_dev.device_prep_dma_cyclic = mdc_prep_dma_cyclic;
912 mdma->dma_dev.device_prep_dma_memcpy = mdc_prep_dma_memcpy;
913 mdma->dma_dev.device_alloc_chan_resources = mdc_alloc_chan_resources;
914 mdma->dma_dev.device_free_chan_resources = mdc_free_chan_resources;
915 mdma->dma_dev.device_tx_status = mdc_tx_status;
916 mdma->dma_dev.device_issue_pending = mdc_issue_pending;
917 mdma->dma_dev.device_terminate_all = mdc_terminate_all;
918 mdma->dma_dev.device_config = mdc_slave_config;
919
920 mdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
921 mdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
922 for (i = 1; i <= mdma->bus_width; i <<= 1) {
923 mdma->dma_dev.src_addr_widths |= BIT(i);
924 mdma->dma_dev.dst_addr_widths |= BIT(i);
925 }
926
927 INIT_LIST_HEAD(&mdma->dma_dev.channels);
928 for (i = 0; i < mdma->nr_channels; i++) {
929 struct mdc_chan *mchan = &mdma->channels[i];
930
931 mchan->mdma = mdma;
932 mchan->chan_nr = i;
933 mchan->irq = platform_get_irq(pdev, i);
934 if (mchan->irq < 0) {
935 ret = mchan->irq;
936 goto disable_clk;
937 }
938 ret = devm_request_irq(&pdev->dev, mchan->irq, mdc_chan_irq,
939 IRQ_TYPE_LEVEL_HIGH,
940 dev_name(&pdev->dev), mchan);
941 if (ret < 0)
942 goto disable_clk;
943
944 mchan->vc.desc_free = mdc_desc_free;
945 vchan_init(&mchan->vc, &mdma->dma_dev);
946 }
947
948 mdma->desc_pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
949 sizeof(struct mdc_hw_list_desc),
950 4, 0);
951 if (!mdma->desc_pool) {
952 ret = -ENOMEM;
953 goto disable_clk;
954 }
955
956 ret = dma_async_device_register(&mdma->dma_dev);
957 if (ret)
958 goto disable_clk;
959
960 ret = of_dma_controller_register(pdev->dev.of_node, mdc_of_xlate, mdma);
961 if (ret)
962 goto unregister;
963
964 dev_info(&pdev->dev, "MDC with %u channels and %u threads\n",
965 mdma->nr_channels, mdma->nr_threads);
966
967 return 0;
968
969unregister:
970 dma_async_device_unregister(&mdma->dma_dev);
971disable_clk:
972 clk_disable_unprepare(mdma->clk);
973 return ret;
974}
975
976static int mdc_dma_remove(struct platform_device *pdev)
977{
978 struct mdc_dma *mdma = platform_get_drvdata(pdev);
979 struct mdc_chan *mchan, *next;
980
981 of_dma_controller_free(pdev->dev.of_node);
982 dma_async_device_unregister(&mdma->dma_dev);
983
984 list_for_each_entry_safe(mchan, next, &mdma->dma_dev.channels,
985 vc.chan.device_node) {
986 list_del(&mchan->vc.chan.device_node);
987
988 synchronize_irq(mchan->irq);
989 devm_free_irq(&pdev->dev, mchan->irq, mchan);
990
991 tasklet_kill(&mchan->vc.task);
992 }
993
994 clk_disable_unprepare(mdma->clk);
995
996 return 0;
997}
998
999static struct platform_driver mdc_dma_driver = {
1000 .driver = {
1001 .name = "img-mdc-dma",
1002 .of_match_table = of_match_ptr(mdc_dma_of_match),
1003 },
1004 .probe = mdc_dma_probe,
1005 .remove = mdc_dma_remove,
1006};
1007module_platform_driver(mdc_dma_driver);
1008
1009MODULE_DESCRIPTION("IMG Multi-threaded DMA Controller (MDC) driver");
1010MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
1011MODULE_LICENSE("GPL v2");
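
A peripheral driver sees the new MDC controller only through the standard dmaengine slave API: mdc_of_xlate() hands out a channel and the prep callbacks above build the hardware list descriptors. A rough usage sketch for a cyclic receive, with invented names ("rx", example_start_cyclic_rx) and parameters chosen purely for illustration:

#include <linux/dmaengine.h>

static int example_start_cyclic_rx(struct device *dev, dma_addr_t buf,
				   size_t buf_len, size_t period_len,
				   dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction      = DMA_DEV_TO_MEM,
		.src_addr       = fifo_addr,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst   = 4,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, "rx");	/* channel name is illustrative */
	if (!chan)
		return -ENODEV;

	dmaengine_slave_config(chan, &cfg);		/* -> mdc_slave_config() */
	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -EINVAL;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);			/* -> mdc_issue_pending() */
	return 0;
}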
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 10bbc0a675b0..eed405976ea9 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -230,11 +230,6 @@ static inline int is_imx1_dma(struct imxdma_engine *imxdma)
230 return imxdma->devtype == IMX1_DMA; 230 return imxdma->devtype == IMX1_DMA;
231} 231}
232 232
233static inline int is_imx21_dma(struct imxdma_engine *imxdma)
234{
235 return imxdma->devtype == IMX21_DMA;
236}
237
238static inline int is_imx27_dma(struct imxdma_engine *imxdma) 233static inline int is_imx27_dma(struct imxdma_engine *imxdma)
239{ 234{
240 return imxdma->devtype == IMX27_DMA; 235 return imxdma->devtype == IMX27_DMA;
@@ -669,69 +664,67 @@ out:
 
 }
 
-static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		unsigned long arg)
+static int imxdma_terminate_all(struct dma_chan *chan)
 {
 	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
-	struct dma_slave_config *dmaengine_cfg = (void *)arg;
 	struct imxdma_engine *imxdma = imxdmac->imxdma;
 	unsigned long flags;
-	unsigned int mode = 0;
-
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		imxdma_disable_hw(imxdmac);
-
-		spin_lock_irqsave(&imxdma->lock, flags);
-		list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
-		list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
-		spin_unlock_irqrestore(&imxdma->lock, flags);
-		return 0;
-	case DMA_SLAVE_CONFIG:
-		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
-			imxdmac->per_address = dmaengine_cfg->src_addr;
-			imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
-			imxdmac->word_size = dmaengine_cfg->src_addr_width;
-		} else {
-			imxdmac->per_address = dmaengine_cfg->dst_addr;
-			imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
-			imxdmac->word_size = dmaengine_cfg->dst_addr_width;
-		}
 
-		switch (imxdmac->word_size) {
-		case DMA_SLAVE_BUSWIDTH_1_BYTE:
-			mode = IMX_DMA_MEMSIZE_8;
-			break;
-		case DMA_SLAVE_BUSWIDTH_2_BYTES:
-			mode = IMX_DMA_MEMSIZE_16;
-			break;
-		default:
-		case DMA_SLAVE_BUSWIDTH_4_BYTES:
-			mode = IMX_DMA_MEMSIZE_32;
-			break;
-		}
+	imxdma_disable_hw(imxdmac);
 
-		imxdmac->hw_chaining = 0;
+	spin_lock_irqsave(&imxdma->lock, flags);
+	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
+	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
+	spin_unlock_irqrestore(&imxdma->lock, flags);
+	return 0;
+}
 
-		imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
-			((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
-			CCR_REN;
-		imxdmac->ccr_to_device =
-			(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
-			((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
-		imx_dmav1_writel(imxdma, imxdmac->dma_request,
-				 DMA_RSSR(imxdmac->channel));
+static int imxdma_config(struct dma_chan *chan,
+			 struct dma_slave_config *dmaengine_cfg)
+{
+	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+	struct imxdma_engine *imxdma = imxdmac->imxdma;
+	unsigned int mode = 0;
 
-		/* Set burst length */
-		imx_dmav1_writel(imxdma, imxdmac->watermark_level *
-				imxdmac->word_size, DMA_BLR(imxdmac->channel));
+	if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
+		imxdmac->per_address = dmaengine_cfg->src_addr;
+		imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
+		imxdmac->word_size = dmaengine_cfg->src_addr_width;
+	} else {
+		imxdmac->per_address = dmaengine_cfg->dst_addr;
+		imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
+		imxdmac->word_size = dmaengine_cfg->dst_addr_width;
+	}
 
-		return 0;
+	switch (imxdmac->word_size) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		mode = IMX_DMA_MEMSIZE_8;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		mode = IMX_DMA_MEMSIZE_16;
+		break;
 	default:
-		return -ENOSYS;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		mode = IMX_DMA_MEMSIZE_32;
+		break;
 	}
 
-	return -EINVAL;
+	imxdmac->hw_chaining = 0;
+
+	imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
+		((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
+		CCR_REN;
+	imxdmac->ccr_to_device =
+		(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
+		((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
+	imx_dmav1_writel(imxdma, imxdmac->dma_request,
+			 DMA_RSSR(imxdmac->channel));
+
+	/* Set burst length */
+	imx_dmav1_writel(imxdma, imxdmac->watermark_level *
+			 imxdmac->word_size, DMA_BLR(imxdmac->channel));
+
+	return 0;
 }
 
 static enum dma_status imxdma_tx_status(struct dma_chan *chan,
@@ -1184,7 +1177,8 @@ static int __init imxdma_probe(struct platform_device *pdev)
1184 imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic; 1177 imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
1185 imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy; 1178 imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
1186 imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved; 1179 imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
1187 imxdma->dma_device.device_control = imxdma_control; 1180 imxdma->dma_device.device_config = imxdma_config;
1181 imxdma->dma_device.device_terminate_all = imxdma_terminate_all;
1188 imxdma->dma_device.device_issue_pending = imxdma_issue_pending; 1182 imxdma->dma_device.device_issue_pending = imxdma_issue_pending;
1189 1183
1190 platform_set_drvdata(pdev, imxdma); 1184 platform_set_drvdata(pdev, imxdma);
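
The conversion above drops the cmd/arg multiplexer in favour of per-operation callbacks. Roughly, the generic helpers now reduce to the following shape (a simplified sketch with invented names; see include/linux/dmaengine.h for the real wrappers, which also keep a fallback for drivers that have not been converted yet):

#include <linux/dmaengine.h>

static inline int sketch_slave_config(struct dma_chan *chan,
				      struct dma_slave_config *config)
{
	/* drivers like imx-dma now provide device_config directly */
	if (chan->device->device_config)
		return chan->device->device_config(chan, config);
	return -ENOSYS;
}

static inline int sketch_terminate_all(struct dma_chan *chan)
{
	if (chan->device->device_terminate_all)
		return chan->device->device_terminate_all(chan);
	return -ENOSYS;
}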
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index d0df198f62e9..18c0a131e4e4 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -830,20 +830,29 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 	return ret;
 }
 
-static void sdma_disable_channel(struct sdma_channel *sdmac)
+static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct sdma_channel, chan);
+}
+
+static int sdma_disable_channel(struct dma_chan *chan)
 {
+	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 	int channel = sdmac->channel;
 
 	writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
 	sdmac->status = DMA_ERROR;
+
+	return 0;
 }
 
-static int sdma_config_channel(struct sdma_channel *sdmac)
+static int sdma_config_channel(struct dma_chan *chan)
 {
+	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	int ret;
 
-	sdma_disable_channel(sdmac);
+	sdma_disable_channel(chan);
 
 	sdmac->event_mask[0] = 0;
 	sdmac->event_mask[1] = 0;
@@ -935,11 +944,6 @@ out:
935 return ret; 944 return ret;
936} 945}
937 946
938static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
939{
940 return container_of(chan, struct sdma_channel, chan);
941}
942
943static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx) 947static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
944{ 948{
945 unsigned long flags; 949 unsigned long flags;
@@ -1004,7 +1008,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
1004 struct sdma_channel *sdmac = to_sdma_chan(chan); 1008 struct sdma_channel *sdmac = to_sdma_chan(chan);
1005 struct sdma_engine *sdma = sdmac->sdma; 1009 struct sdma_engine *sdma = sdmac->sdma;
1006 1010
1007 sdma_disable_channel(sdmac); 1011 sdma_disable_channel(chan);
1008 1012
1009 if (sdmac->event_id0) 1013 if (sdmac->event_id0)
1010 sdma_event_disable(sdmac, sdmac->event_id0); 1014 sdma_event_disable(sdmac, sdmac->event_id0);
@@ -1203,35 +1207,24 @@ err_out:
 	return NULL;
 }
 
-static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		unsigned long arg)
+static int sdma_config(struct dma_chan *chan,
+		       struct dma_slave_config *dmaengine_cfg)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
-	struct dma_slave_config *dmaengine_cfg = (void *)arg;
-
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		sdma_disable_channel(sdmac);
-		return 0;
-	case DMA_SLAVE_CONFIG:
-		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
-			sdmac->per_address = dmaengine_cfg->src_addr;
-			sdmac->watermark_level = dmaengine_cfg->src_maxburst *
-						dmaengine_cfg->src_addr_width;
-			sdmac->word_size = dmaengine_cfg->src_addr_width;
-		} else {
-			sdmac->per_address = dmaengine_cfg->dst_addr;
-			sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
-						dmaengine_cfg->dst_addr_width;
-			sdmac->word_size = dmaengine_cfg->dst_addr_width;
-		}
-		sdmac->direction = dmaengine_cfg->direction;
-		return sdma_config_channel(sdmac);
-	default:
-		return -ENOSYS;
-	}
 
-	return -EINVAL;
+	if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
+		sdmac->per_address = dmaengine_cfg->src_addr;
+		sdmac->watermark_level = dmaengine_cfg->src_maxburst *
+			dmaengine_cfg->src_addr_width;
+		sdmac->word_size = dmaengine_cfg->src_addr_width;
+	} else {
+		sdmac->per_address = dmaengine_cfg->dst_addr;
+		sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
+			dmaengine_cfg->dst_addr_width;
+		sdmac->word_size = dmaengine_cfg->dst_addr_width;
+	}
+	sdmac->direction = dmaengine_cfg->direction;
+	return sdma_config_channel(chan);
 }
 
 static enum dma_status sdma_tx_status(struct dma_chan *chan,
@@ -1303,15 +1296,15 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
1303 if (header->ram_code_start + header->ram_code_size > fw->size) 1296 if (header->ram_code_start + header->ram_code_size > fw->size)
1304 goto err_firmware; 1297 goto err_firmware;
1305 switch (header->version_major) { 1298 switch (header->version_major) {
1306 case 1: 1299 case 1:
1307 sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; 1300 sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
1308 break; 1301 break;
1309 case 2: 1302 case 2:
1310 sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2; 1303 sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
1311 break; 1304 break;
1312 default: 1305 default:
1313 dev_err(sdma->dev, "unknown firmware version\n"); 1306 dev_err(sdma->dev, "unknown firmware version\n");
1314 goto err_firmware; 1307 goto err_firmware;
1315 } 1308 }
1316 1309
1317 addr = (void *)header + header->script_addrs_start; 1310 addr = (void *)header + header->script_addrs_start;
@@ -1479,7 +1472,7 @@ static int sdma_probe(struct platform_device *pdev)
1479 if (ret) 1472 if (ret)
1480 return ret; 1473 return ret;
1481 1474
1482 sdma = kzalloc(sizeof(*sdma), GFP_KERNEL); 1475 sdma = devm_kzalloc(&pdev->dev, sizeof(*sdma), GFP_KERNEL);
1483 if (!sdma) 1476 if (!sdma)
1484 return -ENOMEM; 1477 return -ENOMEM;
1485 1478
@@ -1488,48 +1481,34 @@ static int sdma_probe(struct platform_device *pdev)
 	sdma->dev = &pdev->dev;
 	sdma->drvdata = drvdata;
 
-	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	irq = platform_get_irq(pdev, 0);
-	if (!iores || irq < 0) {
-		ret = -EINVAL;
-		goto err_irq;
-	}
+	if (irq < 0)
+		return irq;
 
-	if (!request_mem_region(iores->start, resource_size(iores), pdev->name)) {
-		ret = -EBUSY;
-		goto err_request_region;
-	}
+	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	sdma->regs = devm_ioremap_resource(&pdev->dev, iores);
+	if (IS_ERR(sdma->regs))
+		return PTR_ERR(sdma->regs);
 
 	sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
-	if (IS_ERR(sdma->clk_ipg)) {
-		ret = PTR_ERR(sdma->clk_ipg);
-		goto err_clk;
-	}
+	if (IS_ERR(sdma->clk_ipg))
+		return PTR_ERR(sdma->clk_ipg);
 
 	sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
-	if (IS_ERR(sdma->clk_ahb)) {
-		ret = PTR_ERR(sdma->clk_ahb);
-		goto err_clk;
-	}
+	if (IS_ERR(sdma->clk_ahb))
+		return PTR_ERR(sdma->clk_ahb);
 
 	clk_prepare(sdma->clk_ipg);
 	clk_prepare(sdma->clk_ahb);
 
-	sdma->regs = ioremap(iores->start, resource_size(iores));
-	if (!sdma->regs) {
-		ret = -ENOMEM;
-		goto err_ioremap;
-	}
-
-	ret = request_irq(irq, sdma_int_handler, 0, "sdma", sdma);
+	ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
+			       sdma);
 	if (ret)
-		goto err_request_irq;
+		return ret;
 
 	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
-	if (!sdma->script_addrs) {
-		ret = -ENOMEM;
-		goto err_alloc;
-	}
+	if (!sdma->script_addrs)
+		return -ENOMEM;
 
 	/* initially no scripts available */
 	saddr_arr = (s32 *)sdma->script_addrs;
@@ -1600,7 +1579,12 @@ static int sdma_probe(struct platform_device *pdev)
1600 sdma->dma_device.device_tx_status = sdma_tx_status; 1579 sdma->dma_device.device_tx_status = sdma_tx_status;
1601 sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg; 1580 sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
1602 sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; 1581 sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
1603 sdma->dma_device.device_control = sdma_control; 1582 sdma->dma_device.device_config = sdma_config;
1583 sdma->dma_device.device_terminate_all = sdma_disable_channel;
1584 sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
1585 sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
1586 sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1587 sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1604 sdma->dma_device.device_issue_pending = sdma_issue_pending; 1588 sdma->dma_device.device_issue_pending = sdma_issue_pending;
1605 sdma->dma_device.dev->dma_parms = &sdma->dma_parms; 1589 sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
1606 dma_set_max_seg_size(sdma->dma_device.dev, 65535); 1590 dma_set_max_seg_size(sdma->dma_device.dev, 65535);
@@ -1629,38 +1613,22 @@ err_register:
1629 dma_async_device_unregister(&sdma->dma_device); 1613 dma_async_device_unregister(&sdma->dma_device);
1630err_init: 1614err_init:
1631 kfree(sdma->script_addrs); 1615 kfree(sdma->script_addrs);
1632err_alloc:
1633 free_irq(irq, sdma);
1634err_request_irq:
1635 iounmap(sdma->regs);
1636err_ioremap:
1637err_clk:
1638 release_mem_region(iores->start, resource_size(iores));
1639err_request_region:
1640err_irq:
1641 kfree(sdma);
1642 return ret; 1616 return ret;
1643} 1617}
1644 1618
1645static int sdma_remove(struct platform_device *pdev) 1619static int sdma_remove(struct platform_device *pdev)
1646{ 1620{
1647 struct sdma_engine *sdma = platform_get_drvdata(pdev); 1621 struct sdma_engine *sdma = platform_get_drvdata(pdev);
1648 struct resource *iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1649 int irq = platform_get_irq(pdev, 0);
1650 int i; 1622 int i;
1651 1623
1652 dma_async_device_unregister(&sdma->dma_device); 1624 dma_async_device_unregister(&sdma->dma_device);
1653 kfree(sdma->script_addrs); 1625 kfree(sdma->script_addrs);
1654 free_irq(irq, sdma);
1655 iounmap(sdma->regs);
1656 release_mem_region(iores->start, resource_size(iores));
1657 /* Kill the tasklet */ 1626 /* Kill the tasklet */
1658 for (i = 0; i < MAX_DMA_CHANNELS; i++) { 1627 for (i = 0; i < MAX_DMA_CHANNELS; i++) {
1659 struct sdma_channel *sdmac = &sdma->channel[i]; 1628 struct sdma_channel *sdmac = &sdma->channel[i];
1660 1629
1661 tasklet_kill(&sdmac->tasklet); 1630 tasklet_kill(&sdmac->tasklet);
1662 } 1631 }
1663 kfree(sdma);
1664 1632
1665 platform_set_drvdata(pdev, NULL); 1633 platform_set_drvdata(pdev, NULL);
1666 dev_info(&pdev->dev, "Removed...\n"); 1634 dev_info(&pdev->dev, "Removed...\n");
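
The sdma_probe()/sdma_remove() changes above follow the usual devm pattern: once a resource is device-managed, the goto-based unwind labels and the matching releases in remove() disappear. A generic sketch of that shape (all names hypothetical, the interrupt handler is a stub):

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static irqreturn_t example_irq(int irq, void *data)
{
	return IRQ_HANDLED;	/* stub handler, real work omitted */
}

static int example_devm_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	/* released automatically on probe failure or device removal */
	return devm_request_irq(&pdev->dev, irq, example_irq, 0,
				dev_name(&pdev->dev), NULL);
}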
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 1aab8130efa1..5aaead9b56f7 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -492,10 +492,10 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
 	return ret;
 }
 
-static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
+static int intel_mid_dma_config(struct dma_chan *chan,
+				struct dma_slave_config *slave)
 {
 	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
-	struct dma_slave_config  *slave = (struct dma_slave_config *)arg;
 	struct intel_mid_dma_slave *mid_slave;
 
 	BUG_ON(!midc);
@@ -509,28 +509,14 @@ static int dma_slave_control(struct dma_chan *chan, unsigned long arg)
 	midc->mid_slave = mid_slave;
 	return 0;
 }
-/**
- * intel_mid_dma_device_control -	DMA device control
- * @chan: chan for DMA control
- * @cmd: control cmd
- * @arg: cmd arg value
- *
- * Perform DMA control command
- */
-static int intel_mid_dma_device_control(struct dma_chan *chan,
-	enum dma_ctrl_cmd cmd, unsigned long arg)
+
+static int intel_mid_dma_terminate_all(struct dma_chan *chan)
 {
 	struct intel_mid_dma_chan	*midc = to_intel_mid_dma_chan(chan);
 	struct middma_device	*mid = to_middma_device(chan->device);
 	struct intel_mid_dma_desc	*desc, *_desc;
 	union intel_mid_dma_cfg_lo cfg_lo;
 
-	if (cmd == DMA_SLAVE_CONFIG)
-		return dma_slave_control(chan, arg);
-
-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENXIO;
-
 	spin_lock_bh(&midc->lock);
 	if (midc->busy == false) {
 		spin_unlock_bh(&midc->lock);
@@ -1148,7 +1134,8 @@ static int mid_setup_dma(struct pci_dev *pdev)
1148 dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy; 1134 dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy;
1149 dma->common.device_issue_pending = intel_mid_dma_issue_pending; 1135 dma->common.device_issue_pending = intel_mid_dma_issue_pending;
1150 dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg; 1136 dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg;
1151 dma->common.device_control = intel_mid_dma_device_control; 1137 dma->common.device_config = intel_mid_dma_config;
1138 dma->common.device_terminate_all = intel_mid_dma_terminate_all;
1152 1139
1153 /*enable dma cntrl*/ 1140 /*enable dma cntrl*/
1154 iowrite32(REG_BIT0, dma->dma_base + DMA_CFG); 1141 iowrite32(REG_BIT0, dma->dma_base + DMA_CFG);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 32eae38291e5..77a6dcf25b98 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -214,6 +214,11 @@ static bool is_bwd_ioat(struct pci_dev *pdev)
214 case PCI_DEVICE_ID_INTEL_IOAT_BWD1: 214 case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
215 case PCI_DEVICE_ID_INTEL_IOAT_BWD2: 215 case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
216 case PCI_DEVICE_ID_INTEL_IOAT_BWD3: 216 case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
217 /* even though not Atom, BDX-DE has same DMA silicon */
218 case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
219 case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
220 case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
221 case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
217 return true; 222 return true;
218 default: 223 default:
219 return false; 224 return false;
@@ -489,6 +494,7 @@ static void ioat3_eh(struct ioat2_dma_chan *ioat)
489 struct ioat_chan_common *chan = &ioat->base; 494 struct ioat_chan_common *chan = &ioat->base;
490 struct pci_dev *pdev = to_pdev(chan); 495 struct pci_dev *pdev = to_pdev(chan);
491 struct ioat_dma_descriptor *hw; 496 struct ioat_dma_descriptor *hw;
497 struct dma_async_tx_descriptor *tx;
492 u64 phys_complete; 498 u64 phys_complete;
493 struct ioat_ring_ent *desc; 499 struct ioat_ring_ent *desc;
494 u32 err_handled = 0; 500 u32 err_handled = 0;
@@ -534,6 +540,16 @@ static void ioat3_eh(struct ioat2_dma_chan *ioat)
534 dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n", 540 dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n",
535 __func__, chanerr, err_handled); 541 __func__, chanerr, err_handled);
536 BUG(); 542 BUG();
543 } else { /* cleanup the faulty descriptor */
544 tx = &desc->txd;
545 if (tx->cookie) {
546 dma_cookie_complete(tx);
547 dma_descriptor_unmap(tx);
548 if (tx->callback) {
549 tx->callback(tx->callback_param);
550 tx->callback = NULL;
551 }
552 }
537 } 553 }
538 554
539 writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); 555 writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
@@ -1300,7 +1316,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
1300 1316
1301 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 1317 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
1302 1318
1303 if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { 1319 if (tmo == 0 ||
1320 dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
1304 dev_err(dev, "Self-test xor timed out\n"); 1321 dev_err(dev, "Self-test xor timed out\n");
1305 err = -ENODEV; 1322 err = -ENODEV;
1306 goto dma_unmap; 1323 goto dma_unmap;
@@ -1366,7 +1383,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
1366 1383
1367 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 1384 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
1368 1385
1369 if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { 1386 if (tmo == 0 ||
1387 dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
1370 dev_err(dev, "Self-test validate timed out\n"); 1388 dev_err(dev, "Self-test validate timed out\n");
1371 err = -ENODEV; 1389 err = -ENODEV;
1372 goto dma_unmap; 1390 goto dma_unmap;
@@ -1418,7 +1436,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
1418 1436
1419 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 1437 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
1420 1438
1421 if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { 1439 if (tmo == 0 ||
1440 dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
1422 dev_err(dev, "Self-test 2nd validate timed out\n"); 1441 dev_err(dev, "Self-test 2nd validate timed out\n");
1423 err = -ENODEV; 1442 err = -ENODEV;
1424 goto dma_unmap; 1443 goto dma_unmap;
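
The ioat self-test fix above hinges on the return convention of wait_for_completion_timeout(): it returns 0 on timeout and the remaining jiffies otherwise, so a zero return must be treated as a failure before consulting the descriptor status. A tiny self-contained illustration (helper name invented):

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/types.h>

/* Returns true only if @done completed within @ms milliseconds. */
static bool example_wait_done(struct completion *done, unsigned int ms)
{
	/* wait_for_completion_timeout() returns 0 on timeout */
	return wait_for_completion_timeout(done, msecs_to_jiffies(ms)) != 0;
}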
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 62f83e983d8d..02177ecf09f8 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -57,6 +57,11 @@
57#define PCI_DEVICE_ID_INTEL_IOAT_BWD2 0x0C52 57#define PCI_DEVICE_ID_INTEL_IOAT_BWD2 0x0C52
58#define PCI_DEVICE_ID_INTEL_IOAT_BWD3 0x0C53 58#define PCI_DEVICE_ID_INTEL_IOAT_BWD3 0x0C53
59 59
60#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE0 0x6f50
61#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE1 0x6f51
62#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE2 0x6f52
63#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE3 0x6f53
64
60#define IOAT_VER_1_2 0x12 /* Version 1.2 */ 65#define IOAT_VER_1_2 0x12 /* Version 1.2 */
61#define IOAT_VER_2_0 0x20 /* Version 2.0 */ 66#define IOAT_VER_2_0 0x20 /* Version 2.0 */
62#define IOAT_VER_3_0 0x30 /* Version 3.0 */ 67#define IOAT_VER_3_0 0x30 /* Version 3.0 */
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index 1d051cd045db..5501eb072d69 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -111,6 +111,11 @@ static struct pci_device_id ioat_pci_tbl[] = {
111 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) }, 111 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) },
112 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) }, 112 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) },
113 113
114 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) },
115 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) },
116 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
117 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },
118
114 { 0, } 119 { 0, }
115}; 120};
116MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); 121MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index c2b017ad139d..b54f62de9232 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1398,76 +1398,81 @@ static void idmac_issue_pending(struct dma_chan *chan)
  */
 }
 
-static int __idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-			   unsigned long arg)
+static int idmac_pause(struct dma_chan *chan)
 {
 	struct idmac_channel *ichan = to_idmac_chan(chan);
 	struct idmac *idmac = to_idmac(chan->device);
 	struct ipu *ipu = to_ipu(idmac);
 	struct list_head *list, *tmp;
 	unsigned long flags;
-	int i;
 
-	switch (cmd) {
-	case DMA_PAUSE:
-		spin_lock_irqsave(&ipu->lock, flags);
-		ipu_ic_disable_task(ipu, chan->chan_id);
+	mutex_lock(&ichan->chan_mutex);
 
-		/* Return all descriptors into "prepared" state */
-		list_for_each_safe(list, tmp, &ichan->queue)
-			list_del_init(list);
+	spin_lock_irqsave(&ipu->lock, flags);
+	ipu_ic_disable_task(ipu, chan->chan_id);
 
-		ichan->sg[0] = NULL;
-		ichan->sg[1] = NULL;
+	/* Return all descriptors into "prepared" state */
+	list_for_each_safe(list, tmp, &ichan->queue)
+		list_del_init(list);
 
-		spin_unlock_irqrestore(&ipu->lock, flags);
+	ichan->sg[0] = NULL;
+	ichan->sg[1] = NULL;
 
-		ichan->status = IPU_CHANNEL_INITIALIZED;
-		break;
-	case DMA_TERMINATE_ALL:
-		ipu_disable_channel(idmac, ichan,
-				    ichan->status >= IPU_CHANNEL_ENABLED);
+	spin_unlock_irqrestore(&ipu->lock, flags);
 
-		tasklet_disable(&ipu->tasklet);
+	ichan->status = IPU_CHANNEL_INITIALIZED;
 
-		/* ichan->queue is modified in ISR, have to spinlock */
-		spin_lock_irqsave(&ichan->lock, flags);
-		list_splice_init(&ichan->queue, &ichan->free_list);
+	mutex_unlock(&ichan->chan_mutex);
 
-		if (ichan->desc)
-			for (i = 0; i < ichan->n_tx_desc; i++) {
-				struct idmac_tx_desc *desc = ichan->desc + i;
-				if (list_empty(&desc->list))
-					/* Descriptor was prepared, but not submitted */
-					list_add(&desc->list, &ichan->free_list);
+	return 0;
+}
 
-				async_tx_clear_ack(&desc->txd);
-			}
+static int __idmac_terminate_all(struct dma_chan *chan)
+{
+	struct idmac_channel *ichan = to_idmac_chan(chan);
+	struct idmac *idmac = to_idmac(chan->device);
+	struct ipu *ipu = to_ipu(idmac);
+	unsigned long flags;
+	int i;
 
-		ichan->sg[0] = NULL;
-		ichan->sg[1] = NULL;
-		spin_unlock_irqrestore(&ichan->lock, flags);
+	ipu_disable_channel(idmac, ichan,
+			    ichan->status >= IPU_CHANNEL_ENABLED);
 
-		tasklet_enable(&ipu->tasklet);
+	tasklet_disable(&ipu->tasklet);
 
-		ichan->status = IPU_CHANNEL_INITIALIZED;
-		break;
-	default:
-		return -ENOSYS;
-	}
+	/* ichan->queue is modified in ISR, have to spinlock */
+	spin_lock_irqsave(&ichan->lock, flags);
+	list_splice_init(&ichan->queue, &ichan->free_list);
+
+	if (ichan->desc)
+		for (i = 0; i < ichan->n_tx_desc; i++) {
+			struct idmac_tx_desc *desc = ichan->desc + i;
+			if (list_empty(&desc->list))
+				/* Descriptor was prepared, but not submitted */
+				list_add(&desc->list, &ichan->free_list);
+
+			async_tx_clear_ack(&desc->txd);
+		}
+
+	ichan->sg[0] = NULL;
+	ichan->sg[1] = NULL;
+	spin_unlock_irqrestore(&ichan->lock, flags);
+
+	tasklet_enable(&ipu->tasklet);
+
+	ichan->status = IPU_CHANNEL_INITIALIZED;
 
 	return 0;
 }
 
-static int idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-			 unsigned long arg)
+static int idmac_terminate_all(struct dma_chan *chan)
 {
 	struct idmac_channel *ichan = to_idmac_chan(chan);
 	int ret;
 
 	mutex_lock(&ichan->chan_mutex);
 
-	ret = __idmac_control(chan, cmd, arg);
+	ret = __idmac_terminate_all(chan);
 
 	mutex_unlock(&ichan->chan_mutex);
 
@@ -1568,7 +1573,7 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
1568 1573
1569 mutex_lock(&ichan->chan_mutex); 1574 mutex_lock(&ichan->chan_mutex);
1570 1575
1571 __idmac_control(chan, DMA_TERMINATE_ALL, 0); 1576 __idmac_terminate_all(chan);
1572 1577
1573 if (ichan->status > IPU_CHANNEL_FREE) { 1578 if (ichan->status > IPU_CHANNEL_FREE) {
1574#ifdef DEBUG 1579#ifdef DEBUG
@@ -1622,7 +1627,8 @@ static int __init ipu_idmac_init(struct ipu *ipu)
1622 1627
1623 /* Compulsory for DMA_SLAVE fields */ 1628 /* Compulsory for DMA_SLAVE fields */
1624 dma->device_prep_slave_sg = idmac_prep_slave_sg; 1629 dma->device_prep_slave_sg = idmac_prep_slave_sg;
1625 dma->device_control = idmac_control; 1630 dma->device_pause = idmac_pause;
1631 dma->device_terminate_all = idmac_terminate_all;
1626 1632
1627 INIT_LIST_HEAD(&dma->channels); 1633 INIT_LIST_HEAD(&dma->channels);
1628 for (i = 0; i < IPU_CHANNELS_NUM; i++) { 1634 for (i = 0; i < IPU_CHANNELS_NUM; i++) {
@@ -1655,7 +1661,7 @@ static void ipu_idmac_exit(struct ipu *ipu)
1655 for (i = 0; i < IPU_CHANNELS_NUM; i++) { 1661 for (i = 0; i < IPU_CHANNELS_NUM; i++) {
1656 struct idmac_channel *ichan = ipu->channel + i; 1662 struct idmac_channel *ichan = ipu->channel + i;
1657 1663
1658 idmac_control(&ichan->dma_chan, DMA_TERMINATE_ALL, 0); 1664 idmac_terminate_all(&ichan->dma_chan);
1659 } 1665 }
1660 1666
1661 dma_async_device_unregister(&idmac->dma); 1667 dma_async_device_unregister(&idmac->dma);
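Illustrative consumer-side sketch, not part of the patch (the channel is assumed to have been requested earlier): the generic dmaengine helpers keep their old signatures and now simply land in per-operation hooks such as idmac_terminate_all(), so a typical teardown still looks like this.

#include <linux/dmaengine.h>

static void shut_down_channel(struct dma_chan *chan)
{
	/* Now dispatched to the driver's .device_terminate_all hook */
	dmaengine_terminate_all(chan);
	dma_release_channel(chan);
}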
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index a1de14ab2c51..6f7f43529ccb 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -441,7 +441,7 @@ static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
441 num = 0; 441 num = 0;
442 442
443 if (!c->ccfg) { 443 if (!c->ccfg) {
444 /* default is memtomem, without calling device_control */ 444 /* default is memtomem, without calling device_config */
445 c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN; 445 c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
446 c->ccfg |= (0xf << 20) | (0xf << 24); /* burst = 16 */ 446 c->ccfg |= (0xf << 20) | (0xf << 24); /* burst = 16 */
447 c->ccfg |= (0x3 << 12) | (0x3 << 16); /* width = 64 bit */ 447 c->ccfg |= (0x3 << 12) | (0x3 << 16); /* width = 64 bit */
@@ -523,112 +523,126 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
523 return vchan_tx_prep(&c->vc, &ds->vd, flags); 523 return vchan_tx_prep(&c->vc, &ds->vd, flags);
524} 524}
525 525
526static int k3_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 526static int k3_dma_config(struct dma_chan *chan,
527 unsigned long arg) 527 struct dma_slave_config *cfg)
528{
529 struct k3_dma_chan *c = to_k3_chan(chan);
530 u32 maxburst = 0, val = 0;
531 enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
532
533 if (cfg == NULL)
534 return -EINVAL;
535 c->dir = cfg->direction;
536 if (c->dir == DMA_DEV_TO_MEM) {
537 c->ccfg = CX_CFG_DSTINCR;
538 c->dev_addr = cfg->src_addr;
539 maxburst = cfg->src_maxburst;
540 width = cfg->src_addr_width;
541 } else if (c->dir == DMA_MEM_TO_DEV) {
542 c->ccfg = CX_CFG_SRCINCR;
543 c->dev_addr = cfg->dst_addr;
544 maxburst = cfg->dst_maxburst;
545 width = cfg->dst_addr_width;
546 }
547 switch (width) {
548 case DMA_SLAVE_BUSWIDTH_1_BYTE:
549 case DMA_SLAVE_BUSWIDTH_2_BYTES:
550 case DMA_SLAVE_BUSWIDTH_4_BYTES:
551 case DMA_SLAVE_BUSWIDTH_8_BYTES:
552 val = __ffs(width);
553 break;
554 default:
555 val = 3;
556 break;
557 }
558 c->ccfg |= (val << 12) | (val << 16);
559
560 if ((maxburst == 0) || (maxburst > 16))
561 val = 16;
562 else
563 val = maxburst - 1;
564 c->ccfg |= (val << 20) | (val << 24);
565 c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;
566
567 /* specific request line */
568 c->ccfg |= c->vc.chan.chan_id << 4;
569
570 return 0;
571}
572
573static int k3_dma_terminate_all(struct dma_chan *chan)
528{ 574{
529 struct k3_dma_chan *c = to_k3_chan(chan); 575 struct k3_dma_chan *c = to_k3_chan(chan);
530 struct k3_dma_dev *d = to_k3_dma(chan->device); 576 struct k3_dma_dev *d = to_k3_dma(chan->device);
531 struct dma_slave_config *cfg = (void *)arg;
532 struct k3_dma_phy *p = c->phy; 577 struct k3_dma_phy *p = c->phy;
533 unsigned long flags; 578 unsigned long flags;
534 u32 maxburst = 0, val = 0;
535 enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
536 LIST_HEAD(head); 579 LIST_HEAD(head);
537 580
538 switch (cmd) { 581 dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
539 case DMA_SLAVE_CONFIG:
540 if (cfg == NULL)
541 return -EINVAL;
542 c->dir = cfg->direction;
543 if (c->dir == DMA_DEV_TO_MEM) {
544 c->ccfg = CX_CFG_DSTINCR;
545 c->dev_addr = cfg->src_addr;
546 maxburst = cfg->src_maxburst;
547 width = cfg->src_addr_width;
548 } else if (c->dir == DMA_MEM_TO_DEV) {
549 c->ccfg = CX_CFG_SRCINCR;
550 c->dev_addr = cfg->dst_addr;
551 maxburst = cfg->dst_maxburst;
552 width = cfg->dst_addr_width;
553 }
554 switch (width) {
555 case DMA_SLAVE_BUSWIDTH_1_BYTE:
556 case DMA_SLAVE_BUSWIDTH_2_BYTES:
557 case DMA_SLAVE_BUSWIDTH_4_BYTES:
558 case DMA_SLAVE_BUSWIDTH_8_BYTES:
559 val = __ffs(width);
560 break;
561 default:
562 val = 3;
563 break;
564 }
565 c->ccfg |= (val << 12) | (val << 16);
566 582
567 if ((maxburst == 0) || (maxburst > 16)) 583 /* Prevent this channel being scheduled */
568 val = 16; 584 spin_lock(&d->lock);
569 else 585 list_del_init(&c->node);
570 val = maxburst - 1; 586 spin_unlock(&d->lock);
571 c->ccfg |= (val << 20) | (val << 24);
572 c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;
573 587
574 /* specific request line */ 588 /* Clear the tx descriptor lists */
575 c->ccfg |= c->vc.chan.chan_id << 4; 589 spin_lock_irqsave(&c->vc.lock, flags);
576 break; 590 vchan_get_all_descriptors(&c->vc, &head);
591 if (p) {
592 /* vchan is assigned to a pchan - stop the channel */
593 k3_dma_terminate_chan(p, d);
594 c->phy = NULL;
595 p->vchan = NULL;
596 p->ds_run = p->ds_done = NULL;
597 }
598 spin_unlock_irqrestore(&c->vc.lock, flags);
599 vchan_dma_desc_free_list(&c->vc, &head);
577 600
578 case DMA_TERMINATE_ALL: 601 return 0;
579 dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); 602}
580 603
581 /* Prevent this channel being scheduled */ 604static int k3_dma_transfer_pause(struct dma_chan *chan)
582 spin_lock(&d->lock); 605{
583 list_del_init(&c->node); 606 struct k3_dma_chan *c = to_k3_chan(chan);
584 spin_unlock(&d->lock); 607 struct k3_dma_dev *d = to_k3_dma(chan->device);
608 struct k3_dma_phy *p = c->phy;
585 609
586 /* Clear the tx descriptor lists */ 610 dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
587 spin_lock_irqsave(&c->vc.lock, flags); 611 if (c->status == DMA_IN_PROGRESS) {
588 vchan_get_all_descriptors(&c->vc, &head); 612 c->status = DMA_PAUSED;
589 if (p) { 613 if (p) {
590 /* vchan is assigned to a pchan - stop the channel */ 614 k3_dma_pause_dma(p, false);
591 k3_dma_terminate_chan(p, d); 615 } else {
592 c->phy = NULL; 616 spin_lock(&d->lock);
593 p->vchan = NULL; 617 list_del_init(&c->node);
594 p->ds_run = p->ds_done = NULL; 618 spin_unlock(&d->lock);
595 } 619 }
596 spin_unlock_irqrestore(&c->vc.lock, flags); 620 }
597 vchan_dma_desc_free_list(&c->vc, &head);
598 break;
599 621
600 case DMA_PAUSE: 622 return 0;
601 dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); 623}
602 if (c->status == DMA_IN_PROGRESS) {
603 c->status = DMA_PAUSED;
604 if (p) {
605 k3_dma_pause_dma(p, false);
606 } else {
607 spin_lock(&d->lock);
608 list_del_init(&c->node);
609 spin_unlock(&d->lock);
610 }
611 }
612 break;
613 624
614 case DMA_RESUME: 625static int k3_dma_transfer_resume(struct dma_chan *chan)
615 dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); 626{
616 spin_lock_irqsave(&c->vc.lock, flags); 627 struct k3_dma_chan *c = to_k3_chan(chan);
617 if (c->status == DMA_PAUSED) { 628 struct k3_dma_dev *d = to_k3_dma(chan->device);
618 c->status = DMA_IN_PROGRESS; 629 struct k3_dma_phy *p = c->phy;
619 if (p) { 630 unsigned long flags;
620 k3_dma_pause_dma(p, true); 631
621 } else if (!list_empty(&c->vc.desc_issued)) { 632 dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
622 spin_lock(&d->lock); 633 spin_lock_irqsave(&c->vc.lock, flags);
623 list_add_tail(&c->node, &d->chan_pending); 634 if (c->status == DMA_PAUSED) {
624 spin_unlock(&d->lock); 635 c->status = DMA_IN_PROGRESS;
625 } 636 if (p) {
637 k3_dma_pause_dma(p, true);
638 } else if (!list_empty(&c->vc.desc_issued)) {
639 spin_lock(&d->lock);
640 list_add_tail(&c->node, &d->chan_pending);
641 spin_unlock(&d->lock);
626 } 642 }
627 spin_unlock_irqrestore(&c->vc.lock, flags);
628 break;
629 default:
630 return -ENXIO;
631 } 643 }
644 spin_unlock_irqrestore(&c->vc.lock, flags);
645
632 return 0; 646 return 0;
633} 647}
634 648
@@ -720,7 +734,10 @@ static int k3_dma_probe(struct platform_device *op)
720 d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy; 734 d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
721 d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg; 735 d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
722 d->slave.device_issue_pending = k3_dma_issue_pending; 736 d->slave.device_issue_pending = k3_dma_issue_pending;
723 d->slave.device_control = k3_dma_control; 737 d->slave.device_config = k3_dma_config;
738 d->slave.device_pause = k3_dma_transfer_pause;
739 d->slave.device_resume = k3_dma_transfer_resume;
740 d->slave.device_terminate_all = k3_dma_terminate_all;
724 d->slave.copy_align = DMA_ALIGN; 741 d->slave.copy_align = DMA_ALIGN;
725 742
726 /* init virtual channel */ 743 /* init virtual channel */
@@ -787,7 +804,7 @@ static int k3_dma_remove(struct platform_device *op)
787} 804}
788 805
789#ifdef CONFIG_PM_SLEEP 806#ifdef CONFIG_PM_SLEEP
790static int k3_dma_suspend(struct device *dev) 807static int k3_dma_suspend_dev(struct device *dev)
791{ 808{
792 struct k3_dma_dev *d = dev_get_drvdata(dev); 809 struct k3_dma_dev *d = dev_get_drvdata(dev);
793 u32 stat = 0; 810 u32 stat = 0;
@@ -803,7 +820,7 @@ static int k3_dma_suspend(struct device *dev)
803 return 0; 820 return 0;
804} 821}
805 822
806static int k3_dma_resume(struct device *dev) 823static int k3_dma_resume_dev(struct device *dev)
807{ 824{
808 struct k3_dma_dev *d = dev_get_drvdata(dev); 825 struct k3_dma_dev *d = dev_get_drvdata(dev);
809 int ret = 0; 826 int ret = 0;
@@ -818,7 +835,7 @@ static int k3_dma_resume(struct device *dev)
818} 835}
819#endif 836#endif
820 837
821static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume); 838static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev);
822 839
823static struct platform_driver k3_pdma_driver = { 840static struct platform_driver k3_pdma_driver = {
824 .driver = { 841 .driver = {
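For illustration only (fifo_addr and the width/burst values here are assumptions, not taken from the patch): with the split API the slave configuration reaches k3_dma_config() as a typed struct dma_slave_config pointer rather than a casted unsigned long argument.

#include <linux/dmaengine.h>

static int setup_tx_channel(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,
	};

	/* Arrives in the driver's .device_config callback as a typed pointer */
	return dmaengine_slave_config(chan, &cfg);
}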
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 8b8952f35e6c..8926f271904e 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -683,68 +683,70 @@ fail:
683 return NULL; 683 return NULL;
684} 684}
685 685
686static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, 686static int mmp_pdma_config(struct dma_chan *dchan,
687 unsigned long arg) 687 struct dma_slave_config *cfg)
688{ 688{
689 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); 689 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
690 struct dma_slave_config *cfg = (void *)arg;
691 unsigned long flags;
692 u32 maxburst = 0, addr = 0; 690 u32 maxburst = 0, addr = 0;
693 enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; 691 enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
694 692
695 if (!dchan) 693 if (!dchan)
696 return -EINVAL; 694 return -EINVAL;
697 695
698 switch (cmd) { 696 if (cfg->direction == DMA_DEV_TO_MEM) {
699 case DMA_TERMINATE_ALL: 697 chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
700 disable_chan(chan->phy); 698 maxburst = cfg->src_maxburst;
701 mmp_pdma_free_phy(chan); 699 width = cfg->src_addr_width;
702 spin_lock_irqsave(&chan->desc_lock, flags); 700 addr = cfg->src_addr;
703 mmp_pdma_free_desc_list(chan, &chan->chain_pending); 701 } else if (cfg->direction == DMA_MEM_TO_DEV) {
704 mmp_pdma_free_desc_list(chan, &chan->chain_running); 702 chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
705 spin_unlock_irqrestore(&chan->desc_lock, flags); 703 maxburst = cfg->dst_maxburst;
706 chan->idle = true; 704 width = cfg->dst_addr_width;
707 break; 705 addr = cfg->dst_addr;
708 case DMA_SLAVE_CONFIG:
709 if (cfg->direction == DMA_DEV_TO_MEM) {
710 chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
711 maxburst = cfg->src_maxburst;
712 width = cfg->src_addr_width;
713 addr = cfg->src_addr;
714 } else if (cfg->direction == DMA_MEM_TO_DEV) {
715 chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
716 maxburst = cfg->dst_maxburst;
717 width = cfg->dst_addr_width;
718 addr = cfg->dst_addr;
719 }
720
721 if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
722 chan->dcmd |= DCMD_WIDTH1;
723 else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
724 chan->dcmd |= DCMD_WIDTH2;
725 else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
726 chan->dcmd |= DCMD_WIDTH4;
727
728 if (maxburst == 8)
729 chan->dcmd |= DCMD_BURST8;
730 else if (maxburst == 16)
731 chan->dcmd |= DCMD_BURST16;
732 else if (maxburst == 32)
733 chan->dcmd |= DCMD_BURST32;
734
735 chan->dir = cfg->direction;
736 chan->dev_addr = addr;
737 /* FIXME: drivers should be ported over to use the filter
738 * function. Once that's done, the following two lines can
739 * be removed.
740 */
741 if (cfg->slave_id)
742 chan->drcmr = cfg->slave_id;
743 break;
744 default:
745 return -ENOSYS;
746 } 706 }
747 707
708 if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
709 chan->dcmd |= DCMD_WIDTH1;
710 else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
711 chan->dcmd |= DCMD_WIDTH2;
712 else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
713 chan->dcmd |= DCMD_WIDTH4;
714
715 if (maxburst == 8)
716 chan->dcmd |= DCMD_BURST8;
717 else if (maxburst == 16)
718 chan->dcmd |= DCMD_BURST16;
719 else if (maxburst == 32)
720 chan->dcmd |= DCMD_BURST32;
721
722 chan->dir = cfg->direction;
723 chan->dev_addr = addr;
724 /* FIXME: drivers should be ported over to use the filter
725 * function. Once that's done, the following two lines can
726 * be removed.
727 */
728 if (cfg->slave_id)
729 chan->drcmr = cfg->slave_id;
730
731 return 0;
732}
733
734static int mmp_pdma_terminate_all(struct dma_chan *dchan)
735{
736 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
737 unsigned long flags;
738
739 if (!dchan)
740 return -EINVAL;
741
742 disable_chan(chan->phy);
743 mmp_pdma_free_phy(chan);
744 spin_lock_irqsave(&chan->desc_lock, flags);
745 mmp_pdma_free_desc_list(chan, &chan->chain_pending);
746 mmp_pdma_free_desc_list(chan, &chan->chain_running);
747 spin_unlock_irqrestore(&chan->desc_lock, flags);
748 chan->idle = true;
749
748 return 0; 750 return 0;
749} 751}
750 752
@@ -1061,7 +1063,8 @@ static int mmp_pdma_probe(struct platform_device *op)
1061 pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg; 1063 pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
1062 pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic; 1064 pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
1063 pdev->device.device_issue_pending = mmp_pdma_issue_pending; 1065 pdev->device.device_issue_pending = mmp_pdma_issue_pending;
1064 pdev->device.device_control = mmp_pdma_control; 1066 pdev->device.device_config = mmp_pdma_config;
1067 pdev->device.device_terminate_all = mmp_pdma_terminate_all;
1065 pdev->device.copy_align = PDMA_ALIGNMENT; 1068 pdev->device.copy_align = PDMA_ALIGNMENT;
1066 1069
1067 if (pdev->dev->coherent_dma_mask) 1070 if (pdev->dev->coherent_dma_mask)
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index bfb46957c3dc..70c2fa9963cd 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -19,7 +19,6 @@
19#include <linux/dmaengine.h> 19#include <linux/dmaengine.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/device.h> 21#include <linux/device.h>
22#include <mach/regs-icu.h>
23#include <linux/platform_data/dma-mmp_tdma.h> 22#include <linux/platform_data/dma-mmp_tdma.h>
24#include <linux/of_device.h> 23#include <linux/of_device.h>
25#include <linux/of_dma.h> 24#include <linux/of_dma.h>
@@ -164,33 +163,46 @@ static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac)
164 tdmac->status = DMA_IN_PROGRESS; 163 tdmac->status = DMA_IN_PROGRESS;
165} 164}
166 165
167static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac) 166static int mmp_tdma_disable_chan(struct dma_chan *chan)
168{ 167{
168 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
169
169 writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, 170 writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
170 tdmac->reg_base + TDCR); 171 tdmac->reg_base + TDCR);
171 172
172 tdmac->status = DMA_COMPLETE; 173 tdmac->status = DMA_COMPLETE;
174
175 return 0;
173} 176}
174 177
175static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac) 178static int mmp_tdma_resume_chan(struct dma_chan *chan)
176{ 179{
180 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
181
177 writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN, 182 writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN,
178 tdmac->reg_base + TDCR); 183 tdmac->reg_base + TDCR);
179 tdmac->status = DMA_IN_PROGRESS; 184 tdmac->status = DMA_IN_PROGRESS;
185
186 return 0;
180} 187}
181 188
182static void mmp_tdma_pause_chan(struct mmp_tdma_chan *tdmac) 189static int mmp_tdma_pause_chan(struct dma_chan *chan)
183{ 190{
191 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
192
184 writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, 193 writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
185 tdmac->reg_base + TDCR); 194 tdmac->reg_base + TDCR);
186 tdmac->status = DMA_PAUSED; 195 tdmac->status = DMA_PAUSED;
196
197 return 0;
187} 198}
188 199
189static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac) 200static int mmp_tdma_config_chan(struct dma_chan *chan)
190{ 201{
202 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
191 unsigned int tdcr = 0; 203 unsigned int tdcr = 0;
192 204
193 mmp_tdma_disable_chan(tdmac); 205 mmp_tdma_disable_chan(chan);
194 206
195 if (tdmac->dir == DMA_MEM_TO_DEV) 207 if (tdmac->dir == DMA_MEM_TO_DEV)
196 tdcr = TDCR_DSTDIR_ADDR_HOLD | TDCR_SRCDIR_ADDR_INC; 208 tdcr = TDCR_DSTDIR_ADDR_HOLD | TDCR_SRCDIR_ADDR_INC;
@@ -452,42 +464,34 @@ err_out:
452 return NULL; 464 return NULL;
453} 465}
454 466
455static int mmp_tdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 467static int mmp_tdma_terminate_all(struct dma_chan *chan)
456 unsigned long arg)
457{ 468{
458 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); 469 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
459 struct dma_slave_config *dmaengine_cfg = (void *)arg; 470
460 int ret = 0; 471 mmp_tdma_disable_chan(chan);
461 472 /* disable interrupt */
462 switch (cmd) { 473 mmp_tdma_enable_irq(tdmac, false);
463 case DMA_TERMINATE_ALL: 474
464 mmp_tdma_disable_chan(tdmac); 475 return 0;
465 /* disable interrupt */ 476}
466 mmp_tdma_enable_irq(tdmac, false); 477
467 break; 478static int mmp_tdma_config(struct dma_chan *chan,
468 case DMA_PAUSE: 479 struct dma_slave_config *dmaengine_cfg)
469 mmp_tdma_pause_chan(tdmac); 480{
470 break; 481 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
471 case DMA_RESUME: 482
472 mmp_tdma_resume_chan(tdmac); 483 if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
473 break; 484 tdmac->dev_addr = dmaengine_cfg->src_addr;
474 case DMA_SLAVE_CONFIG: 485 tdmac->burst_sz = dmaengine_cfg->src_maxburst;
475 if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { 486 tdmac->buswidth = dmaengine_cfg->src_addr_width;
476 tdmac->dev_addr = dmaengine_cfg->src_addr; 487 } else {
477 tdmac->burst_sz = dmaengine_cfg->src_maxburst; 488 tdmac->dev_addr = dmaengine_cfg->dst_addr;
478 tdmac->buswidth = dmaengine_cfg->src_addr_width; 489 tdmac->burst_sz = dmaengine_cfg->dst_maxburst;
479 } else { 490 tdmac->buswidth = dmaengine_cfg->dst_addr_width;
480 tdmac->dev_addr = dmaengine_cfg->dst_addr;
481 tdmac->burst_sz = dmaengine_cfg->dst_maxburst;
482 tdmac->buswidth = dmaengine_cfg->dst_addr_width;
483 }
484 tdmac->dir = dmaengine_cfg->direction;
485 return mmp_tdma_config_chan(tdmac);
486 default:
487 ret = -ENOSYS;
488 } 491 }
492 tdmac->dir = dmaengine_cfg->direction;
489 493
490 return ret; 494 return mmp_tdma_config_chan(chan);
491} 495}
492 496
493static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan, 497static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan,
@@ -668,7 +672,10 @@ static int mmp_tdma_probe(struct platform_device *pdev)
668 tdev->device.device_prep_dma_cyclic = mmp_tdma_prep_dma_cyclic; 672 tdev->device.device_prep_dma_cyclic = mmp_tdma_prep_dma_cyclic;
669 tdev->device.device_tx_status = mmp_tdma_tx_status; 673 tdev->device.device_tx_status = mmp_tdma_tx_status;
670 tdev->device.device_issue_pending = mmp_tdma_issue_pending; 674 tdev->device.device_issue_pending = mmp_tdma_issue_pending;
671 tdev->device.device_control = mmp_tdma_control; 675 tdev->device.device_config = mmp_tdma_config;
676 tdev->device.device_pause = mmp_tdma_pause_chan;
677 tdev->device.device_resume = mmp_tdma_resume_chan;
678 tdev->device.device_terminate_all = mmp_tdma_terminate_all;
672 tdev->device.copy_align = TDMA_ALIGNMENT; 679 tdev->device.copy_align = TDMA_ALIGNMENT;
673 680
674 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 681 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
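A sketch of a cyclic consumer (buf, len and period are placeholders, not from the patch): on such a channel dmaengine_pause()/dmaengine_resume() now map directly onto mmp_tdma_pause_chan()/mmp_tdma_resume_chan() registered above.

#include <linux/dmaengine.h>

static int start_cyclic(struct dma_chan *chan, dma_addr_t buf,
			size_t len, size_t period)
{
	struct dma_async_tx_descriptor *desc;

	/* One descriptor covering the whole ring, split into periods */
	desc = dmaengine_prep_dma_cyclic(chan, buf, len, period,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;
}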
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
index 53032bac06e0..15cab7d79525 100644
--- a/drivers/dma/moxart-dma.c
+++ b/drivers/dma/moxart-dma.c
@@ -263,28 +263,6 @@ static int moxart_slave_config(struct dma_chan *chan,
263 return 0; 263 return 0;
264} 264}
265 265
266static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
267 unsigned long arg)
268{
269 int ret = 0;
270
271 switch (cmd) {
272 case DMA_PAUSE:
273 case DMA_RESUME:
274 return -EINVAL;
275 case DMA_TERMINATE_ALL:
276 moxart_terminate_all(chan);
277 break;
278 case DMA_SLAVE_CONFIG:
279 ret = moxart_slave_config(chan, (struct dma_slave_config *)arg);
280 break;
281 default:
282 ret = -ENOSYS;
283 }
284
285 return ret;
286}
287
288static struct dma_async_tx_descriptor *moxart_prep_slave_sg( 266static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
289 struct dma_chan *chan, struct scatterlist *sgl, 267 struct dma_chan *chan, struct scatterlist *sgl,
290 unsigned int sg_len, enum dma_transfer_direction dir, 268 unsigned int sg_len, enum dma_transfer_direction dir,
@@ -531,7 +509,8 @@ static void moxart_dma_init(struct dma_device *dma, struct device *dev)
531 dma->device_free_chan_resources = moxart_free_chan_resources; 509 dma->device_free_chan_resources = moxart_free_chan_resources;
532 dma->device_issue_pending = moxart_issue_pending; 510 dma->device_issue_pending = moxart_issue_pending;
533 dma->device_tx_status = moxart_tx_status; 511 dma->device_tx_status = moxart_tx_status;
534 dma->device_control = moxart_control; 512 dma->device_config = moxart_slave_config;
513 dma->device_terminate_all = moxart_terminate_all;
535 dma->dev = dev; 514 dma->dev = dev;
536 515
537 INIT_LIST_HEAD(&dma->channels); 516 INIT_LIST_HEAD(&dma->channels);
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 01bec4023de2..57d2457545f3 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -800,79 +800,69 @@ err_prep:
800 return NULL; 800 return NULL;
801} 801}
802 802
803static int mpc_dma_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 803static int mpc_dma_device_config(struct dma_chan *chan,
804 unsigned long arg) 804 struct dma_slave_config *cfg)
805{ 805{
806 struct mpc_dma_chan *mchan; 806 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
807 struct mpc_dma *mdma;
808 struct dma_slave_config *cfg;
809 unsigned long flags; 807 unsigned long flags;
810 808
811 mchan = dma_chan_to_mpc_dma_chan(chan); 809 /*
812 switch (cmd) { 810 * Software constraints:
813 case DMA_TERMINATE_ALL: 811 * - only transfers between a peripheral device and
814 /* Disable channel requests */ 812 * memory are supported;
815 mdma = dma_chan_to_mpc_dma(chan); 813 * - only peripheral devices with 4-byte FIFO access register
816 814 * are supported;
817 spin_lock_irqsave(&mchan->lock, flags); 815 * - minimal transfer chunk is 4 bytes and consequently
818 816 * source and destination addresses must be 4-byte aligned
819 out_8(&mdma->regs->dmacerq, chan->chan_id); 817 * and transfer size must be aligned on (4 * maxburst)
820 list_splice_tail_init(&mchan->prepared, &mchan->free); 818 * boundary;
821 list_splice_tail_init(&mchan->queued, &mchan->free); 819 * - during the transfer RAM address is being incremented by
822 list_splice_tail_init(&mchan->active, &mchan->free); 820 * the size of minimal transfer chunk;
823 821 * - peripheral port's address is constant during the transfer.
824 spin_unlock_irqrestore(&mchan->lock, flags); 822 */
825 823
826 return 0; 824 if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
825 cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
826 !IS_ALIGNED(cfg->src_addr, 4) ||
827 !IS_ALIGNED(cfg->dst_addr, 4)) {
828 return -EINVAL;
829 }
827 830
828 case DMA_SLAVE_CONFIG: 831 spin_lock_irqsave(&mchan->lock, flags);
829 /*
830 * Software constraints:
831 * - only transfers between a peripheral device and
832 * memory are supported;
833 * - only peripheral devices with 4-byte FIFO access register
834 * are supported;
835 * - minimal transfer chunk is 4 bytes and consequently
836 * source and destination addresses must be 4-byte aligned
837 * and transfer size must be aligned on (4 * maxburst)
838 * boundary;
839 * - during the transfer RAM address is being incremented by
840 * the size of minimal transfer chunk;
841 * - peripheral port's address is constant during the transfer.
842 */
843 832
844 cfg = (void *)arg; 833 mchan->src_per_paddr = cfg->src_addr;
834 mchan->src_tcd_nunits = cfg->src_maxburst;
835 mchan->dst_per_paddr = cfg->dst_addr;
836 mchan->dst_tcd_nunits = cfg->dst_maxburst;
845 837
846 if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES || 838 /* Apply defaults */
847 cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES || 839 if (mchan->src_tcd_nunits == 0)
848 !IS_ALIGNED(cfg->src_addr, 4) || 840 mchan->src_tcd_nunits = 1;
849 !IS_ALIGNED(cfg->dst_addr, 4)) { 841 if (mchan->dst_tcd_nunits == 0)
850 return -EINVAL; 842 mchan->dst_tcd_nunits = 1;
851 }
852 843
853 spin_lock_irqsave(&mchan->lock, flags); 844 spin_unlock_irqrestore(&mchan->lock, flags);
854 845
855 mchan->src_per_paddr = cfg->src_addr; 846 return 0;
856 mchan->src_tcd_nunits = cfg->src_maxburst; 847}
857 mchan->dst_per_paddr = cfg->dst_addr;
858 mchan->dst_tcd_nunits = cfg->dst_maxburst;
859 848
860 /* Apply defaults */ 849static int mpc_dma_device_terminate_all(struct dma_chan *chan)
861 if (mchan->src_tcd_nunits == 0) 850{
862 mchan->src_tcd_nunits = 1; 851 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
863 if (mchan->dst_tcd_nunits == 0) 852 struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
864 mchan->dst_tcd_nunits = 1; 853 unsigned long flags;
865 854
866 spin_unlock_irqrestore(&mchan->lock, flags); 855 /* Disable channel requests */
856 spin_lock_irqsave(&mchan->lock, flags);
867 857
868 return 0; 858 out_8(&mdma->regs->dmacerq, chan->chan_id);
859 list_splice_tail_init(&mchan->prepared, &mchan->free);
860 list_splice_tail_init(&mchan->queued, &mchan->free);
861 list_splice_tail_init(&mchan->active, &mchan->free);
869 862
870 default: 863 spin_unlock_irqrestore(&mchan->lock, flags);
871 /* Unknown command */
872 break;
873 }
874 864
875 return -ENXIO; 865 return 0;
876} 866}
877 867
878static int mpc_dma_probe(struct platform_device *op) 868static int mpc_dma_probe(struct platform_device *op)
@@ -963,7 +953,8 @@ static int mpc_dma_probe(struct platform_device *op)
963 dma->device_tx_status = mpc_dma_tx_status; 953 dma->device_tx_status = mpc_dma_tx_status;
964 dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy; 954 dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
965 dma->device_prep_slave_sg = mpc_dma_prep_slave_sg; 955 dma->device_prep_slave_sg = mpc_dma_prep_slave_sg;
966 dma->device_control = mpc_dma_device_control; 956 dma->device_config = mpc_dma_device_config;
957 dma->device_terminate_all = mpc_dma_device_terminate_all;
967 958
968 INIT_LIST_HEAD(&dma->channels); 959 INIT_LIST_HEAD(&dma->channels);
969 dma_cap_set(DMA_MEMCPY, dma->cap_mask); 960 dma_cap_set(DMA_MEMCPY, dma->cap_mask);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index d7ac558c2c1c..b03e8137b918 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -928,14 +928,6 @@ out:
928 return err; 928 return err;
929} 929}
930 930
931/* This driver does not implement any of the optional DMA operations. */
932static int
933mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
934 unsigned long arg)
935{
936 return -ENOSYS;
937}
938
939static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan) 931static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
940{ 932{
941 struct dma_chan *chan, *_chan; 933 struct dma_chan *chan, *_chan;
@@ -1008,7 +1000,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
1008 dma_dev->device_free_chan_resources = mv_xor_free_chan_resources; 1000 dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
1009 dma_dev->device_tx_status = mv_xor_status; 1001 dma_dev->device_tx_status = mv_xor_status;
1010 dma_dev->device_issue_pending = mv_xor_issue_pending; 1002 dma_dev->device_issue_pending = mv_xor_issue_pending;
1011 dma_dev->device_control = mv_xor_control;
1012 dma_dev->dev = &pdev->dev; 1003 dma_dev->dev = &pdev->dev;
1013 1004
1014 /* set prep routines based on capability */ 1005 /* set prep routines based on capability */
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 5ea61201dbf0..829ec686dac3 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -202,8 +202,9 @@ static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
202 return container_of(chan, struct mxs_dma_chan, chan); 202 return container_of(chan, struct mxs_dma_chan, chan);
203} 203}
204 204
205static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) 205static void mxs_dma_reset_chan(struct dma_chan *chan)
206{ 206{
207 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
207 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 208 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
208 int chan_id = mxs_chan->chan.chan_id; 209 int chan_id = mxs_chan->chan.chan_id;
209 210
@@ -250,8 +251,9 @@ static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
250 mxs_chan->status = DMA_COMPLETE; 251 mxs_chan->status = DMA_COMPLETE;
251} 252}
252 253
253static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) 254static void mxs_dma_enable_chan(struct dma_chan *chan)
254{ 255{
256 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
255 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 257 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
256 int chan_id = mxs_chan->chan.chan_id; 258 int chan_id = mxs_chan->chan.chan_id;
257 259
@@ -272,13 +274,16 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
272 mxs_chan->reset = false; 274 mxs_chan->reset = false;
273} 275}
274 276
275static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) 277static void mxs_dma_disable_chan(struct dma_chan *chan)
276{ 278{
279 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
280
277 mxs_chan->status = DMA_COMPLETE; 281 mxs_chan->status = DMA_COMPLETE;
278} 282}
279 283
280static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan) 284static int mxs_dma_pause_chan(struct dma_chan *chan)
281{ 285{
286 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
282 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 287 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
283 int chan_id = mxs_chan->chan.chan_id; 288 int chan_id = mxs_chan->chan.chan_id;
284 289
@@ -291,10 +296,12 @@ static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
291 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET); 296 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
292 297
293 mxs_chan->status = DMA_PAUSED; 298 mxs_chan->status = DMA_PAUSED;
299 return 0;
294} 300}
295 301
296static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan) 302static int mxs_dma_resume_chan(struct dma_chan *chan)
297{ 303{
304 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
298 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 305 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
299 int chan_id = mxs_chan->chan.chan_id; 306 int chan_id = mxs_chan->chan.chan_id;
300 307
@@ -307,6 +314,7 @@ static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
307 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR); 314 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR);
308 315
309 mxs_chan->status = DMA_IN_PROGRESS; 316 mxs_chan->status = DMA_IN_PROGRESS;
317 return 0;
310} 318}
311 319
312static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx) 320static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
@@ -383,7 +391,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
383 "%s: error in channel %d\n", __func__, 391 "%s: error in channel %d\n", __func__,
384 chan); 392 chan);
385 mxs_chan->status = DMA_ERROR; 393 mxs_chan->status = DMA_ERROR;
386 mxs_dma_reset_chan(mxs_chan); 394 mxs_dma_reset_chan(&mxs_chan->chan);
387 } else if (mxs_chan->status != DMA_COMPLETE) { 395 } else if (mxs_chan->status != DMA_COMPLETE) {
388 if (mxs_chan->flags & MXS_DMA_SG_LOOP) { 396 if (mxs_chan->flags & MXS_DMA_SG_LOOP) {
389 mxs_chan->status = DMA_IN_PROGRESS; 397 mxs_chan->status = DMA_IN_PROGRESS;
@@ -432,7 +440,7 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
432 if (ret) 440 if (ret)
433 goto err_clk; 441 goto err_clk;
434 442
435 mxs_dma_reset_chan(mxs_chan); 443 mxs_dma_reset_chan(chan);
436 444
437 dma_async_tx_descriptor_init(&mxs_chan->desc, chan); 445 dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
438 mxs_chan->desc.tx_submit = mxs_dma_tx_submit; 446 mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
@@ -456,7 +464,7 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
456 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 464 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
457 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 465 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
458 466
459 mxs_dma_disable_chan(mxs_chan); 467 mxs_dma_disable_chan(chan);
460 468
461 free_irq(mxs_chan->chan_irq, mxs_dma); 469 free_irq(mxs_chan->chan_irq, mxs_dma);
462 470
@@ -651,28 +659,12 @@ err_out:
651 return NULL; 659 return NULL;
652} 660}
653 661
654static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 662static int mxs_dma_terminate_all(struct dma_chan *chan)
655 unsigned long arg)
656{ 663{
657 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 664 mxs_dma_reset_chan(chan);
658 int ret = 0; 665 mxs_dma_disable_chan(chan);
659
660 switch (cmd) {
661 case DMA_TERMINATE_ALL:
662 mxs_dma_reset_chan(mxs_chan);
663 mxs_dma_disable_chan(mxs_chan);
664 break;
665 case DMA_PAUSE:
666 mxs_dma_pause_chan(mxs_chan);
667 break;
668 case DMA_RESUME:
669 mxs_dma_resume_chan(mxs_chan);
670 break;
671 default:
672 ret = -ENOSYS;
673 }
674 666
675 return ret; 667 return 0;
676} 668}
677 669
678static enum dma_status mxs_dma_tx_status(struct dma_chan *chan, 670static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
@@ -701,13 +693,6 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
701 return mxs_chan->status; 693 return mxs_chan->status;
702} 694}
703 695
704static void mxs_dma_issue_pending(struct dma_chan *chan)
705{
706 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
707
708 mxs_dma_enable_chan(mxs_chan);
709}
710
711static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma) 696static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
712{ 697{
713 int ret; 698 int ret;
@@ -860,8 +845,14 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
860 mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status; 845 mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status;
861 mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg; 846 mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg;
862 mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic; 847 mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic;
863 mxs_dma->dma_device.device_control = mxs_dma_control; 848 mxs_dma->dma_device.device_pause = mxs_dma_pause_chan;
864 mxs_dma->dma_device.device_issue_pending = mxs_dma_issue_pending; 849 mxs_dma->dma_device.device_resume = mxs_dma_resume_chan;
850 mxs_dma->dma_device.device_terminate_all = mxs_dma_terminate_all;
851 mxs_dma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
852 mxs_dma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
853 mxs_dma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
854 mxs_dma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
855 mxs_dma->dma_device.device_issue_pending = mxs_dma_enable_chan;
865 856
866 ret = dma_async_device_register(&mxs_dma->dma_device); 857 ret = dma_async_device_register(&mxs_dma->dma_device);
867 if (ret) { 858 if (ret) {
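Schematic provider-side sketch of the pattern the converted probe() functions share; the my_dma_* names are placeholders, not any real driver. Per-operation callbacks plus capability fields on struct dma_device replace the old device_control/device_slave_caps pair.

#include <linux/dmaengine.h>

/* Placeholder callbacks standing in for a real driver's implementations */
static int my_dma_config(struct dma_chan *c, struct dma_slave_config *cfg) { return 0; }
static int my_dma_pause(struct dma_chan *c) { return 0; }
static int my_dma_resume(struct dma_chan *c) { return 0; }
static int my_dma_terminate_all(struct dma_chan *c) { return 0; }

static void my_dma_setup_device(struct dma_device *dd)
{
	dd->device_config	 = my_dma_config;	/* was DMA_SLAVE_CONFIG  */
	dd->device_pause	 = my_dma_pause;	/* was DMA_PAUSE         */
	dd->device_resume	 = my_dma_resume;	/* was DMA_RESUME        */
	dd->device_terminate_all = my_dma_terminate_all; /* was DMA_TERMINATE_ALL */

	/* Capability description replaces the old device_slave_caps callback */
	dd->src_addr_widths	 = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->dst_addr_widths	 = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->directions		 = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity	 = DMA_RESIDUE_GRANULARITY_BURST;
}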
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index d7d61e1a01c3..88b77c98365d 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -504,7 +504,7 @@ static int nbpf_prep_one(struct nbpf_link_desc *ldesc,
504 * pauses DMA and reads out data received via DMA as well as those left 504 * pauses DMA and reads out data received via DMA as well as those left
505 * in the Rx FIFO. For this to work with the RAM side using burst 505 * in the Rx FIFO. For this to work with the RAM side using burst
506 * transfers we enable the SBE bit and terminate the transfer in our 506 * transfers we enable the SBE bit and terminate the transfer in our
507 * DMA_PAUSE handler. 507 * .device_pause handler.
508 */ 508 */
509 mem_xfer = nbpf_xfer_ds(chan->nbpf, size); 509 mem_xfer = nbpf_xfer_ds(chan->nbpf, size);
510 510
@@ -565,13 +565,6 @@ static void nbpf_configure(struct nbpf_device *nbpf)
565 nbpf_write(nbpf, NBPF_CTRL, NBPF_CTRL_LVINT); 565 nbpf_write(nbpf, NBPF_CTRL, NBPF_CTRL_LVINT);
566} 566}
567 567
568static void nbpf_pause(struct nbpf_channel *chan)
569{
570 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS);
571 /* See comment in nbpf_prep_one() */
572 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN);
573}
574
575/* Generic part */ 568/* Generic part */
576 569
577/* DMA ENGINE functions */ 570/* DMA ENGINE functions */
@@ -837,54 +830,58 @@ static void nbpf_chan_idle(struct nbpf_channel *chan)
837 } 830 }
838} 831}
839 832
840static int nbpf_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, 833static int nbpf_pause(struct dma_chan *dchan)
841 unsigned long arg)
842{ 834{
843 struct nbpf_channel *chan = nbpf_to_chan(dchan); 835 struct nbpf_channel *chan = nbpf_to_chan(dchan);
844 struct dma_slave_config *config;
845 836
846 dev_dbg(dchan->device->dev, "Entry %s(%d)\n", __func__, cmd); 837 dev_dbg(dchan->device->dev, "Entry %s\n", __func__);
847 838
848 switch (cmd) { 839 chan->paused = true;
849 case DMA_TERMINATE_ALL: 840 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS);
850 dev_dbg(dchan->device->dev, "Terminating\n"); 841 /* See comment in nbpf_prep_one() */
851 nbpf_chan_halt(chan); 842 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN);
852 nbpf_chan_idle(chan);
853 break;
854 843
855 case DMA_SLAVE_CONFIG: 844 return 0;
856 if (!arg) 845}
857 return -EINVAL;
858 config = (struct dma_slave_config *)arg;
859 846
860 /* 847static int nbpf_terminate_all(struct dma_chan *dchan)
861 * We could check config->slave_id to match chan->terminal here, 848{
862 * but with DT they would be coming from the same source, so 849 struct nbpf_channel *chan = nbpf_to_chan(dchan);
 863 * such a check would be superfluous
864 */
865 850
866 chan->slave_dst_addr = config->dst_addr; 851 dev_dbg(dchan->device->dev, "Entry %s\n", __func__);
867 chan->slave_dst_width = nbpf_xfer_size(chan->nbpf, 852 dev_dbg(dchan->device->dev, "Terminating\n");
868 config->dst_addr_width, 1);
869 chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf,
870 config->dst_addr_width,
871 config->dst_maxburst);
872 chan->slave_src_addr = config->src_addr;
873 chan->slave_src_width = nbpf_xfer_size(chan->nbpf,
874 config->src_addr_width, 1);
875 chan->slave_src_burst = nbpf_xfer_size(chan->nbpf,
876 config->src_addr_width,
877 config->src_maxburst);
878 break;
879 853
880 case DMA_PAUSE: 854 nbpf_chan_halt(chan);
881 chan->paused = true; 855 nbpf_chan_idle(chan);
882 nbpf_pause(chan);
883 break;
884 856
885 default: 857 return 0;
886 return -ENXIO; 858}
887 } 859
860static int nbpf_config(struct dma_chan *dchan,
861 struct dma_slave_config *config)
862{
863 struct nbpf_channel *chan = nbpf_to_chan(dchan);
864
865 dev_dbg(dchan->device->dev, "Entry %s\n", __func__);
866
867 /*
868 * We could check config->slave_id to match chan->terminal here,
869 * but with DT they would be coming from the same source, so
870 * such a check would be superflous
871 */
872
873 chan->slave_dst_addr = config->dst_addr;
874 chan->slave_dst_width = nbpf_xfer_size(chan->nbpf,
875 config->dst_addr_width, 1);
876 chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf,
877 config->dst_addr_width,
878 config->dst_maxburst);
879 chan->slave_src_addr = config->src_addr;
880 chan->slave_src_width = nbpf_xfer_size(chan->nbpf,
881 config->src_addr_width, 1);
882 chan->slave_src_burst = nbpf_xfer_size(chan->nbpf,
883 config->src_addr_width,
884 config->src_maxburst);
888 885
889 return 0; 886 return 0;
890} 887}
@@ -1072,18 +1069,6 @@ static void nbpf_free_chan_resources(struct dma_chan *dchan)
1072 } 1069 }
1073} 1070}
1074 1071
1075static int nbpf_slave_caps(struct dma_chan *dchan,
1076 struct dma_slave_caps *caps)
1077{
1078 caps->src_addr_widths = NBPF_DMA_BUSWIDTHS;
1079 caps->dstn_addr_widths = NBPF_DMA_BUSWIDTHS;
1080 caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1081 caps->cmd_pause = false;
1082 caps->cmd_terminate = true;
1083
1084 return 0;
1085}
1086
1087static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec, 1072static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec,
1088 struct of_dma *ofdma) 1073 struct of_dma *ofdma)
1089{ 1074{
@@ -1414,7 +1399,6 @@ static int nbpf_probe(struct platform_device *pdev)
1414 dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy; 1399 dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy;
1415 dma_dev->device_tx_status = nbpf_tx_status; 1400 dma_dev->device_tx_status = nbpf_tx_status;
1416 dma_dev->device_issue_pending = nbpf_issue_pending; 1401 dma_dev->device_issue_pending = nbpf_issue_pending;
1417 dma_dev->device_slave_caps = nbpf_slave_caps;
1418 1402
1419 /* 1403 /*
1420 * If we drop support for unaligned MEMCPY buffer addresses and / or 1404 * If we drop support for unaligned MEMCPY buffer addresses and / or
@@ -1426,7 +1410,13 @@ static int nbpf_probe(struct platform_device *pdev)
1426 1410
1427 /* Compulsory for DMA_SLAVE fields */ 1411 /* Compulsory for DMA_SLAVE fields */
1428 dma_dev->device_prep_slave_sg = nbpf_prep_slave_sg; 1412 dma_dev->device_prep_slave_sg = nbpf_prep_slave_sg;
1429 dma_dev->device_control = nbpf_control; 1413 dma_dev->device_config = nbpf_config;
1414 dma_dev->device_pause = nbpf_pause;
1415 dma_dev->device_terminate_all = nbpf_terminate_all;
1416
1417 dma_dev->src_addr_widths = NBPF_DMA_BUSWIDTHS;
1418 dma_dev->dst_addr_widths = NBPF_DMA_BUSWIDTHS;
1419 dma_dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1430 1420
1431 platform_set_drvdata(pdev, nbpf); 1421 platform_set_drvdata(pdev, nbpf);
1432 1422
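On the consumer side (sketch only, not from the patch): with device_slave_caps removed, the core derives dma_slave_caps from the dma_device fields set above, so a capability query stays the same.

#include <linux/dmaengine.h>

static bool chan_can_pause(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps))
		return false;

	/* cmd_pause reflects whether the provider set a .device_pause hook */
	return caps.cmd_pause;
}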
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index d5fbeaa1e7ba..ca31f1b45366 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -159,6 +159,10 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
159 return ERR_PTR(-ENODEV); 159 return ERR_PTR(-ENODEV);
160 } 160 }
161 161
162 /* Silently fail if there is not even the "dmas" property */
163 if (!of_find_property(np, "dmas", NULL))
164 return ERR_PTR(-ENODEV);
165
162 count = of_property_count_strings(np, "dma-names"); 166 count = of_property_count_strings(np, "dma-names");
163 if (count < 0) { 167 if (count < 0) {
164 pr_err("%s: dma-names property of node '%s' missing or empty\n", 168 pr_err("%s: dma-names property of node '%s' missing or empty\n",
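Consumer-side view of the hunk above, as a sketch (the "rx" name is an assumption): a node that simply has no "dmas" property no longer trips the dma-names error path, it just yields no channel.

#include <linux/dmaengine.h>

static struct dma_chan *try_get_rx_chan(struct device *dev)
{
	struct dma_chan *chan = dma_request_slave_channel(dev, "rx");

	/* A device node without "dmas" now fails quietly with no channel */
	if (!chan)
		dev_dbg(dev, "no DMA channel, falling back to PIO\n");

	return chan;
}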
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index c0016a68b446..7dd6dd121681 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -948,8 +948,10 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
948 return vchan_tx_prep(&c->vc, &d->vd, flags); 948 return vchan_tx_prep(&c->vc, &d->vd, flags);
949} 949}
950 950
951static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg) 951static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
952{ 952{
953 struct omap_chan *c = to_omap_dma_chan(chan);
954
953 if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || 955 if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
954 cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) 956 cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
955 return -EINVAL; 957 return -EINVAL;
@@ -959,8 +961,9 @@ static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *c
959 return 0; 961 return 0;
960} 962}
961 963
962static int omap_dma_terminate_all(struct omap_chan *c) 964static int omap_dma_terminate_all(struct dma_chan *chan)
963{ 965{
966 struct omap_chan *c = to_omap_dma_chan(chan);
964 struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device); 967 struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
965 unsigned long flags; 968 unsigned long flags;
966 LIST_HEAD(head); 969 LIST_HEAD(head);
@@ -996,8 +999,10 @@ static int omap_dma_terminate_all(struct omap_chan *c)
996 return 0; 999 return 0;
997} 1000}
998 1001
999static int omap_dma_pause(struct omap_chan *c) 1002static int omap_dma_pause(struct dma_chan *chan)
1000{ 1003{
1004 struct omap_chan *c = to_omap_dma_chan(chan);
1005
1001 /* Pause/Resume only allowed with cyclic mode */ 1006 /* Pause/Resume only allowed with cyclic mode */
1002 if (!c->cyclic) 1007 if (!c->cyclic)
1003 return -EINVAL; 1008 return -EINVAL;
@@ -1010,8 +1015,10 @@ static int omap_dma_pause(struct omap_chan *c)
1010 return 0; 1015 return 0;
1011} 1016}
1012 1017
1013static int omap_dma_resume(struct omap_chan *c) 1018static int omap_dma_resume(struct dma_chan *chan)
1014{ 1019{
1020 struct omap_chan *c = to_omap_dma_chan(chan);
1021
1015 /* Pause/Resume only allowed with cyclic mode */ 1022 /* Pause/Resume only allowed with cyclic mode */
1016 if (!c->cyclic) 1023 if (!c->cyclic)
1017 return -EINVAL; 1024 return -EINVAL;
@@ -1029,37 +1036,6 @@ static int omap_dma_resume(struct omap_chan *c)
1029 return 0; 1036 return 0;
1030} 1037}
1031 1038
1032static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1033 unsigned long arg)
1034{
1035 struct omap_chan *c = to_omap_dma_chan(chan);
1036 int ret;
1037
1038 switch (cmd) {
1039 case DMA_SLAVE_CONFIG:
1040 ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
1041 break;
1042
1043 case DMA_TERMINATE_ALL:
1044 ret = omap_dma_terminate_all(c);
1045 break;
1046
1047 case DMA_PAUSE:
1048 ret = omap_dma_pause(c);
1049 break;
1050
1051 case DMA_RESUME:
1052 ret = omap_dma_resume(c);
1053 break;
1054
1055 default:
1056 ret = -ENXIO;
1057 break;
1058 }
1059
1060 return ret;
1061}
1062
1063static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig) 1039static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
1064{ 1040{
1065 struct omap_chan *c; 1041 struct omap_chan *c;
@@ -1094,19 +1070,6 @@ static void omap_dma_free(struct omap_dmadev *od)
1094 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ 1070 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
1095 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) 1071 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
1096 1072
1097static int omap_dma_device_slave_caps(struct dma_chan *dchan,
1098 struct dma_slave_caps *caps)
1099{
1100 caps->src_addr_widths = OMAP_DMA_BUSWIDTHS;
1101 caps->dstn_addr_widths = OMAP_DMA_BUSWIDTHS;
1102 caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1103 caps->cmd_pause = true;
1104 caps->cmd_terminate = true;
1105 caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1106
1107 return 0;
1108}
1109
1110static int omap_dma_probe(struct platform_device *pdev) 1073static int omap_dma_probe(struct platform_device *pdev)
1111{ 1074{
1112 struct omap_dmadev *od; 1075 struct omap_dmadev *od;
@@ -1136,8 +1099,14 @@ static int omap_dma_probe(struct platform_device *pdev)
1136 od->ddev.device_issue_pending = omap_dma_issue_pending; 1099 od->ddev.device_issue_pending = omap_dma_issue_pending;
1137 od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg; 1100 od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
1138 od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic; 1101 od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
1139 od->ddev.device_control = omap_dma_control; 1102 od->ddev.device_config = omap_dma_slave_config;
1140 od->ddev.device_slave_caps = omap_dma_device_slave_caps; 1103 od->ddev.device_pause = omap_dma_pause;
1104 od->ddev.device_resume = omap_dma_resume;
1105 od->ddev.device_terminate_all = omap_dma_terminate_all;
1106 od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
1107 od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
1108 od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1109 od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1141 od->ddev.dev = &pdev->dev; 1110 od->ddev.dev = &pdev->dev;
1142 INIT_LIST_HEAD(&od->ddev.channels); 1111 INIT_LIST_HEAD(&od->ddev.channels);
1143 INIT_LIST_HEAD(&od->pending); 1112 INIT_LIST_HEAD(&od->pending);
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 6e0e47d76b23..35c143cb88da 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -665,16 +665,12 @@ err_desc_get:
665 return NULL; 665 return NULL;
666} 666}
667 667
668static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 668static int pd_device_terminate_all(struct dma_chan *chan)
669 unsigned long arg)
670{ 669{
671 struct pch_dma_chan *pd_chan = to_pd_chan(chan); 670 struct pch_dma_chan *pd_chan = to_pd_chan(chan);
672 struct pch_dma_desc *desc, *_d; 671 struct pch_dma_desc *desc, *_d;
673 LIST_HEAD(list); 672 LIST_HEAD(list);
674 673
675 if (cmd != DMA_TERMINATE_ALL)
676 return -ENXIO;
677
678 spin_lock_irq(&pd_chan->lock); 674 spin_lock_irq(&pd_chan->lock);
679 675
680 pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE); 676 pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);
@@ -932,7 +928,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
932 pd->dma.device_tx_status = pd_tx_status; 928 pd->dma.device_tx_status = pd_tx_status;
933 pd->dma.device_issue_pending = pd_issue_pending; 929 pd->dma.device_issue_pending = pd_issue_pending;
934 pd->dma.device_prep_slave_sg = pd_prep_slave_sg; 930 pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
935 pd->dma.device_control = pd_device_control; 931 pd->dma.device_terminate_all = pd_device_terminate_all;
936 932
937 err = dma_async_device_register(&pd->dma); 933 err = dma_async_device_register(&pd->dma);
938 if (err) { 934 if (err) {
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index bdf40b530032..0e1f56772855 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -504,6 +504,9 @@ struct dma_pl330_desc {
504 504
505 enum desc_status status; 505 enum desc_status status;
506 506
507 int bytes_requested;
508 bool last;
509
507 /* The channel which currently holds this desc */ 510 /* The channel which currently holds this desc */
508 struct dma_pl330_chan *pchan; 511 struct dma_pl330_chan *pchan;
509 512
@@ -1048,6 +1051,10 @@ static bool _trigger(struct pl330_thread *thrd)
1048 if (!req) 1051 if (!req)
1049 return true; 1052 return true;
1050 1053
1054 /* Return if req is running */
1055 if (idx == thrd->req_running)
1056 return true;
1057
1051 desc = req->desc; 1058 desc = req->desc;
1052 1059
1053 ns = desc->rqcfg.nonsecure ? 1 : 0; 1060 ns = desc->rqcfg.nonsecure ? 1 : 0;
@@ -1587,6 +1594,8 @@ static int pl330_update(struct pl330_dmac *pl330)
1587 descdone = thrd->req[active].desc; 1594 descdone = thrd->req[active].desc;
1588 thrd->req[active].desc = NULL; 1595 thrd->req[active].desc = NULL;
1589 1596
1597 thrd->req_running = -1;
1598
1590 /* Get going again ASAP */ 1599 /* Get going again ASAP */
1591 _start(thrd); 1600 _start(thrd);
1592 1601
@@ -2086,77 +2095,89 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
2086 return 1; 2095 return 1;
2087} 2096}
2088 2097
2089static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) 2098static int pl330_config(struct dma_chan *chan,
2099 struct dma_slave_config *slave_config)
2100{
2101 struct dma_pl330_chan *pch = to_pchan(chan);
2102
2103 if (slave_config->direction == DMA_MEM_TO_DEV) {
2104 if (slave_config->dst_addr)
2105 pch->fifo_addr = slave_config->dst_addr;
2106 if (slave_config->dst_addr_width)
2107 pch->burst_sz = __ffs(slave_config->dst_addr_width);
2108 if (slave_config->dst_maxburst)
2109 pch->burst_len = slave_config->dst_maxburst;
2110 } else if (slave_config->direction == DMA_DEV_TO_MEM) {
2111 if (slave_config->src_addr)
2112 pch->fifo_addr = slave_config->src_addr;
2113 if (slave_config->src_addr_width)
2114 pch->burst_sz = __ffs(slave_config->src_addr_width);
2115 if (slave_config->src_maxburst)
2116 pch->burst_len = slave_config->src_maxburst;
2117 }
2118
2119 return 0;
2120}
2121
2122static int pl330_terminate_all(struct dma_chan *chan)
2090{ 2123{
2091 struct dma_pl330_chan *pch = to_pchan(chan); 2124 struct dma_pl330_chan *pch = to_pchan(chan);
2092 struct dma_pl330_desc *desc; 2125 struct dma_pl330_desc *desc;
2093 unsigned long flags; 2126 unsigned long flags;
2094 struct pl330_dmac *pl330 = pch->dmac; 2127 struct pl330_dmac *pl330 = pch->dmac;
2095 struct dma_slave_config *slave_config;
2096 LIST_HEAD(list); 2128 LIST_HEAD(list);
2097 2129
2098 switch (cmd) { 2130 spin_lock_irqsave(&pch->lock, flags);
2099 case DMA_TERMINATE_ALL: 2131 spin_lock(&pl330->lock);
2100 pm_runtime_get_sync(pl330->ddma.dev); 2132 _stop(pch->thread);
2101 spin_lock_irqsave(&pch->lock, flags); 2133 spin_unlock(&pl330->lock);
2134
2135 pch->thread->req[0].desc = NULL;
2136 pch->thread->req[1].desc = NULL;
2137 pch->thread->req_running = -1;
2138
2139 /* Mark all desc done */
2140 list_for_each_entry(desc, &pch->submitted_list, node) {
2141 desc->status = FREE;
2142 dma_cookie_complete(&desc->txd);
2143 }
2102 2144
2103 spin_lock(&pl330->lock); 2145 list_for_each_entry(desc, &pch->work_list , node) {
2104 _stop(pch->thread); 2146 desc->status = FREE;
2105 spin_unlock(&pl330->lock); 2147 dma_cookie_complete(&desc->txd);
2148 }
2106 2149
2107 pch->thread->req[0].desc = NULL; 2150 list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool);
2108 pch->thread->req[1].desc = NULL; 2151 list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
2109 pch->thread->req_running = -1; 2152 list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
2153 spin_unlock_irqrestore(&pch->lock, flags);
2110 2154
2111 /* Mark all desc done */ 2155 return 0;
2112 list_for_each_entry(desc, &pch->submitted_list, node) { 2156}
2113 desc->status = FREE;
2114 dma_cookie_complete(&desc->txd);
2115 }
2116 2157
2117 list_for_each_entry(desc, &pch->work_list , node) { 2158/*
 2118 desc->status = FREE; 2159 * We don't support the DMA_RESUME command because of hardware
 2119 dma_cookie_complete(&desc->txd); 2160 * limitations, so after pausing the channel we cannot restore
 2120 } 2161 * it to active state. We have to terminate the channel and set
 2162 * up the DMA transfer again. This pause feature was implemented to
 2163 * allow safely reading the residue before channel termination.
2164 */
2165int pl330_pause(struct dma_chan *chan)
2166{
2167 struct dma_pl330_chan *pch = to_pchan(chan);
2168 struct pl330_dmac *pl330 = pch->dmac;
2169 unsigned long flags;
2121 2170
2122 list_for_each_entry(desc, &pch->completed_list , node) { 2171 pm_runtime_get_sync(pl330->ddma.dev);
2123 desc->status = FREE; 2172 spin_lock_irqsave(&pch->lock, flags);
2124 dma_cookie_complete(&desc->txd);
2125 }
2126 2173
2127 if (!list_empty(&pch->work_list)) 2174 spin_lock(&pl330->lock);
2128 pm_runtime_put(pl330->ddma.dev); 2175 _stop(pch->thread);
2176 spin_unlock(&pl330->lock);
2129 2177
2130 list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool); 2178 spin_unlock_irqrestore(&pch->lock, flags);
2131 list_splice_tail_init(&pch->work_list, &pl330->desc_pool); 2179 pm_runtime_mark_last_busy(pl330->ddma.dev);
2132 list_splice_tail_init(&pch->completed_list, &pl330->desc_pool); 2180 pm_runtime_put_autosuspend(pl330->ddma.dev);
2133 spin_unlock_irqrestore(&pch->lock, flags);
2134 pm_runtime_mark_last_busy(pl330->ddma.dev);
2135 pm_runtime_put_autosuspend(pl330->ddma.dev);
2136 break;
2137 case DMA_SLAVE_CONFIG:
2138 slave_config = (struct dma_slave_config *)arg;
2139
2140 if (slave_config->direction == DMA_MEM_TO_DEV) {
2141 if (slave_config->dst_addr)
2142 pch->fifo_addr = slave_config->dst_addr;
2143 if (slave_config->dst_addr_width)
2144 pch->burst_sz = __ffs(slave_config->dst_addr_width);
2145 if (slave_config->dst_maxburst)
2146 pch->burst_len = slave_config->dst_maxburst;
2147 } else if (slave_config->direction == DMA_DEV_TO_MEM) {
2148 if (slave_config->src_addr)
2149 pch->fifo_addr = slave_config->src_addr;
2150 if (slave_config->src_addr_width)
2151 pch->burst_sz = __ffs(slave_config->src_addr_width);
2152 if (slave_config->src_maxburst)
2153 pch->burst_len = slave_config->src_maxburst;
2154 }
2155 break;
2156 default:
2157 dev_err(pch->dmac->ddma.dev, "Not supported command.\n");
2158 return -ENXIO;
2159 }
2160 2181
2161 return 0; 2182 return 0;
2162} 2183}
@@ -2182,11 +2203,74 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
2182 pm_runtime_put_autosuspend(pch->dmac->ddma.dev); 2203 pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
2183} 2204}
2184 2205
2206int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
2207 struct dma_pl330_desc *desc)
2208{
2209 struct pl330_thread *thrd = pch->thread;
2210 struct pl330_dmac *pl330 = pch->dmac;
2211 void __iomem *regs = thrd->dmac->base;
2212 u32 val, addr;
2213
2214 pm_runtime_get_sync(pl330->ddma.dev);
2215 val = addr = 0;
2216 if (desc->rqcfg.src_inc) {
2217 val = readl(regs + SA(thrd->id));
2218 addr = desc->px.src_addr;
2219 } else {
2220 val = readl(regs + DA(thrd->id));
2221 addr = desc->px.dst_addr;
2222 }
2223 pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
2224 pm_runtime_put_autosuspend(pl330->ddma.dev);
2225 return val - addr;
2226}
2227
2185static enum dma_status 2228static enum dma_status
2186pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, 2229pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
2187 struct dma_tx_state *txstate) 2230 struct dma_tx_state *txstate)
2188{ 2231{
2189 return dma_cookie_status(chan, cookie, txstate); 2232 enum dma_status ret;
2233 unsigned long flags;
2234 struct dma_pl330_desc *desc, *running = NULL;
2235 struct dma_pl330_chan *pch = to_pchan(chan);
2236 unsigned int transferred, residual = 0;
2237
2238 ret = dma_cookie_status(chan, cookie, txstate);
2239
2240 if (!txstate)
2241 return ret;
2242
2243 if (ret == DMA_COMPLETE)
2244 goto out;
2245
2246 spin_lock_irqsave(&pch->lock, flags);
2247
2248 if (pch->thread->req_running != -1)
2249 running = pch->thread->req[pch->thread->req_running].desc;
2250
2251 /* Check in pending list */
2252 list_for_each_entry(desc, &pch->work_list, node) {
2253 if (desc->status == DONE)
2254 transferred = desc->bytes_requested;
2255 else if (running && desc == running)
2256 transferred =
2257 pl330_get_current_xferred_count(pch, desc);
2258 else
2259 transferred = 0;
2260 residual += desc->bytes_requested - transferred;
2261 if (desc->txd.cookie == cookie) {
2262 ret = desc->status;
2263 break;
2264 }
2265 if (desc->last)
2266 residual = 0;
2267 }
2268 spin_unlock_irqrestore(&pch->lock, flags);
2269
2270out:
2271 dma_set_residue(txstate, residual);
2272
2273 return ret;
2190} 2274}
2191 2275
2192static void pl330_issue_pending(struct dma_chan *chan) 2276static void pl330_issue_pending(struct dma_chan *chan)
@@ -2231,12 +2315,14 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
2231 desc->txd.callback = last->txd.callback; 2315 desc->txd.callback = last->txd.callback;
2232 desc->txd.callback_param = last->txd.callback_param; 2316 desc->txd.callback_param = last->txd.callback_param;
2233 } 2317 }
2318 last->last = false;
2234 2319
2235 dma_cookie_assign(&desc->txd); 2320 dma_cookie_assign(&desc->txd);
2236 2321
2237 list_move_tail(&desc->node, &pch->submitted_list); 2322 list_move_tail(&desc->node, &pch->submitted_list);
2238 } 2323 }
2239 2324
2325 last->last = true;
2240 cookie = dma_cookie_assign(&last->txd); 2326 cookie = dma_cookie_assign(&last->txd);
2241 list_add_tail(&last->node, &pch->submitted_list); 2327 list_add_tail(&last->node, &pch->submitted_list);
2242 spin_unlock_irqrestore(&pch->lock, flags); 2328 spin_unlock_irqrestore(&pch->lock, flags);
@@ -2459,6 +2545,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
2459 desc->rqtype = direction; 2545 desc->rqtype = direction;
2460 desc->rqcfg.brst_size = pch->burst_sz; 2546 desc->rqcfg.brst_size = pch->burst_sz;
2461 desc->rqcfg.brst_len = 1; 2547 desc->rqcfg.brst_len = 1;
2548 desc->bytes_requested = period_len;
2462 fill_px(&desc->px, dst, src, period_len); 2549 fill_px(&desc->px, dst, src, period_len);
2463 2550
2464 if (!first) 2551 if (!first)
@@ -2601,6 +2688,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2601 desc->rqcfg.brst_size = pch->burst_sz; 2688 desc->rqcfg.brst_size = pch->burst_sz;
2602 desc->rqcfg.brst_len = 1; 2689 desc->rqcfg.brst_len = 1;
2603 desc->rqtype = direction; 2690 desc->rqtype = direction;
2691 desc->bytes_requested = sg_dma_len(sg);
2604 } 2692 }
2605 2693
2606 /* Return the last desc in the chain */ 2694 /* Return the last desc in the chain */
@@ -2623,19 +2711,6 @@ static irqreturn_t pl330_irq_handler(int irq, void *data)
2623 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ 2711 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
2624 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) 2712 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
2625 2713
2626static int pl330_dma_device_slave_caps(struct dma_chan *dchan,
2627 struct dma_slave_caps *caps)
2628{
2629 caps->src_addr_widths = PL330_DMA_BUSWIDTHS;
2630 caps->dstn_addr_widths = PL330_DMA_BUSWIDTHS;
2631 caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2632 caps->cmd_pause = false;
2633 caps->cmd_terminate = true;
2634 caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
2635
2636 return 0;
2637}
2638
2639/* 2714/*
2640 * Runtime PM callbacks are provided by amba/bus.c driver. 2715 * Runtime PM callbacks are provided by amba/bus.c driver.
2641 * 2716 *
@@ -2793,9 +2868,14 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
2793 pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic; 2868 pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
2794 pd->device_tx_status = pl330_tx_status; 2869 pd->device_tx_status = pl330_tx_status;
2795 pd->device_prep_slave_sg = pl330_prep_slave_sg; 2870 pd->device_prep_slave_sg = pl330_prep_slave_sg;
2796 pd->device_control = pl330_control; 2871 pd->device_config = pl330_config;
2872 pd->device_pause = pl330_pause;
2873 pd->device_terminate_all = pl330_terminate_all;
2797 pd->device_issue_pending = pl330_issue_pending; 2874 pd->device_issue_pending = pl330_issue_pending;
2798 pd->device_slave_caps = pl330_dma_device_slave_caps; 2875 pd->src_addr_widths = PL330_DMA_BUSWIDTHS;
2876 pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
2877 pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2878 pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
2799 2879
2800 ret = dma_async_device_register(pd); 2880 ret = dma_async_device_register(pd);
2801 if (ret) { 2881 if (ret) {
@@ -2847,7 +2927,7 @@ probe_err3:
2847 2927
2848 /* Flush the channel */ 2928 /* Flush the channel */
2849 if (pch->thread) { 2929 if (pch->thread) {
2850 pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0); 2930 pl330_terminate_all(&pch->chan);
2851 pl330_free_chan_resources(&pch->chan); 2931 pl330_free_chan_resources(&pch->chan);
2852 } 2932 }
2853 } 2933 }
@@ -2878,7 +2958,7 @@ static int pl330_remove(struct amba_device *adev)
2878 2958
2879 /* Flush the channel */ 2959 /* Flush the channel */
2880 if (pch->thread) { 2960 if (pch->thread) {
2881 pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0); 2961 pl330_terminate_all(&pch->chan);
2882 pl330_free_chan_resources(&pch->chan); 2962 pl330_free_chan_resources(&pch->chan);
2883 } 2963 }
2884 } 2964 }
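The pl330 hunks above replace the single device_control() entry point with dedicated device_config(), device_pause() and device_terminate_all() callbacks and add per-descriptor residue accounting (bytes_requested plus dma_set_residue() in pl330_tx_status()). Since pl330 pause exists only so a client can read a stable residue before tearing the channel down, a minimal client-side sketch could look as follows; the channel and cookie are assumed to come from the usual dmaengine client setup, and my_read_residue_and_stop() is an illustrative name, not something from the patch.

#include <linux/dmaengine.h>

/*
 * Pause the channel so the hardware counters stop moving, read the
 * residue reported for a submitted cookie, then terminate.  pl330
 * implements pause but not resume (see the comment in the diff above),
 * so termination is the only way forward after the pause.
 */
static u32 my_read_residue_and_stop(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state = { };

	dmaengine_pause(chan);

	/* Fills state.residue via the driver's device_tx_status(). */
	dmaengine_tx_status(chan, cookie, &state);

	dmaengine_terminate_all(chan);

	return state.residue;
}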
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c
index 3122a99ec06b..d7a33b3ac466 100644
--- a/drivers/dma/qcom_bam_dma.c
+++ b/drivers/dma/qcom_bam_dma.c
@@ -530,11 +530,18 @@ static void bam_free_chan(struct dma_chan *chan)
530 * Sets slave configuration for channel 530 * Sets slave configuration for channel
531 * 531 *
532 */ 532 */
533static void bam_slave_config(struct bam_chan *bchan, 533static int bam_slave_config(struct dma_chan *chan,
534 struct dma_slave_config *cfg) 534 struct dma_slave_config *cfg)
535{ 535{
536 struct bam_chan *bchan = to_bam_chan(chan);
537 unsigned long flag;
538
539 spin_lock_irqsave(&bchan->vc.lock, flag);
536 memcpy(&bchan->slave, cfg, sizeof(*cfg)); 540 memcpy(&bchan->slave, cfg, sizeof(*cfg));
537 bchan->reconfigure = 1; 541 bchan->reconfigure = 1;
542 spin_unlock_irqrestore(&bchan->vc.lock, flag);
543
544 return 0;
538} 545}
539 546
540/** 547/**
@@ -627,8 +634,9 @@ err_out:
627 * No callbacks are done 634 * No callbacks are done
628 * 635 *
629 */ 636 */
630static void bam_dma_terminate_all(struct bam_chan *bchan) 637static int bam_dma_terminate_all(struct dma_chan *chan)
631{ 638{
639 struct bam_chan *bchan = to_bam_chan(chan);
632 unsigned long flag; 640 unsigned long flag;
633 LIST_HEAD(head); 641 LIST_HEAD(head);
634 642
@@ -643,56 +651,46 @@ static void bam_dma_terminate_all(struct bam_chan *bchan)
643 spin_unlock_irqrestore(&bchan->vc.lock, flag); 651 spin_unlock_irqrestore(&bchan->vc.lock, flag);
644 652
645 vchan_dma_desc_free_list(&bchan->vc, &head); 653 vchan_dma_desc_free_list(&bchan->vc, &head);
654
655 return 0;
646} 656}
647 657
648/** 658/**
649 * bam_control - DMA device control 659 * bam_pause - Pause DMA channel
650 * @chan: dma channel 660 * @chan: dma channel
651 * @cmd: control cmd
652 * @arg: cmd argument
653 * 661 *
654 * Perform DMA control command 662 */
663static int bam_pause(struct dma_chan *chan)
664{
665 struct bam_chan *bchan = to_bam_chan(chan);
666 struct bam_device *bdev = bchan->bdev;
667 unsigned long flag;
668
669 spin_lock_irqsave(&bchan->vc.lock, flag);
670 writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
671 bchan->paused = 1;
672 spin_unlock_irqrestore(&bchan->vc.lock, flag);
673
674 return 0;
675}
676
677/**
678 * bam_resume - Resume DMA channel operations
679 * @chan: dma channel
655 * 680 *
656 */ 681 */
657static int bam_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 682static int bam_resume(struct dma_chan *chan)
658 unsigned long arg)
659{ 683{
660 struct bam_chan *bchan = to_bam_chan(chan); 684 struct bam_chan *bchan = to_bam_chan(chan);
661 struct bam_device *bdev = bchan->bdev; 685 struct bam_device *bdev = bchan->bdev;
662 int ret = 0;
663 unsigned long flag; 686 unsigned long flag;
664 687
665 switch (cmd) { 688 spin_lock_irqsave(&bchan->vc.lock, flag);
666 case DMA_PAUSE: 689 writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
667 spin_lock_irqsave(&bchan->vc.lock, flag); 690 bchan->paused = 0;
668 writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT)); 691 spin_unlock_irqrestore(&bchan->vc.lock, flag);
669 bchan->paused = 1;
670 spin_unlock_irqrestore(&bchan->vc.lock, flag);
671 break;
672
673 case DMA_RESUME:
674 spin_lock_irqsave(&bchan->vc.lock, flag);
675 writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
676 bchan->paused = 0;
677 spin_unlock_irqrestore(&bchan->vc.lock, flag);
678 break;
679
680 case DMA_TERMINATE_ALL:
681 bam_dma_terminate_all(bchan);
682 break;
683
684 case DMA_SLAVE_CONFIG:
685 spin_lock_irqsave(&bchan->vc.lock, flag);
686 bam_slave_config(bchan, (struct dma_slave_config *)arg);
687 spin_unlock_irqrestore(&bchan->vc.lock, flag);
688 break;
689
690 default:
691 ret = -ENXIO;
692 break;
693 }
694 692
695 return ret; 693 return 0;
696} 694}
697 695
698/** 696/**
@@ -1148,7 +1146,10 @@ static int bam_dma_probe(struct platform_device *pdev)
1148 bdev->common.device_alloc_chan_resources = bam_alloc_chan; 1146 bdev->common.device_alloc_chan_resources = bam_alloc_chan;
1149 bdev->common.device_free_chan_resources = bam_free_chan; 1147 bdev->common.device_free_chan_resources = bam_free_chan;
1150 bdev->common.device_prep_slave_sg = bam_prep_slave_sg; 1148 bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
1151 bdev->common.device_control = bam_control; 1149 bdev->common.device_config = bam_slave_config;
1150 bdev->common.device_pause = bam_pause;
1151 bdev->common.device_resume = bam_resume;
1152 bdev->common.device_terminate_all = bam_dma_terminate_all;
1152 bdev->common.device_issue_pending = bam_issue_pending; 1153 bdev->common.device_issue_pending = bam_issue_pending;
1153 bdev->common.device_tx_status = bam_tx_status; 1154 bdev->common.device_tx_status = bam_tx_status;
1154 bdev->common.dev = bdev->dev; 1155 bdev->common.dev = bdev->dev;
@@ -1187,7 +1188,7 @@ static int bam_dma_remove(struct platform_device *pdev)
1187 devm_free_irq(bdev->dev, bdev->irq, bdev); 1188 devm_free_irq(bdev->dev, bdev->irq, bdev);
1188 1189
1189 for (i = 0; i < bdev->num_channels; i++) { 1190 for (i = 0; i < bdev->num_channels; i++) {
1190 bam_dma_terminate_all(&bdev->channels[i]); 1191 bam_dma_terminate_all(&bdev->channels[i].vc.chan);
1191 tasklet_kill(&bdev->channels[i].vc.task); 1192 tasklet_kill(&bdev->channels[i].vc.task);
1192 1193
1193 dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, 1194 dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
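Like pl330, qcom_bam_dma now registers one callback per operation (device_config, device_pause, device_resume and device_terminate_all) instead of multiplexing everything through device_control(). The sketch below is a simplified paraphrase of how the dmaengine core dispatches a client call to such a hook, not a quotation of the core code; example_slave_config() is an illustrative name.

#include <linux/dmaengine.h>
#include <linux/errno.h>

/*
 * Simplified dispatch sketch: with the per-operation callbacks in place,
 * a client call such as dmaengine_slave_config() reduces to invoking the
 * hook the driver registered at probe time, e.g. bam_slave_config() above.
 */
static int example_slave_config(struct dma_chan *chan,
				struct dma_slave_config *config)
{
	if (chan->device->device_config)
		return chan->device->device_config(chan, config);

	return -ENOSYS;
}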
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 6941a77521c3..2f91da3db836 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -384,20 +384,30 @@ static u32 s3c24xx_dma_getbytes_chan(struct s3c24xx_dma_chan *s3cchan)
384 return tc * txd->width; 384 return tc * txd->width;
385} 385}
386 386
387static int s3c24xx_dma_set_runtime_config(struct s3c24xx_dma_chan *s3cchan, 387static int s3c24xx_dma_set_runtime_config(struct dma_chan *chan,
388 struct dma_slave_config *config) 388 struct dma_slave_config *config)
389{ 389{
390 if (!s3cchan->slave) 390 struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
391 return -EINVAL; 391 unsigned long flags;
392 int ret = 0;
392 393
393 /* Reject definitely invalid configurations */ 394 /* Reject definitely invalid configurations */
394 if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || 395 if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
395 config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) 396 config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
396 return -EINVAL; 397 return -EINVAL;
397 398
399 spin_lock_irqsave(&s3cchan->vc.lock, flags);
400
401 if (!s3cchan->slave) {
402 ret = -EINVAL;
403 goto out;
404 }
405
398 s3cchan->cfg = *config; 406 s3cchan->cfg = *config;
399 407
400 return 0; 408out:
409 spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
410 return ret;
401} 411}
402 412
403/* 413/*
@@ -703,8 +713,7 @@ static irqreturn_t s3c24xx_dma_irq(int irq, void *data)
703 * The DMA ENGINE API 713 * The DMA ENGINE API
704 */ 714 */
705 715
706static int s3c24xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 716static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
707 unsigned long arg)
708{ 717{
709 struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); 718 struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
710 struct s3c24xx_dma_engine *s3cdma = s3cchan->host; 719 struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
@@ -713,40 +722,28 @@ static int s3c24xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
713 722
714 spin_lock_irqsave(&s3cchan->vc.lock, flags); 723 spin_lock_irqsave(&s3cchan->vc.lock, flags);
715 724
716 switch (cmd) { 725 if (!s3cchan->phy && !s3cchan->at) {
717 case DMA_SLAVE_CONFIG: 726 dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n",
718 ret = s3c24xx_dma_set_runtime_config(s3cchan, 727 s3cchan->id);
719 (struct dma_slave_config *)arg); 728 ret = -EINVAL;
720 break; 729 goto unlock;
721 case DMA_TERMINATE_ALL: 730 }
722 if (!s3cchan->phy && !s3cchan->at) {
723 dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n",
724 s3cchan->id);
725 ret = -EINVAL;
726 break;
727 }
728 731
729 s3cchan->state = S3C24XX_DMA_CHAN_IDLE; 732 s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
730 733
731 /* Mark physical channel as free */ 734 /* Mark physical channel as free */
732 if (s3cchan->phy) 735 if (s3cchan->phy)
733 s3c24xx_dma_phy_free(s3cchan); 736 s3c24xx_dma_phy_free(s3cchan);
734 737
735 /* Dequeue current job */ 738 /* Dequeue current job */
736 if (s3cchan->at) { 739 if (s3cchan->at) {
737 s3c24xx_dma_desc_free(&s3cchan->at->vd); 740 s3c24xx_dma_desc_free(&s3cchan->at->vd);
738 s3cchan->at = NULL; 741 s3cchan->at = NULL;
739 }
740
741 /* Dequeue jobs not yet fired as well */
742 s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
743 break;
744 default:
745 /* Unknown command */
746 ret = -ENXIO;
747 break;
748 } 742 }
749 743
744 /* Dequeue jobs not yet fired as well */
745 s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
746unlock:
750 spin_unlock_irqrestore(&s3cchan->vc.lock, flags); 747 spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
751 748
752 return ret; 749 return ret;
@@ -1300,7 +1297,8 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
1300 s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy; 1297 s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy;
1301 s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status; 1298 s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status;
1302 s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending; 1299 s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending;
1303 s3cdma->memcpy.device_control = s3c24xx_dma_control; 1300 s3cdma->memcpy.device_config = s3c24xx_dma_set_runtime_config;
1301 s3cdma->memcpy.device_terminate_all = s3c24xx_dma_terminate_all;
1304 1302
1305 /* Initialize slave engine for SoC internal dedicated peripherals */ 1303 /* Initialize slave engine for SoC internal dedicated peripherals */
1306 dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask); 1304 dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
@@ -1315,7 +1313,8 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
1315 s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending; 1313 s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending;
1316 s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg; 1314 s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg;
1317 s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic; 1315 s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic;
1318 s3cdma->slave.device_control = s3c24xx_dma_control; 1316 s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config;
1317 s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all;
1319 1318
1320 /* Register as many memcpy channels as there are physical channels */ 1319 /* Register as many memcpy channels as there are physical channels */
1321 ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy, 1320 ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy,
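s3c24xx_dma_set_runtime_config() now takes the dma_chan directly and is reached through the device_config callback, which is what dmaengine_slave_config() invokes on the client side. A minimal client-side sketch, assuming a MEM_TO_DEV channel and made-up FIFO address and burst values:

#include <linux/dmaengine.h>

/* Hypothetical client configuration for a memory-to-device transfer. */
static int my_configure_tx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction      = DMA_MEM_TO_DEV,
		.dst_addr       = fifo_addr,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst   = 8,
	};

	/* Ends up in the driver's device_config callback. */
	return dmaengine_slave_config(chan, &cfg);
}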
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 96bb62c39c41..5adf5407a8cb 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -669,8 +669,10 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
669 return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 669 return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
670} 670}
671 671
672static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg) 672static int sa11x0_dma_device_config(struct dma_chan *chan,
673 struct dma_slave_config *cfg)
673{ 674{
675 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
674 u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW); 676 u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
675 dma_addr_t addr; 677 dma_addr_t addr;
676 enum dma_slave_buswidth width; 678 enum dma_slave_buswidth width;
@@ -704,99 +706,101 @@ static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_c
704 return 0; 706 return 0;
705} 707}
706 708
707static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 709static int sa11x0_dma_device_pause(struct dma_chan *chan)
708 unsigned long arg)
709{ 710{
710 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); 711 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
711 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); 712 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
712 struct sa11x0_dma_phy *p; 713 struct sa11x0_dma_phy *p;
713 LIST_HEAD(head); 714 LIST_HEAD(head);
714 unsigned long flags; 715 unsigned long flags;
715 int ret;
716 716
717 switch (cmd) { 717 dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
718 case DMA_SLAVE_CONFIG: 718 spin_lock_irqsave(&c->vc.lock, flags);
719 return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg); 719 if (c->status == DMA_IN_PROGRESS) {
720 720 c->status = DMA_PAUSED;
721 case DMA_TERMINATE_ALL:
722 dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
723 /* Clear the tx descriptor lists */
724 spin_lock_irqsave(&c->vc.lock, flags);
725 vchan_get_all_descriptors(&c->vc, &head);
726 721
727 p = c->phy; 722 p = c->phy;
728 if (p) { 723 if (p) {
729 dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num); 724 writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
730 /* vchan is assigned to a pchan - stop the channel */ 725 } else {
731 writel(DCSR_RUN | DCSR_IE |
732 DCSR_STRTA | DCSR_DONEA |
733 DCSR_STRTB | DCSR_DONEB,
734 p->base + DMA_DCSR_C);
735
736 if (p->txd_load) {
737 if (p->txd_load != p->txd_done)
738 list_add_tail(&p->txd_load->vd.node, &head);
739 p->txd_load = NULL;
740 }
741 if (p->txd_done) {
742 list_add_tail(&p->txd_done->vd.node, &head);
743 p->txd_done = NULL;
744 }
745 c->phy = NULL;
746 spin_lock(&d->lock); 726 spin_lock(&d->lock);
747 p->vchan = NULL; 727 list_del_init(&c->node);
748 spin_unlock(&d->lock); 728 spin_unlock(&d->lock);
749 tasklet_schedule(&d->task);
750 } 729 }
751 spin_unlock_irqrestore(&c->vc.lock, flags); 730 }
752 vchan_dma_desc_free_list(&c->vc, &head); 731 spin_unlock_irqrestore(&c->vc.lock, flags);
753 ret = 0;
754 break;
755 732
756 case DMA_PAUSE: 733 return 0;
757 dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); 734}
758 spin_lock_irqsave(&c->vc.lock, flags);
759 if (c->status == DMA_IN_PROGRESS) {
760 c->status = DMA_PAUSED;
761 735
762 p = c->phy; 736static int sa11x0_dma_device_resume(struct dma_chan *chan)
763 if (p) { 737{
764 writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C); 738 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
765 } else { 739 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
766 spin_lock(&d->lock); 740 struct sa11x0_dma_phy *p;
767 list_del_init(&c->node); 741 LIST_HEAD(head);
768 spin_unlock(&d->lock); 742 unsigned long flags;
769 }
770 }
771 spin_unlock_irqrestore(&c->vc.lock, flags);
772 ret = 0;
773 break;
774 743
775 case DMA_RESUME: 744 dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
776 dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); 745 spin_lock_irqsave(&c->vc.lock, flags);
777 spin_lock_irqsave(&c->vc.lock, flags); 746 if (c->status == DMA_PAUSED) {
778 if (c->status == DMA_PAUSED) { 747 c->status = DMA_IN_PROGRESS;
779 c->status = DMA_IN_PROGRESS; 748
780 749 p = c->phy;
781 p = c->phy; 750 if (p) {
782 if (p) { 751 writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
783 writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S); 752 } else if (!list_empty(&c->vc.desc_issued)) {
784 } else if (!list_empty(&c->vc.desc_issued)) { 753 spin_lock(&d->lock);
785 spin_lock(&d->lock); 754 list_add_tail(&c->node, &d->chan_pending);
786 list_add_tail(&c->node, &d->chan_pending); 755 spin_unlock(&d->lock);
787 spin_unlock(&d->lock);
788 }
789 } 756 }
790 spin_unlock_irqrestore(&c->vc.lock, flags); 757 }
791 ret = 0; 758 spin_unlock_irqrestore(&c->vc.lock, flags);
792 break;
793 759
794 default: 760 return 0;
795 ret = -ENXIO; 761}
796 break; 762
763static int sa11x0_dma_device_terminate_all(struct dma_chan *chan)
764{
765 struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
766 struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
767 struct sa11x0_dma_phy *p;
768 LIST_HEAD(head);
769 unsigned long flags;
770
771 dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
772 /* Clear the tx descriptor lists */
773 spin_lock_irqsave(&c->vc.lock, flags);
774 vchan_get_all_descriptors(&c->vc, &head);
775
776 p = c->phy;
777 if (p) {
778 dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
779 /* vchan is assigned to a pchan - stop the channel */
780 writel(DCSR_RUN | DCSR_IE |
781 DCSR_STRTA | DCSR_DONEA |
782 DCSR_STRTB | DCSR_DONEB,
783 p->base + DMA_DCSR_C);
784
785 if (p->txd_load) {
786 if (p->txd_load != p->txd_done)
787 list_add_tail(&p->txd_load->vd.node, &head);
788 p->txd_load = NULL;
789 }
790 if (p->txd_done) {
791 list_add_tail(&p->txd_done->vd.node, &head);
792 p->txd_done = NULL;
793 }
794 c->phy = NULL;
795 spin_lock(&d->lock);
796 p->vchan = NULL;
797 spin_unlock(&d->lock);
798 tasklet_schedule(&d->task);
797 } 799 }
800 spin_unlock_irqrestore(&c->vc.lock, flags);
801 vchan_dma_desc_free_list(&c->vc, &head);
798 802
799 return ret; 803 return 0;
800} 804}
801 805
802struct sa11x0_dma_channel_desc { 806struct sa11x0_dma_channel_desc {
@@ -833,7 +837,10 @@ static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
833 dmadev->dev = dev; 837 dmadev->dev = dev;
834 dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources; 838 dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources;
835 dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources; 839 dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
836 dmadev->device_control = sa11x0_dma_control; 840 dmadev->device_config = sa11x0_dma_device_config;
841 dmadev->device_pause = sa11x0_dma_device_pause;
842 dmadev->device_resume = sa11x0_dma_device_resume;
843 dmadev->device_terminate_all = sa11x0_dma_device_terminate_all;
837 dmadev->device_tx_status = sa11x0_dma_tx_status; 844 dmadev->device_tx_status = sa11x0_dma_tx_status;
838 dmadev->device_issue_pending = sa11x0_dma_issue_pending; 845 dmadev->device_issue_pending = sa11x0_dma_issue_pending;
839 846
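Unlike pl330, sa11x0 implements both device_pause and device_resume, so a paused channel can be restarted. Below is a minimal client-side sketch of a pause/resume window around an in-flight (for example cyclic) transfer; my_pause_window() is an illustrative name.

#include <linux/dmaengine.h>

/* Hypothetical: briefly halt a running transfer, then let it continue. */
static void my_pause_window(struct dma_chan *chan)
{
	if (dmaengine_pause(chan))
		return;	/* the controller cannot pause, nothing to undo */

	/* ... inspect buffers, reprogram the peripheral, and so on ... */

	dmaengine_resume(chan);
}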
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 0349125a2e20..8190ad225a1b 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -2,6 +2,10 @@
2# DMA engine configuration for sh 2# DMA engine configuration for sh
3# 3#
4 4
5config RENESAS_DMA
6 bool
7 select DMA_ENGINE
8
5# 9#
6# DMA Engine Helpers 10# DMA Engine Helpers
7# 11#
@@ -12,7 +16,7 @@ config SH_DMAE_BASE
12 depends on !SUPERH || SH_DMA 16 depends on !SUPERH || SH_DMA
13 depends on !SH_DMA_API 17 depends on !SH_DMA_API
14 default y 18 default y
15 select DMA_ENGINE 19 select RENESAS_DMA
16 help 20 help
17 Enable support for the Renesas SuperH DMA controllers. 21 Enable support for the Renesas SuperH DMA controllers.
18 22
@@ -52,3 +56,11 @@ config RCAR_AUDMAC_PP
52 depends on SH_DMAE_BASE 56 depends on SH_DMAE_BASE
53 help 57 help
54 Enable support for the Renesas R-Car Audio DMAC Peripheral Peripheral controllers. 58 Enable support for the Renesas R-Car Audio DMAC Peripheral Peripheral controllers.
59
60config RCAR_DMAC
61 tristate "Renesas R-Car Gen2 DMA Controller"
62 depends on ARCH_SHMOBILE || COMPILE_TEST
63 select RENESAS_DMA
64 help
 65 This driver supports the general-purpose DMA controller found in
 66 Renesas R-Car second generation SoCs.
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index 0a5cfdb76e45..2852f9db61a4 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -16,3 +16,4 @@ obj-$(CONFIG_SH_DMAE) += shdma.o
16obj-$(CONFIG_SUDMAC) += sudmac.o 16obj-$(CONFIG_SUDMAC) += sudmac.o
17obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o 17obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
18obj-$(CONFIG_RCAR_AUDMAC_PP) += rcar-audmapp.o 18obj-$(CONFIG_RCAR_AUDMAC_PP) += rcar-audmapp.o
19obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
new file mode 100644
index 000000000000..a18d16cc4795
--- /dev/null
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -0,0 +1,1770 @@
1/*
2 * Renesas R-Car Gen2 DMA Controller Driver
3 *
4 * Copyright (C) 2014 Renesas Electronics Inc.
5 *
6 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
7 *
8 * This is free software; you can redistribute it and/or modify
9 * it under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/dma-mapping.h>
14#include <linux/dmaengine.h>
15#include <linux/interrupt.h>
16#include <linux/list.h>
17#include <linux/module.h>
18#include <linux/mutex.h>
19#include <linux/of.h>
20#include <linux/of_dma.h>
21#include <linux/of_platform.h>
22#include <linux/platform_device.h>
23#include <linux/pm_runtime.h>
24#include <linux/slab.h>
25#include <linux/spinlock.h>
26
27#include "../dmaengine.h"
28
29/*
30 * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer
31 * @node: entry in the parent's chunks list
32 * @src_addr: device source address
33 * @dst_addr: device destination address
34 * @size: transfer size in bytes
35 */
36struct rcar_dmac_xfer_chunk {
37 struct list_head node;
38
39 dma_addr_t src_addr;
40 dma_addr_t dst_addr;
41 u32 size;
42};
43
44/*
45 * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk
46 * @sar: value of the SAR register (source address)
47 * @dar: value of the DAR register (destination address)
48 * @tcr: value of the TCR register (transfer count)
49 */
50struct rcar_dmac_hw_desc {
51 u32 sar;
52 u32 dar;
53 u32 tcr;
54 u32 reserved;
55} __attribute__((__packed__));
56
57/*
58 * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor
59 * @async_tx: base DMA asynchronous transaction descriptor
60 * @direction: direction of the DMA transfer
61 * @xfer_shift: log2 of the transfer size
62 * @chcr: value of the channel configuration register for this transfer
63 * @node: entry in the channel's descriptors lists
64 * @chunks: list of transfer chunks for this transfer
65 * @running: the transfer chunk being currently processed
66 * @nchunks: number of transfer chunks for this transfer
67 * @hwdescs.use: whether the transfer descriptor uses hardware descriptors
68 * @hwdescs.mem: hardware descriptors memory for the transfer
69 * @hwdescs.dma: device address of the hardware descriptors memory
70 * @hwdescs.size: size of the hardware descriptors in bytes
71 * @size: transfer size in bytes
72 * @cyclic: when set indicates that the DMA transfer is cyclic
73 */
74struct rcar_dmac_desc {
75 struct dma_async_tx_descriptor async_tx;
76 enum dma_transfer_direction direction;
77 unsigned int xfer_shift;
78 u32 chcr;
79
80 struct list_head node;
81 struct list_head chunks;
82 struct rcar_dmac_xfer_chunk *running;
83 unsigned int nchunks;
84
85 struct {
86 bool use;
87 struct rcar_dmac_hw_desc *mem;
88 dma_addr_t dma;
89 size_t size;
90 } hwdescs;
91
92 unsigned int size;
93 bool cyclic;
94};
95
96#define to_rcar_dmac_desc(d) container_of(d, struct rcar_dmac_desc, async_tx)
97
98/*
99 * struct rcar_dmac_desc_page - One page worth of descriptors
100 * @node: entry in the channel's pages list
101 * @descs: array of DMA descriptors
102 * @chunks: array of transfer chunk descriptors
103 */
104struct rcar_dmac_desc_page {
105 struct list_head node;
106
107 union {
108 struct rcar_dmac_desc descs[0];
109 struct rcar_dmac_xfer_chunk chunks[0];
110 };
111};
112
113#define RCAR_DMAC_DESCS_PER_PAGE \
114 ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) / \
115 sizeof(struct rcar_dmac_desc))
116#define RCAR_DMAC_XFER_CHUNKS_PER_PAGE \
117 ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) / \
118 sizeof(struct rcar_dmac_xfer_chunk))
119
120/*
121 * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
122 * @chan: base DMA channel object
123 * @iomem: channel I/O memory base
124 * @index: index of this channel in the controller
125 * @src_xfer_size: size (in bytes) of hardware transfers on the source side
126 * @dst_xfer_size: size (in bytes) of hardware transfers on the destination side
127 * @src_slave_addr: slave source memory address
128 * @dst_slave_addr: slave destination memory address
129 * @mid_rid: hardware MID/RID for the DMA client using this channel
130 * @lock: protects the channel CHCR register and the desc members
131 * @desc.free: list of free descriptors
132 * @desc.pending: list of pending descriptors (submitted with tx_submit)
133 * @desc.active: list of active descriptors (activated with issue_pending)
134 * @desc.done: list of completed descriptors
135 * @desc.wait: list of descriptors waiting for an ack
136 * @desc.running: the descriptor being processed (a member of the active list)
137 * @desc.chunks_free: list of free transfer chunk descriptors
138 * @desc.pages: list of pages used by allocated descriptors
139 */
140struct rcar_dmac_chan {
141 struct dma_chan chan;
142 void __iomem *iomem;
143 unsigned int index;
144
145 unsigned int src_xfer_size;
146 unsigned int dst_xfer_size;
147 dma_addr_t src_slave_addr;
148 dma_addr_t dst_slave_addr;
149 int mid_rid;
150
151 spinlock_t lock;
152
153 struct {
154 struct list_head free;
155 struct list_head pending;
156 struct list_head active;
157 struct list_head done;
158 struct list_head wait;
159 struct rcar_dmac_desc *running;
160
161 struct list_head chunks_free;
162
163 struct list_head pages;
164 } desc;
165};
166
167#define to_rcar_dmac_chan(c) container_of(c, struct rcar_dmac_chan, chan)
168
169/*
170 * struct rcar_dmac - R-Car Gen2 DMA Controller
171 * @engine: base DMA engine object
172 * @dev: the hardware device
173 * @iomem: remapped I/O memory base
174 * @n_channels: number of available channels
175 * @channels: array of DMAC channels
176 * @modules: bitmask of client modules in use
177 */
178struct rcar_dmac {
179 struct dma_device engine;
180 struct device *dev;
181 void __iomem *iomem;
182
183 unsigned int n_channels;
184 struct rcar_dmac_chan *channels;
185
186 unsigned long modules[256 / BITS_PER_LONG];
187};
188
189#define to_rcar_dmac(d) container_of(d, struct rcar_dmac, engine)
190
191/* -----------------------------------------------------------------------------
192 * Registers
193 */
194
195#define RCAR_DMAC_CHAN_OFFSET(i) (0x8000 + 0x80 * (i))
196
197#define RCAR_DMAISTA 0x0020
198#define RCAR_DMASEC 0x0030
199#define RCAR_DMAOR 0x0060
200#define RCAR_DMAOR_PRI_FIXED (0 << 8)
201#define RCAR_DMAOR_PRI_ROUND_ROBIN (3 << 8)
202#define RCAR_DMAOR_AE (1 << 2)
203#define RCAR_DMAOR_DME (1 << 0)
204#define RCAR_DMACHCLR 0x0080
205#define RCAR_DMADPSEC 0x00a0
206
207#define RCAR_DMASAR 0x0000
208#define RCAR_DMADAR 0x0004
209#define RCAR_DMATCR 0x0008
210#define RCAR_DMATCR_MASK 0x00ffffff
211#define RCAR_DMATSR 0x0028
212#define RCAR_DMACHCR 0x000c
213#define RCAR_DMACHCR_CAE (1 << 31)
214#define RCAR_DMACHCR_CAIE (1 << 30)
215#define RCAR_DMACHCR_DPM_DISABLED (0 << 28)
216#define RCAR_DMACHCR_DPM_ENABLED (1 << 28)
217#define RCAR_DMACHCR_DPM_REPEAT (2 << 28)
218#define RCAR_DMACHCR_DPM_INFINITE (3 << 28)
219#define RCAR_DMACHCR_RPT_SAR (1 << 27)
220#define RCAR_DMACHCR_RPT_DAR (1 << 26)
221#define RCAR_DMACHCR_RPT_TCR (1 << 25)
222#define RCAR_DMACHCR_DPB (1 << 22)
223#define RCAR_DMACHCR_DSE (1 << 19)
224#define RCAR_DMACHCR_DSIE (1 << 18)
225#define RCAR_DMACHCR_TS_1B ((0 << 20) | (0 << 3))
226#define RCAR_DMACHCR_TS_2B ((0 << 20) | (1 << 3))
227#define RCAR_DMACHCR_TS_4B ((0 << 20) | (2 << 3))
228#define RCAR_DMACHCR_TS_16B ((0 << 20) | (3 << 3))
229#define RCAR_DMACHCR_TS_32B ((1 << 20) | (0 << 3))
230#define RCAR_DMACHCR_TS_64B ((1 << 20) | (1 << 3))
231#define RCAR_DMACHCR_TS_8B ((1 << 20) | (3 << 3))
232#define RCAR_DMACHCR_DM_FIXED (0 << 14)
233#define RCAR_DMACHCR_DM_INC (1 << 14)
234#define RCAR_DMACHCR_DM_DEC (2 << 14)
235#define RCAR_DMACHCR_SM_FIXED (0 << 12)
236#define RCAR_DMACHCR_SM_INC (1 << 12)
237#define RCAR_DMACHCR_SM_DEC (2 << 12)
238#define RCAR_DMACHCR_RS_AUTO (4 << 8)
239#define RCAR_DMACHCR_RS_DMARS (8 << 8)
240#define RCAR_DMACHCR_IE (1 << 2)
241#define RCAR_DMACHCR_TE (1 << 1)
242#define RCAR_DMACHCR_DE (1 << 0)
243#define RCAR_DMATCRB 0x0018
244#define RCAR_DMATSRB 0x0038
245#define RCAR_DMACHCRB 0x001c
246#define RCAR_DMACHCRB_DCNT(n) ((n) << 24)
247#define RCAR_DMACHCRB_DPTR_MASK (0xff << 16)
248#define RCAR_DMACHCRB_DPTR_SHIFT 16
249#define RCAR_DMACHCRB_DRST (1 << 15)
250#define RCAR_DMACHCRB_DTS (1 << 8)
251#define RCAR_DMACHCRB_SLM_NORMAL (0 << 4)
252#define RCAR_DMACHCRB_SLM_CLK(n) ((8 | (n)) << 4)
253#define RCAR_DMACHCRB_PRI(n) ((n) << 0)
254#define RCAR_DMARS 0x0040
255#define RCAR_DMABUFCR 0x0048
256#define RCAR_DMABUFCR_MBU(n) ((n) << 16)
257#define RCAR_DMABUFCR_ULB(n) ((n) << 0)
258#define RCAR_DMADPBASE 0x0050
259#define RCAR_DMADPBASE_MASK 0xfffffff0
260#define RCAR_DMADPBASE_SEL (1 << 0)
261#define RCAR_DMADPCR 0x0054
262#define RCAR_DMADPCR_DIPT(n) ((n) << 24)
263#define RCAR_DMAFIXSAR 0x0010
264#define RCAR_DMAFIXDAR 0x0014
265#define RCAR_DMAFIXDPBASE 0x0060
266
267/* Hardcode the MEMCPY transfer size to 4 bytes. */
268#define RCAR_DMAC_MEMCPY_XFER_SIZE 4
269
270/* -----------------------------------------------------------------------------
271 * Device access
272 */
273
274static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data)
275{
276 if (reg == RCAR_DMAOR)
277 writew(data, dmac->iomem + reg);
278 else
279 writel(data, dmac->iomem + reg);
280}
281
282static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg)
283{
284 if (reg == RCAR_DMAOR)
285 return readw(dmac->iomem + reg);
286 else
287 return readl(dmac->iomem + reg);
288}
289
290static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg)
291{
292 if (reg == RCAR_DMARS)
293 return readw(chan->iomem + reg);
294 else
295 return readl(chan->iomem + reg);
296}
297
298static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data)
299{
300 if (reg == RCAR_DMARS)
301 writew(data, chan->iomem + reg);
302 else
303 writel(data, chan->iomem + reg);
304}
305
306/* -----------------------------------------------------------------------------
307 * Initialization and configuration
308 */
309
310static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan)
311{
312 u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
313
314 return (chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE)) == RCAR_DMACHCR_DE;
315}
316
317static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
318{
319 struct rcar_dmac_desc *desc = chan->desc.running;
320 u32 chcr = desc->chcr;
321
322 WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan));
323
324 if (chan->mid_rid >= 0)
325 rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);
326
327 if (desc->hwdescs.use) {
328 struct rcar_dmac_xfer_chunk *chunk;
329
330 dev_dbg(chan->chan.device->dev,
331 "chan%u: queue desc %p: %u@%pad\n",
332 chan->index, desc, desc->nchunks, &desc->hwdescs.dma);
333
334#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
335 rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE,
336 desc->hwdescs.dma >> 32);
337#endif
338 rcar_dmac_chan_write(chan, RCAR_DMADPBASE,
339 (desc->hwdescs.dma & 0xfffffff0) |
340 RCAR_DMADPBASE_SEL);
341 rcar_dmac_chan_write(chan, RCAR_DMACHCRB,
342 RCAR_DMACHCRB_DCNT(desc->nchunks - 1) |
343 RCAR_DMACHCRB_DRST);
344
345 /*
 346 * Errata: When descriptor memory is accessed through an IOMMU,
 347 * the DMADAR register isn't initialized automatically by the
 348 * DMAC from the first descriptor at the beginning of the transfer
 349 * as it should be. Initialize it manually with the destination
 350 * address of the first chunk.
351 */
352 chunk = list_first_entry(&desc->chunks,
353 struct rcar_dmac_xfer_chunk, node);
354 rcar_dmac_chan_write(chan, RCAR_DMADAR,
355 chunk->dst_addr & 0xffffffff);
356
357 /*
358 * Program the descriptor stage interrupt to occur after the end
359 * of the first stage.
360 */
361 rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(1));
362
363 chcr |= RCAR_DMACHCR_RPT_SAR | RCAR_DMACHCR_RPT_DAR
364 | RCAR_DMACHCR_RPT_TCR | RCAR_DMACHCR_DPB;
365
366 /*
367 * If the descriptor isn't cyclic enable normal descriptor mode
368 * and the transfer completion interrupt.
369 */
370 if (!desc->cyclic)
371 chcr |= RCAR_DMACHCR_DPM_ENABLED | RCAR_DMACHCR_IE;
372 /*
373 * If the descriptor is cyclic and has a callback enable the
374 * descriptor stage interrupt in infinite repeat mode.
375 */
376 else if (desc->async_tx.callback)
377 chcr |= RCAR_DMACHCR_DPM_INFINITE | RCAR_DMACHCR_DSIE;
378 /*
379 * Otherwise just select infinite repeat mode without any
380 * interrupt.
381 */
382 else
383 chcr |= RCAR_DMACHCR_DPM_INFINITE;
384 } else {
385 struct rcar_dmac_xfer_chunk *chunk = desc->running;
386
387 dev_dbg(chan->chan.device->dev,
388 "chan%u: queue chunk %p: %u@%pad -> %pad\n",
389 chan->index, chunk, chunk->size, &chunk->src_addr,
390 &chunk->dst_addr);
391
392#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
393 rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
394 chunk->src_addr >> 32);
395 rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
396 chunk->dst_addr >> 32);
397#endif
398 rcar_dmac_chan_write(chan, RCAR_DMASAR,
399 chunk->src_addr & 0xffffffff);
400 rcar_dmac_chan_write(chan, RCAR_DMADAR,
401 chunk->dst_addr & 0xffffffff);
402 rcar_dmac_chan_write(chan, RCAR_DMATCR,
403 chunk->size >> desc->xfer_shift);
404
405 chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE;
406 }
407
408 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr | RCAR_DMACHCR_DE);
409}
410
411static int rcar_dmac_init(struct rcar_dmac *dmac)
412{
413 u16 dmaor;
414
415 /* Clear all channels and enable the DMAC globally. */
416 rcar_dmac_write(dmac, RCAR_DMACHCLR, 0x7fff);
417 rcar_dmac_write(dmac, RCAR_DMAOR,
418 RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);
419
420 dmaor = rcar_dmac_read(dmac, RCAR_DMAOR);
421 if ((dmaor & (RCAR_DMAOR_AE | RCAR_DMAOR_DME)) != RCAR_DMAOR_DME) {
422 dev_warn(dmac->dev, "DMAOR initialization failed.\n");
423 return -EIO;
424 }
425
426 return 0;
427}
428
429/* -----------------------------------------------------------------------------
430 * Descriptors submission
431 */
432
433static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx)
434{
435 struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan);
436 struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx);
437 unsigned long flags;
438 dma_cookie_t cookie;
439
440 spin_lock_irqsave(&chan->lock, flags);
441
442 cookie = dma_cookie_assign(tx);
443
444 dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n",
445 chan->index, tx->cookie, desc);
446
447 list_add_tail(&desc->node, &chan->desc.pending);
448 desc->running = list_first_entry(&desc->chunks,
449 struct rcar_dmac_xfer_chunk, node);
450
451 spin_unlock_irqrestore(&chan->lock, flags);
452
453 return cookie;
454}
455
456/* -----------------------------------------------------------------------------
457 * Descriptors allocation and free
458 */
459
460/*
461 * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors
462 * @chan: the DMA channel
463 * @gfp: allocation flags
464 */
465static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
466{
467 struct rcar_dmac_desc_page *page;
468 LIST_HEAD(list);
469 unsigned int i;
470
471 page = (void *)get_zeroed_page(gfp);
472 if (!page)
473 return -ENOMEM;
474
475 for (i = 0; i < RCAR_DMAC_DESCS_PER_PAGE; ++i) {
476 struct rcar_dmac_desc *desc = &page->descs[i];
477
478 dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
479 desc->async_tx.tx_submit = rcar_dmac_tx_submit;
480 INIT_LIST_HEAD(&desc->chunks);
481
482 list_add_tail(&desc->node, &list);
483 }
484
485 spin_lock_irq(&chan->lock);
486 list_splice_tail(&list, &chan->desc.free);
487 list_add_tail(&page->node, &chan->desc.pages);
488 spin_unlock_irq(&chan->lock);
489
490 return 0;
491}
492
493/*
494 * rcar_dmac_desc_put - Release a DMA transfer descriptor
495 * @chan: the DMA channel
496 * @desc: the descriptor
497 *
498 * Put the descriptor and its transfer chunk descriptors back in the channel's
499 * free descriptors lists. The descriptor's chunks list will be reinitialized to
500 * an empty list as a result.
501 *
502 * The descriptor must have been removed from the channel's lists before calling
503 * this function.
504 */
505static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
506 struct rcar_dmac_desc *desc)
507{
508 unsigned long flags;
509
510 spin_lock_irqsave(&chan->lock, flags);
511 list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);
512 list_add_tail(&desc->node, &chan->desc.free);
513 spin_unlock_irqrestore(&chan->lock, flags);
514}
515
516static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
517{
518 struct rcar_dmac_desc *desc, *_desc;
519 LIST_HEAD(list);
520
521 /*
522 * We have to temporarily move all descriptors from the wait list to a
523 * local list as iterating over the wait list, even with
524 * list_for_each_entry_safe, isn't safe if we release the channel lock
525 * around the rcar_dmac_desc_put() call.
526 */
527 spin_lock_irq(&chan->lock);
528 list_splice_init(&chan->desc.wait, &list);
529 spin_unlock_irq(&chan->lock);
530
531 list_for_each_entry_safe(desc, _desc, &list, node) {
532 if (async_tx_test_ack(&desc->async_tx)) {
533 list_del(&desc->node);
534 rcar_dmac_desc_put(chan, desc);
535 }
536 }
537
538 if (list_empty(&list))
539 return;
540
541 /* Put the remaining descriptors back in the wait list. */
542 spin_lock_irq(&chan->lock);
543 list_splice(&list, &chan->desc.wait);
544 spin_unlock_irq(&chan->lock);
545}
546
547/*
548 * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer
549 * @chan: the DMA channel
550 *
551 * Locking: This function must be called in a non-atomic context.
552 *
553 * Return: A pointer to the allocated descriptor or NULL if no descriptor can
554 * be allocated.
555 */
556static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
557{
558 struct rcar_dmac_desc *desc;
559 int ret;
560
561 /* Recycle acked descriptors before attempting allocation. */
562 rcar_dmac_desc_recycle_acked(chan);
563
564 spin_lock_irq(&chan->lock);
565
566 while (list_empty(&chan->desc.free)) {
567 /*
568 * No free descriptors, allocate a page worth of them and try
569 * again, as someone else could race us to get the newly
 570 * allocated descriptors. If the allocation fails, return an
571 * error.
572 */
573 spin_unlock_irq(&chan->lock);
574 ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT);
575 if (ret < 0)
576 return NULL;
577 spin_lock_irq(&chan->lock);
578 }
579
580 desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node);
581 list_del(&desc->node);
582
583 spin_unlock_irq(&chan->lock);
584
585 return desc;
586}
587
588/*
589 * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks
590 * @chan: the DMA channel
591 * @gfp: allocation flags
592 */
593static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
594{
595 struct rcar_dmac_desc_page *page;
596 LIST_HEAD(list);
597 unsigned int i;
598
599 page = (void *)get_zeroed_page(gfp);
600 if (!page)
601 return -ENOMEM;
602
603 for (i = 0; i < RCAR_DMAC_XFER_CHUNKS_PER_PAGE; ++i) {
604 struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i];
605
606 list_add_tail(&chunk->node, &list);
607 }
608
609 spin_lock_irq(&chan->lock);
610 list_splice_tail(&list, &chan->desc.chunks_free);
611 list_add_tail(&page->node, &chan->desc.pages);
612 spin_unlock_irq(&chan->lock);
613
614 return 0;
615}
616
617/*
618 * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer
619 * @chan: the DMA channel
620 *
621 * Locking: This function must be called in a non-atomic context.
622 *
623 * Return: A pointer to the allocated transfer chunk descriptor or NULL if no
624 * descriptor can be allocated.
625 */
626static struct rcar_dmac_xfer_chunk *
627rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
628{
629 struct rcar_dmac_xfer_chunk *chunk;
630 int ret;
631
632 spin_lock_irq(&chan->lock);
633
634 while (list_empty(&chan->desc.chunks_free)) {
635 /*
636 * No free descriptors, allocate a page worth of them and try
637 * again, as someone else could race us to get the newly
 638 * allocated descriptors. If the allocation fails, return an
639 * error.
640 */
641 spin_unlock_irq(&chan->lock);
642 ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT);
643 if (ret < 0)
644 return NULL;
645 spin_lock_irq(&chan->lock);
646 }
647
648 chunk = list_first_entry(&chan->desc.chunks_free,
649 struct rcar_dmac_xfer_chunk, node);
650 list_del(&chunk->node);
651
652 spin_unlock_irq(&chan->lock);
653
654 return chunk;
655}
656
657static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan,
658 struct rcar_dmac_desc *desc, size_t size)
659{
660 /*
661 * dma_alloc_coherent() allocates memory in page size increments. To
662 * avoid reallocating the hardware descriptors when the allocated size
 663 * wouldn't change, align the requested size to a multiple of the page
664 * size.
665 */
666 size = PAGE_ALIGN(size);
667
668 if (desc->hwdescs.size == size)
669 return;
670
671 if (desc->hwdescs.mem) {
672 dma_free_coherent(chan->chan.device->dev, desc->hwdescs.size,
673 desc->hwdescs.mem, desc->hwdescs.dma);
674 desc->hwdescs.mem = NULL;
675 desc->hwdescs.size = 0;
676 }
677
678 if (!size)
679 return;
680
681 desc->hwdescs.mem = dma_alloc_coherent(chan->chan.device->dev, size,
682 &desc->hwdescs.dma, GFP_NOWAIT);
683 if (!desc->hwdescs.mem)
684 return;
685
686 desc->hwdescs.size = size;
687}
688
689static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan,
690 struct rcar_dmac_desc *desc)
691{
692 struct rcar_dmac_xfer_chunk *chunk;
693 struct rcar_dmac_hw_desc *hwdesc;
694
695 rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc));
696
697 hwdesc = desc->hwdescs.mem;
698 if (!hwdesc)
699 return -ENOMEM;
700
701 list_for_each_entry(chunk, &desc->chunks, node) {
702 hwdesc->sar = chunk->src_addr;
703 hwdesc->dar = chunk->dst_addr;
704 hwdesc->tcr = chunk->size >> desc->xfer_shift;
705 hwdesc++;
706 }
707
708 return 0;
709}
710
711/* -----------------------------------------------------------------------------
712 * Stop and reset
713 */
714
715static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
716{
717 u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
718
719 chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE |
720 RCAR_DMACHCR_TE | RCAR_DMACHCR_DE);
721 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
722}
723
724static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
725{
726 struct rcar_dmac_desc *desc, *_desc;
727 unsigned long flags;
728 LIST_HEAD(descs);
729
730 spin_lock_irqsave(&chan->lock, flags);
731
732 /* Move all non-free descriptors to the local lists. */
733 list_splice_init(&chan->desc.pending, &descs);
734 list_splice_init(&chan->desc.active, &descs);
735 list_splice_init(&chan->desc.done, &descs);
736 list_splice_init(&chan->desc.wait, &descs);
737
738 chan->desc.running = NULL;
739
740 spin_unlock_irqrestore(&chan->lock, flags);
741
742 list_for_each_entry_safe(desc, _desc, &descs, node) {
743 list_del(&desc->node);
744 rcar_dmac_desc_put(chan, desc);
745 }
746}
747
748static void rcar_dmac_stop(struct rcar_dmac *dmac)
749{
750 rcar_dmac_write(dmac, RCAR_DMAOR, 0);
751}
752
753static void rcar_dmac_abort(struct rcar_dmac *dmac)
754{
755 unsigned int i;
756
757 /* Stop all channels. */
758 for (i = 0; i < dmac->n_channels; ++i) {
759 struct rcar_dmac_chan *chan = &dmac->channels[i];
760
761 /* Stop and reinitialize the channel. */
762 spin_lock(&chan->lock);
763 rcar_dmac_chan_halt(chan);
764 spin_unlock(&chan->lock);
765
766 rcar_dmac_chan_reinit(chan);
767 }
768}
769
770/* -----------------------------------------------------------------------------
771 * Descriptors preparation
772 */
773
774static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan,
775 struct rcar_dmac_desc *desc)
776{
777 static const u32 chcr_ts[] = {
778 RCAR_DMACHCR_TS_1B, RCAR_DMACHCR_TS_2B,
779 RCAR_DMACHCR_TS_4B, RCAR_DMACHCR_TS_8B,
780 RCAR_DMACHCR_TS_16B, RCAR_DMACHCR_TS_32B,
781 RCAR_DMACHCR_TS_64B,
782 };
783
784 unsigned int xfer_size;
785 u32 chcr;
786
787 switch (desc->direction) {
788 case DMA_DEV_TO_MEM:
789 chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED
790 | RCAR_DMACHCR_RS_DMARS;
791 xfer_size = chan->src_xfer_size;
792 break;
793
794 case DMA_MEM_TO_DEV:
795 chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC
796 | RCAR_DMACHCR_RS_DMARS;
797 xfer_size = chan->dst_xfer_size;
798 break;
799
800 case DMA_MEM_TO_MEM:
801 default:
802 chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_INC
803 | RCAR_DMACHCR_RS_AUTO;
804 xfer_size = RCAR_DMAC_MEMCPY_XFER_SIZE;
805 break;
806 }
807
808 desc->xfer_shift = ilog2(xfer_size);
809 desc->chcr = chcr | chcr_ts[desc->xfer_shift];
810}
811
812/*
813 * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list
814 *
815 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 816 * converted to scatter-gather to guarantee consistent locking and correct
 817 * list manipulation. For slave DMA, the direction carries the usual meaning
 818 * and, logically, the SG list is RAM while the addr variable contains the
 819 * slave address, e.g. the FIFO I/O register. For MEMCPY, the direction equals
 820 * DMA_MEM_TO_MEM and the SG list contains one element that points at the source buffer.
821 */
822static struct dma_async_tx_descriptor *
823rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
824 unsigned int sg_len, dma_addr_t dev_addr,
825 enum dma_transfer_direction dir, unsigned long dma_flags,
826 bool cyclic)
827{
828 struct rcar_dmac_xfer_chunk *chunk;
829 struct rcar_dmac_desc *desc;
830 struct scatterlist *sg;
831 unsigned int nchunks = 0;
832 unsigned int max_chunk_size;
833 unsigned int full_size = 0;
834 bool highmem = false;
835 unsigned int i;
836
837 desc = rcar_dmac_desc_get(chan);
838 if (!desc)
839 return NULL;
840
841 desc->async_tx.flags = dma_flags;
842 desc->async_tx.cookie = -EBUSY;
843
844 desc->cyclic = cyclic;
845 desc->direction = dir;
846
847 rcar_dmac_chan_configure_desc(chan, desc);
848
849 max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift;
850
851 /*
852 * Allocate and fill the transfer chunk descriptors. We own the only
853 * reference to the DMA descriptor, there's no need for locking.
854 */
855 for_each_sg(sgl, sg, sg_len, i) {
856 dma_addr_t mem_addr = sg_dma_address(sg);
857 unsigned int len = sg_dma_len(sg);
858
859 full_size += len;
860
861 while (len) {
862 unsigned int size = min(len, max_chunk_size);
863
864#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
865 /*
866 * Prevent individual transfers from crossing 4GB
867 * boundaries.
868 */
869 if (dev_addr >> 32 != (dev_addr + size - 1) >> 32)
870 size = ALIGN(dev_addr, 1ULL << 32) - dev_addr;
871 if (mem_addr >> 32 != (mem_addr + size - 1) >> 32)
872 size = ALIGN(mem_addr, 1ULL << 32) - mem_addr;
873
874 /*
 875 * Check if either the source or destination address
876 * can't be expressed in 32 bits. If so we can't use
877 * hardware descriptor lists.
878 */
879 if (dev_addr >> 32 || mem_addr >> 32)
880 highmem = true;
881#endif
882
883 chunk = rcar_dmac_xfer_chunk_get(chan);
884 if (!chunk) {
885 rcar_dmac_desc_put(chan, desc);
886 return NULL;
887 }
888
889 if (dir == DMA_DEV_TO_MEM) {
890 chunk->src_addr = dev_addr;
891 chunk->dst_addr = mem_addr;
892 } else {
893 chunk->src_addr = mem_addr;
894 chunk->dst_addr = dev_addr;
895 }
896
897 chunk->size = size;
898
899 dev_dbg(chan->chan.device->dev,
900 "chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n",
901 chan->index, chunk, desc, i, sg, size, len,
902 &chunk->src_addr, &chunk->dst_addr);
903
904 mem_addr += size;
905 if (dir == DMA_MEM_TO_MEM)
906 dev_addr += size;
907
908 len -= size;
909
910 list_add_tail(&chunk->node, &desc->chunks);
911 nchunks++;
912 }
913 }
914
915 desc->nchunks = nchunks;
916 desc->size = full_size;
917
918 /*
919 * Use hardware descriptor lists if possible when more than one chunk
920 * needs to be transferred (otherwise they don't make much sense).
921 *
922 * The highmem check currently covers the whole transfer. As an
923 * optimization we could use descriptor lists for consecutive lowmem
924 * chunks and direct manual mode for highmem chunks. Whether the
925 * performance improvement would be significant enough compared to the
926 * additional complexity remains to be investigated.
927 */
928 desc->hwdescs.use = !highmem && nchunks > 1;
929 if (desc->hwdescs.use) {
930 if (rcar_dmac_fill_hwdesc(chan, desc) < 0)
931 desc->hwdescs.use = false;
932 }
933
934 return &desc->async_tx;
935}
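
To show how this prep routine is reached in practice, here is a minimal client-side sketch using the generic dmaengine API; the function name, FIFO address and bus width below are illustrative assumptions, not taken from the commit.

/* Hypothetical consumer sketch -- not part of this driver. */
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int example_start_rx(struct dma_chan *chan, struct scatterlist *sgl,
                            unsigned int sg_len, dma_addr_t fifo_addr)
{
        struct dma_slave_config cfg = {
                .direction      = DMA_DEV_TO_MEM,
                .src_addr       = fifo_addr,            /* assumed FIFO address */
                .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
        };
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;
        int ret;

        /* Reaches rcar_dmac_device_config() below. */
        ret = dmaengine_slave_config(chan, &cfg);
        if (ret)
                return ret;

        /* Reaches rcar_dmac_prep_slave_sg() -> rcar_dmac_chan_prep_sg(). */
        tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
                                     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!tx)
                return -EIO;

        cookie = dmaengine_submit(tx);
        ret = dma_submit_error(cookie);
        if (ret)
                return ret;

        /* rcar_dmac_issue_pending() moves the descriptor to the active list. */
        dma_async_issue_pending(chan);
        return 0;
}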
936
937/* -----------------------------------------------------------------------------
938 * DMA engine operations
939 */
940
941static int rcar_dmac_alloc_chan_resources(struct dma_chan *chan)
942{
943 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
944 int ret;
945
946 INIT_LIST_HEAD(&rchan->desc.chunks_free);
947 INIT_LIST_HEAD(&rchan->desc.pages);
948
949 /* Preallocate descriptors. */
950 ret = rcar_dmac_xfer_chunk_alloc(rchan, GFP_KERNEL);
951 if (ret < 0)
952 return -ENOMEM;
953
954 ret = rcar_dmac_desc_alloc(rchan, GFP_KERNEL);
955 if (ret < 0)
956 return -ENOMEM;
957
958 return pm_runtime_get_sync(chan->device->dev);
959}
960
961static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
962{
963 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
964 struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
965 struct rcar_dmac_desc_page *page, *_page;
966 struct rcar_dmac_desc *desc;
967 LIST_HEAD(list);
968
969 /* Protect against ISR */
970 spin_lock_irq(&rchan->lock);
971 rcar_dmac_chan_halt(rchan);
972 spin_unlock_irq(&rchan->lock);
973
974 /* Now no new interrupts will occur */
975
976 if (rchan->mid_rid >= 0) {
977 /* The caller is holding dma_list_mutex */
978 clear_bit(rchan->mid_rid, dmac->modules);
979 rchan->mid_rid = -EINVAL;
980 }
981
982 list_splice_init(&rchan->desc.free, &list);
983 list_splice_init(&rchan->desc.pending, &list);
984 list_splice_init(&rchan->desc.active, &list);
985 list_splice_init(&rchan->desc.done, &list);
986 list_splice_init(&rchan->desc.wait, &list);
987
988 list_for_each_entry(desc, &list, node)
989 rcar_dmac_realloc_hwdesc(rchan, desc, 0);
990
991 list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) {
992 list_del(&page->node);
993 free_page((unsigned long)page);
994 }
995
996 pm_runtime_put(chan->device->dev);
997}
998
999static struct dma_async_tx_descriptor *
1000rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
1001 dma_addr_t dma_src, size_t len, unsigned long flags)
1002{
1003 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1004 struct scatterlist sgl;
1005
1006 if (!len)
1007 return NULL;
1008
1009 sg_init_table(&sgl, 1);
1010 sg_set_page(&sgl, pfn_to_page(PFN_DOWN(dma_src)), len,
1011 offset_in_page(dma_src));
1012 sg_dma_address(&sgl) = dma_src;
1013 sg_dma_len(&sgl) = len;
1014
1015 return rcar_dmac_chan_prep_sg(rchan, &sgl, 1, dma_dest,
1016 DMA_MEM_TO_MEM, flags, false);
1017}
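
For the memcpy path, a consumer of this era (dmatest, for instance) typically calls the device callback directly; the sketch below follows that pattern, with illustrative names only.

/* Hypothetical consumer sketch -- not part of this driver. */
#include <linux/dmaengine.h>
#include <linux/errno.h>

static int example_memcpy(struct dma_chan *chan, dma_addr_t dst,
                          dma_addr_t src, size_t len)
{
        struct dma_async_tx_descriptor *tx;

        /*
         * Reaches rcar_dmac_prep_dma_memcpy(); addresses and length should
         * honour the 32-byte copy_align advertised at probe time.
         */
        tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
                                                   DMA_PREP_INTERRUPT |
                                                   DMA_CTRL_ACK);
        if (!tx)
                return -EIO;

        dmaengine_submit(tx);
        dma_async_issue_pending(chan);
        return 0;
}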
1018
1019static struct dma_async_tx_descriptor *
1020rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1021 unsigned int sg_len, enum dma_transfer_direction dir,
1022 unsigned long flags, void *context)
1023{
1024 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1025 dma_addr_t dev_addr;
1026
1027 /* Someone calling slave DMA on a generic channel? */
1028 if (rchan->mid_rid < 0 || !sg_len) {
1029 dev_warn(chan->device->dev,
1030 "%s: bad parameter: len=%d, id=%d\n",
1031 __func__, sg_len, rchan->mid_rid);
1032 return NULL;
1033 }
1034
1035 dev_addr = dir == DMA_DEV_TO_MEM
1036 ? rchan->src_slave_addr : rchan->dst_slave_addr;
1037 return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr,
1038 dir, flags, false);
1039}
1040
1041#define RCAR_DMAC_MAX_SG_LEN 32
1042
1043static struct dma_async_tx_descriptor *
1044rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
1045 size_t buf_len, size_t period_len,
1046 enum dma_transfer_direction dir, unsigned long flags)
1047{
1048 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1049 struct dma_async_tx_descriptor *desc;
1050 struct scatterlist *sgl;
1051 dma_addr_t dev_addr;
1052 unsigned int sg_len;
1053 unsigned int i;
1054
1055 /* Someone calling slave DMA on a generic channel? */
1056 if (rchan->mid_rid < 0 || buf_len < period_len) {
1057 dev_warn(chan->device->dev,
1058 "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
1059 __func__, buf_len, period_len, rchan->mid_rid);
1060 return NULL;
1061 }
1062
1063 sg_len = buf_len / period_len;
1064 if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
1065 dev_err(chan->device->dev,
1066 "chan%u: sg length %d exceds limit %d",
1067 rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN);
1068 return NULL;
1069 }
1070
1071 /*
1072 * Allocate the sg list dynamically as it would consume too much stack
1073 * space.
1074 */
1075 sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT);
1076 if (!sgl)
1077 return NULL;
1078
1079 sg_init_table(sgl, sg_len);
1080
1081 for (i = 0; i < sg_len; ++i) {
1082 dma_addr_t src = buf_addr + (period_len * i);
1083
1084 sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
1085 offset_in_page(src));
1086 sg_dma_address(&sgl[i]) = src;
1087 sg_dma_len(&sgl[i]) = period_len;
1088 }
1089
1090 dev_addr = dir == DMA_DEV_TO_MEM
1091 ? rchan->src_slave_addr : rchan->dst_slave_addr;
1092 desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr,
1093 dir, flags, true);
1094
1095 kfree(sgl);
1096 return desc;
1097}
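
A cyclic consumer (an audio or serial driver, typically) reaches this through dmaengine_prep_dma_cyclic(); the sketch below is illustrative, and the callback wiring relies on the per-period notification implemented in the threaded IRQ handler further down.

/* Hypothetical consumer sketch -- not part of this driver. */
#include <linux/dmaengine.h>
#include <linux/errno.h>

static void example_period_done(void *param)
{
        /* Invoked from rcar_dmac_isr_channel_thread() after each period. */
}

static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
                                size_t buf_len, size_t period_len)
{
        struct dma_async_tx_descriptor *tx;

        /* buf_len / period_len must not exceed RCAR_DMAC_MAX_SG_LEN (32). */
        tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
                                       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
        if (!tx)
                return -EIO;

        tx->callback = example_period_done;
        tx->callback_param = NULL;

        dmaengine_submit(tx);
        dma_async_issue_pending(chan);
        return 0;
}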
1098
1099static int rcar_dmac_device_config(struct dma_chan *chan,
1100 struct dma_slave_config *cfg)
1101{
1102 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1103
1104 /*
 1105 * We could lock this, but you shouldn't be configuring the
 1106 * channel while it is in use.
1107 */
1108 rchan->src_slave_addr = cfg->src_addr;
1109 rchan->dst_slave_addr = cfg->dst_addr;
1110 rchan->src_xfer_size = cfg->src_addr_width;
1111 rchan->dst_xfer_size = cfg->dst_addr_width;
1112
1113 return 0;
1114}
1115
1116static int rcar_dmac_chan_terminate_all(struct dma_chan *chan)
1117{
1118 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1119 unsigned long flags;
1120
1121 spin_lock_irqsave(&rchan->lock, flags);
1122 rcar_dmac_chan_halt(rchan);
1123 spin_unlock_irqrestore(&rchan->lock, flags);
1124
1125 /*
1126 * FIXME: No new interrupt can occur now, but the IRQ thread might still
1127 * be running.
1128 */
1129
1130 rcar_dmac_chan_reinit(rchan);
1131
1132 return 0;
1133}
1134
1135static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
1136 dma_cookie_t cookie)
1137{
1138 struct rcar_dmac_desc *desc = chan->desc.running;
1139 struct rcar_dmac_xfer_chunk *running = NULL;
1140 struct rcar_dmac_xfer_chunk *chunk;
1141 unsigned int residue = 0;
1142 unsigned int dptr = 0;
1143
1144 if (!desc)
1145 return 0;
1146
1147 /*
1148 * If the cookie doesn't correspond to the currently running transfer
1149 * then the descriptor hasn't been processed yet, and the residue is
1150 * equal to the full descriptor size.
1151 */
1152 if (cookie != desc->async_tx.cookie)
1153 return desc->size;
1154
1155 /*
 1156 * In descriptor mode the running chunk pointer is not maintained
 1157 * by the interrupt handler; find the running chunk from the
 1158 * descriptor pointer field in the CHCRB register. In non-descriptor
 1159 * mode just use the running chunk pointer.
1160 */
1161 if (desc->hwdescs.use) {
1162 dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
1163 RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
1164 WARN_ON(dptr >= desc->nchunks);
1165 } else {
1166 running = desc->running;
1167 }
1168
1169 /* Compute the size of all chunks still to be transferred. */
1170 list_for_each_entry_reverse(chunk, &desc->chunks, node) {
1171 if (chunk == running || ++dptr == desc->nchunks)
1172 break;
1173
1174 residue += chunk->size;
1175 }
1176
1177 /* Add the residue for the current chunk. */
1178 residue += rcar_dmac_chan_read(chan, RCAR_DMATCR) << desc->xfer_shift;
1179
1180 return residue;
1181}
1182
1183static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
1184 dma_cookie_t cookie,
1185 struct dma_tx_state *txstate)
1186{
1187 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1188 enum dma_status status;
1189 unsigned long flags;
1190 unsigned int residue;
1191
1192 status = dma_cookie_status(chan, cookie, txstate);
1193 if (status == DMA_COMPLETE || !txstate)
1194 return status;
1195
1196 spin_lock_irqsave(&rchan->lock, flags);
1197 residue = rcar_dmac_chan_get_residue(rchan, cookie);
1198 spin_unlock_irqrestore(&rchan->lock, flags);
1199
1200 dma_set_residue(txstate, residue);
1201
1202 return status;
1203}
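
From the consumer side the residue is obtained through dmaengine_tx_status(); a minimal sketch with an illustrative helper name:

/* Hypothetical consumer sketch -- not part of this driver. */
#include <linux/dmaengine.h>

static size_t example_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
        struct dma_tx_state state;
        enum dma_status status;

        /* Reaches rcar_dmac_tx_status()/rcar_dmac_chan_get_residue(). */
        status = dmaengine_tx_status(chan, cookie, &state);
        if (status == DMA_COMPLETE)
                return 0;

        /* Reported with DMA_RESIDUE_GRANULARITY_BURST accuracy (see probe). */
        return state.residue;
}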
1204
1205static void rcar_dmac_issue_pending(struct dma_chan *chan)
1206{
1207 struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
1208 unsigned long flags;
1209
1210 spin_lock_irqsave(&rchan->lock, flags);
1211
1212 if (list_empty(&rchan->desc.pending))
1213 goto done;
1214
1215 /* Append the pending list to the active list. */
1216 list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active);
1217
1218 /*
1219 * If no transfer is running pick the first descriptor from the active
1220 * list and start the transfer.
1221 */
1222 if (!rchan->desc.running) {
1223 struct rcar_dmac_desc *desc;
1224
1225 desc = list_first_entry(&rchan->desc.active,
1226 struct rcar_dmac_desc, node);
1227 rchan->desc.running = desc;
1228
1229 rcar_dmac_chan_start_xfer(rchan);
1230 }
1231
1232done:
1233 spin_unlock_irqrestore(&rchan->lock, flags);
1234}
1235
1236/* -----------------------------------------------------------------------------
1237 * IRQ handling
1238 */
1239
1240static irqreturn_t rcar_dmac_isr_desc_stage_end(struct rcar_dmac_chan *chan)
1241{
1242 struct rcar_dmac_desc *desc = chan->desc.running;
1243 unsigned int stage;
1244
1245 if (WARN_ON(!desc || !desc->cyclic)) {
1246 /*
 1247 * This should never happen; there should always be a running
1248 * cyclic descriptor when a descriptor stage end interrupt is
1249 * triggered. Warn and return.
1250 */
1251 return IRQ_NONE;
1252 }
1253
1254 /* Program the interrupt pointer to the next stage. */
1255 stage = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
1256 RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
1257 rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(stage));
1258
1259 return IRQ_WAKE_THREAD;
1260}
1261
1262static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan)
1263{
1264 struct rcar_dmac_desc *desc = chan->desc.running;
1265 irqreturn_t ret = IRQ_WAKE_THREAD;
1266
1267 if (WARN_ON_ONCE(!desc)) {
1268 /*
 1269 * This should never happen; there should always be a running
1270 * descriptor when a transfer end interrupt is triggered. Warn
1271 * and return.
1272 */
1273 return IRQ_NONE;
1274 }
1275
1276 /*
1277 * The transfer end interrupt isn't generated for each chunk when using
1278 * descriptor mode. Only update the running chunk pointer in
1279 * non-descriptor mode.
1280 */
1281 if (!desc->hwdescs.use) {
1282 /*
1283 * If we haven't completed the last transfer chunk simply move
1284 * to the next one. Only wake the IRQ thread if the transfer is
1285 * cyclic.
1286 */
1287 if (!list_is_last(&desc->running->node, &desc->chunks)) {
1288 desc->running = list_next_entry(desc->running, node);
1289 if (!desc->cyclic)
1290 ret = IRQ_HANDLED;
1291 goto done;
1292 }
1293
1294 /*
1295 * We've completed the last transfer chunk. If the transfer is
1296 * cyclic, move back to the first one.
1297 */
1298 if (desc->cyclic) {
1299 desc->running =
1300 list_first_entry(&desc->chunks,
1301 struct rcar_dmac_xfer_chunk,
1302 node);
1303 goto done;
1304 }
1305 }
1306
1307 /* The descriptor is complete, move it to the done list. */
1308 list_move_tail(&desc->node, &chan->desc.done);
1309
1310 /* Queue the next descriptor, if any. */
1311 if (!list_empty(&chan->desc.active))
1312 chan->desc.running = list_first_entry(&chan->desc.active,
1313 struct rcar_dmac_desc,
1314 node);
1315 else
1316 chan->desc.running = NULL;
1317
1318done:
1319 if (chan->desc.running)
1320 rcar_dmac_chan_start_xfer(chan);
1321
1322 return ret;
1323}
1324
1325static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
1326{
1327 u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE;
1328 struct rcar_dmac_chan *chan = dev;
1329 irqreturn_t ret = IRQ_NONE;
1330 u32 chcr;
1331
1332 spin_lock(&chan->lock);
1333
1334 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
1335 if (chcr & RCAR_DMACHCR_TE)
1336 mask |= RCAR_DMACHCR_DE;
1337 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask);
1338
1339 if (chcr & RCAR_DMACHCR_DSE)
1340 ret |= rcar_dmac_isr_desc_stage_end(chan);
1341
1342 if (chcr & RCAR_DMACHCR_TE)
1343 ret |= rcar_dmac_isr_transfer_end(chan);
1344
1345 spin_unlock(&chan->lock);
1346
1347 return ret;
1348}
1349
1350static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
1351{
1352 struct rcar_dmac_chan *chan = dev;
1353 struct rcar_dmac_desc *desc;
1354
1355 spin_lock_irq(&chan->lock);
1356
1357 /* For cyclic transfers notify the user after every chunk. */
1358 if (chan->desc.running && chan->desc.running->cyclic) {
1359 dma_async_tx_callback callback;
1360 void *callback_param;
1361
1362 desc = chan->desc.running;
1363 callback = desc->async_tx.callback;
1364 callback_param = desc->async_tx.callback_param;
1365
1366 if (callback) {
1367 spin_unlock_irq(&chan->lock);
1368 callback(callback_param);
1369 spin_lock_irq(&chan->lock);
1370 }
1371 }
1372
1373 /*
1374 * Call the callback function for all descriptors on the done list and
1375 * move them to the ack wait list.
1376 */
1377 while (!list_empty(&chan->desc.done)) {
1378 desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc,
1379 node);
1380 dma_cookie_complete(&desc->async_tx);
1381 list_del(&desc->node);
1382
1383 if (desc->async_tx.callback) {
1384 spin_unlock_irq(&chan->lock);
1385 /*
 1386 * We own the only reference to this descriptor, so we can
1387 * safely dereference it without holding the channel
1388 * lock.
1389 */
1390 desc->async_tx.callback(desc->async_tx.callback_param);
1391 spin_lock_irq(&chan->lock);
1392 }
1393
1394 list_add_tail(&desc->node, &chan->desc.wait);
1395 }
1396
1397 spin_unlock_irq(&chan->lock);
1398
1399 /* Recycle all acked descriptors. */
1400 rcar_dmac_desc_recycle_acked(chan);
1401
1402 return IRQ_HANDLED;
1403}
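
Since the completion callback runs from this IRQ thread with the channel lock dropped, a consumer can safely block on it; a sketch with assumed names and an arbitrarily chosen one-second timeout:

/* Hypothetical consumer sketch -- not part of this driver. */
#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct example_ctx {
        struct completion done;
};

static void example_complete(void *param)
{
        struct example_ctx *ctx = param;

        /* Called from rcar_dmac_isr_channel_thread() above. */
        complete(&ctx->done);
}

static int example_submit_and_wait(struct dma_chan *chan,
                                   struct dma_async_tx_descriptor *tx,
                                   struct example_ctx *ctx)
{
        init_completion(&ctx->done);
        tx->callback = example_complete;
        tx->callback_param = ctx;

        dmaengine_submit(tx);
        dma_async_issue_pending(chan);

        if (!wait_for_completion_timeout(&ctx->done, msecs_to_jiffies(1000)))
                return -ETIMEDOUT;

        return 0;
}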
1404
1405static irqreturn_t rcar_dmac_isr_error(int irq, void *data)
1406{
1407 struct rcar_dmac *dmac = data;
1408
1409 if (!(rcar_dmac_read(dmac, RCAR_DMAOR) & RCAR_DMAOR_AE))
1410 return IRQ_NONE;
1411
1412 /*
1413 * An unrecoverable error occurred on an unknown channel. Halt the DMAC,
1414 * abort transfers on all channels, and reinitialize the DMAC.
1415 */
1416 rcar_dmac_stop(dmac);
1417 rcar_dmac_abort(dmac);
1418 rcar_dmac_init(dmac);
1419
1420 return IRQ_HANDLED;
1421}
1422
1423/* -----------------------------------------------------------------------------
1424 * OF xlate and channel filter
1425 */
1426
1427static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg)
1428{
1429 struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
1430 struct of_phandle_args *dma_spec = arg;
1431
1432 /*
 1433 * FIXME: Using a filter on OF platforms makes no sense. The OF xlate
 1434 * function knows which device it wants to allocate a channel from,
1435 * and would be perfectly capable of selecting the channel it wants.
1436 * Forcing it to call dma_request_channel() and iterate through all
1437 * channels from all controllers is just pointless.
1438 */
1439 if (chan->device->device_config != rcar_dmac_device_config ||
1440 dma_spec->np != chan->device->dev->of_node)
1441 return false;
1442
1443 return !test_and_set_bit(dma_spec->args[0], dmac->modules);
1444}
1445
1446static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
1447 struct of_dma *ofdma)
1448{
1449 struct rcar_dmac_chan *rchan;
1450 struct dma_chan *chan;
1451 dma_cap_mask_t mask;
1452
1453 if (dma_spec->args_count != 1)
1454 return NULL;
1455
1456 /* Only slave DMA channels can be allocated via DT */
1457 dma_cap_zero(mask);
1458 dma_cap_set(DMA_SLAVE, mask);
1459
1460 chan = dma_request_channel(mask, rcar_dmac_chan_filter, dma_spec);
1461 if (!chan)
1462 return NULL;
1463
1464 rchan = to_rcar_dmac_chan(chan);
1465 rchan->mid_rid = dma_spec->args[0];
1466
1467 return chan;
1468}
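
A DT-based consumer normally reaches this xlate path indirectly; a sketch, assuming a consumer node with dmas/dma-names entries and an illustrative "rx" channel name:

/* Hypothetical consumer sketch -- not part of this driver. */
#include <linux/dmaengine.h>

static struct dma_chan *example_get_rx_chan(struct device *dev)
{
        /*
         * Resolves the consumer's "dmas"/"dma-names" properties and ends up
         * in rcar_dmac_of_xlate() with the MID/RID cell as dma_spec->args[0].
         */
        return dma_request_slave_channel(dev, "rx");
}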
1469
1470/* -----------------------------------------------------------------------------
1471 * Power management
1472 */
1473
1474#ifdef CONFIG_PM_SLEEP
1475static int rcar_dmac_sleep_suspend(struct device *dev)
1476{
1477 /*
1478 * TODO: Wait for the current transfer to complete and stop the device.
1479 */
1480 return 0;
1481}
1482
1483static int rcar_dmac_sleep_resume(struct device *dev)
1484{
1485 /* TODO: Resume transfers, if any. */
1486 return 0;
1487}
1488#endif
1489
1490#ifdef CONFIG_PM
1491static int rcar_dmac_runtime_suspend(struct device *dev)
1492{
1493 return 0;
1494}
1495
1496static int rcar_dmac_runtime_resume(struct device *dev)
1497{
1498 struct rcar_dmac *dmac = dev_get_drvdata(dev);
1499
1500 return rcar_dmac_init(dmac);
1501}
1502#endif
1503
1504static const struct dev_pm_ops rcar_dmac_pm = {
1505 SET_SYSTEM_SLEEP_PM_OPS(rcar_dmac_sleep_suspend, rcar_dmac_sleep_resume)
1506 SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
1507 NULL)
1508};
1509
1510/* -----------------------------------------------------------------------------
1511 * Probe and remove
1512 */
1513
1514static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
1515 struct rcar_dmac_chan *rchan,
1516 unsigned int index)
1517{
1518 struct platform_device *pdev = to_platform_device(dmac->dev);
1519 struct dma_chan *chan = &rchan->chan;
1520 char pdev_irqname[5];
1521 char *irqname;
1522 int irq;
1523 int ret;
1524
1525 rchan->index = index;
1526 rchan->iomem = dmac->iomem + RCAR_DMAC_CHAN_OFFSET(index);
1527 rchan->mid_rid = -EINVAL;
1528
1529 spin_lock_init(&rchan->lock);
1530
1531 INIT_LIST_HEAD(&rchan->desc.free);
1532 INIT_LIST_HEAD(&rchan->desc.pending);
1533 INIT_LIST_HEAD(&rchan->desc.active);
1534 INIT_LIST_HEAD(&rchan->desc.done);
1535 INIT_LIST_HEAD(&rchan->desc.wait);
1536
1537 /* Request the channel interrupt. */
1538 sprintf(pdev_irqname, "ch%u", index);
1539 irq = platform_get_irq_byname(pdev, pdev_irqname);
1540 if (irq < 0) {
1541 dev_err(dmac->dev, "no IRQ specified for channel %u\n", index);
1542 return -ENODEV;
1543 }
1544
1545 irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
1546 dev_name(dmac->dev), index);
1547 if (!irqname)
1548 return -ENOMEM;
1549
1550 ret = devm_request_threaded_irq(dmac->dev, irq, rcar_dmac_isr_channel,
1551 rcar_dmac_isr_channel_thread, 0,
1552 irqname, rchan);
1553 if (ret) {
1554 dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", irq, ret);
1555 return ret;
1556 }
1557
1558 /*
1559 * Initialize the DMA engine channel and add it to the DMA engine
1560 * channels list.
1561 */
1562 chan->device = &dmac->engine;
1563 dma_cookie_init(chan);
1564
1565 list_add_tail(&chan->device_node, &dmac->engine.channels);
1566
1567 return 0;
1568}
1569
1570static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
1571{
1572 struct device_node *np = dev->of_node;
1573 int ret;
1574
1575 ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
1576 if (ret < 0) {
1577 dev_err(dev, "unable to read dma-channels property\n");
1578 return ret;
1579 }
1580
1581 if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
1582 dev_err(dev, "invalid number of channels %u\n",
1583 dmac->n_channels);
1584 return -EINVAL;
1585 }
1586
1587 return 0;
1588}
1589
1590static int rcar_dmac_probe(struct platform_device *pdev)
1591{
1592 const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
1593 DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
1594 DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
1595 DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
1596 unsigned int channels_offset = 0;
1597 struct dma_device *engine;
1598 struct rcar_dmac *dmac;
1599 struct resource *mem;
1600 unsigned int i;
1601 char *irqname;
1602 int irq;
1603 int ret;
1604
1605 dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
1606 if (!dmac)
1607 return -ENOMEM;
1608
1609 dmac->dev = &pdev->dev;
1610 platform_set_drvdata(pdev, dmac);
1611
1612 ret = rcar_dmac_parse_of(&pdev->dev, dmac);
1613 if (ret < 0)
1614 return ret;
1615
1616 /*
 1617 * A still unconfirmed hardware bug prevents IPMMU microTLB 0 from being
1618 * flushed correctly, resulting in memory corruption. DMAC 0 channel 0
1619 * is connected to microTLB 0 on currently supported platforms, so we
1620 * can't use it with the IPMMU. As the IOMMU API operates at the device
1621 * level we can't disable it selectively, so ignore channel 0 for now if
1622 * the device is part of an IOMMU group.
1623 */
1624 if (pdev->dev.iommu_group) {
1625 dmac->n_channels--;
1626 channels_offset = 1;
1627 }
1628
1629 dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
1630 sizeof(*dmac->channels), GFP_KERNEL);
1631 if (!dmac->channels)
1632 return -ENOMEM;
1633
1634 /* Request resources. */
1635 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1636 dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
1637 if (IS_ERR(dmac->iomem))
1638 return PTR_ERR(dmac->iomem);
1639
1640 irq = platform_get_irq_byname(pdev, "error");
1641 if (irq < 0) {
1642 dev_err(&pdev->dev, "no error IRQ specified\n");
1643 return -ENODEV;
1644 }
1645
1646 irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:error",
1647 dev_name(dmac->dev));
1648 if (!irqname)
1649 return -ENOMEM;
1650
1651 ret = devm_request_irq(&pdev->dev, irq, rcar_dmac_isr_error, 0,
1652 irqname, dmac);
1653 if (ret) {
1654 dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
1655 irq, ret);
1656 return ret;
1657 }
1658
1659 /* Enable runtime PM and initialize the device. */
1660 pm_runtime_enable(&pdev->dev);
1661 ret = pm_runtime_get_sync(&pdev->dev);
1662 if (ret < 0) {
1663 dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
1664 return ret;
1665 }
1666
1667 ret = rcar_dmac_init(dmac);
1668 pm_runtime_put(&pdev->dev);
1669
1670 if (ret) {
1671 dev_err(&pdev->dev, "failed to reset device\n");
1672 goto error;
1673 }
1674
1675 /* Initialize the channels. */
1676 INIT_LIST_HEAD(&dmac->engine.channels);
1677
1678 for (i = 0; i < dmac->n_channels; ++i) {
1679 ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i],
1680 i + channels_offset);
1681 if (ret < 0)
1682 goto error;
1683 }
1684
1685 /* Register the DMAC as a DMA provider for DT. */
1686 ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
1687 NULL);
1688 if (ret < 0)
1689 goto error;
1690
1691 /*
1692 * Register the DMA engine device.
1693 *
1694 * Default transfer size of 32 bytes requires 32-byte alignment.
1695 */
1696 engine = &dmac->engine;
1697 dma_cap_set(DMA_MEMCPY, engine->cap_mask);
1698 dma_cap_set(DMA_SLAVE, engine->cap_mask);
1699
1700 engine->dev = &pdev->dev;
1701 engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE);
1702
1703 engine->src_addr_widths = widths;
1704 engine->dst_addr_widths = widths;
1705 engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
1706 engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1707
1708 engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources;
1709 engine->device_free_chan_resources = rcar_dmac_free_chan_resources;
1710 engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy;
1711 engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg;
1712 engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic;
1713 engine->device_config = rcar_dmac_device_config;
1714 engine->device_terminate_all = rcar_dmac_chan_terminate_all;
1715 engine->device_tx_status = rcar_dmac_tx_status;
1716 engine->device_issue_pending = rcar_dmac_issue_pending;
1717
1718 ret = dma_async_device_register(engine);
1719 if (ret < 0)
1720 goto error;
1721
1722 return 0;
1723
1724error:
1725 of_dma_controller_free(pdev->dev.of_node);
1726 pm_runtime_disable(&pdev->dev);
1727 return ret;
1728}
1729
1730static int rcar_dmac_remove(struct platform_device *pdev)
1731{
1732 struct rcar_dmac *dmac = platform_get_drvdata(pdev);
1733
1734 of_dma_controller_free(pdev->dev.of_node);
1735 dma_async_device_unregister(&dmac->engine);
1736
1737 pm_runtime_disable(&pdev->dev);
1738
1739 return 0;
1740}
1741
1742static void rcar_dmac_shutdown(struct platform_device *pdev)
1743{
1744 struct rcar_dmac *dmac = platform_get_drvdata(pdev);
1745
1746 rcar_dmac_stop(dmac);
1747}
1748
1749static const struct of_device_id rcar_dmac_of_ids[] = {
1750 { .compatible = "renesas,rcar-dmac", },
1751 { /* Sentinel */ }
1752};
1753MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);
1754
1755static struct platform_driver rcar_dmac_driver = {
1756 .driver = {
1757 .pm = &rcar_dmac_pm,
1758 .name = "rcar-dmac",
1759 .of_match_table = rcar_dmac_of_ids,
1760 },
1761 .probe = rcar_dmac_probe,
1762 .remove = rcar_dmac_remove,
1763 .shutdown = rcar_dmac_shutdown,
1764};
1765
1766module_platform_driver(rcar_dmac_driver);
1767
1768MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver");
1769MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
1770MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c
index 20a6f6f2a018..749f26ecd3b3 100644
--- a/drivers/dma/sh/rcar-hpbdma.c
+++ b/drivers/dma/sh/rcar-hpbdma.c
@@ -534,6 +534,8 @@ static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id)
534 534
535static int hpb_dmae_probe(struct platform_device *pdev) 535static int hpb_dmae_probe(struct platform_device *pdev)
536{ 536{
537 const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
538 DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES;
537 struct hpb_dmae_pdata *pdata = pdev->dev.platform_data; 539 struct hpb_dmae_pdata *pdata = pdev->dev.platform_data;
538 struct hpb_dmae_device *hpbdev; 540 struct hpb_dmae_device *hpbdev;
539 struct dma_device *dma_dev; 541 struct dma_device *dma_dev;
@@ -595,6 +597,10 @@ static int hpb_dmae_probe(struct platform_device *pdev)
595 597
596 dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); 598 dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
597 dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); 599 dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
600 dma_dev->src_addr_widths = widths;
601 dma_dev->dst_addr_widths = widths;
602 dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
603 dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
598 604
599 hpbdev->shdma_dev.ops = &hpb_dmae_ops; 605 hpbdev->shdma_dev.ops = &hpb_dmae_ops;
600 hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc); 606 hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc);
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 3a2adb131d46..8ee383d339a5 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -729,57 +729,50 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
729 return desc; 729 return desc;
730} 730}
731 731
732static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 732static int shdma_terminate_all(struct dma_chan *chan)
733 unsigned long arg)
734{ 733{
735 struct shdma_chan *schan = to_shdma_chan(chan); 734 struct shdma_chan *schan = to_shdma_chan(chan);
736 struct shdma_dev *sdev = to_shdma_dev(chan->device); 735 struct shdma_dev *sdev = to_shdma_dev(chan->device);
737 const struct shdma_ops *ops = sdev->ops; 736 const struct shdma_ops *ops = sdev->ops;
738 struct dma_slave_config *config;
739 unsigned long flags; 737 unsigned long flags;
740 int ret;
741 738
742 switch (cmd) { 739 spin_lock_irqsave(&schan->chan_lock, flags);
743 case DMA_TERMINATE_ALL: 740 ops->halt_channel(schan);
744 spin_lock_irqsave(&schan->chan_lock, flags);
745 ops->halt_channel(schan);
746 741
747 if (ops->get_partial && !list_empty(&schan->ld_queue)) { 742 if (ops->get_partial && !list_empty(&schan->ld_queue)) {
748 /* Record partial transfer */ 743 /* Record partial transfer */
749 struct shdma_desc *desc = list_first_entry(&schan->ld_queue, 744 struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
750 struct shdma_desc, node); 745 struct shdma_desc, node);
751 desc->partial = ops->get_partial(schan, desc); 746 desc->partial = ops->get_partial(schan, desc);
752 } 747 }
753 748
754 spin_unlock_irqrestore(&schan->chan_lock, flags); 749 spin_unlock_irqrestore(&schan->chan_lock, flags);
755 750
756 shdma_chan_ld_cleanup(schan, true); 751 shdma_chan_ld_cleanup(schan, true);
757 break;
758 case DMA_SLAVE_CONFIG:
759 /*
760 * So far only .slave_id is used, but the slave drivers are
761 * encouraged to also set a transfer direction and an address.
762 */
763 if (!arg)
764 return -EINVAL;
765 /*
766 * We could lock this, but you shouldn't be configuring the
767 * channel, while using it...
768 */
769 config = (struct dma_slave_config *)arg;
770 ret = shdma_setup_slave(schan, config->slave_id,
771 config->direction == DMA_DEV_TO_MEM ?
772 config->src_addr : config->dst_addr);
773 if (ret < 0)
774 return ret;
775 break;
776 default:
777 return -ENXIO;
778 }
779 752
780 return 0; 753 return 0;
781} 754}
782 755
756static int shdma_config(struct dma_chan *chan,
757 struct dma_slave_config *config)
758{
759 struct shdma_chan *schan = to_shdma_chan(chan);
760
761 /*
762 * So far only .slave_id is used, but the slave drivers are
763 * encouraged to also set a transfer direction and an address.
764 */
765 if (!config)
766 return -EINVAL;
767 /*
768 * We could lock this, but you shouldn't be configuring the
769 * channel, while using it...
770 */
771 return shdma_setup_slave(schan, config->slave_id,
772 config->direction == DMA_DEV_TO_MEM ?
773 config->src_addr : config->dst_addr);
774}
775
783static void shdma_issue_pending(struct dma_chan *chan) 776static void shdma_issue_pending(struct dma_chan *chan)
784{ 777{
785 struct shdma_chan *schan = to_shdma_chan(chan); 778 struct shdma_chan *schan = to_shdma_chan(chan);
@@ -1002,7 +995,8 @@ int shdma_init(struct device *dev, struct shdma_dev *sdev,
1002 /* Compulsory for DMA_SLAVE fields */ 995 /* Compulsory for DMA_SLAVE fields */
1003 dma_dev->device_prep_slave_sg = shdma_prep_slave_sg; 996 dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
1004 dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic; 997 dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic;
1005 dma_dev->device_control = shdma_control; 998 dma_dev->device_config = shdma_config;
999 dma_dev->device_terminate_all = shdma_terminate_all;
1006 1000
1007 dma_dev->dev = dev; 1001 dma_dev->dev = dev;
1008 1002
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index aec8a84784a4..b2431aa30033 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -588,6 +588,7 @@ static void sh_dmae_shutdown(struct platform_device *pdev)
588 sh_dmae_ctl_stop(shdev); 588 sh_dmae_ctl_stop(shdev);
589} 589}
590 590
591#ifdef CONFIG_PM
591static int sh_dmae_runtime_suspend(struct device *dev) 592static int sh_dmae_runtime_suspend(struct device *dev)
592{ 593{
593 return 0; 594 return 0;
@@ -599,8 +600,9 @@ static int sh_dmae_runtime_resume(struct device *dev)
599 600
600 return sh_dmae_rst(shdev); 601 return sh_dmae_rst(shdev);
601} 602}
603#endif
602 604
603#ifdef CONFIG_PM 605#ifdef CONFIG_PM_SLEEP
604static int sh_dmae_suspend(struct device *dev) 606static int sh_dmae_suspend(struct device *dev)
605{ 607{
606 return 0; 608 return 0;
@@ -632,16 +634,12 @@ static int sh_dmae_resume(struct device *dev)
632 634
633 return 0; 635 return 0;
634} 636}
635#else
636#define sh_dmae_suspend NULL
637#define sh_dmae_resume NULL
638#endif 637#endif
639 638
640static const struct dev_pm_ops sh_dmae_pm = { 639static const struct dev_pm_ops sh_dmae_pm = {
641 .suspend = sh_dmae_suspend, 640 SET_SYSTEM_SLEEP_PM_OPS(sh_dmae_suspend, sh_dmae_resume)
642 .resume = sh_dmae_resume, 641 SET_RUNTIME_PM_OPS(sh_dmae_runtime_suspend, sh_dmae_runtime_resume,
643 .runtime_suspend = sh_dmae_runtime_suspend, 642 NULL)
644 .runtime_resume = sh_dmae_runtime_resume,
645}; 643};
646 644
647static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan) 645static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
@@ -684,6 +682,10 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
684 682
685static int sh_dmae_probe(struct platform_device *pdev) 683static int sh_dmae_probe(struct platform_device *pdev)
686{ 684{
685 const enum dma_slave_buswidth widths =
686 DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
687 DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES |
688 DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES;
687 const struct sh_dmae_pdata *pdata; 689 const struct sh_dmae_pdata *pdata;
688 unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {}; 690 unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
689 int chan_irq[SH_DMAE_MAX_CHANNELS]; 691 int chan_irq[SH_DMAE_MAX_CHANNELS];
@@ -746,6 +748,11 @@ static int sh_dmae_probe(struct platform_device *pdev)
746 return PTR_ERR(shdev->dmars); 748 return PTR_ERR(shdev->dmars);
747 } 749 }
748 750
751 dma_dev->src_addr_widths = widths;
752 dma_dev->dst_addr_widths = widths;
753 dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
754 dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
755
749 if (!pdata->slave_only) 756 if (!pdata->slave_only)
750 dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); 757 dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
751 if (pdata->slave && pdata->slave_num) 758 if (pdata->slave && pdata->slave_num)
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 3492a5f91d31..d0086e9f2082 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -281,9 +281,10 @@ static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
281 return cookie; 281 return cookie;
282} 282}
283 283
284static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan, 284static int sirfsoc_dma_slave_config(struct dma_chan *chan,
285 struct dma_slave_config *config) 285 struct dma_slave_config *config)
286{ 286{
287 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
287 unsigned long flags; 288 unsigned long flags;
288 289
289 if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) || 290 if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
@@ -297,8 +298,9 @@ static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
297 return 0; 298 return 0;
298} 299}
299 300
300static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan) 301static int sirfsoc_dma_terminate_all(struct dma_chan *chan)
301{ 302{
303 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
302 struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); 304 struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
303 int cid = schan->chan.chan_id; 305 int cid = schan->chan.chan_id;
304 unsigned long flags; 306 unsigned long flags;
@@ -327,8 +329,9 @@ static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
327 return 0; 329 return 0;
328} 330}
329 331
330static int sirfsoc_dma_pause_chan(struct sirfsoc_dma_chan *schan) 332static int sirfsoc_dma_pause_chan(struct dma_chan *chan)
331{ 333{
334 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
332 struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); 335 struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
333 int cid = schan->chan.chan_id; 336 int cid = schan->chan.chan_id;
334 unsigned long flags; 337 unsigned long flags;
@@ -348,8 +351,9 @@ static int sirfsoc_dma_pause_chan(struct sirfsoc_dma_chan *schan)
348 return 0; 351 return 0;
349} 352}
350 353
351static int sirfsoc_dma_resume_chan(struct sirfsoc_dma_chan *schan) 354static int sirfsoc_dma_resume_chan(struct dma_chan *chan)
352{ 355{
356 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
353 struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); 357 struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
354 int cid = schan->chan.chan_id; 358 int cid = schan->chan.chan_id;
355 unsigned long flags; 359 unsigned long flags;
@@ -369,30 +373,6 @@ static int sirfsoc_dma_resume_chan(struct sirfsoc_dma_chan *schan)
369 return 0; 373 return 0;
370} 374}
371 375
372static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
373 unsigned long arg)
374{
375 struct dma_slave_config *config;
376 struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
377
378 switch (cmd) {
379 case DMA_PAUSE:
380 return sirfsoc_dma_pause_chan(schan);
381 case DMA_RESUME:
382 return sirfsoc_dma_resume_chan(schan);
383 case DMA_TERMINATE_ALL:
384 return sirfsoc_dma_terminate_all(schan);
385 case DMA_SLAVE_CONFIG:
386 config = (struct dma_slave_config *)arg;
387 return sirfsoc_dma_slave_config(schan, config);
388
389 default:
390 break;
391 }
392
393 return -ENOSYS;
394}
395
396/* Alloc channel resources */ 376/* Alloc channel resources */
397static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan) 377static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
398{ 378{
@@ -648,18 +628,6 @@ EXPORT_SYMBOL(sirfsoc_dma_filter_id);
648 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ 628 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
649 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) 629 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
650 630
651static int sirfsoc_dma_device_slave_caps(struct dma_chan *dchan,
652 struct dma_slave_caps *caps)
653{
654 caps->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
655 caps->dstn_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
656 caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
657 caps->cmd_pause = true;
658 caps->cmd_terminate = true;
659
660 return 0;
661}
662
663static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec, 631static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec,
664 struct of_dma *ofdma) 632 struct of_dma *ofdma)
665{ 633{
@@ -739,11 +707,16 @@ static int sirfsoc_dma_probe(struct platform_device *op)
739 dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources; 707 dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
740 dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources; 708 dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
741 dma->device_issue_pending = sirfsoc_dma_issue_pending; 709 dma->device_issue_pending = sirfsoc_dma_issue_pending;
742 dma->device_control = sirfsoc_dma_control; 710 dma->device_config = sirfsoc_dma_slave_config;
711 dma->device_pause = sirfsoc_dma_pause_chan;
712 dma->device_resume = sirfsoc_dma_resume_chan;
713 dma->device_terminate_all = sirfsoc_dma_terminate_all;
743 dma->device_tx_status = sirfsoc_dma_tx_status; 714 dma->device_tx_status = sirfsoc_dma_tx_status;
744 dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved; 715 dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
745 dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic; 716 dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
746 dma->device_slave_caps = sirfsoc_dma_device_slave_caps; 717 dma->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
718 dma->dst_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
719 dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
747 720
748 INIT_LIST_HEAD(&dma->channels); 721 INIT_LIST_HEAD(&dma->channels);
749 dma_cap_set(DMA_SLAVE, dma->cap_mask); 722 dma_cap_set(DMA_SLAVE, dma->cap_mask);
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 15d49461c0d2..68aca3334a17 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1429,11 +1429,17 @@ static bool d40_tx_is_linked(struct d40_chan *d40c)
1429 return is_link; 1429 return is_link;
1430} 1430}
1431 1431
1432static int d40_pause(struct d40_chan *d40c) 1432static int d40_pause(struct dma_chan *chan)
1433{ 1433{
1434 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1434 int res = 0; 1435 int res = 0;
1435 unsigned long flags; 1436 unsigned long flags;
1436 1437
1438 if (d40c->phy_chan == NULL) {
1439 chan_err(d40c, "Channel is not allocated!\n");
1440 return -EINVAL;
1441 }
1442
1437 if (!d40c->busy) 1443 if (!d40c->busy)
1438 return 0; 1444 return 0;
1439 1445
@@ -1448,11 +1454,17 @@ static int d40_pause(struct d40_chan *d40c)
1448 return res; 1454 return res;
1449} 1455}
1450 1456
1451static int d40_resume(struct d40_chan *d40c) 1457static int d40_resume(struct dma_chan *chan)
1452{ 1458{
1459 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
1453 int res = 0; 1460 int res = 0;
1454 unsigned long flags; 1461 unsigned long flags;
1455 1462
1463 if (d40c->phy_chan == NULL) {
1464 chan_err(d40c, "Channel is not allocated!\n");
1465 return -EINVAL;
1466 }
1467
1456 if (!d40c->busy) 1468 if (!d40c->busy)
1457 return 0; 1469 return 0;
1458 1470
@@ -2604,12 +2616,17 @@ static void d40_issue_pending(struct dma_chan *chan)
2604 spin_unlock_irqrestore(&d40c->lock, flags); 2616 spin_unlock_irqrestore(&d40c->lock, flags);
2605} 2617}
2606 2618
2607static void d40_terminate_all(struct dma_chan *chan) 2619static int d40_terminate_all(struct dma_chan *chan)
2608{ 2620{
2609 unsigned long flags; 2621 unsigned long flags;
2610 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2622 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2611 int ret; 2623 int ret;
2612 2624
2625 if (d40c->phy_chan == NULL) {
2626 chan_err(d40c, "Channel is not allocated!\n");
2627 return -EINVAL;
2628 }
2629
2613 spin_lock_irqsave(&d40c->lock, flags); 2630 spin_lock_irqsave(&d40c->lock, flags);
2614 2631
2615 pm_runtime_get_sync(d40c->base->dev); 2632 pm_runtime_get_sync(d40c->base->dev);
@@ -2627,6 +2644,7 @@ static void d40_terminate_all(struct dma_chan *chan)
2627 d40c->busy = false; 2644 d40c->busy = false;
2628 2645
2629 spin_unlock_irqrestore(&d40c->lock, flags); 2646 spin_unlock_irqrestore(&d40c->lock, flags);
2647 return 0;
2630} 2648}
2631 2649
2632static int 2650static int
@@ -2673,6 +2691,11 @@ static int d40_set_runtime_config(struct dma_chan *chan,
2673 u32 src_maxburst, dst_maxburst; 2691 u32 src_maxburst, dst_maxburst;
2674 int ret; 2692 int ret;
2675 2693
2694 if (d40c->phy_chan == NULL) {
2695 chan_err(d40c, "Channel is not allocated!\n");
2696 return -EINVAL;
2697 }
2698
2676 src_addr_width = config->src_addr_width; 2699 src_addr_width = config->src_addr_width;
2677 src_maxburst = config->src_maxburst; 2700 src_maxburst = config->src_maxburst;
2678 dst_addr_width = config->dst_addr_width; 2701 dst_addr_width = config->dst_addr_width;
@@ -2781,35 +2804,6 @@ static int d40_set_runtime_config(struct dma_chan *chan,
2781 return 0; 2804 return 0;
2782} 2805}
2783 2806
2784static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2785 unsigned long arg)
2786{
2787 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2788
2789 if (d40c->phy_chan == NULL) {
2790 chan_err(d40c, "Channel is not allocated!\n");
2791 return -EINVAL;
2792 }
2793
2794 switch (cmd) {
2795 case DMA_TERMINATE_ALL:
2796 d40_terminate_all(chan);
2797 return 0;
2798 case DMA_PAUSE:
2799 return d40_pause(d40c);
2800 case DMA_RESUME:
2801 return d40_resume(d40c);
2802 case DMA_SLAVE_CONFIG:
2803 return d40_set_runtime_config(chan,
2804 (struct dma_slave_config *) arg);
2805 default:
2806 break;
2807 }
2808
2809 /* Other commands are unimplemented */
2810 return -ENXIO;
2811}
2812
2813/* Initialization functions */ 2807/* Initialization functions */
2814 2808
2815static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, 2809static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
@@ -2870,7 +2864,10 @@ static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
2870 dev->device_free_chan_resources = d40_free_chan_resources; 2864 dev->device_free_chan_resources = d40_free_chan_resources;
2871 dev->device_issue_pending = d40_issue_pending; 2865 dev->device_issue_pending = d40_issue_pending;
2872 dev->device_tx_status = d40_tx_status; 2866 dev->device_tx_status = d40_tx_status;
2873 dev->device_control = d40_control; 2867 dev->device_config = d40_set_runtime_config;
2868 dev->device_pause = d40_pause;
2869 dev->device_resume = d40_resume;
2870 dev->device_terminate_all = d40_terminate_all;
2874 dev->dev = base->dev; 2871 dev->dev = base->dev;
2875} 2872}
2876 2873
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 159f1736a16f..7ebcf9bec698 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -355,38 +355,6 @@ static void sun6i_dma_free_desc(struct virt_dma_desc *vd)
355 kfree(txd); 355 kfree(txd);
356} 356}
357 357
358static int sun6i_dma_terminate_all(struct sun6i_vchan *vchan)
359{
360 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device);
361 struct sun6i_pchan *pchan = vchan->phy;
362 unsigned long flags;
363 LIST_HEAD(head);
364
365 spin_lock(&sdev->lock);
366 list_del_init(&vchan->node);
367 spin_unlock(&sdev->lock);
368
369 spin_lock_irqsave(&vchan->vc.lock, flags);
370
371 vchan_get_all_descriptors(&vchan->vc, &head);
372
373 if (pchan) {
374 writel(DMA_CHAN_ENABLE_STOP, pchan->base + DMA_CHAN_ENABLE);
375 writel(DMA_CHAN_PAUSE_RESUME, pchan->base + DMA_CHAN_PAUSE);
376
377 vchan->phy = NULL;
378 pchan->vchan = NULL;
379 pchan->desc = NULL;
380 pchan->done = NULL;
381 }
382
383 spin_unlock_irqrestore(&vchan->vc.lock, flags);
384
385 vchan_dma_desc_free_list(&vchan->vc, &head);
386
387 return 0;
388}
389
390static int sun6i_dma_start_desc(struct sun6i_vchan *vchan) 358static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)
391{ 359{
392 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device); 360 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device);
@@ -675,57 +643,92 @@ err_lli_free:
675 return NULL; 643 return NULL;
676} 644}
677 645
678static int sun6i_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 646static int sun6i_dma_config(struct dma_chan *chan,
679 unsigned long arg) 647 struct dma_slave_config *config)
648{
649 struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
650
651 memcpy(&vchan->cfg, config, sizeof(*config));
652
653 return 0;
654}
655
656static int sun6i_dma_pause(struct dma_chan *chan)
657{
658 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
659 struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
660 struct sun6i_pchan *pchan = vchan->phy;
661
662 dev_dbg(chan2dev(chan), "vchan %p: pause\n", &vchan->vc);
663
664 if (pchan) {
665 writel(DMA_CHAN_PAUSE_PAUSE,
666 pchan->base + DMA_CHAN_PAUSE);
667 } else {
668 spin_lock(&sdev->lock);
669 list_del_init(&vchan->node);
670 spin_unlock(&sdev->lock);
671 }
672
673 return 0;
674}
675
676static int sun6i_dma_resume(struct dma_chan *chan)
680{ 677{
681 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); 678 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
682 struct sun6i_vchan *vchan = to_sun6i_vchan(chan); 679 struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
683 struct sun6i_pchan *pchan = vchan->phy; 680 struct sun6i_pchan *pchan = vchan->phy;
684 unsigned long flags; 681 unsigned long flags;
685 int ret = 0;
686 682
687 switch (cmd) { 683 dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);
688 case DMA_RESUME:
689 dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);
690 684
691 spin_lock_irqsave(&vchan->vc.lock, flags); 685 spin_lock_irqsave(&vchan->vc.lock, flags);
692 686
693 if (pchan) { 687 if (pchan) {
694 writel(DMA_CHAN_PAUSE_RESUME, 688 writel(DMA_CHAN_PAUSE_RESUME,
695 pchan->base + DMA_CHAN_PAUSE); 689 pchan->base + DMA_CHAN_PAUSE);
696 } else if (!list_empty(&vchan->vc.desc_issued)) { 690 } else if (!list_empty(&vchan->vc.desc_issued)) {
697 spin_lock(&sdev->lock); 691 spin_lock(&sdev->lock);
698 list_add_tail(&vchan->node, &sdev->pending); 692 list_add_tail(&vchan->node, &sdev->pending);
699 spin_unlock(&sdev->lock); 693 spin_unlock(&sdev->lock);
700 } 694 }
701 695
702 spin_unlock_irqrestore(&vchan->vc.lock, flags); 696 spin_unlock_irqrestore(&vchan->vc.lock, flags);
703 break;
704 697
705 case DMA_PAUSE: 698 return 0;
706 dev_dbg(chan2dev(chan), "vchan %p: pause\n", &vchan->vc); 699}
707 700
708 if (pchan) { 701static int sun6i_dma_terminate_all(struct dma_chan *chan)
709 writel(DMA_CHAN_PAUSE_PAUSE, 702{
710 pchan->base + DMA_CHAN_PAUSE); 703 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
711 } else { 704 struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
712 spin_lock(&sdev->lock); 705 struct sun6i_pchan *pchan = vchan->phy;
713 list_del_init(&vchan->node); 706 unsigned long flags;
714 spin_unlock(&sdev->lock); 707 LIST_HEAD(head);
715 } 708
716 break; 709 spin_lock(&sdev->lock);
717 710 list_del_init(&vchan->node);
718 case DMA_TERMINATE_ALL: 711 spin_unlock(&sdev->lock);
719 ret = sun6i_dma_terminate_all(vchan); 712
720 break; 713 spin_lock_irqsave(&vchan->vc.lock, flags);
721 case DMA_SLAVE_CONFIG: 714
722 memcpy(&vchan->cfg, (void *)arg, sizeof(struct dma_slave_config)); 715 vchan_get_all_descriptors(&vchan->vc, &head);
723 break; 716
724 default: 717 if (pchan) {
725 ret = -ENXIO; 718 writel(DMA_CHAN_ENABLE_STOP, pchan->base + DMA_CHAN_ENABLE);
726 break; 719 writel(DMA_CHAN_PAUSE_RESUME, pchan->base + DMA_CHAN_PAUSE);
720
721 vchan->phy = NULL;
722 pchan->vchan = NULL;
723 pchan->desc = NULL;
724 pchan->done = NULL;
727 } 725 }
728 return ret; 726
727 spin_unlock_irqrestore(&vchan->vc.lock, flags);
728
729 vchan_dma_desc_free_list(&vchan->vc, &head);
730
731 return 0;
729} 732}
730 733
731static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan, 734static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan,
@@ -960,9 +963,20 @@ static int sun6i_dma_probe(struct platform_device *pdev)
960 sdc->slave.device_issue_pending = sun6i_dma_issue_pending; 963 sdc->slave.device_issue_pending = sun6i_dma_issue_pending;
961 sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg; 964 sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg;
962 sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy; 965 sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy;
963 sdc->slave.device_control = sun6i_dma_control;
964 sdc->slave.copy_align = 4; 966 sdc->slave.copy_align = 4;
965 967 sdc->slave.device_config = sun6i_dma_config;
968 sdc->slave.device_pause = sun6i_dma_pause;
969 sdc->slave.device_resume = sun6i_dma_resume;
970 sdc->slave.device_terminate_all = sun6i_dma_terminate_all;
971 sdc->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
972 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
973 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
974 sdc->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
975 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
976 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
977 sdc->slave.directions = BIT(DMA_DEV_TO_MEM) |
978 BIT(DMA_MEM_TO_DEV);
979 sdc->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
966 sdc->slave.dev = &pdev->dev; 980 sdc->slave.dev = &pdev->dev;
967 981
968 sdc->pchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_channels, 982 sdc->pchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_channels,
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index d8450c3f35f0..eaf585e8286b 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -723,7 +723,7 @@ end:
723 return; 723 return;
724} 724}
725 725
726static void tegra_dma_terminate_all(struct dma_chan *dc) 726static int tegra_dma_terminate_all(struct dma_chan *dc)
727{ 727{
728 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 728 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
729 struct tegra_dma_sg_req *sgreq; 729 struct tegra_dma_sg_req *sgreq;
@@ -736,7 +736,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc)
736 spin_lock_irqsave(&tdc->lock, flags); 736 spin_lock_irqsave(&tdc->lock, flags);
737 if (list_empty(&tdc->pending_sg_req)) { 737 if (list_empty(&tdc->pending_sg_req)) {
738 spin_unlock_irqrestore(&tdc->lock, flags); 738 spin_unlock_irqrestore(&tdc->lock, flags);
739 return; 739 return 0;
740 } 740 }
741 741
742 if (!tdc->busy) 742 if (!tdc->busy)
@@ -777,6 +777,7 @@ skip_dma_stop:
777 dma_desc->cb_count = 0; 777 dma_desc->cb_count = 0;
778 } 778 }
779 spin_unlock_irqrestore(&tdc->lock, flags); 779 spin_unlock_irqrestore(&tdc->lock, flags);
780 return 0;
780} 781}
781 782
782static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, 783static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
@@ -827,25 +828,6 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
827 return ret; 828 return ret;
828} 829}
829 830
830static int tegra_dma_device_control(struct dma_chan *dc, enum dma_ctrl_cmd cmd,
831 unsigned long arg)
832{
833 switch (cmd) {
834 case DMA_SLAVE_CONFIG:
835 return tegra_dma_slave_config(dc,
836 (struct dma_slave_config *)arg);
837
838 case DMA_TERMINATE_ALL:
839 tegra_dma_terminate_all(dc);
840 return 0;
841
842 default:
843 break;
844 }
845
846 return -ENXIO;
847}
848
849static inline int get_bus_width(struct tegra_dma_channel *tdc, 831static inline int get_bus_width(struct tegra_dma_channel *tdc,
850 enum dma_slave_buswidth slave_bw) 832 enum dma_slave_buswidth slave_bw)
851{ 833{
@@ -1443,7 +1425,23 @@ static int tegra_dma_probe(struct platform_device *pdev)
1443 tegra_dma_free_chan_resources; 1425 tegra_dma_free_chan_resources;
1444 tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg; 1426 tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
1445 tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic; 1427 tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
1446 tdma->dma_dev.device_control = tegra_dma_device_control; 1428 tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
1429 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
1430 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
1431 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
1432 tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
1433 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
1434 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
1435 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
1436 tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1437 /*
1438 * XXX The hardware appears to support
1439 * DMA_RESIDUE_GRANULARITY_BURST-level reporting, but it's
1440 * only used by this driver during tegra_dma_terminate_all()
1441 */
1442 tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
1443 tdma->dma_dev.device_config = tegra_dma_slave_config;
1444 tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
1447 tdma->dma_dev.device_tx_status = tegra_dma_tx_status; 1445 tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
1448 tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending; 1446 tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;
1449 1447
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 2407ccf1a64b..c4c3d93fdd1b 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -561,8 +561,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
561 return &td_desc->txd; 561 return &td_desc->txd;
562} 562}
563 563
564static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 564static int td_terminate_all(struct dma_chan *chan)
565 unsigned long arg)
566{ 565{
567 struct timb_dma_chan *td_chan = 566 struct timb_dma_chan *td_chan =
568 container_of(chan, struct timb_dma_chan, chan); 567 container_of(chan, struct timb_dma_chan, chan);
@@ -570,9 +569,6 @@ static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 
 	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
 
-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENXIO;
-
 	/* first the easy part, put the queue into the free list */
 	spin_lock_bh(&td_chan->lock);
 	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
@@ -697,7 +693,7 @@ static int td_probe(struct platform_device *pdev)
 	dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
 	dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
 	td->dma.device_prep_slave_sg = td_prep_slave_sg;
-	td->dma.device_control = td_control;
+	td->dma.device_terminate_all = td_terminate_all;
 
 	td->dma.dev = &pdev->dev;
 
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 0659ec9c4488..8849318b32b7 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -901,17 +901,12 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	return &first->txd;
 }
 
-static int txx9dmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-			    unsigned long arg)
+static int txx9dmac_terminate_all(struct dma_chan *chan)
 {
 	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
 	struct txx9dmac_desc *desc, *_desc;
 	LIST_HEAD(list);
 
-	/* Only supports DMA_TERMINATE_ALL */
-	if (cmd != DMA_TERMINATE_ALL)
-		return -EINVAL;
-
 	dev_vdbg(chan2dev(chan), "terminate_all\n");
 	spin_lock_bh(&dc->lock);
 
@@ -1109,7 +1104,7 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev)
 	dc->dma.dev = &pdev->dev;
 	dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
 	dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
-	dc->dma.device_control = txx9dmac_control;
+	dc->dma.device_terminate_all = txx9dmac_terminate_all;
 	dc->dma.device_tx_status = txx9dmac_tx_status;
 	dc->dma.device_issue_pending = txx9dmac_issue_pending;
 	if (pdata && pdata->memcpy_chan == ch) {
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 4a3a8f3137b3..bdd2a5dd7220 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -1001,13 +1001,17 @@ error:
  * xilinx_vdma_terminate_all - Halt the channel and free descriptors
  * @chan: Driver specific VDMA Channel pointer
  */
-static void xilinx_vdma_terminate_all(struct xilinx_vdma_chan *chan)
+static int xilinx_vdma_terminate_all(struct dma_chan *dchan)
 {
+	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+
 	/* Halt the DMA engine */
 	xilinx_vdma_halt(chan);
 
 	/* Remove and free all of the descriptors in the lists */
 	xilinx_vdma_free_descriptors(chan);
+
+	return 0;
 }
 
 /**
@@ -1075,27 +1079,6 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
 }
 EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
 
-/**
- * xilinx_vdma_device_control - Configure DMA channel of the device
- * @dchan: DMA Channel pointer
- * @cmd: DMA control command
- * @arg: Channel configuration
- *
- * Return: '0' on success and failure value on error
- */
-static int xilinx_vdma_device_control(struct dma_chan *dchan,
-				      enum dma_ctrl_cmd cmd, unsigned long arg)
-{
-	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
-
-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENXIO;
-
-	xilinx_vdma_terminate_all(chan);
-
-	return 0;
-}
-
 /* -----------------------------------------------------------------------------
  * Probe and remove
  */
@@ -1300,7 +1283,7 @@ static int xilinx_vdma_probe(struct platform_device *pdev)
 		xilinx_vdma_free_chan_resources;
 	xdev->common.device_prep_interleaved_dma =
 		xilinx_vdma_dma_prep_interleaved;
-	xdev->common.device_control = xilinx_vdma_device_control;
+	xdev->common.device_terminate_all = xilinx_vdma_terminate_all;
 	xdev->common.device_tx_status = xilinx_vdma_tx_status;
 	xdev->common.device_issue_pending = xilinx_vdma_issue_pending;
 
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index f64c5decb747..47295940a868 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -815,8 +815,7 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
 	return txd;
 }
 
-static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
-			     unsigned long arg)
+static int tsi721_terminate_all(struct dma_chan *dchan)
 {
 	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
 	struct tsi721_tx_desc *desc, *_d;
@@ -825,9 +824,6 @@ static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
 
 	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
 
-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENOSYS;
-
 	spin_lock_bh(&bdma_chan->lock);
 
 	bdma_chan->active = false;
@@ -901,7 +897,7 @@ int tsi721_register_dma(struct tsi721_device *priv)
 	mport->dma.device_tx_status = tsi721_tx_status;
 	mport->dma.device_issue_pending = tsi721_issue_pending;
 	mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
-	mport->dma.device_control = tsi721_device_control;
+	mport->dma.device_terminate_all = tsi721_terminate_all;
 
 	err = dma_async_device_register(&mport->dma);
 	if (err)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 40cd75e21ea2..b6997a0cb528 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -189,25 +189,6 @@ enum dma_ctrl_flags {
 };
 
 /**
- * enum dma_ctrl_cmd - DMA operations that can optionally be exercised
- * on a running channel.
- * @DMA_TERMINATE_ALL: terminate all ongoing transfers
- * @DMA_PAUSE: pause ongoing transfers
- * @DMA_RESUME: resume paused transfer
- * @DMA_SLAVE_CONFIG: this command is only implemented by DMA controllers
- * that need to runtime reconfigure the slave channels (as opposed to passing
- * configuration data in statically from the platform). An additional
- * argument of struct dma_slave_config must be passed in with this
- * command.
- */
-enum dma_ctrl_cmd {
-	DMA_TERMINATE_ALL,
-	DMA_PAUSE,
-	DMA_RESUME,
-	DMA_SLAVE_CONFIG,
-};
-
-/**
  * enum sum_check_bits - bit position of pq_check_flags
  */
 enum sum_check_bits {
@@ -298,6 +279,9 @@ enum dma_slave_buswidth {
 	DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
 	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
 	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
+	DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
+	DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
+	DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
 };
 
 /**
@@ -336,9 +320,8 @@ enum dma_slave_buswidth {
  * This struct is passed in as configuration data to a DMA engine
  * in order to set up a certain channel for DMA transport at runtime.
  * The DMA device/engine has to provide support for an additional
- * command in the channel config interface, DMA_SLAVE_CONFIG
- * and this struct will then be passed in as an argument to the
- * DMA engine device_control() function.
+ * callback in the dma_device structure, device_config and this struct
+ * will then be passed in as an argument to the function.
  *
  * The rationale for adding configuration information to this struct is as
  * follows: if it is likely that more than one DMA slave controllers in
@@ -387,7 +370,7 @@ enum dma_residue_granularity {
 /* struct dma_slave_caps - expose capabilities of a slave channel only
  *
  * @src_addr_widths: bit mask of src addr widths the channel supports
- * @dstn_addr_widths: bit mask of dstn addr widths the channel supports
+ * @dst_addr_widths: bit mask of dstn addr widths the channel supports
  * @directions: bit mask of slave direction the channel supported
  * since the enum dma_transfer_direction is not defined as bits for each
  * type of direction, the dma controller should fill (1 << <TYPE>) and same
@@ -398,7 +381,7 @@ enum dma_residue_granularity {
  */
 struct dma_slave_caps {
 	u32 src_addr_widths;
-	u32 dstn_addr_widths;
+	u32 dst_addr_widths;
 	u32 directions;
 	bool cmd_pause;
 	bool cmd_terminate;
@@ -594,6 +577,14 @@ struct dma_tx_state {
  * @fill_align: alignment shift for memset operations
  * @dev_id: unique device ID
  * @dev: struct device reference for dma mapping api
+ * @src_addr_widths: bit mask of src addr widths the device supports
+ * @dst_addr_widths: bit mask of dst addr widths the device supports
+ * @directions: bit mask of slave direction the device supports since
+ *	the enum dma_transfer_direction is not defined as bits for
+ *	each type of direction, the dma controller should fill (1 <<
+ *	<TYPE>) and same should be checked by controller as well
+ * @residue_granularity: granularity of the transfer residue reported
+ *	by tx_status
  * @device_alloc_chan_resources: allocate resources and return the
  *	number of allocated descriptors
  * @device_free_chan_resources: release DMA channel's resources
@@ -608,14 +599,19 @@ struct dma_tx_state {
  *	The function takes a buffer of size buf_len. The callback function will
  *	be called after period_len bytes have been transferred.
  * @device_prep_interleaved_dma: Transfer expression in a generic way.
- * @device_control: manipulate all pending operations on a channel, returns
- *	zero or error code
+ * @device_config: Pushes a new configuration to a channel, return 0 or an error
+ *	code
+ * @device_pause: Pauses any transfer happening on a channel. Returns
+ *	0 or an error code
+ * @device_resume: Resumes any transfer on a channel previously
+ *	paused. Returns 0 or an error code
+ * @device_terminate_all: Aborts all transfers on a channel. Returns 0
+ *	or an error code
  * @device_tx_status: poll for transaction completion, the optional
  *	txstate parameter can be supplied with a pointer to get a
  *	struct with auxiliary transfer status information, otherwise the call
  *	will just return a simple status code
  * @device_issue_pending: push pending transactions to hardware
- * @device_slave_caps: return the slave channel capabilities
  */
 struct dma_device {
 
@@ -635,14 +631,19 @@ struct dma_device {
 	int dev_id;
 	struct device *dev;
 
+	u32 src_addr_widths;
+	u32 dst_addr_widths;
+	u32 directions;
+	enum dma_residue_granularity residue_granularity;
+
 	int (*device_alloc_chan_resources)(struct dma_chan *chan);
 	void (*device_free_chan_resources)(struct dma_chan *chan);
 
 	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
-		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+		struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
 		size_t len, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
-		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+		struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
 		unsigned int src_cnt, size_t len, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
 		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
@@ -674,31 +675,26 @@ struct dma_device {
 	struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
 		struct dma_chan *chan, struct dma_interleaved_template *xt,
 		unsigned long flags);
-	int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-		unsigned long arg);
+
+	int (*device_config)(struct dma_chan *chan,
+			     struct dma_slave_config *config);
+	int (*device_pause)(struct dma_chan *chan);
+	int (*device_resume)(struct dma_chan *chan);
+	int (*device_terminate_all)(struct dma_chan *chan);
 
 	enum dma_status (*device_tx_status)(struct dma_chan *chan,
 					    dma_cookie_t cookie,
 					    struct dma_tx_state *txstate);
 	void (*device_issue_pending)(struct dma_chan *chan);
-	int (*device_slave_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
 };
 
-static inline int dmaengine_device_control(struct dma_chan *chan,
-					   enum dma_ctrl_cmd cmd,
-					   unsigned long arg)
-{
-	if (chan->device->device_control)
-		return chan->device->device_control(chan, cmd, arg);
-
-	return -ENOSYS;
-}
-
 static inline int dmaengine_slave_config(struct dma_chan *chan,
 					 struct dma_slave_config *config)
 {
-	return dmaengine_device_control(chan, DMA_SLAVE_CONFIG,
-			(unsigned long)config);
+	if (chan->device->device_config)
+		return chan->device->device_config(chan, config);
+
+	return -ENOSYS;
 }
 
 static inline bool is_slave_direction(enum dma_transfer_direction direction)
@@ -765,34 +761,28 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
 					  src_sg, src_nents, flags);
 }
 
-static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
-{
-	if (!chan || !caps)
-		return -EINVAL;
-
-	/* check if the channel supports slave transactions */
-	if (!test_bit(DMA_SLAVE, chan->device->cap_mask.bits))
-		return -ENXIO;
-
-	if (chan->device->device_slave_caps)
-		return chan->device->device_slave_caps(chan, caps);
-
-	return -ENXIO;
-}
-
 static inline int dmaengine_terminate_all(struct dma_chan *chan)
 {
-	return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
+	if (chan->device->device_terminate_all)
+		return chan->device->device_terminate_all(chan);
+
+	return -ENOSYS;
 }
 
 static inline int dmaengine_pause(struct dma_chan *chan)
 {
-	return dmaengine_device_control(chan, DMA_PAUSE, 0);
+	if (chan->device->device_pause)
+		return chan->device->device_pause(chan);
+
+	return -ENOSYS;
 }
 
 static inline int dmaengine_resume(struct dma_chan *chan)
 {
-	return dmaengine_device_control(chan, DMA_RESUME, 0);
+	if (chan->device->device_resume)
+		return chan->device->device_resume(chan);
+
+	return -ENOSYS;
 }
 
 static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
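Client code keeps calling the same inline wrappers; they now dispatch to the split callbacks and return -ENOSYS when a driver does not implement one. A minimal usage sketch built only from helpers declared in this header, where the foo_start_tx() function, the "tx" channel name, and the buffer/FIFO addresses are illustrative assumptions rather than anything defined by this series:

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>

/* Hypothetical client: dev, dma_buf, len and fifo_addr come from the caller. */
static int foo_start_tx(struct device *dev, dma_addr_t dma_buf, size_t len,
			dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = fifo_addr,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 4,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, "tx");	/* "tx" is illustrative */
	if (!chan)
		return -ENODEV;

	dmaengine_slave_config(chan, &cfg);	/* -> device_config() */

	desc = dmaengine_prep_slave_single(chan, dma_buf, len,
					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	/*
	 * Later: dmaengine_pause()/dmaengine_resume() map to device_pause()/
	 * device_resume(), dmaengine_terminate_all() to device_terminate_all().
	 */
	return 0;
}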
@@ -1059,6 +1049,7 @@ struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
 						  const char *name);
 struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
 void dma_release_channel(struct dma_chan *chan);
+int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
 #else
 static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 {
@@ -1093,6 +1084,11 @@ static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
 static inline void dma_release_channel(struct dma_chan *chan)
 {
 }
+static inline int dma_get_slave_caps(struct dma_chan *chan,
+				     struct dma_slave_caps *caps)
+{
+	return -ENXIO;
+}
 #endif
 
 /* --- DMA device --- */
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
index d8155c005242..87ac14c584f2 100644
--- a/include/linux/platform_data/dma-dw.h
+++ b/include/linux/platform_data/dma-dw.h
@@ -13,10 +13,12 @@
 
 #include <linux/device.h>
 
+#define DW_DMA_MAX_NR_MASTERS	4
+
 /**
  * struct dw_dma_slave - Controller-specific information about a slave
  *
- * @dma_dev: required DMA master device. Depricated.
+ * @dma_dev: required DMA master device
  * @src_id: src request line
  * @dst_id: dst request line
  * @src_master: src master for transfers on allocated channel.
@@ -53,7 +55,7 @@ struct dw_dma_platform_data {
 	unsigned char chan_priority;
 	unsigned short block_size;
 	unsigned char nr_masters;
-	unsigned char data_width[4];
+	unsigned char data_width[DW_DMA_MAX_NR_MASTERS];
 };
 
 #endif /* _PLATFORM_DATA_DMA_DW_H */
diff --git a/include/linux/platform_data/dma-mmp_tdma.h b/include/linux/platform_data/dma-mmp_tdma.h
index 66574ea39f97..0c72886030ef 100644
--- a/include/linux/platform_data/dma-mmp_tdma.h
+++ b/include/linux/platform_data/dma-mmp_tdma.h
@@ -28,6 +28,13 @@ struct sram_platdata {
 	int granularity;
 };
 
+#ifdef CONFIG_ARM
 extern struct gen_pool *sram_get_gpool(char *pool_name);
+#else
+static inline struct gen_pool *sram_get_gpool(char *pool_name)
+{
+	return NULL;
+}
+#endif
 
 #endif /* __DMA_MMP_TDMA_H */
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
index 4864392bfcba..c9917ca5de1a 100644
--- a/sound/soc/soc-generic-dmaengine-pcm.c
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -151,7 +151,7 @@ static int dmaengine_pcm_set_runtime_hwparams(struct snd_pcm_substream *substrea
 		hw.info |= SNDRV_PCM_INFO_BATCH;
 
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		addr_widths = dma_caps.dstn_addr_widths;
+		addr_widths = dma_caps.dst_addr_widths;
 	else
 		addr_widths = dma_caps.src_addr_widths;
 	}
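The dst_addr_widths rename above is the consumer side of dma_get_slave_caps(), which the core now fills from the dma_device capability fields rather than a per-driver device_slave_caps() callback. A short sketch of such a capability check, with the helper name and the particular checks chosen only for illustration:

#include <linux/bitops.h>
#include <linux/dmaengine.h>

/*
 * Illustrative helper: report whether a channel can do 32-bit
 * memory-to-device writes and supports pause; not part of this series.
 */
static bool foo_chan_fits(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps))
		return false;

	return (caps.directions & BIT(DMA_MEM_TO_DEV)) &&
	       (caps.dst_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) &&
	       caps.cmd_pause;
}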