author		Linus Torvalds <torvalds@linux-foundation.org>	2014-01-29 23:27:23 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-29 23:27:23 -0500
commit		ca2a650f3dfdc30d71d21bcbb04d2d057779f3f9 (patch)
tree		12e5f7f4dea5ba17cc82f2c633bbe9dbf725fb11
parent		e9e352e9100b98aed1a5fb9e33355c29fb07d5b1 (diff)
parent		15cec530e4bc7bed3f51cde8404f96fd28a8c7c5 (diff)
Merge branch 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma
Pull slave-dma updates from Vinod Koul:
 - new driver for BCM2835 used in R-pi
 - new driver for MOXA ART
 - dma_get_any_slave_channel API for DT based systems
 - minor fixes and updates spread across drivers

[ The fsl-ssi dual fifo mode support addition clashed badly with the
  other changes to fsl-ssi that came in through the sound merge.  I did
  a very rough cut at fixing up the conflict, but Nicolin Chen (author
  of both sides) will need to verify and check things ]

* 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma: (36 commits)
  dmaengine: mmp_pdma: fix mismerge
  dma: pl08x: Export pl08x_filter_id
  acpi-dma: align documentation with kernel-doc format
  dma: fix vchan_cookie_complete() debug print
  DMA: dmatest: extend the "device" module parameter to 32 characters
  drivers/dma: fix error return code
  dma: omap: Set debug level to debugging messages
  dmaengine: fix kernel-doc style typos for few comments
  dma: tegra: add support for Tegra148/124
  dma: dw: use %pad instead of casting dma_addr_t
  dma: dw: join split up messages
  dma: dw: fix style of multiline comment
  dmaengine: k3dma: fix sparse warnings
  dma: pl330: Use dma_get_slave_channel() in the of xlate callback
  dma: pl330: Differentiate between submitted and issued descriptors
  dmaengine: sirf: Add device_slave_caps interface
  DMA: Freescale: change BWC from 256 bytes to 1024 bytes
  dmaengine: Add MOXA ART DMA engine driver
  dmaengine: Add DMA_PRIVATE to BCM2835 driver
  dma: imx-sdma: Assign a default script number for ROM firmware cases
  ...
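The dma_get_any_slave_channel() API mentioned above lets a DT driver's of_xlate
callback hand out any still-unused channel of its dma_device and then program
it from the specifier cells itself, instead of going through a filter function.
A minimal sketch of such a callback, modelled on the bcm2835_dma_xlate() added
in this diff (the my_* names are placeholders, not part of any driver here):

	#include <linux/dmaengine.h>
	#include <linux/of_dma.h>

	/* Hypothetical driver state: a dmaengine device plus per-channel state. */
	struct my_dmadev {
		struct dma_device ddev;
	};

	struct my_chan {
		struct dma_chan chan;
		unsigned int request_line;
	};

	static struct dma_chan *my_dma_of_xlate(struct of_phandle_args *spec,
						struct of_dma *ofdma)
	{
		struct my_dmadev *d = ofdma->of_dma_data;
		struct dma_chan *chan;

		/* Hand out any channel of this device that is still unused ... */
		chan = dma_get_any_slave_channel(&d->ddev);
		if (!chan)
			return NULL;

		/* ... and program it from the one-cell DT specifier. */
		container_of(chan, struct my_chan, chan)->request_line = spec->args[0];

		return chan;
	}

The callback would be hooked up with of_dma_controller_register(np,
my_dma_of_xlate, d), exactly as the BCM2835 and pl330 drivers below do.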
-rw-r--r--	Documentation/devicetree/bindings/dma/bcm2835-dma.txt	57
-rw-r--r--	Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt	1
-rw-r--r--	Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt	45
-rw-r--r--	drivers/dma/Kconfig	14
-rw-r--r--	drivers/dma/Makefile	2
-rw-r--r--	drivers/dma/acpi-dma.c	36
-rw-r--r--	drivers/dma/amba-pl08x.c	4
-rw-r--r--	drivers/dma/bcm2835-dma.c	707
-rw-r--r--	drivers/dma/cppi41.c	4
-rw-r--r--	drivers/dma/dmatest.c	4
-rw-r--r--	drivers/dma/dw/core.c	35
-rw-r--r--	drivers/dma/edma.c	6
-rw-r--r--	drivers/dma/fsldma.h	2
-rw-r--r--	drivers/dma/imx-sdma.c	23
-rw-r--r--	drivers/dma/k3dma.c	4
-rw-r--r--	drivers/dma/mmp_pdma.c	197
-rw-r--r--	drivers/dma/mmp_tdma.c	28
-rw-r--r--	drivers/dma/moxart-dma.c	699
-rw-r--r--	drivers/dma/omap-dma.c	4
-rw-r--r--	drivers/dma/pl330.c	65
-rw-r--r--	drivers/dma/ppc4xx/adma.c	1
-rw-r--r--	drivers/dma/sirf-dma.c	20
-rw-r--r--	drivers/dma/tegra20-apb-dma.c	62
-rw-r--r--	drivers/dma/virt-dma.h	4
-rw-r--r--	include/linux/dmaengine.h	15
-rw-r--r--	include/linux/platform_data/dma-imx-sdma.h	5
-rw-r--r--	include/linux/platform_data/dma-imx.h	1
-rw-r--r--	include/linux/platform_data/dma-mmp_tdma.h	8
-rw-r--r--	include/linux/platform_data/dma-mv_xor.h	6
-rw-r--r--	sound/soc/fsl/fsl_ssi.c	28
30 files changed, 1885 insertions, 202 deletions
diff --git a/Documentation/devicetree/bindings/dma/bcm2835-dma.txt b/Documentation/devicetree/bindings/dma/bcm2835-dma.txt
new file mode 100644
index 000000000000..1396078d15ac
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/bcm2835-dma.txt
@@ -0,0 +1,57 @@
+* BCM2835 DMA controller
+
+The BCM2835 DMA controller has 16 channels in total.
+Only the lower 13 channels have an associated IRQ.
+Some arbitrary channels are used by the firmware
+(1,3,6,7 in the current firmware version).
+The channels 0,2 and 3 have special functionality
+and should not be used by the driver.
+
+Required properties:
+- compatible: Should be "brcm,bcm2835-dma".
+- reg: Should contain DMA registers location and length.
+- interrupts: Should contain the DMA interrupts associated
+		to the DMA channels in ascending order.
+- #dma-cells: Must be <1>, the cell in the dmas property of the
+		client device represents the DREQ number.
+- brcm,dma-channel-mask: Bit mask representing the channels
+			 not used by the firmware in ascending order,
+			 i.e. first channel corresponds to LSB.
+
+Example:
+
+dma: dma@7e007000 {
+	compatible = "brcm,bcm2835-dma";
+	reg = <0x7e007000 0xf00>;
+	interrupts = <1 16>,
+		     <1 17>,
+		     <1 18>,
+		     <1 19>,
+		     <1 20>,
+		     <1 21>,
+		     <1 22>,
+		     <1 23>,
+		     <1 24>,
+		     <1 25>,
+		     <1 26>,
+		     <1 27>,
+		     <1 28>;
+
+	#dma-cells = <1>;
+	brcm,dma-channel-mask = <0x7f35>;
+};
+
+DMA clients connected to the BCM2835 DMA controller must use the format
+described in the dma.txt file, using a two-cell specifier for each channel.
+
+Example:
+
+bcm2835_i2s: i2s@7e203000 {
+	compatible = "brcm,bcm2835-i2s";
+	reg = < 0x7e203000 0x20>,
+	      < 0x7e101098 0x02>;
+
+	dmas = <&dma 2>,
+	       <&dma 3>;
+	dma-names = "tx", "rx";
+};
diff --git a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
index 4fa814d38321..68b83ecc3850 100644
--- a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
+++ b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
@@ -42,6 +42,7 @@ The full ID of peripheral types can be found below.
 	19	IPU Memory
 	20	ASRC
 	21	ESAI
+	22	SSI Dual FIFO	(needs firmware ver >= 2)
 
 The third cell specifies the transfer priority as below.
 
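With the new peripheral type 22, an SSI client selects the dual-FIFO SDMA
script through the second cell of its dmas specifier. An illustrative client
fragment in the style of this binding (the request IDs and the priority cell
are placeholder values, not taken from any real board file):

	ssi1: ssi@02028000 {
		compatible = "fsl,imx6q-ssi", "fsl,imx51-ssi";
		/* <&controller request-id peripheral-type priority> */
		dmas = <&sdma 37 22 1>, <&sdma 38 22 1>;
		dma-names = "rx", "tx";
	};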
diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644
index 000000000000..8a9f3559335b
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
@@ -0,0 +1,45 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible :	Must be "moxa,moxart-dma"
+- reg :		Should contain registers location and length
+- interrupts :	Should contain an interrupt-specifier for the sole
+		interrupt generated by the device
+- #dma-cells :	Should be 1, a single cell holding a line request number
+
+Example:
+
+	dma: dma@90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500080 0x40>;
+		interrupts = <24 0>;
+		#dma-cells = <1>;
+	};
+
+
+Clients:
+
+DMA clients connected to the MOXA ART DMA controller must use the format
+described in the dma.txt file, using a two-cell specifier for each channel:
+a phandle plus one integer cells.
+The two cells in order are:
+
+1. A phandle pointing to the DMA controller.
+2. Peripheral identifier for the hardware handshaking interface.
+
+Example:
+Use specific request line passing from dma
+For example, MMC request line is 5
+
+	sdhci: sdhci@98e00000 {
+		compatible = "moxa,moxart-sdhci";
+		reg = <0x98e00000 0x5C>;
+		interrupts = <5 0>;
+		clocks = <&clk_apb>;
+		dmas =  <&dma 5>,
+			<&dma 5>;
+		dma-names = "tx", "rx";
+	};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index c10eb89a3c1b..9bed1a2a67a1 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -306,6 +306,12 @@ config DMA_OMAP
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 
+config DMA_BCM2835
+	tristate "BCM2835 DMA engine support"
+	depends on (ARCH_BCM2835 || MACH_BCM2708)
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+
 config TI_CPPI41
 	tristate "AM33xx CPPI41 DMA support"
 	depends on ARCH_OMAP
@@ -336,6 +342,14 @@ config K3_DMA
 	  Support the DMA engine for Hisilicon K3 platform
 	  devices.
 
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 0ce2da97e429..a029d0f4a1be 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -38,7 +38,9 @@ obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
 obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
 obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
+obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index e69b03c0fa50..1e506afa33f5 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -30,11 +30,12 @@ static DEFINE_MUTEX(acpi_dma_lock);
  * @adev:	ACPI device to match with
  * @adma:	struct acpi_dma of the given DMA controller
  *
- * Returns 1 on success, 0 when no information is available, or appropriate
- * errno value on error.
- *
  * In order to match a device from DSDT table to the corresponding CSRT device
  * we use MMIO address and IRQ.
+ *
+ * Return:
+ * 1 on success, 0 when no information is available, or appropriate errno value
+ * on error.
  */
 static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
 		struct acpi_device *adev, struct acpi_dma *adma)
@@ -101,7 +102,6 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
  *
  * We are using this table to get the request line range of the specific DMA
  * controller to be used later.
- *
  */
 static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma)
 {
@@ -141,10 +141,11 @@ static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma)
  * @data	pointer to controller specific data to be used by
  *		translation function
  *
- * Returns 0 on success or appropriate errno value on error.
- *
  * Allocated memory should be freed with appropriate acpi_dma_controller_free()
  * call.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
  */
 int acpi_dma_controller_register(struct device *dev,
 		struct dma_chan *(*acpi_dma_xlate)
@@ -188,6 +189,9 @@ EXPORT_SYMBOL_GPL(acpi_dma_controller_register);
  * @dev:	struct device of DMA controller
  *
  * Memory allocated by acpi_dma_controller_register() is freed here.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
  */
 int acpi_dma_controller_free(struct device *dev)
 {
@@ -225,6 +229,9 @@ static void devm_acpi_dma_release(struct device *dev, void *res)
  * Managed acpi_dma_controller_register(). DMA controller registered by this
  * function are automatically freed on driver detach. See
  * acpi_dma_controller_register() for more information.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
  */
 int devm_acpi_dma_controller_register(struct device *dev,
 		struct dma_chan *(*acpi_dma_xlate)
@@ -267,8 +274,6 @@ EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
  * @adma:	struct acpi_dma of DMA controller
  * @dma_spec:	dma specifier to update
  *
- * Returns 0, if no information is avaiable, -1 on mismatch, and 1 otherwise.
- *
  * Accordingly to ACPI 5.0 Specification Table 6-170 "Fixed DMA Resource
  * Descriptor":
  *	DMA Request Line bits is a platform-relative number uniquely
@@ -276,6 +281,9 @@ EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
  *	mapping is done in a controller-specific OS driver.
  * That's why we can safely adjust slave_id when the appropriate controller is
  * found.
+ *
+ * Return:
+ * 0, if no information is avaiable, -1 on mismatch, and 1 otherwise.
  */
 static int acpi_dma_update_dma_spec(struct acpi_dma *adma,
 		struct acpi_dma_spec *dma_spec)
@@ -334,7 +342,8 @@ static int acpi_dma_parse_fixed_dma(struct acpi_resource *res, void *data)
  * @dev:	struct device to get DMA request from
  * @index:	index of FixedDMA descriptor for @dev
  *
- * Returns pointer to appropriate dma channel on success or NULL on error.
+ * Return:
+ * Pointer to appropriate dma channel on success or NULL on error.
  */
 struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
 		size_t index)
@@ -403,7 +412,8 @@ EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index);
  * translate the names "tx" and "rx" here based on the most common case where
  * the first FixedDMA descriptor is TX and second is RX.
  *
- * Returns pointer to appropriate dma channel on success or NULL on error.
+ * Return:
+ * Pointer to appropriate dma channel on success or NULL on error.
  */
 struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
 		const char *name)
@@ -427,8 +437,10 @@ EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_name);
  * @adma:	pointer to ACPI DMA controller data
  *
  * A simple translation function for ACPI based devices. Passes &struct
- * dma_spec to the DMA controller driver provided filter function. Returns
- * pointer to the channel if found or %NULL otherwise.
+ * dma_spec to the DMA controller driver provided filter function.
+ *
+ * Return:
+ * Pointer to the channel if found or %NULL otherwise.
  */
 struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec,
 		struct acpi_dma *adma)
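For reference, a controller driver typically pairs the helpers documented
above as follows - a hedged sketch assuming a hypothetical my_filter()
callback, patterned on how the dw_dmac driver uses struct
acpi_dma_filter_info together with the stock acpi_dma_simple_xlate():

	#include <linux/acpi_dma.h>
	#include <linux/device.h>
	#include <linux/dmaengine.h>

	/* Hypothetical filter: accept channels belonging to this controller. */
	static bool my_filter(struct dma_chan *chan, void *param)
	{
		struct acpi_dma_spec *dma_spec = param;

		return chan->device->dev == dma_spec->dev;
	}

	static int my_register_acpi_dma(struct device *dev)
	{
		struct acpi_dma_filter_info *info;

		info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		dma_cap_zero(info->dma_cap);
		dma_cap_set(DMA_SLAVE, info->dma_cap);
		info->filter_fn = my_filter;

		/* Managed variant: freed automatically on driver detach. */
		return devm_acpi_dma_controller_register(dev,
							 acpi_dma_simple_xlate,
							 info);
	}

acpi_dma_simple_xlate() then calls dma_request_channel() with this filter,
passing the per-request &struct acpi_dma_spec as the filter parameter.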
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index ec4ee5c1fe9d..8114731a1c62 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -83,6 +83,7 @@
 #include <linux/dmaengine.h>
 #include <linux/dmapool.h>
 #include <linux/dma-mapping.h>
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
@@ -1771,6 +1772,7 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
 
 	return false;
 }
+EXPORT_SYMBOL_GPL(pl08x_filter_id);
 
 /*
  * Just check that the device is there and active
@@ -2167,7 +2169,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	/* Register slave channels */
 	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
 			pl08x->pd->num_slave_channels, true);
-	if (ret <= 0) {
+	if (ret < 0) {
 		dev_warn(&pl08x->adev->dev,
 			 "%s failed to enumerate slave channels - %d\n",
 			 __func__, ret);
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
new file mode 100644
index 000000000000..a03602164e3e
--- /dev/null
+++ b/drivers/dma/bcm2835-dma.c
@@ -0,0 +1,707 @@
+/*
+ * BCM2835 DMA engine support
+ *
+ * This driver only supports cyclic DMA transfers
+ * as needed for the I2S module.
+ *
+ * Author: Florian Meier <florian.meier@koalo.de>
+ *	Copyright 2013
+ *
+ * Based on
+ *	OMAP DMAengine support by Russell King
+ *
+ *	BCM2708 DMA Driver
+ *	Copyright (C) 2010 Broadcom
+ *
+ *	Raspberry Pi PCM I2S ALSA Driver
+ *	Copyright (c) by Phil Poole 2013
+ *
+ *	MARVELL MMP Peripheral DMA Driver
+ *	Copyright 2012 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+
+#include "virt-dma.h"
+
+struct bcm2835_dmadev {
+	struct dma_device ddev;
+	spinlock_t lock;
+	void __iomem *base;
+	struct device_dma_parameters dma_parms;
+};
+
+struct bcm2835_dma_cb {
+	uint32_t info;
+	uint32_t src;
+	uint32_t dst;
+	uint32_t length;
+	uint32_t stride;
+	uint32_t next;
+	uint32_t pad[2];
+};
+
+struct bcm2835_chan {
+	struct virt_dma_chan vc;
+	struct list_head node;
+
+	struct dma_slave_config cfg;
+	bool cyclic;
+	unsigned int dreq;
+
+	int ch;
+	struct bcm2835_desc *desc;
+
+	void __iomem *chan_base;
+	int irq_number;
+};
+
+struct bcm2835_desc {
+	struct virt_dma_desc vd;
+	enum dma_transfer_direction dir;
+
+	unsigned int control_block_size;
+	struct bcm2835_dma_cb *control_block_base;
+	dma_addr_t control_block_base_phys;
+
+	unsigned int frames;
+	size_t size;
+};
+
+#define BCM2835_DMA_CS		0x00
+#define BCM2835_DMA_ADDR	0x04
+#define BCM2835_DMA_SOURCE_AD	0x0c
+#define BCM2835_DMA_DEST_AD	0x10
+#define BCM2835_DMA_NEXTCB	0x1C
+
+/* DMA CS Control and Status bits */
+#define BCM2835_DMA_ACTIVE	BIT(0)
+#define BCM2835_DMA_INT	BIT(2)
+#define BCM2835_DMA_ISPAUSED	BIT(4)  /* Pause requested or not active */
+#define BCM2835_DMA_ISHELD	BIT(5)  /* Is held by DREQ flow control */
+#define BCM2835_DMA_ERR	BIT(8)
+#define BCM2835_DMA_ABORT	BIT(30) /* Stop current CB, go to next, WO */
+#define BCM2835_DMA_RESET	BIT(31) /* WO, self clearing */
+
+#define BCM2835_DMA_INT_EN	BIT(0)
+#define BCM2835_DMA_D_INC	BIT(4)
+#define BCM2835_DMA_D_DREQ	BIT(6)
+#define BCM2835_DMA_S_INC	BIT(8)
+#define BCM2835_DMA_S_DREQ	BIT(10)
+
+#define BCM2835_DMA_PER_MAP(x)	((x) << 16)
+
+#define BCM2835_DMA_DATA_TYPE_S8	1
+#define BCM2835_DMA_DATA_TYPE_S16	2
+#define BCM2835_DMA_DATA_TYPE_S32	4
+#define BCM2835_DMA_DATA_TYPE_S128	16
+
+#define BCM2835_DMA_BULK_MASK	BIT(0)
+#define BCM2835_DMA_FIQ_MASK	(BIT(2) | BIT(3))
+
+/* Valid only for channels 0 - 14, 15 has its own base address */
+#define BCM2835_DMA_CHAN(n)	((n) << 8) /* Base address */
+#define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n))
+
+static inline struct bcm2835_dmadev *to_bcm2835_dma_dev(struct dma_device *d)
+{
+	return container_of(d, struct bcm2835_dmadev, ddev);
+}
+
+static inline struct bcm2835_chan *to_bcm2835_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct bcm2835_chan, vc.chan);
+}
+
+static inline struct bcm2835_desc *to_bcm2835_dma_desc(
+		struct dma_async_tx_descriptor *t)
+{
+	return container_of(t, struct bcm2835_desc, vd.tx);
+}
+
+static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
+{
+	struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd);
+	dma_free_coherent(desc->vd.tx.chan->device->dev,
+			desc->control_block_size,
+			desc->control_block_base,
+			desc->control_block_base_phys);
+	kfree(desc);
+}
+
+static int bcm2835_dma_abort(void __iomem *chan_base)
+{
+	unsigned long cs;
+	long int timeout = 10000;
+
+	cs = readl(chan_base + BCM2835_DMA_CS);
+	if (!(cs & BCM2835_DMA_ACTIVE))
+		return 0;
+
+	/* Write 0 to the active bit - Pause the DMA */
+	writel(0, chan_base + BCM2835_DMA_CS);
+
+	/* Wait for any current AXI transfer to complete */
+	while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) {
+		cpu_relax();
+		cs = readl(chan_base + BCM2835_DMA_CS);
+	}
+
+	/* We'll un-pause when we set of our next DMA */
+	if (!timeout)
+		return -ETIMEDOUT;
+
+	if (!(cs & BCM2835_DMA_ACTIVE))
+		return 0;
+
+	/* Terminate the control block chain */
+	writel(0, chan_base + BCM2835_DMA_NEXTCB);
+
+	/* Abort the whole DMA */
+	writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE,
+	       chan_base + BCM2835_DMA_CS);
+
+	return 0;
+}
+
+static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
+{
+	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+	struct bcm2835_desc *d;
+
+	if (!vd) {
+		c->desc = NULL;
+		return;
+	}
+
+	list_del(&vd->node);
+
+	c->desc = d = to_bcm2835_dma_desc(&vd->tx);
+
+	writel(d->control_block_base_phys, c->chan_base + BCM2835_DMA_ADDR);
+	writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
+}
+
+static irqreturn_t bcm2835_dma_callback(int irq, void *data)
+{
+	struct bcm2835_chan *c = data;
+	struct bcm2835_desc *d;
+	unsigned long flags;
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+
+	/* Acknowledge interrupt */
+	writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS);
+
+	d = c->desc;
+
+	if (d) {
+		/* TODO Only works for cyclic DMA */
+		vchan_cyclic_callback(&d->vd);
+	}
+
+	/* Keep the DMA engine running */
+	writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
+
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
+	dev_dbg(c->vc.chan.device->dev,
+			"Allocating DMA channel %d\n", c->ch);
+
+	return request_irq(c->irq_number,
+			bcm2835_dma_callback, 0, "DMA IRQ", c);
+}
+
+static void bcm2835_dma_free_chan_resources(struct dma_chan *chan)
+{
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
+	vchan_free_chan_resources(&c->vc);
+	free_irq(c->irq_number, c);
+
+	dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch);
+}
+
+static size_t bcm2835_dma_desc_size(struct bcm2835_desc *d)
+{
+	return d->size;
+}
+
+static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr)
+{
+	unsigned int i;
+	size_t size;
+
+	for (size = i = 0; i < d->frames; i++) {
+		struct bcm2835_dma_cb *control_block =
+			&d->control_block_base[i];
+		size_t this_size = control_block->length;
+		dma_addr_t dma;
+
+		if (d->dir == DMA_DEV_TO_MEM)
+			dma = control_block->dst;
+		else
+			dma = control_block->src;
+
+		if (size)
+			size += this_size;
+		else if (addr >= dma && addr < dma + this_size)
+			size += dma + this_size - addr;
+	}
+
+	return size;
+}
+
+static enum dma_status bcm2835_dma_tx_status(struct dma_chan *chan,
+	dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+	struct virt_dma_desc *vd;
+	enum dma_status ret;
+	unsigned long flags;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_COMPLETE || !txstate)
+		return ret;
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+	vd = vchan_find_desc(&c->vc, cookie);
+	if (vd) {
+		txstate->residue =
+			bcm2835_dma_desc_size(to_bcm2835_dma_desc(&vd->tx));
+	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
+		struct bcm2835_desc *d = c->desc;
+		dma_addr_t pos;
+
+		if (d->dir == DMA_MEM_TO_DEV)
+			pos = readl(c->chan_base + BCM2835_DMA_SOURCE_AD);
+		else if (d->dir == DMA_DEV_TO_MEM)
+			pos = readl(c->chan_base + BCM2835_DMA_DEST_AD);
+		else
+			pos = 0;
+
+		txstate->residue = bcm2835_dma_desc_size_pos(d, pos);
+	} else {
+		txstate->residue = 0;
+	}
+
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+
+	return ret;
+}
+
+static void bcm2835_dma_issue_pending(struct dma_chan *chan)
+{
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+	unsigned long flags;
+
+	c->cyclic = true; /* Nothing else is implemented */
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+	if (vchan_issue_pending(&c->vc) && !c->desc)
+		bcm2835_dma_start_desc(c);
+
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+}
+
+static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
+	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+	size_t period_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+	enum dma_slave_buswidth dev_width;
+	struct bcm2835_desc *d;
+	dma_addr_t dev_addr;
+	unsigned int es, sync_type;
+	unsigned int frame;
+
+	/* Grab configuration */
+	if (!is_slave_direction(direction)) {
+		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
+		return NULL;
+	}
+
+	if (direction == DMA_DEV_TO_MEM) {
+		dev_addr = c->cfg.src_addr;
+		dev_width = c->cfg.src_addr_width;
+		sync_type = BCM2835_DMA_S_DREQ;
+	} else {
+		dev_addr = c->cfg.dst_addr;
+		dev_width = c->cfg.dst_addr_width;
+		sync_type = BCM2835_DMA_D_DREQ;
+	}
+
+	/* Bus width translates to the element size (ES) */
+	switch (dev_width) {
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		es = BCM2835_DMA_DATA_TYPE_S32;
+		break;
+	default:
+		return NULL;
+	}
+
+	/* Now allocate and setup the descriptor. */
+	d = kzalloc(sizeof(*d), GFP_NOWAIT);
+	if (!d)
+		return NULL;
+
+	d->dir = direction;
+	d->frames = buf_len / period_len;
+
+	/* Allocate memory for control blocks */
+	d->control_block_size = d->frames * sizeof(struct bcm2835_dma_cb);
+	d->control_block_base = dma_zalloc_coherent(chan->device->dev,
+			d->control_block_size, &d->control_block_base_phys,
+			GFP_NOWAIT);
+
+	if (!d->control_block_base) {
+		kfree(d);
+		return NULL;
+	}
+
+	/*
+	 * Iterate over all frames, create a control block
+	 * for each frame and link them together.
+	 */
+	for (frame = 0; frame < d->frames; frame++) {
+		struct bcm2835_dma_cb *control_block =
+			&d->control_block_base[frame];
+
+		/* Setup adresses */
+		if (d->dir == DMA_DEV_TO_MEM) {
+			control_block->info = BCM2835_DMA_D_INC;
+			control_block->src = dev_addr;
+			control_block->dst = buf_addr + frame * period_len;
+		} else {
+			control_block->info = BCM2835_DMA_S_INC;
+			control_block->src = buf_addr + frame * period_len;
+			control_block->dst = dev_addr;
+		}
+
+		/* Enable interrupt */
+		control_block->info |= BCM2835_DMA_INT_EN;
+
+		/* Setup synchronization */
+		if (sync_type != 0)
+			control_block->info |= sync_type;
+
+		/* Setup DREQ channel */
+		if (c->dreq != 0)
+			control_block->info |=
+				BCM2835_DMA_PER_MAP(c->dreq);
+
+		/* Length of a frame */
+		control_block->length = period_len;
+		d->size += control_block->length;
+
+		/*
+		 * Next block is the next frame.
+		 * This DMA engine driver currently only supports cyclic DMA.
+		 * Therefore, wrap around at number of frames.
+		 */
+		control_block->next = d->control_block_base_phys +
+			sizeof(struct bcm2835_dma_cb)
+			* ((frame + 1) % d->frames);
+	}
+
+	return vchan_tx_prep(&c->vc, &d->vd, flags);
+}
+
+static int bcm2835_dma_slave_config(struct bcm2835_chan *c,
+		struct dma_slave_config *cfg)
+{
+	if ((cfg->direction == DMA_DEV_TO_MEM &&
+	     cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
+	    (cfg->direction == DMA_MEM_TO_DEV &&
+	     cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
+	    !is_slave_direction(cfg->direction)) {
+		return -EINVAL;
+	}
+
+	c->cfg = *cfg;
+
+	return 0;
+}
+
+static int bcm2835_dma_terminate_all(struct bcm2835_chan *c)
+{
+	struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
+	unsigned long flags;
+	int timeout = 10000;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+
+	/* Prevent this channel being scheduled */
+	spin_lock(&d->lock);
+	list_del_init(&c->node);
+	spin_unlock(&d->lock);
+
+	/*
+	 * Stop DMA activity: we assume the callback will not be called
+	 * after bcm_dma_abort() returns (even if it does, it will see
+	 * c->desc is NULL and exit.)
+	 */
+	if (c->desc) {
+		c->desc = NULL;
+		bcm2835_dma_abort(c->chan_base);
+
+		/* Wait for stopping */
+		while (--timeout) {
+			if (!(readl(c->chan_base + BCM2835_DMA_CS) &
+						BCM2835_DMA_ACTIVE))
+				break;
+
+			cpu_relax();
+		}
+
+		if (!timeout)
+			dev_err(d->ddev.dev, "DMA transfer could not be terminated\n");
+	}
+
+	vchan_get_all_descriptors(&c->vc, &head);
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+	vchan_dma_desc_free_list(&c->vc, &head);
+
+	return 0;
+}
+
+static int bcm2835_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+	unsigned long arg)
+{
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
+	switch (cmd) {
+	case DMA_SLAVE_CONFIG:
+		return bcm2835_dma_slave_config(c,
+				(struct dma_slave_config *)arg);
+
+	case DMA_TERMINATE_ALL:
+		return bcm2835_dma_terminate_all(c);
+
+	default:
+		return -ENXIO;
+	}
+}
+
+static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq)
+{
+	struct bcm2835_chan *c;
+
+	c = devm_kzalloc(d->ddev.dev, sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return -ENOMEM;
+
+	c->vc.desc_free = bcm2835_dma_desc_free;
+	vchan_init(&c->vc, &d->ddev);
+	INIT_LIST_HEAD(&c->node);
+
+	d->ddev.chancnt++;
+
+	c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
+	c->ch = chan_id;
+	c->irq_number = irq;
+
+	return 0;
+}
+
+static void bcm2835_dma_free(struct bcm2835_dmadev *od)
+{
+	struct bcm2835_chan *c, *next;
+
+	list_for_each_entry_safe(c, next, &od->ddev.channels,
+				 vc.chan.device_node) {
+		list_del(&c->vc.chan.device_node);
+		tasklet_kill(&c->vc.task);
+	}
+}
+
+static const struct of_device_id bcm2835_dma_of_match[] = {
+	{ .compatible = "brcm,bcm2835-dma", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, bcm2835_dma_of_match);
+
+static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
+					   struct of_dma *ofdma)
+{
+	struct bcm2835_dmadev *d = ofdma->of_dma_data;
+	struct dma_chan *chan;
+
+	chan = dma_get_any_slave_channel(&d->ddev);
+	if (!chan)
+		return NULL;
+
+	/* Set DREQ from param */
+	to_bcm2835_dma_chan(chan)->dreq = spec->args[0];
+
+	return chan;
+}
+
+static int bcm2835_dma_device_slave_caps(struct dma_chan *dchan,
+	struct dma_slave_caps *caps)
+{
+	caps->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	caps->dstn_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	caps->cmd_pause = false;
+	caps->cmd_terminate = true;
+
+	return 0;
+}
+
+static int bcm2835_dma_probe(struct platform_device *pdev)
+{
+	struct bcm2835_dmadev *od;
+	struct resource *res;
+	void __iomem *base;
+	int rc;
+	int i;
+	int irq;
+	uint32_t chans_available;
+
+	if (!pdev->dev.dma_mask)
+		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+
+	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (rc)
+		return rc;
+
+	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
+	if (!od)
+		return -ENOMEM;
+
+	pdev->dev.dma_parms = &od->dma_parms;
+	dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	od->base = base;
+
+	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
+	dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask);
+	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
+	od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources;
+	od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
+	od->ddev.device_tx_status = bcm2835_dma_tx_status;
+	od->ddev.device_issue_pending = bcm2835_dma_issue_pending;
+	od->ddev.device_slave_caps = bcm2835_dma_device_slave_caps;
+	od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic;
+	od->ddev.device_control = bcm2835_dma_control;
+	od->ddev.dev = &pdev->dev;
+	INIT_LIST_HEAD(&od->ddev.channels);
+	spin_lock_init(&od->lock);
+
+	platform_set_drvdata(pdev, od);
+
+	/* Request DMA channel mask from device tree */
+	if (of_property_read_u32(pdev->dev.of_node,
+			"brcm,dma-channel-mask",
+			&chans_available)) {
+		dev_err(&pdev->dev, "Failed to get channel mask\n");
+		rc = -EINVAL;
+		goto err_no_dma;
+	}
+
+	/*
+	 * Do not use the FIQ and BULK channels,
+	 * because they are used by the GPU.
+	 */
+	chans_available &= ~(BCM2835_DMA_FIQ_MASK | BCM2835_DMA_BULK_MASK);
+
+	for (i = 0; i < pdev->num_resources; i++) {
+		irq = platform_get_irq(pdev, i);
+		if (irq < 0)
+			break;
+
+		if (chans_available & (1 << i)) {
+			rc = bcm2835_dma_chan_init(od, i, irq);
+			if (rc)
+				goto err_no_dma;
+		}
+	}
+
+	dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", i);
+
+	/* Device-tree DMA controller registration */
+	rc = of_dma_controller_register(pdev->dev.of_node,
+			bcm2835_dma_xlate, od);
+	if (rc) {
+		dev_err(&pdev->dev, "Failed to register DMA controller\n");
+		goto err_no_dma;
+	}
+
+	rc = dma_async_device_register(&od->ddev);
+	if (rc) {
+		dev_err(&pdev->dev,
+			"Failed to register slave DMA engine device: %d\n", rc);
+		goto err_no_dma;
+	}
+
+	dev_dbg(&pdev->dev, "Load BCM2835 DMA engine driver\n");
+
+	return 0;
+
+err_no_dma:
+	bcm2835_dma_free(od);
+	return rc;
+}
+
+static int bcm2835_dma_remove(struct platform_device *pdev)
+{
+	struct bcm2835_dmadev *od = platform_get_drvdata(pdev);
+
+	dma_async_device_unregister(&od->ddev);
+	bcm2835_dma_free(od);
+
+	return 0;
+}
+
+static struct platform_driver bcm2835_dma_driver = {
+	.probe	= bcm2835_dma_probe,
+	.remove	= bcm2835_dma_remove,
+	.driver = {
+		.name = "bcm2835-dma",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(bcm2835_dma_of_match),
+	},
+};
+
+module_platform_driver(bcm2835_dma_driver);
+
+MODULE_ALIAS("platform:bcm2835-dma");
+MODULE_DESCRIPTION("BCM2835 DMA engine driver");
+MODULE_AUTHOR("Florian Meier <florian.meier@koalo.de>");
+MODULE_LICENSE("GPL v2");
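Since the driver above only implements device_prep_dma_cyclic, a client would
drive it through the generic dmaengine slave API, roughly as the I2S sound
driver does. A minimal sketch for the RX direction (error unwinding and
channel release omitted; my_start_cyclic_rx() is an illustrative name, not
part of the driver above):

	#include <linux/dmaengine.h>
	#include <linux/dma-mapping.h>

	static int my_start_cyclic_rx(struct device *dev, dma_addr_t buf,
				      size_t buf_len, size_t period_len,
				      dma_addr_t fifo_addr)
	{
		struct dma_slave_config cfg = {
			.direction = DMA_DEV_TO_MEM,
			.src_addr = fifo_addr,
			/* The driver above accepts only 4-byte bus widths. */
			.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		};
		struct dma_async_tx_descriptor *desc;
		struct dma_chan *chan;

		/* Resolved through the "dmas"/"dma-names" DT properties. */
		chan = dma_request_slave_channel(dev, "rx");
		if (!chan)
			return -ENODEV;

		dmaengine_slave_config(chan, &cfg);

		/* One interrupt per period; the ring wraps automatically. */
		desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
						 DMA_DEV_TO_MEM,
						 DMA_PREP_INTERRUPT);
		if (!desc)
			return -EINVAL;

		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
		return 0;
	}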
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index c29dacff66fa..c18aebf7d5aa 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -972,8 +972,10 @@ static int cppi41_dma_probe(struct platform_device *pdev)
 		goto err_chans;
 
 	irq = irq_of_parse_and_map(dev->of_node, 0);
-	if (!irq)
+	if (!irq) {
+		ret = -EINVAL;
 		goto err_irq;
+	}
 
 	cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
 
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 9dfcaf5c1288..05b6dea770a4 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -31,7 +31,7 @@ module_param_string(channel, test_channel, sizeof(test_channel),
 		S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
 
-static char test_device[20];
+static char test_device[32];
 module_param_string(device, test_device, sizeof(test_device),
 		S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
@@ -89,7 +89,7 @@ MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");
 struct dmatest_params {
 	unsigned int	buf_size;
 	char		channel[20];
-	char		device[20];
+	char		device[32];
 	unsigned int	threads_per_chan;
 	unsigned int	max_channels;
 	unsigned int	iterations;
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 7516be4677cf..13ac3f240e79 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -218,8 +218,10 @@ static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
 	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
 	u32		ctllo;
 
-	/* Software emulation of LLP mode relies on interrupts to continue
-	 * multi block transfer. */
+	/*
+	 * Software emulation of LLP mode relies on interrupts to continue
+	 * multi block transfer.
+	 */
 	ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;
 
 	channel_writel(dwc, SAR, desc->lli.sar);
@@ -253,8 +255,7 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
 				&dwc->flags);
 		if (was_soft_llp) {
 			dev_err(chan2dev(&dwc->chan),
-				"BUG: Attempted to start new LLP transfer "
-				"inside ongoing one\n");
+				"BUG: Attempted to start new LLP transfer inside ongoing one\n");
 			return;
 		}
 
@@ -420,8 +421,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 		return;
 	}
 
-	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
-			(unsigned long long)llp);
+	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);
 
 	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
 		/* Initial residue value */
@@ -567,9 +567,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
 			unlikely(status_xfer & dwc->mask)) {
 		int i;
 
-		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
-				"interrupt, stopping DMA transfer\n",
-				status_xfer ? "xfer" : "error");
+		dev_err(chan2dev(&dwc->chan),
+			"cyclic DMA unexpected %s interrupt, stopping DMA transfer\n",
+			status_xfer ? "xfer" : "error");
 
 		spin_lock_irqsave(&dwc->lock, flags);
 
@@ -711,9 +711,8 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	u32			ctllo;
 
 	dev_vdbg(chan2dev(chan),
-			"%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
-			(unsigned long long)dest, (unsigned long long)src,
-			len, flags);
+			"%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
+			&dest, &src, len, flags);
 
 	if (unlikely(!len)) {
 		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
@@ -1401,9 +1400,9 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
 	/* Let's make a cyclic list */
 	last->lli.llp = cdesc->desc[0]->txd.phys;
 
-	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
-			"period %zu periods %d\n", (unsigned long long)buf_addr,
-			buf_len, period_len, periods);
+	dev_dbg(chan2dev(&dwc->chan),
+			"cyclic prepared buf %pad len %zu period %zu periods %d\n",
+			&buf_addr, buf_len, period_len, periods);
 
 	cdesc->periods = periods;
 	dwc->cdesc = cdesc;
@@ -1603,9 +1602,11 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 			dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
 					dwc_params);
 
-			/* Decode maximum block size for given channel. The
+			/*
+			 * Decode maximum block size for given channel. The
 			 * stored 4 bit value represents blocks from 0x00 for 3
-			 * up to 0x0a for 4095. */
+			 * up to 0x0a for 4095.
+			 */
 			dwc->block_size =
 				(4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
 			dwc->nollp =
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 2539ea0cbc63..cd8da451d199 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -699,8 +699,8 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
 	echan->alloced = true;
 	echan->slot[0] = echan->ch_num;
 
-	dev_info(dev, "allocated channel for %u:%u\n",
-		 EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
+	dev_dbg(dev, "allocated channel for %u:%u\n",
+		EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
 
 	return 0;
 
@@ -736,7 +736,7 @@ static void edma_free_chan_resources(struct dma_chan *chan)
 		echan->alloced = false;
 	}
 
-	dev_info(dev, "freeing channel for %u\n", echan->ch_num);
+	dev_dbg(dev, "freeing channel for %u\n", echan->ch_num);
 }
 
 /* Send pending descriptor to hardware */
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 1ffc24484d23..d56e83599825 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -41,7 +41,7 @@
  * channel is allowed to transfer before the DMA engine pauses
  * the current channel and switches to the next channel
  */
-#define FSL_DMA_MR_BWC		0x08000000
+#define FSL_DMA_MR_BWC		0x0A000000
 
 /* Special MR definition for MPC8349 */
 #define FSL_DMA_MR_EOTIE	0x00000080
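(The 4-bit BWC field in the mode register appears to encode the bandwidth-
sharing limit as 2^BWC bytes, so 0x08 selects 256 bytes and 0x0A selects
1024 bytes per channel before the engine switches channels - matching the
"change BWC from 256 bytes to 1024 bytes" commit in the shortlog above.)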
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index c75679d42028..4e7918339b12 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -323,6 +323,7 @@ struct sdma_engine {
 	struct clk			*clk_ipg;
 	struct clk			*clk_ahb;
 	spinlock_t			channel_0_lock;
+	u32				script_number;
 	struct sdma_script_start_addrs	*script_addrs;
 	const struct sdma_driver_data	*drvdata;
 };
@@ -724,6 +725,10 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
 		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
 		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
 		break;
+	case IMX_DMATYPE_SSI_DUAL:
+		per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
+		emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
+		break;
 	case IMX_DMATYPE_SSI_SP:
 	case IMX_DMATYPE_MMC:
 	case IMX_DMATYPE_SDHC:
@@ -1238,6 +1243,7 @@ static void sdma_issue_pending(struct dma_chan *chan)
 }
 
 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1	34
+#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2	38
 
 static void sdma_add_scripts(struct sdma_engine *sdma,
 		const struct sdma_script_start_addrs *addr)
@@ -1246,7 +1252,11 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
 	s32 *saddr_arr = (u32 *)sdma->script_addrs;
 	int i;
 
-	for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
+	/* use the default firmware in ROM if missing external firmware */
+	if (!sdma->script_number)
+		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
+
+	for (i = 0; i < sdma->script_number; i++)
 		if (addr_arr[i] > 0)
 			saddr_arr[i] = addr_arr[i];
 }
@@ -1272,6 +1282,17 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
 		goto err_firmware;
 	if (header->ram_code_start + header->ram_code_size > fw->size)
 		goto err_firmware;
+	switch (header->version_major) {
+	case 1:
+		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
+		break;
+	case 2:
+		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
+		break;
+	default:
+		dev_err(sdma->dev, "unknown firmware version\n");
+		goto err_firmware;
+	}
 
 	addr = (void *)header + header->script_addrs_start;
 	ram_code = (void *)header + header->ram_code_start;
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index e26075408e9b..a1f911aaf220 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -477,7 +477,7 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
 	dma_addr_t addr, src = 0, dst = 0;
 	int num = sglen, i;
 
-	if (sgl == 0)
+	if (sgl == NULL)
 		return NULL;
 
 	for_each_sg(sgl, sg, sglen, i) {
@@ -817,7 +817,7 @@ static int k3_dma_resume(struct device *dev)
 	return 0;
 }
 
-SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume);
+static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume);
 
 static struct platform_driver k3_pdma_driver = {
 	.driver		= {
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index c6a01ea8bc59..b439679f4126 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -5,6 +5,7 @@
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 */ 7 */
8
8#include <linux/err.h> 9#include <linux/err.h>
9#include <linux/module.h> 10#include <linux/module.h>
10#include <linux/init.h> 11#include <linux/init.h>
@@ -32,38 +33,37 @@
32#define DTADR 0x0208 33#define DTADR 0x0208
33#define DCMD 0x020c 34#define DCMD 0x020c
34 35
35#define DCSR_RUN (1 << 31) /* Run Bit (read / write) */ 36#define DCSR_RUN BIT(31) /* Run Bit (read / write) */
36#define DCSR_NODESC (1 << 30) /* No-Descriptor Fetch (read / write) */ 37#define DCSR_NODESC BIT(30) /* No-Descriptor Fetch (read / write) */
37#define DCSR_STOPIRQEN (1 << 29) /* Stop Interrupt Enable (read / write) */ 38#define DCSR_STOPIRQEN BIT(29) /* Stop Interrupt Enable (read / write) */
38#define DCSR_REQPEND (1 << 8) /* Request Pending (read-only) */ 39#define DCSR_REQPEND BIT(8) /* Request Pending (read-only) */
39#define DCSR_STOPSTATE (1 << 3) /* Stop State (read-only) */ 40#define DCSR_STOPSTATE BIT(3) /* Stop State (read-only) */
40#define DCSR_ENDINTR (1 << 2) /* End Interrupt (read / write) */ 41#define DCSR_ENDINTR BIT(2) /* End Interrupt (read / write) */
41#define DCSR_STARTINTR (1 << 1) /* Start Interrupt (read / write) */ 42#define DCSR_STARTINTR BIT(1) /* Start Interrupt (read / write) */
42#define DCSR_BUSERR (1 << 0) /* Bus Error Interrupt (read / write) */ 43#define DCSR_BUSERR BIT(0) /* Bus Error Interrupt (read / write) */
43 44
44#define DCSR_EORIRQEN (1 << 28) /* End of Receive Interrupt Enable (R/W) */ 45#define DCSR_EORIRQEN BIT(28) /* End of Receive Interrupt Enable (R/W) */
45#define DCSR_EORJMPEN (1 << 27) /* Jump to next descriptor on EOR */ 46#define DCSR_EORJMPEN BIT(27) /* Jump to next descriptor on EOR */
46#define DCSR_EORSTOPEN (1 << 26) /* STOP on an EOR */ 47#define DCSR_EORSTOPEN BIT(26) /* STOP on an EOR */
47#define DCSR_SETCMPST (1 << 25) /* Set Descriptor Compare Status */ 48#define DCSR_SETCMPST BIT(25) /* Set Descriptor Compare Status */
48#define DCSR_CLRCMPST (1 << 24) /* Clear Descriptor Compare Status */ 49#define DCSR_CLRCMPST BIT(24) /* Clear Descriptor Compare Status */
49#define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */ 50#define DCSR_CMPST BIT(10) /* The Descriptor Compare Status */
50#define DCSR_EORINTR (1 << 9) /* The end of Receive */ 51#define DCSR_EORINTR BIT(9) /* The end of Receive */
51 52
52#define DRCMR(n) ((((n) < 64) ? 0x0100 : 0x1100) + \ 53#define DRCMR(n) ((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))
53 (((n) & 0x3f) << 2)) 54#define DRCMR_MAPVLD BIT(7) /* Map Valid (read / write) */
54#define DRCMR_MAPVLD (1 << 7) /* Map Valid (read / write) */ 55#define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */
55#define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */
56 56
57#define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */ 57#define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */
58#define DDADR_STOP (1 << 0) /* Stop (read / write) */ 58#define DDADR_STOP BIT(0) /* Stop (read / write) */
59 59
60#define DCMD_INCSRCADDR (1 << 31) /* Source Address Increment Setting. */ 60#define DCMD_INCSRCADDR BIT(31) /* Source Address Increment Setting. */
61#define DCMD_INCTRGADDR (1 << 30) /* Target Address Increment Setting. */ 61#define DCMD_INCTRGADDR BIT(30) /* Target Address Increment Setting. */
62#define DCMD_FLOWSRC (1 << 29) /* Flow Control by the source. */ 62#define DCMD_FLOWSRC BIT(29) /* Flow Control by the source. */
63#define DCMD_FLOWTRG (1 << 28) /* Flow Control by the target. */ 63#define DCMD_FLOWTRG BIT(28) /* Flow Control by the target. */
64#define DCMD_STARTIRQEN (1 << 22) /* Start Interrupt Enable */ 64#define DCMD_STARTIRQEN BIT(22) /* Start Interrupt Enable */
65#define DCMD_ENDIRQEN (1 << 21) /* End Interrupt Enable */ 65#define DCMD_ENDIRQEN BIT(21) /* End Interrupt Enable */
66#define DCMD_ENDIAN (1 << 18) /* Device Endian-ness. */ 66#define DCMD_ENDIAN BIT(18) /* Device Endian-ness. */
67#define DCMD_BURST8 (1 << 16) /* 8 byte burst */ 67#define DCMD_BURST8 (1 << 16) /* 8 byte burst */
68#define DCMD_BURST16 (2 << 16) /* 16 byte burst */ 68#define DCMD_BURST16 (2 << 16) /* 16 byte burst */
69#define DCMD_BURST32 (3 << 16) /* 32 byte burst */ 69#define DCMD_BURST32 (3 << 16) /* 32 byte burst */
@@ -132,10 +132,14 @@ struct mmp_pdma_device {
132 spinlock_t phy_lock; /* protect alloc/free phy channels */ 132 spinlock_t phy_lock; /* protect alloc/free phy channels */
133}; 133};
134 134
135#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx) 135#define tx_to_mmp_pdma_desc(tx) \
136#define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node) 136 container_of(tx, struct mmp_pdma_desc_sw, async_tx)
137#define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan) 137#define to_mmp_pdma_desc(lh) \
138#define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device) 138 container_of(lh, struct mmp_pdma_desc_sw, node)
139#define to_mmp_pdma_chan(dchan) \
140 container_of(dchan, struct mmp_pdma_chan, chan)
141#define to_mmp_pdma_dev(dmadev) \
142 container_of(dmadev, struct mmp_pdma_device, device)
139 143
140static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr) 144static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
141{ 145{
@@ -162,19 +166,18 @@ static void enable_chan(struct mmp_pdma_phy *phy)
162 writel(dalgn, phy->base + DALGN); 166 writel(dalgn, phy->base + DALGN);
163 167
164 reg = (phy->idx << 2) + DCSR; 168 reg = (phy->idx << 2) + DCSR;
165 writel(readl(phy->base + reg) | DCSR_RUN, 169 writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
166 phy->base + reg);
167} 170}
168 171
169static void disable_chan(struct mmp_pdma_phy *phy) 172static void disable_chan(struct mmp_pdma_phy *phy)
170{ 173{
171 u32 reg; 174 u32 reg;
172 175
173 if (phy) { 176 if (!phy)
174 reg = (phy->idx << 2) + DCSR; 177 return;
175 writel(readl(phy->base + reg) & ~DCSR_RUN, 178
176 phy->base + reg); 179 reg = (phy->idx << 2) + DCSR;
177 } 180 writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
178} 181}
179 182
180static int clear_chan_irq(struct mmp_pdma_phy *phy) 183static int clear_chan_irq(struct mmp_pdma_phy *phy)
@@ -183,26 +186,27 @@ static int clear_chan_irq(struct mmp_pdma_phy *phy)
183 u32 dint = readl(phy->base + DINT); 186 u32 dint = readl(phy->base + DINT);
184 u32 reg = (phy->idx << 2) + DCSR; 187 u32 reg = (phy->idx << 2) + DCSR;
185 188
186 if (dint & BIT(phy->idx)) { 189 if (!(dint & BIT(phy->idx)))
187 /* clear irq */ 190 return -EAGAIN;
188 dcsr = readl(phy->base + reg); 191
189 writel(dcsr, phy->base + reg); 192 /* clear irq */
190 if ((dcsr & DCSR_BUSERR) && (phy->vchan)) 193 dcsr = readl(phy->base + reg);
191 dev_warn(phy->vchan->dev, "DCSR_BUSERR\n"); 194 writel(dcsr, phy->base + reg);
192 return 0; 195 if ((dcsr & DCSR_BUSERR) && (phy->vchan))
193 } 196 dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
194 return -EAGAIN; 197
198 return 0;
195} 199}
196 200
197static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id) 201static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
198{ 202{
199 struct mmp_pdma_phy *phy = dev_id; 203 struct mmp_pdma_phy *phy = dev_id;
200 204
201 if (clear_chan_irq(phy) == 0) { 205 if (clear_chan_irq(phy) != 0)
202 tasklet_schedule(&phy->vchan->tasklet);
203 return IRQ_HANDLED;
204 } else
205 return IRQ_NONE; 206 return IRQ_NONE;
207
208 tasklet_schedule(&phy->vchan->tasklet);
209 return IRQ_HANDLED;
206} 210}
207 211
208static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id) 212static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
@@ -224,8 +228,8 @@ static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
224 228
225 if (irq_num) 229 if (irq_num)
226 return IRQ_HANDLED; 230 return IRQ_HANDLED;
227 else 231
228 return IRQ_NONE; 232 return IRQ_NONE;
229} 233}
230 234
231/* lookup free phy channel in descending priority order */ 235/* lookup free phy channel in descending priority order */
@@ -245,9 +249,9 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
245 */ 249 */
246 250
247 spin_lock_irqsave(&pdev->phy_lock, flags); 251 spin_lock_irqsave(&pdev->phy_lock, flags);
248 for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) { 252 for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) {
249 for (i = 0; i < pdev->dma_channels; i++) { 253 for (i = 0; i < pdev->dma_channels; i++) {
250 if (prio != ((i & 0xf) >> 2)) 254 if (prio != (i & 0xf) >> 2)
251 continue; 255 continue;
252 phy = &pdev->phy[i]; 256 phy = &pdev->phy[i];
253 if (!phy->vchan) { 257 if (!phy->vchan) {
@@ -389,14 +393,16 @@ static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
389 if (chan->desc_pool) 393 if (chan->desc_pool)
390 return 1; 394 return 1;
391 395
392 chan->desc_pool = 396 chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device),
393 dma_pool_create(dev_name(&dchan->dev->device), chan->dev, 397 chan->dev,
394 sizeof(struct mmp_pdma_desc_sw), 398 sizeof(struct mmp_pdma_desc_sw),
395 __alignof__(struct mmp_pdma_desc_sw), 0); 399 __alignof__(struct mmp_pdma_desc_sw),
400 0);
396 if (!chan->desc_pool) { 401 if (!chan->desc_pool) {
397 dev_err(chan->dev, "unable to allocate descriptor pool\n"); 402 dev_err(chan->dev, "unable to allocate descriptor pool\n");
398 return -ENOMEM; 403 return -ENOMEM;
399 } 404 }
405
400 mmp_pdma_free_phy(chan); 406 mmp_pdma_free_phy(chan);
401 chan->idle = true; 407 chan->idle = true;
402 chan->dev_addr = 0; 408 chan->dev_addr = 0;
@@ -404,7 +410,7 @@ static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
404} 410}
405 411
406static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan, 412static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
407 struct list_head *list) 413 struct list_head *list)
408{ 414{
409 struct mmp_pdma_desc_sw *desc, *_desc; 415 struct mmp_pdma_desc_sw *desc, *_desc;
410 416
@@ -434,8 +440,8 @@ static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
434 440
435static struct dma_async_tx_descriptor * 441static struct dma_async_tx_descriptor *
436mmp_pdma_prep_memcpy(struct dma_chan *dchan, 442mmp_pdma_prep_memcpy(struct dma_chan *dchan,
437 dma_addr_t dma_dst, dma_addr_t dma_src, 443 dma_addr_t dma_dst, dma_addr_t dma_src,
438 size_t len, unsigned long flags) 444 size_t len, unsigned long flags)
439{ 445{
440 struct mmp_pdma_chan *chan; 446 struct mmp_pdma_chan *chan;
441 struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; 447 struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
@@ -515,8 +521,8 @@ fail:
515 521
516static struct dma_async_tx_descriptor * 522static struct dma_async_tx_descriptor *
517mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, 523mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
518 unsigned int sg_len, enum dma_transfer_direction dir, 524 unsigned int sg_len, enum dma_transfer_direction dir,
519 unsigned long flags, void *context) 525 unsigned long flags, void *context)
520{ 526{
521 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); 527 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
522 struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL; 528 struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
@@ -591,10 +597,11 @@ fail:
591 return NULL; 597 return NULL;
592} 598}
593 599
594static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic( 600static struct dma_async_tx_descriptor *
595 struct dma_chan *dchan, dma_addr_t buf_addr, size_t len, 601mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
596 size_t period_len, enum dma_transfer_direction direction, 602 dma_addr_t buf_addr, size_t len, size_t period_len,
597 unsigned long flags, void *context) 603 enum dma_transfer_direction direction,
604 unsigned long flags, void *context)
598{ 605{
599 struct mmp_pdma_chan *chan; 606 struct mmp_pdma_chan *chan;
600 struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; 607 struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
@@ -636,8 +643,8 @@ static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic(
636 goto fail; 643 goto fail;
637 } 644 }
638 645
639 new->desc.dcmd = chan->dcmd | DCMD_ENDIRQEN | 646 new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN |
640 (DCMD_LENGTH & period_len); 647 (DCMD_LENGTH & period_len));
641 new->desc.dsadr = dma_src; 648 new->desc.dsadr = dma_src;
642 new->desc.dtadr = dma_dst; 649 new->desc.dtadr = dma_dst;
643 650
@@ -677,12 +684,11 @@ fail:
677} 684}
678 685
679static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, 686static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
680 unsigned long arg) 687 unsigned long arg)
681{ 688{
682 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); 689 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
683 struct dma_slave_config *cfg = (void *)arg; 690 struct dma_slave_config *cfg = (void *)arg;
684 unsigned long flags; 691 unsigned long flags;
685 int ret = 0;
686 u32 maxburst = 0, addr = 0; 692 u32 maxburst = 0, addr = 0;
687 enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; 693 enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
688 694
@@ -739,11 +745,12 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
739 return -ENOSYS; 745 return -ENOSYS;
740 } 746 }
741 747
742 return ret; 748 return 0;
743} 749}
744 750
745static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan, 751static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
746 dma_cookie_t cookie, struct dma_tx_state *txstate) 752 dma_cookie_t cookie,
753 struct dma_tx_state *txstate)
747{ 754{
748 return dma_cookie_status(dchan, cookie, txstate); 755 return dma_cookie_status(dchan, cookie, txstate);
749} 756}
@@ -845,15 +852,14 @@ static int mmp_pdma_remove(struct platform_device *op)
845 return 0; 852 return 0;
846} 853}
847 854
848static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, 855static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
849 int idx, int irq)
850{ 856{
851 struct mmp_pdma_phy *phy = &pdev->phy[idx]; 857 struct mmp_pdma_phy *phy = &pdev->phy[idx];
852 struct mmp_pdma_chan *chan; 858 struct mmp_pdma_chan *chan;
853 int ret; 859 int ret;
854 860
855 chan = devm_kzalloc(pdev->dev, 861 chan = devm_kzalloc(pdev->dev, sizeof(struct mmp_pdma_chan),
856 sizeof(struct mmp_pdma_chan), GFP_KERNEL); 862 GFP_KERNEL);
857 if (chan == NULL) 863 if (chan == NULL)
858 return -ENOMEM; 864 return -ENOMEM;
859 865
@@ -861,8 +867,8 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
861 phy->base = pdev->base; 867 phy->base = pdev->base;
862 868
863 if (irq) { 869 if (irq) {
864 ret = devm_request_irq(pdev->dev, irq, 870 ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler, 0,
865 mmp_pdma_chan_handler, 0, "pdma", phy); 871 "pdma", phy);
866 if (ret) { 872 if (ret) {
867 dev_err(pdev->dev, "channel request irq fail!\n"); 873 dev_err(pdev->dev, "channel request irq fail!\n");
868 return ret; 874 return ret;
@@ -877,8 +883,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
877 INIT_LIST_HEAD(&chan->chain_running); 883 INIT_LIST_HEAD(&chan->chain_running);
878 884
879 /* register virt channel to dma engine */ 885 /* register virt channel to dma engine */
880 list_add_tail(&chan->chan.device_node, 886 list_add_tail(&chan->chan.device_node, &pdev->device.channels);
881 &pdev->device.channels);
882 887
883 return 0; 888 return 0;
884} 889}
@@ -894,14 +899,12 @@ static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
894{ 899{
895 struct mmp_pdma_device *d = ofdma->of_dma_data; 900 struct mmp_pdma_device *d = ofdma->of_dma_data;
896 struct dma_chan *chan; 901 struct dma_chan *chan;
897 struct mmp_pdma_chan *c;
898 902
899 chan = dma_get_any_slave_channel(&d->device); 903 chan = dma_get_any_slave_channel(&d->device);
900 if (!chan) 904 if (!chan)
901 return NULL; 905 return NULL;
902 906
903 c = to_mmp_pdma_chan(chan); 907 to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0];
904 c->drcmr = dma_spec->args[0];
905 908
906 return chan; 909 return chan;
907} 910}
@@ -918,6 +921,7 @@ static int mmp_pdma_probe(struct platform_device *op)
918 pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL); 921 pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
919 if (!pdev) 922 if (!pdev)
920 return -ENOMEM; 923 return -ENOMEM;
924
921 pdev->dev = &op->dev; 925 pdev->dev = &op->dev;
922 926
923 spin_lock_init(&pdev->phy_lock); 927 spin_lock_init(&pdev->phy_lock);
@@ -929,8 +933,8 @@ static int mmp_pdma_probe(struct platform_device *op)
929 933
930 of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev); 934 of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
931 if (of_id) 935 if (of_id)
932 of_property_read_u32(pdev->dev->of_node, 936 of_property_read_u32(pdev->dev->of_node, "#dma-channels",
933 "#dma-channels", &dma_channels); 937 &dma_channels);
934 else if (pdata && pdata->dma_channels) 938 else if (pdata && pdata->dma_channels)
935 dma_channels = pdata->dma_channels; 939 dma_channels = pdata->dma_channels;
936 else 940 else
@@ -942,8 +946,9 @@ static int mmp_pdma_probe(struct platform_device *op)
942 irq_num++; 946 irq_num++;
943 } 947 }
944 948
945 pdev->phy = devm_kzalloc(pdev->dev, 949 pdev->phy = devm_kcalloc(pdev->dev,
946 dma_channels * sizeof(struct mmp_pdma_chan), GFP_KERNEL); 950 dma_channels, sizeof(struct mmp_pdma_chan),
951 GFP_KERNEL);
947 if (pdev->phy == NULL) 952 if (pdev->phy == NULL)
948 return -ENOMEM; 953 return -ENOMEM;
949 954
@@ -952,8 +957,8 @@ static int mmp_pdma_probe(struct platform_device *op)
952 if (irq_num != dma_channels) { 957 if (irq_num != dma_channels) {
953 /* all chan share one irq, demux inside */ 958 /* all chan share one irq, demux inside */
954 irq = platform_get_irq(op, 0); 959 irq = platform_get_irq(op, 0);
955 ret = devm_request_irq(pdev->dev, irq, 960 ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler, 0,
956 mmp_pdma_int_handler, 0, "pdma", pdev); 961 "pdma", pdev);
957 if (ret) 962 if (ret)
958 return ret; 963 return ret;
959 } 964 }
@@ -1029,7 +1034,7 @@ bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
1029 if (chan->device->dev->driver != &mmp_pdma_driver.driver) 1034 if (chan->device->dev->driver != &mmp_pdma_driver.driver)
1030 return false; 1035 return false;
1031 1036
1032 c->drcmr = *(unsigned int *) param; 1037 c->drcmr = *(unsigned int *)param;
1033 1038
1034 return true; 1039 return true;
1035} 1040}
@@ -1037,6 +1042,6 @@ EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
1037 1042
1038module_platform_driver(mmp_pdma_driver); 1043module_platform_driver(mmp_pdma_driver);
1039 1044
1040MODULE_DESCRIPTION("MARVELL MMP Periphera DMA Driver"); 1045MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
1041MODULE_AUTHOR("Marvell International Ltd."); 1046MODULE_AUTHOR("Marvell International Ltd.");
1042MODULE_LICENSE("GPL v2"); 1047MODULE_LICENSE("GPL v2");
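
The mmp_pdma xlate rework above is the new dma_get_any_slave_channel() idiom for DT-based controllers: let the core hand out any free channel on the device, then stash the request line from the phandle argument. A minimal sketch of the pattern, with "my_dmadev"/"my_chan" as hypothetical driver types:

	static struct dma_chan *my_of_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
	{
		struct my_dmadev *d = ofdma->of_dma_data;
		struct dma_chan *chan;

		/* Let the core pick any unused channel on this device. */
		chan = dma_get_any_slave_channel(&d->device);
		if (!chan)
			return NULL;

		/* Remember the request line encoded in the DT cell. */
		to_my_chan(chan)->req_line = dma_spec->args[0];

		return chan;
	}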
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 3ddacc14a736..33f96aaa80c7 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -121,11 +121,13 @@ struct mmp_tdma_chan {
121 int idx; 121 int idx;
122 enum mmp_tdma_type type; 122 enum mmp_tdma_type type;
123 int irq; 123 int irq;
124 unsigned long reg_base; 124 void __iomem *reg_base;
125 125
126 size_t buf_len; 126 size_t buf_len;
127 size_t period_len; 127 size_t period_len;
128 size_t pos; 128 size_t pos;
129
130 struct gen_pool *pool;
129}; 131};
130 132
131#define TDMA_CHANNEL_NUM 2 133#define TDMA_CHANNEL_NUM 2
@@ -182,7 +184,7 @@ static void mmp_tdma_pause_chan(struct mmp_tdma_chan *tdmac)
182 184
183static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac) 185static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac)
184{ 186{
185 unsigned int tdcr; 187 unsigned int tdcr = 0;
186 188
187 mmp_tdma_disable_chan(tdmac); 189 mmp_tdma_disable_chan(tdmac);
188 190
@@ -324,7 +326,7 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
324 struct gen_pool *gpool; 326 struct gen_pool *gpool;
325 int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc); 327 int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
326 328
327 gpool = sram_get_gpool("asram"); 329 gpool = tdmac->pool;
328 if (tdmac->desc_arr) 330 if (tdmac->desc_arr)
329 gen_pool_free(gpool, (unsigned long)tdmac->desc_arr, 331 gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
330 size); 332 size);
@@ -374,7 +376,7 @@ struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
374 struct gen_pool *gpool; 376 struct gen_pool *gpool;
375 int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc); 377 int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
376 378
377 gpool = sram_get_gpool("asram"); 379 gpool = tdmac->pool;
378 if (!gpool) 380 if (!gpool)
379 return NULL; 381 return NULL;
380 382
@@ -505,7 +507,8 @@ static int mmp_tdma_remove(struct platform_device *pdev)
505} 507}
506 508
507static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev, 509static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
508 int idx, int irq, int type) 510 int idx, int irq,
511 int type, struct gen_pool *pool)
509{ 512{
510 struct mmp_tdma_chan *tdmac; 513 struct mmp_tdma_chan *tdmac;
511 514
@@ -526,7 +529,8 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
526 tdmac->chan.device = &tdev->device; 529 tdmac->chan.device = &tdev->device;
527 tdmac->idx = idx; 530 tdmac->idx = idx;
528 tdmac->type = type; 531 tdmac->type = type;
529 tdmac->reg_base = (unsigned long)tdev->base + idx * 4; 532 tdmac->reg_base = tdev->base + idx * 4;
533 tdmac->pool = pool;
530 tdmac->status = DMA_COMPLETE; 534 tdmac->status = DMA_COMPLETE;
531 tdev->tdmac[tdmac->idx] = tdmac; 535 tdev->tdmac[tdmac->idx] = tdmac;
532 tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac); 536 tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac);
@@ -553,6 +557,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
553 int i, ret; 557 int i, ret;
554 int irq = 0, irq_num = 0; 558 int irq = 0, irq_num = 0;
555 int chan_num = TDMA_CHANNEL_NUM; 559 int chan_num = TDMA_CHANNEL_NUM;
560 struct gen_pool *pool;
556 561
557 of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev); 562 of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
558 if (of_id) 563 if (of_id)
@@ -579,6 +584,15 @@ static int mmp_tdma_probe(struct platform_device *pdev)
579 584
580 INIT_LIST_HEAD(&tdev->device.channels); 585 INIT_LIST_HEAD(&tdev->device.channels);
581 586
587 if (pdev->dev.of_node)
588 pool = of_get_named_gen_pool(pdev->dev.of_node, "asram", 0);
589 else
590 pool = sram_get_gpool("asram");
591 if (!pool) {
592 dev_err(&pdev->dev, "asram pool not available\n");
593 return -ENOMEM;
594 }
595
582 if (irq_num != chan_num) { 596 if (irq_num != chan_num) {
583 irq = platform_get_irq(pdev, 0); 597 irq = platform_get_irq(pdev, 0);
584 ret = devm_request_irq(&pdev->dev, irq, 598 ret = devm_request_irq(&pdev->dev, irq,
@@ -590,7 +604,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
590 /* initialize channel parameters */ 604 /* initialize channel parameters */
591 for (i = 0; i < chan_num; i++) { 605 for (i = 0; i < chan_num; i++) {
592 irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i); 606 irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i);
593 ret = mmp_tdma_chan_init(tdev, i, irq, type); 607 ret = mmp_tdma_chan_init(tdev, i, irq, type, pool);
594 if (ret) 608 if (ret)
595 return ret; 609 return ret;
596 } 610 }
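
mmp_tdma now carries its SRAM pool in the channel instead of calling sram_get_gpool("asram") on every allocation, with DT systems resolving the pool from an "asram" phandle at probe time. Descriptor memory then comes straight from that per-channel pool; roughly, as a sketch with error handling trimmed:

	/* Allocate the descriptor ring from the channel's SRAM pool. */
	tdmac->desc_arr = (void *)gen_pool_alloc(tdmac->pool, size);
	if (!tdmac->desc_arr)
		return NULL;

	/* ... and on teardown, return it to the same pool: */
	gen_pool_free(tdmac->pool, (unsigned long)tdmac->desc_arr, size);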
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644
index 000000000000..3258e484e4f6
--- /dev/null
+++ b/drivers/dma/moxart-dma.c
@@ -0,0 +1,699 @@
1/*
2 * MOXA ART SoCs DMA Engine support.
3 *
4 * Copyright (C) 2013 Jonas Jensen
5 *
6 * Jonas Jensen <jonas.jensen@gmail.com>
7 *
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
11 */
12
13#include <linux/dmaengine.h>
14#include <linux/dma-mapping.h>
15#include <linux/err.h>
16#include <linux/init.h>
17#include <linux/interrupt.h>
18#include <linux/list.h>
19#include <linux/module.h>
20#include <linux/platform_device.h>
21#include <linux/slab.h>
22#include <linux/spinlock.h>
23#include <linux/of_address.h>
24#include <linux/of_irq.h>
25#include <linux/of_dma.h>
26#include <linux/bitops.h>
27
28#include <asm/cacheflush.h>
29
30#include "dmaengine.h"
31#include "virt-dma.h"
32
33#define APB_DMA_MAX_CHANNEL 4
34
35#define REG_OFF_ADDRESS_SOURCE 0
36#define REG_OFF_ADDRESS_DEST 4
37#define REG_OFF_CYCLES 8
38#define REG_OFF_CTRL 12
39#define REG_OFF_CHAN_SIZE 16
40
41#define APB_DMA_ENABLE BIT(0)
42#define APB_DMA_FIN_INT_STS BIT(1)
43#define APB_DMA_FIN_INT_EN BIT(2)
44#define APB_DMA_BURST_MODE BIT(3)
45#define APB_DMA_ERR_INT_STS BIT(4)
46#define APB_DMA_ERR_INT_EN BIT(5)
47
48/*
49 * Unset: APB
50 * Set: AHB
51 */
52#define APB_DMA_SOURCE_SELECT 0x40
53#define APB_DMA_DEST_SELECT 0x80
54
55#define APB_DMA_SOURCE 0x100
56#define APB_DMA_DEST 0x1000
57
58#define APB_DMA_SOURCE_MASK 0x700
59#define APB_DMA_DEST_MASK 0x7000
60
61/*
62 * 000: No increment
63 * 001: +1 (Burst=0), +4 (Burst=1)
64 * 010: +2 (Burst=0), +8 (Burst=1)
65 * 011: +4 (Burst=0), +16 (Burst=1)
66 * 101: -1 (Burst=0), -4 (Burst=1)
67 * 110: -2 (Burst=0), -8 (Burst=1)
68 * 111: -4 (Burst=0), -16 (Burst=1)
69 */
70#define APB_DMA_SOURCE_INC_0 0
71#define APB_DMA_SOURCE_INC_1_4 0x100
72#define APB_DMA_SOURCE_INC_2_8 0x200
73#define APB_DMA_SOURCE_INC_4_16 0x300
74#define APB_DMA_SOURCE_DEC_1_4 0x500
75#define APB_DMA_SOURCE_DEC_2_8 0x600
76#define APB_DMA_SOURCE_DEC_4_16 0x700
77#define APB_DMA_DEST_INC_0 0
78#define APB_DMA_DEST_INC_1_4 0x1000
79#define APB_DMA_DEST_INC_2_8 0x2000
80#define APB_DMA_DEST_INC_4_16 0x3000
81#define APB_DMA_DEST_DEC_1_4 0x5000
82#define APB_DMA_DEST_DEC_2_8 0x6000
83#define APB_DMA_DEST_DEC_4_16 0x7000
84
85/*
86 * The request signal selects the source/destination address for the DMA hardware handshake.
87 *
88 * The request line number is a property of the DMA controller itself,
89 * e.g. MMC must always request channels where dma_slave_config->slave_id is 5.
90 *
91 * 0: No request / Grant signal
92 * 1-15: Request / Grant signal
93 */
94#define APB_DMA_SOURCE_REQ_NO 0x1000000
95#define APB_DMA_SOURCE_REQ_NO_MASK 0xf000000
96#define APB_DMA_DEST_REQ_NO 0x10000
97#define APB_DMA_DEST_REQ_NO_MASK 0xf0000
98
99#define APB_DMA_DATA_WIDTH 0x100000
100#define APB_DMA_DATA_WIDTH_MASK 0x300000
101/*
102 * Data width of transfer:
103 *
104 * 00: Word
105 * 01: Half
106 * 10: Byte
107 */
108#define APB_DMA_DATA_WIDTH_4 0
109#define APB_DMA_DATA_WIDTH_2 0x100000
110#define APB_DMA_DATA_WIDTH_1 0x200000
111
112#define APB_DMA_CYCLES_MASK 0x00ffffff
113
114#define MOXART_DMA_DATA_TYPE_S8 0x00
115#define MOXART_DMA_DATA_TYPE_S16 0x01
116#define MOXART_DMA_DATA_TYPE_S32 0x02
117
118struct moxart_sg {
119 dma_addr_t addr;
120 uint32_t len;
121};
122
123struct moxart_desc {
124 enum dma_transfer_direction dma_dir;
125 dma_addr_t dev_addr;
126 unsigned int sglen;
127 unsigned int dma_cycles;
128 struct virt_dma_desc vd;
129 uint8_t es;
130 struct moxart_sg sg[0];
131};
132
133struct moxart_chan {
134 struct virt_dma_chan vc;
135
136 void __iomem *base;
137 struct moxart_desc *desc;
138
139 struct dma_slave_config cfg;
140
141 bool allocated;
142 bool error;
143 int ch_num;
144 unsigned int line_reqno;
145 unsigned int sgidx;
146};
147
148struct moxart_dmadev {
149 struct dma_device dma_slave;
150 struct moxart_chan slave_chans[APB_DMA_MAX_CHANNEL];
151};
152
153struct moxart_filter_data {
154 struct moxart_dmadev *mdc;
155 struct of_phandle_args *dma_spec;
156};
157
158static const unsigned int es_bytes[] = {
159 [MOXART_DMA_DATA_TYPE_S8] = 1,
160 [MOXART_DMA_DATA_TYPE_S16] = 2,
161 [MOXART_DMA_DATA_TYPE_S32] = 4,
162};
163
164static struct device *chan2dev(struct dma_chan *chan)
165{
166 return &chan->dev->device;
167}
168
169static inline struct moxart_chan *to_moxart_dma_chan(struct dma_chan *c)
170{
171 return container_of(c, struct moxart_chan, vc.chan);
172}
173
174static inline struct moxart_desc *to_moxart_dma_desc(
175 struct dma_async_tx_descriptor *t)
176{
177 return container_of(t, struct moxart_desc, vd.tx);
178}
179
180static void moxart_dma_desc_free(struct virt_dma_desc *vd)
181{
182 kfree(container_of(vd, struct moxart_desc, vd));
183}
184
185static int moxart_terminate_all(struct dma_chan *chan)
186{
187 struct moxart_chan *ch = to_moxart_dma_chan(chan);
188 unsigned long flags;
189 LIST_HEAD(head);
190 u32 ctrl;
191
192 dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
193
194 spin_lock_irqsave(&ch->vc.lock, flags);
195
196 if (ch->desc)
197 ch->desc = NULL;
198
199 ctrl = readl(ch->base + REG_OFF_CTRL);
200 ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
201 writel(ctrl, ch->base + REG_OFF_CTRL);
202
203 vchan_get_all_descriptors(&ch->vc, &head);
204 spin_unlock_irqrestore(&ch->vc.lock, flags);
205 vchan_dma_desc_free_list(&ch->vc, &head);
206
207 return 0;
208}
209
210static int moxart_slave_config(struct dma_chan *chan,
211 struct dma_slave_config *cfg)
212{
213 struct moxart_chan *ch = to_moxart_dma_chan(chan);
214 u32 ctrl;
215
216 ch->cfg = *cfg;
217
218 ctrl = readl(ch->base + REG_OFF_CTRL);
219 ctrl |= APB_DMA_BURST_MODE;
220 ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
221 ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
222
223 switch (ch->cfg.src_addr_width) {
224 case DMA_SLAVE_BUSWIDTH_1_BYTE:
225 ctrl |= APB_DMA_DATA_WIDTH_1;
226 if (ch->cfg.direction != DMA_MEM_TO_DEV)
227 ctrl |= APB_DMA_DEST_INC_1_4;
228 else
229 ctrl |= APB_DMA_SOURCE_INC_1_4;
230 break;
231 case DMA_SLAVE_BUSWIDTH_2_BYTES:
232 ctrl |= APB_DMA_DATA_WIDTH_2;
233 if (ch->cfg.direction != DMA_MEM_TO_DEV)
234 ctrl |= APB_DMA_DEST_INC_2_8;
235 else
236 ctrl |= APB_DMA_SOURCE_INC_2_8;
237 break;
238 case DMA_SLAVE_BUSWIDTH_4_BYTES:
239 ctrl &= ~APB_DMA_DATA_WIDTH;
240 if (ch->cfg.direction != DMA_MEM_TO_DEV)
241 ctrl |= APB_DMA_DEST_INC_4_16;
242 else
243 ctrl |= APB_DMA_SOURCE_INC_4_16;
244 break;
245 default:
246 return -EINVAL;
247 }
248
249 if (ch->cfg.direction == DMA_MEM_TO_DEV) {
250 ctrl &= ~APB_DMA_DEST_SELECT;
251 ctrl |= APB_DMA_SOURCE_SELECT;
252 ctrl |= (ch->line_reqno << 16 &
253 APB_DMA_DEST_REQ_NO_MASK);
254 } else {
255 ctrl |= APB_DMA_DEST_SELECT;
256 ctrl &= ~APB_DMA_SOURCE_SELECT;
257 ctrl |= (ch->line_reqno << 24 &
258 APB_DMA_SOURCE_REQ_NO_MASK);
259 }
260
261 writel(ctrl, ch->base + REG_OFF_CTRL);
262
263 return 0;
264}
265
266static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
267 unsigned long arg)
268{
269 int ret = 0;
270
271 switch (cmd) {
272 case DMA_PAUSE:
273 case DMA_RESUME:
274 return -EINVAL;
275 case DMA_TERMINATE_ALL:
276 moxart_terminate_all(chan);
277 break;
278 case DMA_SLAVE_CONFIG:
279 ret = moxart_slave_config(chan, (struct dma_slave_config *)arg);
280 break;
281 default:
282 ret = -ENOSYS;
283 }
284
285 return ret;
286}
287
288static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
289 struct dma_chan *chan, struct scatterlist *sgl,
290 unsigned int sg_len, enum dma_transfer_direction dir,
291 unsigned long tx_flags, void *context)
292{
293 struct moxart_chan *ch = to_moxart_dma_chan(chan);
294 struct moxart_desc *d;
295 enum dma_slave_buswidth dev_width;
296 dma_addr_t dev_addr;
297 struct scatterlist *sgent;
298 unsigned int es;
299 unsigned int i;
300
301 if (!is_slave_direction(dir)) {
302 dev_err(chan2dev(chan), "%s: invalid DMA direction\n",
303 __func__);
304 return NULL;
305 }
306
307 if (dir == DMA_DEV_TO_MEM) {
308 dev_addr = ch->cfg.src_addr;
309 dev_width = ch->cfg.src_addr_width;
310 } else {
311 dev_addr = ch->cfg.dst_addr;
312 dev_width = ch->cfg.dst_addr_width;
313 }
314
315 switch (dev_width) {
316 case DMA_SLAVE_BUSWIDTH_1_BYTE:
317 es = MOXART_DMA_DATA_TYPE_S8;
318 break;
319 case DMA_SLAVE_BUSWIDTH_2_BYTES:
320 es = MOXART_DMA_DATA_TYPE_S16;
321 break;
322 case DMA_SLAVE_BUSWIDTH_4_BYTES:
323 es = MOXART_DMA_DATA_TYPE_S32;
324 break;
325 default:
326 dev_err(chan2dev(chan), "%s: unsupported data width (%u)\n",
327 __func__, dev_width);
328 return NULL;
329 }
330
331 d = kzalloc(sizeof(*d) + sg_len * sizeof(d->sg[0]), GFP_ATOMIC);
332 if (!d)
333 return NULL;
334
335 d->dma_dir = dir;
336 d->dev_addr = dev_addr;
337 d->es = es;
338
339 for_each_sg(sgl, sgent, sg_len, i) {
340 d->sg[i].addr = sg_dma_address(sgent);
341 d->sg[i].len = sg_dma_len(sgent);
342 }
343
344 d->sglen = sg_len;
345
346 ch->error = 0;
347
348 return vchan_tx_prep(&ch->vc, &d->vd, tx_flags);
349}
350
351static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
352 struct of_dma *ofdma)
353{
354 struct moxart_dmadev *mdc = ofdma->of_dma_data;
355 struct dma_chan *chan;
356 struct moxart_chan *ch;
357
358 chan = dma_get_any_slave_channel(&mdc->dma_slave);
359 if (!chan)
360 return NULL;
361
362 ch = to_moxart_dma_chan(chan);
363 ch->line_reqno = dma_spec->args[0];
364
365 return chan;
366}
367
368static int moxart_alloc_chan_resources(struct dma_chan *chan)
369{
370 struct moxart_chan *ch = to_moxart_dma_chan(chan);
371
372 dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
373 __func__, ch->ch_num);
374 ch->allocated = 1;
375
376 return 0;
377}
378
379static void moxart_free_chan_resources(struct dma_chan *chan)
380{
381 struct moxart_chan *ch = to_moxart_dma_chan(chan);
382
383 vchan_free_chan_resources(&ch->vc);
384
385 dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
386 __func__, ch->ch_num);
387 ch->allocated = 0;
388}
389
390static void moxart_dma_set_params(struct moxart_chan *ch, dma_addr_t src_addr,
391 dma_addr_t dst_addr)
392{
393 writel(src_addr, ch->base + REG_OFF_ADDRESS_SOURCE);
394 writel(dst_addr, ch->base + REG_OFF_ADDRESS_DEST);
395}
396
397static void moxart_set_transfer_params(struct moxart_chan *ch, unsigned int len)
398{
399 struct moxart_desc *d = ch->desc;
400 unsigned int sglen_div = es_bytes[d->es];
401
402 d->dma_cycles = len >> sglen_div;
403
404 /*
405 * There are 4 cycles on 64 bytes copied, i.e. one cycle copies 16
406 * bytes (when width is APB_DMA_DATA_WIDTH_4).
407 */
408 writel(d->dma_cycles, ch->base + REG_OFF_CYCLES);
409
410 dev_dbg(chan2dev(&ch->vc.chan), "%s: set %u DMA cycles (len=%u)\n",
411 __func__, d->dma_cycles, len);
412}
413
414static void moxart_start_dma(struct moxart_chan *ch)
415{
416 u32 ctrl;
417
418 ctrl = readl(ch->base + REG_OFF_CTRL);
419 ctrl |= (APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
420 writel(ctrl, ch->base + REG_OFF_CTRL);
421}
422
423static void moxart_dma_start_sg(struct moxart_chan *ch, unsigned int idx)
424{
425 struct moxart_desc *d = ch->desc;
426 struct moxart_sg *sg = ch->desc->sg + idx;
427
428 if (ch->desc->dma_dir == DMA_MEM_TO_DEV)
429 moxart_dma_set_params(ch, sg->addr, d->dev_addr);
430 else if (ch->desc->dma_dir == DMA_DEV_TO_MEM)
431 moxart_dma_set_params(ch, d->dev_addr, sg->addr);
432
433 moxart_set_transfer_params(ch, sg->len);
434
435 moxart_start_dma(ch);
436}
437
438static void moxart_dma_start_desc(struct dma_chan *chan)
439{
440 struct moxart_chan *ch = to_moxart_dma_chan(chan);
441 struct virt_dma_desc *vd;
442
443 vd = vchan_next_desc(&ch->vc);
444
445 if (!vd) {
446 ch->desc = NULL;
447 return;
448 }
449
450 list_del(&vd->node);
451
452 ch->desc = to_moxart_dma_desc(&vd->tx);
453 ch->sgidx = 0;
454
455 moxart_dma_start_sg(ch, 0);
456}
457
458static void moxart_issue_pending(struct dma_chan *chan)
459{
460 struct moxart_chan *ch = to_moxart_dma_chan(chan);
461 unsigned long flags;
462
463 spin_lock_irqsave(&ch->vc.lock, flags);
464 if (vchan_issue_pending(&ch->vc) && !ch->desc)
465 moxart_dma_start_desc(chan);
466 spin_unlock_irqrestore(&ch->vc.lock, flags);
467}
468
469static size_t moxart_dma_desc_size(struct moxart_desc *d,
470 unsigned int completed_sgs)
471{
472 unsigned int i;
473 size_t size;
474
475 for (size = i = completed_sgs; i < d->sglen; i++)
476 size += d->sg[i].len;
477
478 return size;
479}
480
481static size_t moxart_dma_desc_size_in_flight(struct moxart_chan *ch)
482{
483 size_t size;
484 unsigned int completed_cycles, cycles;
485
486 size = moxart_dma_desc_size(ch->desc, ch->sgidx);
487 cycles = readl(ch->base + REG_OFF_CYCLES);
488 completed_cycles = (ch->desc->dma_cycles - cycles);
489 size -= completed_cycles << es_bytes[ch->desc->es];
490
491 dev_dbg(chan2dev(&ch->vc.chan), "%s: size=%zu\n", __func__, size);
492
493 return size;
494}
495
496static enum dma_status moxart_tx_status(struct dma_chan *chan,
497 dma_cookie_t cookie,
498 struct dma_tx_state *txstate)
499{
500 struct moxart_chan *ch = to_moxart_dma_chan(chan);
501 struct virt_dma_desc *vd;
502 struct moxart_desc *d;
503 enum dma_status ret;
504 unsigned long flags;
505
506 /*
507 * dma_cookie_status() assigns initial residue value.
508 */
509 ret = dma_cookie_status(chan, cookie, txstate);
510
511 spin_lock_irqsave(&ch->vc.lock, flags);
512 vd = vchan_find_desc(&ch->vc, cookie);
513 if (vd) {
514 d = to_moxart_dma_desc(&vd->tx);
515 txstate->residue = moxart_dma_desc_size(d, 0);
516 } else if (ch->desc && ch->desc->vd.tx.cookie == cookie) {
517 txstate->residue = moxart_dma_desc_size_in_flight(ch);
518 }
519 spin_unlock_irqrestore(&ch->vc.lock, flags);
520
521 if (ch->error)
522 return DMA_ERROR;
523
524 return ret;
525}
526
527static void moxart_dma_init(struct dma_device *dma, struct device *dev)
528{
529 dma->device_prep_slave_sg = moxart_prep_slave_sg;
530 dma->device_alloc_chan_resources = moxart_alloc_chan_resources;
531 dma->device_free_chan_resources = moxart_free_chan_resources;
532 dma->device_issue_pending = moxart_issue_pending;
533 dma->device_tx_status = moxart_tx_status;
534 dma->device_control = moxart_control;
535 dma->dev = dev;
536
537 INIT_LIST_HEAD(&dma->channels);
538}
539
540static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
541{
542 struct moxart_dmadev *mc = devid;
543 struct moxart_chan *ch = &mc->slave_chans[0];
544 unsigned int i;
545 unsigned long flags;
546 u32 ctrl;
547
548 dev_dbg(chan2dev(&ch->vc.chan), "%s\n", __func__);
549
550 for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
551 if (!ch->allocated)
552 continue;
553
554 ctrl = readl(ch->base + REG_OFF_CTRL);
555
556 dev_dbg(chan2dev(&ch->vc.chan), "%s: ch=%p ch->base=%p ctrl=%x\n",
557 __func__, ch, ch->base, ctrl);
558
559 if (ctrl & APB_DMA_FIN_INT_STS) {
560 ctrl &= ~APB_DMA_FIN_INT_STS;
561 if (ch->desc) {
562 spin_lock_irqsave(&ch->vc.lock, flags);
563 if (++ch->sgidx < ch->desc->sglen) {
564 moxart_dma_start_sg(ch, ch->sgidx);
565 } else {
566 vchan_cookie_complete(&ch->desc->vd);
567 moxart_dma_start_desc(&ch->vc.chan);
568 }
569 spin_unlock_irqrestore(&ch->vc.lock, flags);
570 }
571 }
572
573 if (ctrl & APB_DMA_ERR_INT_STS) {
574 ctrl &= ~APB_DMA_ERR_INT_STS;
575 ch->error = 1;
576 }
577
578 writel(ctrl, ch->base + REG_OFF_CTRL);
579 }
580
581 return IRQ_HANDLED;
582}
583
584static int moxart_probe(struct platform_device *pdev)
585{
586 struct device *dev = &pdev->dev;
587 struct device_node *node = dev->of_node;
588 struct resource *res;
589 static void __iomem *dma_base_addr;
590 int ret, i;
591 unsigned int irq;
592 struct moxart_chan *ch;
593 struct moxart_dmadev *mdc;
594
595 mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
596 if (!mdc) {
597 dev_err(dev, "can't allocate DMA container\n");
598 return -ENOMEM;
599 }
600
601 irq = irq_of_parse_and_map(node, 0);
602 if (irq == NO_IRQ) {
603 dev_err(dev, "no IRQ resource\n");
604 return -EINVAL;
605 }
606
607 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
608 dma_base_addr = devm_ioremap_resource(dev, res);
609 if (IS_ERR(dma_base_addr))
610 return PTR_ERR(dma_base_addr);
611
612 dma_cap_zero(mdc->dma_slave.cap_mask);
613 dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
614 dma_cap_set(DMA_PRIVATE, mdc->dma_slave.cap_mask);
615
616 moxart_dma_init(&mdc->dma_slave, dev);
617
618 ch = &mdc->slave_chans[0];
619 for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
620 ch->ch_num = i;
621 ch->base = dma_base_addr + i * REG_OFF_CHAN_SIZE;
622 ch->allocated = 0;
623
624 ch->vc.desc_free = moxart_dma_desc_free;
625 vchan_init(&ch->vc, &mdc->dma_slave);
626
627 dev_dbg(dev, "%s: chs[%d]: ch->ch_num=%u ch->base=%p\n",
628 __func__, i, ch->ch_num, ch->base);
629 }
630
631 platform_set_drvdata(pdev, mdc);
632
633 ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
634 "moxart-dma-engine", mdc);
635 if (ret) {
636 dev_err(dev, "devm_request_irq failed\n");
637 return ret;
638 }
639
640 ret = dma_async_device_register(&mdc->dma_slave);
641 if (ret) {
642 dev_err(dev, "dma_async_device_register failed\n");
643 return ret;
644 }
645
646 ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
647 if (ret) {
648 dev_err(dev, "of_dma_controller_register failed\n");
649 dma_async_device_unregister(&mdc->dma_slave);
650 return ret;
651 }
652
653 dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
654
655 return 0;
656}
657
658static int moxart_remove(struct platform_device *pdev)
659{
660 struct moxart_dmadev *m = platform_get_drvdata(pdev);
661
662 dma_async_device_unregister(&m->dma_slave);
663
664 if (pdev->dev.of_node)
665 of_dma_controller_free(pdev->dev.of_node);
666
667 return 0;
668}
669
670static const struct of_device_id moxart_dma_match[] = {
671 { .compatible = "moxa,moxart-dma" },
672 { }
673};
674
675static struct platform_driver moxart_driver = {
676 .probe = moxart_probe,
677 .remove = moxart_remove,
678 .driver = {
679 .name = "moxart-dma-engine",
680 .owner = THIS_MODULE,
681 .of_match_table = moxart_dma_match,
682 },
683};
684
685static int moxart_init(void)
686{
687 return platform_driver_register(&moxart_driver);
688}
689subsys_initcall(moxart_init);
690
691static void __exit moxart_exit(void)
692{
693 platform_driver_unregister(&moxart_driver);
694}
695module_exit(moxart_exit);
696
697MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
698MODULE_DESCRIPTION("MOXART DMA engine driver");
699MODULE_LICENSE("GPL v2");
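
On the consumer side, a MOXA ART peripheral driver would obtain one of these channels through the xlate above and program the handshake line through the standard slave API; a hedged usage sketch, where "fifo_phys" and the "rx" channel name are assumptions:

	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_phys,	/* device FIFO bus address */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, "rx");
	if (!chan)
		return -ENODEV;

	dmaengine_slave_config(chan, &cfg);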
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 2f66cf4e54fe..362e7c49f2e1 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -190,7 +190,7 @@ static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
190{ 190{
191 struct omap_chan *c = to_omap_dma_chan(chan); 191 struct omap_chan *c = to_omap_dma_chan(chan);
192 192
193 dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig); 193 dev_dbg(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);
194 194
195 return omap_request_dma(c->dma_sig, "DMA engine", 195 return omap_request_dma(c->dma_sig, "DMA engine",
196 omap_dma_callback, c, &c->dma_ch); 196 omap_dma_callback, c, &c->dma_ch);
@@ -203,7 +203,7 @@ static void omap_dma_free_chan_resources(struct dma_chan *chan)
203 vchan_free_chan_resources(&c->vc); 203 vchan_free_chan_resources(&c->vc);
204 omap_free_dma(c->dma_ch); 204 omap_free_dma(c->dma_ch);
205 205
206 dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig); 206 dev_dbg(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
207} 207}
208 208
209static size_t omap_dma_sg_size(struct omap_sg *sg) 209static size_t omap_dma_sg_size(struct omap_sg *sg)
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index c90edecee463..73fa9b7a10ab 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -543,7 +543,9 @@ struct dma_pl330_chan {
543 /* DMA-Engine Channel */ 543 /* DMA-Engine Channel */
544 struct dma_chan chan; 544 struct dma_chan chan;
545 545
546 /* List of to be xfered descriptors */ 546 /* List of submitted descriptors */
547 struct list_head submitted_list;
548 /* List of issued descriptors */
547 struct list_head work_list; 549 struct list_head work_list;
548 /* List of completed descriptors */ 550 /* List of completed descriptors */
549 struct list_head completed_list; 551 struct list_head completed_list;
@@ -578,12 +580,16 @@ struct dma_pl330_dmac {
578 /* DMA-Engine Device */ 580 /* DMA-Engine Device */
579 struct dma_device ddma; 581 struct dma_device ddma;
580 582
583 /* Holds info about sg limitations */
584 struct device_dma_parameters dma_parms;
585
581 /* Pool of descriptors available for the DMAC's channels */ 586 /* Pool of descriptors available for the DMAC's channels */
582 struct list_head desc_pool; 587 struct list_head desc_pool;
583 /* To protect desc_pool manipulation */ 588 /* To protect desc_pool manipulation */
584 spinlock_t pool_lock; 589 spinlock_t pool_lock;
585 590
586 /* Peripheral channels connected to this DMAC */ 591 /* Peripheral channels connected to this DMAC */
592 unsigned int num_peripherals;
587 struct dma_pl330_chan *peripherals; /* keep at end */ 593 struct dma_pl330_chan *peripherals; /* keep at end */
588}; 594};
589 595
@@ -606,11 +612,6 @@ struct dma_pl330_desc {
606 struct dma_pl330_chan *pchan; 612 struct dma_pl330_chan *pchan;
607}; 613};
608 614
609struct dma_pl330_filter_args {
610 struct dma_pl330_dmac *pdmac;
611 unsigned int chan_id;
612};
613
614static inline void _callback(struct pl330_req *r, enum pl330_op_err err) 615static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
615{ 616{
616 if (r && r->xfer_cb) 617 if (r && r->xfer_cb)
@@ -2298,16 +2299,6 @@ static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
2298 tasklet_schedule(&pch->task); 2299 tasklet_schedule(&pch->task);
2299} 2300}
2300 2301
2301static bool pl330_dt_filter(struct dma_chan *chan, void *param)
2302{
2303 struct dma_pl330_filter_args *fargs = param;
2304
2305 if (chan->device != &fargs->pdmac->ddma)
2306 return false;
2307
2308 return (chan->chan_id == fargs->chan_id);
2309}
2310
2311bool pl330_filter(struct dma_chan *chan, void *param) 2302bool pl330_filter(struct dma_chan *chan, void *param)
2312{ 2303{
2313 u8 *peri_id; 2304 u8 *peri_id;
@@ -2325,23 +2316,16 @@ static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
2325{ 2316{
2326 int count = dma_spec->args_count; 2317 int count = dma_spec->args_count;
2327 struct dma_pl330_dmac *pdmac = ofdma->of_dma_data; 2318 struct dma_pl330_dmac *pdmac = ofdma->of_dma_data;
2328 struct dma_pl330_filter_args fargs; 2319 unsigned int chan_id;
2329 dma_cap_mask_t cap;
2330
2331 if (!pdmac)
2332 return NULL;
2333 2320
2334 if (count != 1) 2321 if (count != 1)
2335 return NULL; 2322 return NULL;
2336 2323
2337 fargs.pdmac = pdmac; 2324 chan_id = dma_spec->args[0];
2338 fargs.chan_id = dma_spec->args[0]; 2325 if (chan_id >= pdmac->num_peripherals)
2339 2326 return NULL;
2340 dma_cap_zero(cap);
2341 dma_cap_set(DMA_SLAVE, cap);
2342 dma_cap_set(DMA_CYCLIC, cap);
2343 2327
2344 return dma_request_channel(cap, pl330_dt_filter, &fargs); 2328 return dma_get_slave_channel(&pdmac->peripherals[chan_id].chan);
2345} 2329}
2346 2330
2347static int pl330_alloc_chan_resources(struct dma_chan *chan) 2331static int pl330_alloc_chan_resources(struct dma_chan *chan)
@@ -2385,6 +2369,11 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
2385 pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH); 2369 pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
2386 2370
2387 /* Mark all desc done */ 2371 /* Mark all desc done */
2372 list_for_each_entry(desc, &pch->submitted_list, node) {
2373 desc->status = FREE;
2374 dma_cookie_complete(&desc->txd);
2375 }
2376
2388 list_for_each_entry(desc, &pch->work_list , node) { 2377 list_for_each_entry(desc, &pch->work_list , node) {
2389 desc->status = FREE; 2378 desc->status = FREE;
2390 dma_cookie_complete(&desc->txd); 2379 dma_cookie_complete(&desc->txd);
@@ -2395,6 +2384,7 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
2395 dma_cookie_complete(&desc->txd); 2384 dma_cookie_complete(&desc->txd);
2396 } 2385 }
2397 2386
2387 list_splice_tail_init(&pch->submitted_list, &pdmac->desc_pool);
2398 list_splice_tail_init(&pch->work_list, &pdmac->desc_pool); 2388 list_splice_tail_init(&pch->work_list, &pdmac->desc_pool);
2399 list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool); 2389 list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool);
2400 spin_unlock_irqrestore(&pch->lock, flags); 2390 spin_unlock_irqrestore(&pch->lock, flags);
@@ -2453,7 +2443,14 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
2453 2443
2454static void pl330_issue_pending(struct dma_chan *chan) 2444static void pl330_issue_pending(struct dma_chan *chan)
2455{ 2445{
2456 pl330_tasklet((unsigned long) to_pchan(chan)); 2446 struct dma_pl330_chan *pch = to_pchan(chan);
2447 unsigned long flags;
2448
2449 spin_lock_irqsave(&pch->lock, flags);
2450 list_splice_tail_init(&pch->submitted_list, &pch->work_list);
2451 spin_unlock_irqrestore(&pch->lock, flags);
2452
2453 pl330_tasklet((unsigned long)pch);
2457} 2454}
2458 2455
2459/* 2456/*
@@ -2480,11 +2477,11 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
2480 2477
2481 dma_cookie_assign(&desc->txd); 2478 dma_cookie_assign(&desc->txd);
2482 2479
2483 list_move_tail(&desc->node, &pch->work_list); 2480 list_move_tail(&desc->node, &pch->submitted_list);
2484 } 2481 }
2485 2482
2486 cookie = dma_cookie_assign(&last->txd); 2483 cookie = dma_cookie_assign(&last->txd);
2487 list_add_tail(&last->node, &pch->work_list); 2484 list_add_tail(&last->node, &pch->submitted_list);
2488 spin_unlock_irqrestore(&pch->lock, flags); 2485 spin_unlock_irqrestore(&pch->lock, flags);
2489 2486
2490 return cookie; 2487 return cookie;
@@ -2960,6 +2957,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
2960 else 2957 else
2961 num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan); 2958 num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan);
2962 2959
2960 pdmac->num_peripherals = num_chan;
2961
2963 pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); 2962 pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
2964 if (!pdmac->peripherals) { 2963 if (!pdmac->peripherals) {
2965 ret = -ENOMEM; 2964 ret = -ENOMEM;
@@ -2974,6 +2973,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
2974 else 2973 else
2975 pch->chan.private = adev->dev.of_node; 2974 pch->chan.private = adev->dev.of_node;
2976 2975
2976 INIT_LIST_HEAD(&pch->submitted_list);
2977 INIT_LIST_HEAD(&pch->work_list); 2977 INIT_LIST_HEAD(&pch->work_list);
2978 INIT_LIST_HEAD(&pch->completed_list); 2978 INIT_LIST_HEAD(&pch->completed_list);
2979 spin_lock_init(&pch->lock); 2979 spin_lock_init(&pch->lock);
@@ -3021,6 +3021,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
3021 "unable to register DMA to the generic DT DMA helpers\n"); 3021 "unable to register DMA to the generic DT DMA helpers\n");
3022 } 3022 }
3023 } 3023 }
3024
3025 adev->dev.dma_parms = &pdmac->dma_parms;
3026
3024 /* 3027 /*
3025 * This is the limit for transfers with a buswidth of 1, larger 3028 * This is the limit for transfers with a buswidth of 1, larger
3026 * buswidths will have larger limits. 3029 * buswidths will have larger limits.
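
The pl330 descriptor-list split implements the dmaengine contract that tx_submit() merely queues work and nothing reaches the hardware until issue_pending(): submitted descriptors park on submitted_list and are only spliced onto work_list when the client issues them. The core of the pattern, sketched with hypothetical names:

	static void my_issue_pending(struct dma_chan *chan)
	{
		struct my_chan *pch = to_my_chan(chan);
		unsigned long flags;

		spin_lock_irqsave(&pch->lock, flags);
		/* Everything queued by tx_submit() becomes work now. */
		list_splice_tail_init(&pch->submitted_list, &pch->work_list);
		spin_unlock_irqrestore(&pch->lock, flags);

		my_start_transfers(pch);	/* hypothetical: kick the hardware */
	}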
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 8bba298535b0..ce7a8d7564ba 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4114,6 +4114,7 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev)
4114 regs = ioremap(res.start, resource_size(&res)); 4114 regs = ioremap(res.start, resource_size(&res));
4115 if (!regs) { 4115 if (!regs) {
4116 dev_err(&ofdev->dev, "failed to ioremap regs!\n"); 4116 dev_err(&ofdev->dev, "failed to ioremap regs!\n");
4117 ret = -ENOMEM;
4117 goto err_regs_alloc; 4118 goto err_regs_alloc;
4118 } 4119 }
4119 4120
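
The ppc4xx change is the classic probe-unwind fix: the failure branch jumped to the error label while ret still held 0, so probe "succeeded" with nothing mapped. The shape to aim for, as a sketch:

	regs = ioremap(res.start, resource_size(&res));
	if (!regs) {
		ret = -ENOMEM;		/* set before the goto, never after */
		goto err_regs_alloc;
	}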
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 6aec3ad814d3..d4d3a3109b16 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -640,6 +640,25 @@ bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
640} 640}
641EXPORT_SYMBOL(sirfsoc_dma_filter_id); 641EXPORT_SYMBOL(sirfsoc_dma_filter_id);
642 642
643#define SIRFSOC_DMA_BUSWIDTHS \
644 (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
645 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
646 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
647 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
648 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
649
650static int sirfsoc_dma_device_slave_caps(struct dma_chan *dchan,
651 struct dma_slave_caps *caps)
652{
653 caps->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
654 caps->dstn_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
655 caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
656 caps->cmd_pause = true;
657 caps->cmd_terminate = true;
658
659 return 0;
660}
661
643static int sirfsoc_dma_probe(struct platform_device *op) 662static int sirfsoc_dma_probe(struct platform_device *op)
644{ 663{
645 struct device_node *dn = op->dev.of_node; 664 struct device_node *dn = op->dev.of_node;
@@ -712,6 +731,7 @@ static int sirfsoc_dma_probe(struct platform_device *op)
712 dma->device_tx_status = sirfsoc_dma_tx_status; 731 dma->device_tx_status = sirfsoc_dma_tx_status;
713 dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved; 732 dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
714 dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic; 733 dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
734 dma->device_slave_caps = sirfsoc_dma_device_slave_caps;
715 735
716 INIT_LIST_HEAD(&dma->channels); 736 INIT_LIST_HEAD(&dma->channels);
717 dma_cap_set(DMA_SLAVE, dma->cap_mask); 737 dma_cap_set(DMA_SLAVE, dma->cap_mask);
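
With device_slave_caps wired up, generic clients can interrogate a sirf channel before configuring it, via dma_get_slave_caps(); a consumer-side sketch ("use_32bit_reads" is a hypothetical flag):

	struct dma_slave_caps caps;

	if (!dma_get_slave_caps(chan, &caps) &&
	    (caps.src_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)))
		use_32bit_reads = true;	/* channel supports 32-bit device accesses */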
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index d11bb3620f27..03ad64ecaaf0 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -100,6 +100,11 @@
100#define TEGRA_APBDMA_APBSEQ_DATA_SWAP BIT(27) 100#define TEGRA_APBDMA_APBSEQ_DATA_SWAP BIT(27)
101#define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1 (1 << 16) 101#define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1 (1 << 16)
102 102
103/* Tegra148 specific registers */
104#define TEGRA_APBDMA_CHAN_WCOUNT 0x20
105
106#define TEGRA_APBDMA_CHAN_WORD_TRANSFER 0x24
107
103/* 108/*
104 * If any burst is in flight and DMA paused then this is the time to complete 109 * If any burst is in flight and DMA paused then this is the time to complete
105 * in-flight burst and update DMA status register. 110 * in-flight burst and update DMA status register.
@@ -109,21 +114,22 @@
109/* Channel base address offset from APBDMA base address */ 114/* Channel base address offset from APBDMA base address */
110#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET 0x1000 115#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET 0x1000
111 116
112/* DMA channel register space size */
113#define TEGRA_APBDMA_CHANNEL_REGISTER_SIZE 0x20
114
115struct tegra_dma; 117struct tegra_dma;
116 118
117/* 119/*
118 * tegra_dma_chip_data Tegra chip specific DMA data 120 * tegra_dma_chip_data Tegra chip specific DMA data
119 * @nr_channels: Number of channels available in the controller. 121 * @nr_channels: Number of channels available in the controller.
122 * @channel_reg_size: Channel register size/stride.
120 * @max_dma_count: Maximum DMA transfer count supported by DMA controller. 123 * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
121 * @support_channel_pause: Support channel wise pause of dma. 124 * @support_channel_pause: Support channel wise pause of dma.
125 * @support_separate_wcount_reg: Support separate word count register.
122 */ 126 */
123struct tegra_dma_chip_data { 127struct tegra_dma_chip_data {
124 int nr_channels; 128 int nr_channels;
129 int channel_reg_size;
125 int max_dma_count; 130 int max_dma_count;
126 bool support_channel_pause; 131 bool support_channel_pause;
132 bool support_separate_wcount_reg;
127}; 133};
128 134
129/* DMA channel registers */ 135/* DMA channel registers */
@@ -133,6 +139,7 @@ struct tegra_dma_channel_regs {
133 unsigned long apb_ptr; 139 unsigned long apb_ptr;
134 unsigned long ahb_seq; 140 unsigned long ahb_seq;
135 unsigned long apb_seq; 141 unsigned long apb_seq;
142 unsigned long wcount;
136}; 143};
137 144
138/* 145/*
@@ -426,6 +433,8 @@ static void tegra_dma_start(struct tegra_dma_channel *tdc,
426 tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr); 433 tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
427 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq); 434 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
428 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr); 435 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
436 if (tdc->tdma->chip_data->support_separate_wcount_reg)
437 tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, ch_regs->wcount);
429 438
430 /* Start DMA */ 439 /* Start DMA */
431 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, 440 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
@@ -465,6 +474,9 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
465 /* Safe to program new configuration */ 474 /* Safe to program new configuration */
466 tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr); 475 tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
467 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr); 476 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
477 if (tdc->tdma->chip_data->support_separate_wcount_reg)
478 tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
479 nsg_req->ch_regs.wcount);
468 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, 480 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
469 nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB); 481 nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
470 nsg_req->configured = true; 482 nsg_req->configured = true;
@@ -718,6 +730,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc)
718 struct tegra_dma_desc *dma_desc; 730 struct tegra_dma_desc *dma_desc;
719 unsigned long flags; 731 unsigned long flags;
720 unsigned long status; 732 unsigned long status;
733 unsigned long wcount;
721 bool was_busy; 734 bool was_busy;
722 735
723 spin_lock_irqsave(&tdc->lock, flags); 736 spin_lock_irqsave(&tdc->lock, flags);
@@ -738,6 +751,10 @@ static void tegra_dma_terminate_all(struct dma_chan *dc)
738 tdc->isr_handler(tdc, true); 751 tdc->isr_handler(tdc, true);
739 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); 752 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
740 } 753 }
754 if (tdc->tdma->chip_data->support_separate_wcount_reg)
755 wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
756 else
757 wcount = status;
741 758
742 was_busy = tdc->busy; 759 was_busy = tdc->busy;
743 tegra_dma_stop(tdc); 760 tegra_dma_stop(tdc);
@@ -746,7 +763,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc)
746 sgreq = list_first_entry(&tdc->pending_sg_req, 763 sgreq = list_first_entry(&tdc->pending_sg_req,
747 typeof(*sgreq), node); 764 typeof(*sgreq), node);
748 sgreq->dma_desc->bytes_transferred += 765 sgreq->dma_desc->bytes_transferred +=
749 get_current_xferred_count(tdc, sgreq, status); 766 get_current_xferred_count(tdc, sgreq, wcount);
750 } 767 }
751 tegra_dma_resume(tdc); 768 tegra_dma_resume(tdc);
752 769
@@ -908,6 +925,17 @@ static int get_transfer_param(struct tegra_dma_channel *tdc,
908 return -EINVAL; 925 return -EINVAL;
909} 926}
910 927
928static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc,
929 struct tegra_dma_channel_regs *ch_regs, u32 len)
930{
931 u32 len_field = (len - 4) & 0xFFFC;
932
933 if (tdc->tdma->chip_data->support_separate_wcount_reg)
934 ch_regs->wcount = len_field;
935 else
936 ch_regs->csr |= len_field;
937}
938
911static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( 939static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
912 struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len, 940 struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
913 enum dma_transfer_direction direction, unsigned long flags, 941 enum dma_transfer_direction direction, unsigned long flags,
@@ -991,7 +1019,8 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
991 1019
992 sg_req->ch_regs.apb_ptr = apb_ptr; 1020 sg_req->ch_regs.apb_ptr = apb_ptr;
993 sg_req->ch_regs.ahb_ptr = mem; 1021 sg_req->ch_regs.ahb_ptr = mem;
994 sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC); 1022 sg_req->ch_regs.csr = csr;
1023 tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
995 sg_req->ch_regs.apb_seq = apb_seq; 1024 sg_req->ch_regs.apb_seq = apb_seq;
996 sg_req->ch_regs.ahb_seq = ahb_seq; 1025 sg_req->ch_regs.ahb_seq = ahb_seq;
 	sg_req->configured = false;
@@ -1120,7 +1149,8 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
 	ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
 	sg_req->ch_regs.apb_ptr = apb_ptr;
 	sg_req->ch_regs.ahb_ptr = mem;
-	sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
+	sg_req->ch_regs.csr = csr;
+	tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
 	sg_req->ch_regs.apb_seq = apb_seq;
 	sg_req->ch_regs.ahb_seq = ahb_seq;
 	sg_req->configured = false;
@@ -1234,27 +1264,45 @@ static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
 /* Tegra20 specific DMA controller information */
 static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
 	.nr_channels		= 16,
+	.channel_reg_size	= 0x20,
 	.max_dma_count		= 1024UL * 64,
 	.support_channel_pause	= false,
+	.support_separate_wcount_reg = false,
 };
 
 /* Tegra30 specific DMA controller information */
 static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
 	.nr_channels		= 32,
+	.channel_reg_size	= 0x20,
 	.max_dma_count		= 1024UL * 64,
 	.support_channel_pause	= false,
+	.support_separate_wcount_reg = false,
 };
 
 /* Tegra114 specific DMA controller information */
 static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
 	.nr_channels		= 32,
+	.channel_reg_size	= 0x20,
 	.max_dma_count		= 1024UL * 64,
 	.support_channel_pause	= true,
+	.support_separate_wcount_reg = false,
+};
+
+/* Tegra148 specific DMA controller information */
+static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
+	.nr_channels		= 32,
+	.channel_reg_size	= 0x40,
+	.max_dma_count		= 1024UL * 64,
+	.support_channel_pause	= true,
+	.support_separate_wcount_reg = true,
 };
 
 
 static const struct of_device_id tegra_dma_of_match[] = {
 	{
+		.compatible = "nvidia,tegra148-apbdma",
+		.data = &tegra148_dma_chip_data,
+	}, {
 		.compatible = "nvidia,tegra114-apbdma",
 		.data = &tegra114_dma_chip_data,
 	}, {
@@ -1348,7 +1396,7 @@ static int tegra_dma_probe(struct platform_device *pdev)
 		struct tegra_dma_channel *tdc = &tdma->channels[i];
 
 		tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
-					i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE;
+					i * cdata->channel_reg_size;
 
 		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
 		if (!res) {
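The removed CSR arithmetic moves into the new tegra_dma_prep_wcount()
helper, keyed off support_separate_wcount_reg. A plausible shape for the
helper, inferred from this hunk (the ch_regs->wcount field name is an
assumption, not shown in the excerpt):

    static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc,
    				      struct tegra_dma_channel_regs *ch_regs,
    				      u32 len)
    {
    	u32 len_field = (len - 4) & 0xFFFC;

    	/* Tegra148/124 keep the transfer count in a dedicated register;
    	 * earlier chips pack it into the low bits of the CSR. */
    	if (tdc->tdma->chip_data->support_separate_wcount_reg)
    		ch_regs->wcount = len_field;
    	else
    		ch_regs->csr |= len_field;
    }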
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 85c19d63f9fb..181b95267866 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -84,10 +84,12 @@ static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
 static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
 {
 	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+	dma_cookie_t cookie;
 
+	cookie = vd->tx.cookie;
 	dma_cookie_complete(&vd->tx);
 	dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
-		vd, vd->tx.cookie);
+		vd, cookie);
 	list_add_tail(&vd->node, &vc->desc_completed);
 
 	tasklet_schedule(&vc->task);
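The cookie has to be snapshotted because dma_cookie_complete() consumes
it: the helper records the cookie as the channel's completed_cookie and
then zeroes tx->cookie, so the old dev_vdbg() always printed 0. Abridged
from the dmaengine cookie helpers (simplified, debug checks omitted):

    static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
    {
    	tx->chan->completed_cookie = tx->cookie;
    	tx->cookie = 0;	/* vd->tx.cookie now reads back as 0 */
    }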
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 6fd9390ccf91..c5c92d59e531 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -257,7 +257,7 @@ struct dma_chan_percpu {
  * @dev: class device for sysfs
  * @device_node: used to add this to the device chan list
  * @local: per-cpu pointer to a struct dma_chan_percpu
- * @client-count: how many clients are using this channel
+ * @client_count: how many clients are using this channel
  * @table_count: number of appearances in the mem-to-mem allocation table
  * @private: private data for certain client-channel associations
  */
@@ -279,10 +279,10 @@ struct dma_chan {
 
 /**
  * struct dma_chan_dev - relate sysfs device node to backing channel device
- * @chan - driver channel device
- * @device - sysfs device
- * @dev_id - parent dma_device dev_id
- * @idr_ref - reference count to gate release of dma_device dev_id
+ * @chan: driver channel device
+ * @device: sysfs device
+ * @dev_id: parent dma_device dev_id
+ * @idr_ref: reference count to gate release of dma_device dev_id
  */
 struct dma_chan_dev {
 	struct dma_chan *chan;
@@ -306,9 +306,8 @@ enum dma_slave_buswidth {
 /**
  * struct dma_slave_config - dma slave channel runtime config
  * @direction: whether the data shall go in or out on this slave
- *	channel, right now. DMA_TO_DEVICE and DMA_FROM_DEVICE are
- *	legal values, DMA_BIDIRECTIONAL is not acceptable since we
- *	need to differentiate source and target addresses.
+ *	channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are
+ *	legal values.
  * @src_addr: this is the physical address where DMA slave data
  *	should be read (RX), if the source is memory this argument is
  *	ignored.
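For reference, a hedged example of a client filling this struct with the
renamed direction values; the FIFO address, bus width, and burst size are
invented for illustration:

    static int example_slave_config(struct dma_chan *chan, dma_addr_t fifo_addr)
    {
    	struct dma_slave_config cfg = {
    		.direction	= DMA_MEM_TO_DEV,	/* memory -> peripheral */
    		.dst_addr	= fifo_addr,		/* peripheral FIFO register */
    		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
    		.dst_maxburst	= 4,
    	};

    	return dmaengine_slave_config(chan, &cfg);
    }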
diff --git a/include/linux/platform_data/dma-imx-sdma.h b/include/linux/platform_data/dma-imx-sdma.h
index 3a3942823c20..eabac4e2fc99 100644
--- a/include/linux/platform_data/dma-imx-sdma.h
+++ b/include/linux/platform_data/dma-imx-sdma.h
@@ -43,6 +43,11 @@ struct sdma_script_start_addrs {
 	s32 dptc_dvfs_addr;
 	s32 utra_addr;
 	s32 ram_code_start_addr;
+	/* End of v1 array */
+	s32 mcu_2_ssish_addr;
+	s32 ssish_2_mcu_addr;
+	s32 hdmi_dma_addr;
+	/* End of v2 array */
 };
 
 /**
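The v1/v2 end markers let the driver derive how many script addresses a
given firmware revision may provide, so older ROM firmware can be handled
with a default script count. One way the boundaries could be computed
(macro names are illustrative, not necessarily the driver's):

    #include <linux/stddef.h>	/* offsetof() */

    #define SDMA_SCRIPT_ADDRS_V1 \
    	(offsetof(struct sdma_script_start_addrs, mcu_2_ssish_addr) / sizeof(s32))
    #define SDMA_SCRIPT_ADDRS_V2 \
    	(sizeof(struct sdma_script_start_addrs) / sizeof(s32))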
diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h
index beac6b8b6a7b..bcbc6c3c14c0 100644
--- a/include/linux/platform_data/dma-imx.h
+++ b/include/linux/platform_data/dma-imx.h
@@ -39,6 +39,7 @@ enum sdma_peripheral_type {
 	IMX_DMATYPE_IPU_MEMORY,	/* IPU Memory */
 	IMX_DMATYPE_ASRC,	/* ASRC */
 	IMX_DMATYPE_ESAI,	/* ESAI */
+	IMX_DMATYPE_SSI_DUAL,	/* SSI Dual FIFO */
 };
 
 enum imx_dma_prio {
diff --git a/include/linux/platform_data/dma-mmp_tdma.h b/include/linux/platform_data/dma-mmp_tdma.h
index 239e0fc1bb1f..66574ea39f97 100644
--- a/include/linux/platform_data/dma-mmp_tdma.h
+++ b/include/linux/platform_data/dma-mmp_tdma.h
@@ -1,6 +1,4 @@
 /*
- * linux/arch/arm/mach-mmp/include/mach/sram.h
- *
  * SRAM Memory Management
  *
  * Copyright (c) 2011 Marvell Semiconductors Inc.
@@ -11,8 +9,8 @@
  *
  */
 
-#ifndef __ASM_ARCH_SRAM_H
-#define __ASM_ARCH_SRAM_H
+#ifndef __DMA_MMP_TDMA_H
+#define __DMA_MMP_TDMA_H
 
 #include <linux/genalloc.h>
 
@@ -32,4 +30,4 @@ struct sram_platdata {
 
 extern struct gen_pool *sram_get_gpool(char *pool_name);
 
-#endif /* __ASM_ARCH_SRAM_H */
+#endif /* __DMA_MMP_TDMA_H */
diff --git a/include/linux/platform_data/dma-mv_xor.h b/include/linux/platform_data/dma-mv_xor.h
index 8ec18f64e396..92ffd3245f76 100644
--- a/include/linux/platform_data/dma-mv_xor.h
+++ b/include/linux/platform_data/dma-mv_xor.h
@@ -1,11 +1,9 @@
 /*
- * arch/arm/plat-orion/include/plat/mv_xor.h
- *
  * Marvell XOR platform device data definition file.
  */
 
-#ifndef __PLAT_MV_XOR_H
-#define __PLAT_MV_XOR_H
+#ifndef __DMA_MV_XOR_H
+#define __DMA_MV_XOR_H
 
 #include <linux/dmaengine.h>
 #include <linux/mbus.h>
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index f9090b167ad7..6404e1ef20d0 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -164,6 +164,7 @@ struct fsl_ssi_private {
 	bool baudclk_locked;
 	bool irq_stats;
 	bool offline_config;
+	bool use_dual_fifo;
 	u8 i2s_mode;
 	spinlock_t baudclk_lock;
 	struct clk *baudclk;
@@ -721,6 +722,12 @@ static int fsl_ssi_setup(struct fsl_ssi_private *ssi_private)
 			CCSR_SSI_SxCCR_DC(2));
 	}
 
+	if (ssi_private->use_dual_fifo) {
+		write_ssi_mask(&ssi->srcr, 0, CCSR_SSI_SRCR_RFEN1);
+		write_ssi_mask(&ssi->stcr, 0, CCSR_SSI_STCR_TFEN1);
+		write_ssi_mask(&ssi->scr, 0, CCSR_SSI_SCR_TCH_EN);
+	}
+
 	return 0;
 }
 
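A note on the three bits, stated as an assumption from the i.MX SSI
register layout rather than from this patch:

    /* CCSR_SSI_SRCR_RFEN1 - enable receive FIFO 1
     * CCSR_SSI_STCR_TFEN1 - enable transmit FIFO 1
     * CCSR_SSI_SCR_TCH_EN - two-channel mode: the SSI services
     *                       FIFO 0 and FIFO 1 alternately
     */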
@@ -752,6 +759,15 @@ static int fsl_ssi_startup(struct snd_pcm_substream *substream,
 		spin_unlock_irqrestore(&ssi_private->baudclk_lock, flags);
 	}
 
+	/* In dual FIFO mode it is safer to enforce an even period
+	 * size. The DMA always starts a period from FIFO 0, so an odd
+	 * period size would leave FIFO 1 unserved at the end of each
+	 * period, while the SSI would still fetch stale data from it.
+	 */
+	if (ssi_private->use_dual_fifo)
+		snd_pcm_hw_constraint_step(substream->runtime, 0,
+				SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 2);
+
 	return 0;
 }
 
@@ -1370,7 +1386,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
 
 	if (hw_type == FSL_SSI_MX21 || hw_type == FSL_SSI_MX51 ||
 		hw_type == FSL_SSI_MX35) {
-		u32 dma_events[2];
+		u32 dma_events[2], dmas[4];
 		ssi_private->ssi_on_imx = true;
 
 		ssi_private->clk = devm_clk_get(&pdev->dev, NULL);
@@ -1426,6 +1442,16 @@ static int fsl_ssi_probe(struct platform_device *pdev)
 			goto error_clk;
 		}
 	}
+	/* Should this be merged with the block above? */
+	if (!of_property_read_u32_array(pdev->dev.of_node, "dmas", dmas, 4)
+	    && dmas[2] == IMX_DMATYPE_SSI_DUAL) {
+		ssi_private->use_dual_fifo = true;
+		/* In dual FIFO mode the watermark must be kept even,
+		 * due to a limitation of the SDMA script.
+		 */
+		ssi_private->dma_params_tx.maxburst &= ~0x1;
+		ssi_private->dma_params_rx.maxburst &= ~0x1;
+	}
 
 	shared = of_device_is_compatible(of_get_parent(np),
 			"fsl,spba-bus");