aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/devicetree/bindings/dma/fsl-edma.txt76
-rw-r--r--Documentation/devicetree/bindings/dma/sirfsoc-dma.txt43
-rw-r--r--arch/arm/boot/dts/atlas6.dtsi2
-rw-r--r--arch/arm/boot/dts/prima2.dtsi2
-rw-r--r--drivers/dma/Kconfig12
-rw-r--r--drivers/dma/acpi-dma.c17
-rw-r--r--drivers/dma/at_hdmac.c1
-rw-r--r--drivers/dma/cppi41.c7
-rw-r--r--drivers/dma/dmaengine.c9
-rw-r--r--drivers/dma/dmatest.c4
-rw-r--r--drivers/dma/dw/core.c21
-rw-r--r--drivers/dma/dw/pci.c36
-rw-r--r--drivers/dma/dw/regs.h4
-rw-r--r--drivers/dma/edma.c5
-rw-r--r--drivers/dma/fsl-edma.c975
-rw-r--r--drivers/dma/imx-dma.c13
-rw-r--r--drivers/dma/mmp_pdma.c8
-rw-r--r--drivers/dma/mmp_tdma.c50
-rw-r--r--drivers/dma/omap-dma.c18
-rw-r--r--drivers/dma/pch_dma.c4
-rw-r--r--drivers/dma/s3c24xx-dma.c2
-rw-r--r--drivers/dma/sh/Kconfig6
-rw-r--r--drivers/dma/sh/Makefile1
-rw-r--r--drivers/dma/sh/rcar-audmapp.c320
-rw-r--r--drivers/dma/sh/shdma-base.c10
-rw-r--r--drivers/dma/sh/shdma-of.c3
-rw-r--r--drivers/dma/sh/shdmac.c13
-rw-r--r--drivers/dma/sh/sudmac.c4
-rw-r--r--drivers/dma/sirf-dma.c23
-rw-r--r--drivers/usb/musb/musb_cppi41.c3
-rw-r--r--include/linux/acpi_dma.h5
-rw-r--r--include/linux/dmaengine.h14
-rw-r--r--include/linux/dw_dmac.h5
-rw-r--r--include/linux/platform_data/dma-rcar-audmapp.h34
34 files changed, 1676 insertions, 74 deletions
diff --git a/Documentation/devicetree/bindings/dma/fsl-edma.txt b/Documentation/devicetree/bindings/dma/fsl-edma.txt
new file mode 100644
index 000000000000..191d7bd8a6fe
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/fsl-edma.txt
@@ -0,0 +1,76 @@
1* Freescale enhanced Direct Memory Access(eDMA) Controller
2
3 The eDMA channels have multiplex capability by programmable memory-mapped
4registers. Channels are split into two groups, called DMAMUX0 and DMAMUX1.
5A specific DMA request source can only be multiplexed by a channel of one
6group, DMAMUX0 or DMAMUX1, but not both.
7
8* eDMA Controller
9Required properties:
10- compatible :
11 - "fsl,vf610-edma" for eDMA used similar to that on Vybrid vf610 SoC
12- reg : Specifies base physical address(es) and size of the eDMA registers.
13 The 1st region is eDMA control register's address and size.
14 The 2nd and the 3rd regions are programmable channel multiplexing
15 control register's address and size.
16- interrupts : A list of interrupt-specifiers, one for each entry in
17 interrupt-names.
18- interrupt-names : Should contain:
19 "edma-tx" - the transmission interrupt
20 "edma-err" - the error interrupt
21- #dma-cells : Must be <2>.
22 The 1st cell specifies the DMAMUX(0 for DMAMUX0 and 1 for DMAMUX1).
23 Specific request source can only be multiplexed by specific channels
24 group called DMAMUX.
25 The 2nd cell specifies the request source(slot) ID.
26 See the SoC's reference manual for all the supported request sources.
27- dma-channels : Number of channels supported by the controller
28- clock-names : A list of channel group clock names. Should contain:
29 "dmamux0" - clock name of mux0 group
30 "dmamux1" - clock name of mux1 group
31- clocks : A list of phandle and clock-specifier pairs, one for each entry in
32 clock-names.
33
34Optional properties:
35- big-endian: If present, registers and hardware scatter/gather descriptors
36 of the eDMA are implemented in big endian mode, otherwise in little
37 endian mode.
38
39
40Examples:
41
42edma0: dma-controller@40018000 {
43 #dma-cells = <2>;
44 compatible = "fsl,vf610-edma";
45 reg = <0x40018000 0x2000>,
46 <0x40024000 0x1000>,
47 <0x40025000 0x1000>;
48 interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>,
49 <0 9 IRQ_TYPE_LEVEL_HIGH>;
50 interrupt-names = "edma-tx", "edma-err";
51 dma-channels = <32>;
52 clock-names = "dmamux0", "dmamux1";
53 clocks = <&clks VF610_CLK_DMAMUX0>,
54 <&clks VF610_CLK_DMAMUX1>;
55};
56
57
58* DMA clients
59DMA client drivers that use the DMA function must use the format described
60in the dma.txt file, using a two-cell specifier for each channel: the 1st
61specifies the channel group(DMAMUX) in which this request can be multiplexed,
62and the 2nd specifies the request source.
63
64Examples:
65
66sai2: sai@40031000 {
67 compatible = "fsl,vf610-sai";
68 reg = <0x40031000 0x1000>;
69 interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>;
70 clock-names = "sai";
71 clocks = <&clks VF610_CLK_SAI2>;
72 dma-names = "tx", "rx";
73 dmas = <&edma0 0 21>,
74 <&edma0 0 20>;
75 status = "disabled";
76};
diff --git a/Documentation/devicetree/bindings/dma/sirfsoc-dma.txt b/Documentation/devicetree/bindings/dma/sirfsoc-dma.txt
new file mode 100644
index 000000000000..ecbc96ad36f8
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/sirfsoc-dma.txt
@@ -0,0 +1,43 @@
1* CSR SiRFSoC DMA controller
2
3See dma.txt first
4
5Required properties:
6- compatible: Should be "sirf,prima2-dmac" or "sirf,marco-dmac"
7- reg: Should contain DMA registers location and length.
8- interrupts: Should contain one interrupt shared by all channels
9- #dma-cells: must be <1>. Used to represent the number of integer
10 cells in the dmas property of the client device.
11- clocks: clock required
12
13Example:
14
15Controller:
16dmac0: dma-controller@b00b0000 {
17 compatible = "sirf,prima2-dmac";
18 reg = <0xb00b0000 0x10000>;
19 interrupts = <12>;
20 clocks = <&clks 24>;
21 #dma-cells = <1>;
22};
23
24
25Client:
26Fill the specific dma request line in dmas. In the below example, spi0 read
27channel request line is 9 of the 2nd dma controller, while write channel uses
284 of the 2nd dma controller; spi1 read channel request line is 12 of the 1st
29dma controller, while write channel uses 13 of the 1st dma controller:
30
31spi0: spi@b00d0000 {
32 compatible = "sirf,prima2-spi";
33 dmas = <&dmac1 9>,
34 <&dmac1 4>;
35 dma-names = "rx", "tx";
36};
37
38spi1: spi@b0170000 {
39 compatible = "sirf,prima2-spi";
40 dmas = <&dmac0 12>,
41 <&dmac0 13>;
42 dma-names = "rx", "tx";
43};
diff --git a/arch/arm/boot/dts/atlas6.dtsi b/arch/arm/boot/dts/atlas6.dtsi
index f8674bcc4489..ec23b5c86fb7 100644
--- a/arch/arm/boot/dts/atlas6.dtsi
+++ b/arch/arm/boot/dts/atlas6.dtsi
@@ -269,6 +269,7 @@
269 reg = <0xb00b0000 0x10000>; 269 reg = <0xb00b0000 0x10000>;
270 interrupts = <12>; 270 interrupts = <12>;
271 clocks = <&clks 24>; 271 clocks = <&clks 24>;
272 #dma-cells = <1>;
272 }; 273 };
273 274
274 dmac1: dma-controller@b0160000 { 275 dmac1: dma-controller@b0160000 {
@@ -277,6 +278,7 @@
277 reg = <0xb0160000 0x10000>; 278 reg = <0xb0160000 0x10000>;
278 interrupts = <13>; 279 interrupts = <13>;
279 clocks = <&clks 25>; 280 clocks = <&clks 25>;
281 #dma-cells = <1>;
280 }; 282 };
281 283
282 vip@b00C0000 { 284 vip@b00C0000 {
diff --git a/arch/arm/boot/dts/prima2.dtsi b/arch/arm/boot/dts/prima2.dtsi
index 0e219932d7cc..3a15dee2e8ab 100644
--- a/arch/arm/boot/dts/prima2.dtsi
+++ b/arch/arm/boot/dts/prima2.dtsi
@@ -286,6 +286,7 @@
286 reg = <0xb00b0000 0x10000>; 286 reg = <0xb00b0000 0x10000>;
287 interrupts = <12>; 287 interrupts = <12>;
288 clocks = <&clks 24>; 288 clocks = <&clks 24>;
289 #dma-cells = <1>;
289 }; 290 };
290 291
291 dmac1: dma-controller@b0160000 { 292 dmac1: dma-controller@b0160000 {
@@ -294,6 +295,7 @@
294 reg = <0xb0160000 0x10000>; 295 reg = <0xb0160000 0x10000>;
295 interrupts = <13>; 296 interrupts = <13>;
296 clocks = <&clks 25>; 297 clocks = <&clks 25>;
298 #dma-cells = <1>;
297 }; 299 };
298 300
299 vip@b00C0000 { 301 vip@b00C0000 {
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index bcdacc73b6a9..edc739f986a8 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -308,7 +308,7 @@ config DMA_OMAP
308 308
309config DMA_BCM2835 309config DMA_BCM2835
310 tristate "BCM2835 DMA engine support" 310 tristate "BCM2835 DMA engine support"
311 depends on (ARCH_BCM2835 || MACH_BCM2708) 311 depends on ARCH_BCM2835
312 select DMA_ENGINE 312 select DMA_ENGINE
313 select DMA_VIRTUAL_CHANNELS 313 select DMA_VIRTUAL_CHANNELS
314 314
@@ -349,6 +349,16 @@ config MOXART_DMA
349 select DMA_VIRTUAL_CHANNELS 349 select DMA_VIRTUAL_CHANNELS
350 help 350 help
351 Enable support for the MOXA ART SoC DMA controller. 351 Enable support for the MOXA ART SoC DMA controller.
352
353config FSL_EDMA
354 tristate "Freescale eDMA engine support"
355 depends on OF
356 select DMA_ENGINE
357 select DMA_VIRTUAL_CHANNELS
358 help
359 Support the Freescale eDMA engine with programmable channel
360 multiplexing capability for DMA request sources(slot).
361 This module can be found on Freescale Vybrid and LS-1 SoCs.
352 362
353config DMA_ENGINE 363config DMA_ENGINE
354 bool 364 bool
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index 1e506afa33f5..de361a156b34 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -13,6 +13,7 @@
13 */ 13 */
14 14
15#include <linux/device.h> 15#include <linux/device.h>
16#include <linux/err.h>
16#include <linux/module.h> 17#include <linux/module.h>
17#include <linux/list.h> 18#include <linux/list.h>
18#include <linux/mutex.h> 19#include <linux/mutex.h>
@@ -265,7 +266,7 @@ EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_register);
265 */ 266 */
266void devm_acpi_dma_controller_free(struct device *dev) 267void devm_acpi_dma_controller_free(struct device *dev)
267{ 268{
268 WARN_ON(devres_destroy(dev, devm_acpi_dma_release, NULL, NULL)); 269 WARN_ON(devres_release(dev, devm_acpi_dma_release, NULL, NULL));
269} 270}
270EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free); 271EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
271 272
@@ -343,7 +344,7 @@ static int acpi_dma_parse_fixed_dma(struct acpi_resource *res, void *data)
343 * @index: index of FixedDMA descriptor for @dev 344 * @index: index of FixedDMA descriptor for @dev
344 * 345 *
345 * Return: 346 * Return:
346 * Pointer to appropriate dma channel on success or NULL on error. 347 * Pointer to appropriate dma channel on success or an error pointer.
347 */ 348 */
348struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev, 349struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
349 size_t index) 350 size_t index)
@@ -358,10 +359,10 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
358 359
359 /* Check if the device was enumerated by ACPI */ 360 /* Check if the device was enumerated by ACPI */
360 if (!dev || !ACPI_HANDLE(dev)) 361 if (!dev || !ACPI_HANDLE(dev))
361 return NULL; 362 return ERR_PTR(-ENODEV);
362 363
363 if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev)) 364 if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev))
364 return NULL; 365 return ERR_PTR(-ENODEV);
365 366
366 memset(&pdata, 0, sizeof(pdata)); 367 memset(&pdata, 0, sizeof(pdata));
367 pdata.index = index; 368 pdata.index = index;
@@ -376,7 +377,7 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
376 acpi_dev_free_resource_list(&resource_list); 377 acpi_dev_free_resource_list(&resource_list);
377 378
378 if (dma_spec->slave_id < 0 || dma_spec->chan_id < 0) 379 if (dma_spec->slave_id < 0 || dma_spec->chan_id < 0)
379 return NULL; 380 return ERR_PTR(-ENODEV);
380 381
381 mutex_lock(&acpi_dma_lock); 382 mutex_lock(&acpi_dma_lock);
382 383
@@ -399,7 +400,7 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
399 } 400 }
400 401
401 mutex_unlock(&acpi_dma_lock); 402 mutex_unlock(&acpi_dma_lock);
402 return chan; 403 return chan ? chan : ERR_PTR(-EPROBE_DEFER);
403} 404}
404EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index); 405EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index);
405 406
@@ -413,7 +414,7 @@ EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index);
413 * the first FixedDMA descriptor is TX and second is RX. 414 * the first FixedDMA descriptor is TX and second is RX.
414 * 415 *
415 * Return: 416 * Return:
416 * Pointer to appropriate dma channel on success or NULL on error. 417 * Pointer to appropriate dma channel on success or an error pointer.
417 */ 418 */
418struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev, 419struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
419 const char *name) 420 const char *name)
@@ -425,7 +426,7 @@ struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
425 else if (!strcmp(name, "rx")) 426 else if (!strcmp(name, "rx"))
426 index = 1; 427 index = 1;
427 else 428 else
428 return NULL; 429 return ERR_PTR(-ENODEV);
429 430
430 return acpi_dma_request_slave_chan_by_index(dev, index); 431 return acpi_dma_request_slave_chan_by_index(dev, index);
431} 432}
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index e2c04dc81e2a..c13a3bb0f594 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1569,7 +1569,6 @@ static int at_dma_remove(struct platform_device *pdev)
1569 1569
1570 /* Disable interrupts */ 1570 /* Disable interrupts */
1571 atc_disable_chan_irq(atdma, chan->chan_id); 1571 atc_disable_chan_irq(atdma, chan->chan_id);
1572 tasklet_disable(&atchan->tasklet);
1573 1572
1574 tasklet_kill(&atchan->tasklet); 1573 tasklet_kill(&atchan->tasklet);
1575 list_del(&chan->device_node); 1574 list_del(&chan->device_node);
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index c18aebf7d5aa..d028f36ae655 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -620,12 +620,15 @@ static int cppi41_stop_chan(struct dma_chan *chan)
620 u32 desc_phys; 620 u32 desc_phys;
621 int ret; 621 int ret;
622 622
623 desc_phys = lower_32_bits(c->desc_phys);
624 desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
625 if (!cdd->chan_busy[desc_num])
626 return 0;
627
623 ret = cppi41_tear_down_chan(c); 628 ret = cppi41_tear_down_chan(c);
624 if (ret) 629 if (ret)
625 return ret; 630 return ret;
626 631
627 desc_phys = lower_32_bits(c->desc_phys);
628 desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
629 WARN_ON(!cdd->chan_busy[desc_num]); 632 WARN_ON(!cdd->chan_busy[desc_num]);
630 cdd->chan_busy[desc_num] = NULL; 633 cdd->chan_busy[desc_num] = NULL;
631 634
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index ed610b497518..a886713937fd 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -627,18 +627,13 @@ EXPORT_SYMBOL_GPL(__dma_request_channel);
627struct dma_chan *dma_request_slave_channel_reason(struct device *dev, 627struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
628 const char *name) 628 const char *name)
629{ 629{
630 struct dma_chan *chan;
631
632 /* If device-tree is present get slave info from here */ 630 /* If device-tree is present get slave info from here */
633 if (dev->of_node) 631 if (dev->of_node)
634 return of_dma_request_slave_channel(dev->of_node, name); 632 return of_dma_request_slave_channel(dev->of_node, name);
635 633
636 /* If device was enumerated by ACPI get slave info from here */ 634 /* If device was enumerated by ACPI get slave info from here */
637 if (ACPI_HANDLE(dev)) { 635 if (ACPI_HANDLE(dev))
638 chan = acpi_dma_request_slave_chan_by_name(dev, name); 636 return acpi_dma_request_slave_chan_by_name(dev, name);
639 if (chan)
640 return chan;
641 }
642 637
643 return ERR_PTR(-ENODEV); 638 return ERR_PTR(-ENODEV);
644} 639}
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 05b6dea770a4..e27cec25c59e 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -340,7 +340,7 @@ static unsigned int min_odd(unsigned int x, unsigned int y)
340static void result(const char *err, unsigned int n, unsigned int src_off, 340static void result(const char *err, unsigned int n, unsigned int src_off,
341 unsigned int dst_off, unsigned int len, unsigned long data) 341 unsigned int dst_off, unsigned int len, unsigned long data)
342{ 342{
343 pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)", 343 pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
344 current->comm, n, err, src_off, dst_off, len, data); 344 current->comm, n, err, src_off, dst_off, len, data);
345} 345}
346 346
@@ -348,7 +348,7 @@ static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
348 unsigned int dst_off, unsigned int len, 348 unsigned int dst_off, unsigned int len,
349 unsigned long data) 349 unsigned long data)
350{ 350{
351 pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)", 351 pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
352 current->comm, n, err, src_off, dst_off, len, data); 352 current->comm, n, err, src_off, dst_off, len, data);
353} 353}
354 354
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 13ac3f240e79..cfdbb92aae1d 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -33,8 +33,8 @@
33 * of which use ARM any more). See the "Databook" from Synopsys for 33 * of which use ARM any more). See the "Databook" from Synopsys for
34 * information beyond what licensees probably provide. 34 * information beyond what licensees probably provide.
35 * 35 *
36 * The driver has currently been tested only with the Atmel AT32AP7000, 36 * The driver has been tested with the Atmel AT32AP7000, which does not
37 * which does not support descriptor writeback. 37 * support descriptor writeback.
38 */ 38 */
39 39
40static inline bool is_request_line_unset(struct dw_dma_chan *dwc) 40static inline bool is_request_line_unset(struct dw_dma_chan *dwc)
@@ -1479,7 +1479,6 @@ static void dw_dma_off(struct dw_dma *dw)
1479int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) 1479int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
1480{ 1480{
1481 struct dw_dma *dw; 1481 struct dw_dma *dw;
1482 size_t size;
1483 bool autocfg; 1482 bool autocfg;
1484 unsigned int dw_params; 1483 unsigned int dw_params;
1485 unsigned int nr_channels; 1484 unsigned int nr_channels;
@@ -1487,6 +1486,13 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
1487 int err; 1486 int err;
1488 int i; 1487 int i;
1489 1488
1489 dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
1490 if (!dw)
1491 return -ENOMEM;
1492
1493 dw->regs = chip->regs;
1494 chip->dw = dw;
1495
1490 dw_params = dma_read_byaddr(chip->regs, DW_PARAMS); 1496 dw_params = dma_read_byaddr(chip->regs, DW_PARAMS);
1491 autocfg = dw_params >> DW_PARAMS_EN & 0x1; 1497 autocfg = dw_params >> DW_PARAMS_EN & 0x1;
1492 1498
@@ -1509,9 +1515,9 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
1509 else 1515 else
1510 nr_channels = pdata->nr_channels; 1516 nr_channels = pdata->nr_channels;
1511 1517
1512 size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan); 1518 dw->chan = devm_kcalloc(chip->dev, nr_channels, sizeof(*dw->chan),
1513 dw = devm_kzalloc(chip->dev, size, GFP_KERNEL); 1519 GFP_KERNEL);
1514 if (!dw) 1520 if (!dw->chan)
1515 return -ENOMEM; 1521 return -ENOMEM;
1516 1522
1517 dw->clk = devm_clk_get(chip->dev, "hclk"); 1523 dw->clk = devm_clk_get(chip->dev, "hclk");
@@ -1519,9 +1525,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
1519 return PTR_ERR(dw->clk); 1525 return PTR_ERR(dw->clk);
1520 clk_prepare_enable(dw->clk); 1526 clk_prepare_enable(dw->clk);
1521 1527
1522 dw->regs = chip->regs;
1523 chip->dw = dw;
1524
1525 /* Get hardware configuration parameters */ 1528 /* Get hardware configuration parameters */
1526 if (autocfg) { 1529 if (autocfg) {
1527 max_blk_size = dma_readl(dw, MAX_BLK_SIZE); 1530 max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index e89fc24b8293..fec59f1a77bb 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -75,6 +75,36 @@ static void dw_pci_remove(struct pci_dev *pdev)
75 dev_warn(&pdev->dev, "can't remove device properly: %d\n", ret); 75 dev_warn(&pdev->dev, "can't remove device properly: %d\n", ret);
76} 76}
77 77
78#ifdef CONFIG_PM_SLEEP
79
80static int dw_pci_suspend_late(struct device *dev)
81{
82 struct pci_dev *pci = to_pci_dev(dev);
83 struct dw_dma_chip *chip = pci_get_drvdata(pci);
84
85 return dw_dma_suspend(chip);
86};
87
88static int dw_pci_resume_early(struct device *dev)
89{
90 struct pci_dev *pci = to_pci_dev(dev);
91 struct dw_dma_chip *chip = pci_get_drvdata(pci);
92
93 return dw_dma_resume(chip);
94};
95
96#else /* !CONFIG_PM_SLEEP */
97
98#define dw_pci_suspend_late NULL
99#define dw_pci_resume_early NULL
100
101#endif /* !CONFIG_PM_SLEEP */
102
103static const struct dev_pm_ops dw_pci_dev_pm_ops = {
104 .suspend_late = dw_pci_suspend_late,
105 .resume_early = dw_pci_resume_early,
106};
107
78static DEFINE_PCI_DEVICE_TABLE(dw_pci_id_table) = { 108static DEFINE_PCI_DEVICE_TABLE(dw_pci_id_table) = {
79 /* Medfield */ 109 /* Medfield */
80 { PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_pci_pdata }, 110 { PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_pci_pdata },
@@ -83,6 +113,9 @@ static DEFINE_PCI_DEVICE_TABLE(dw_pci_id_table) = {
83 /* BayTrail */ 113 /* BayTrail */
84 { PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_pci_pdata }, 114 { PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_pci_pdata },
85 { PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_pci_pdata }, 115 { PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_pci_pdata },
116
117 /* Haswell */
118 { PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_pdata },
86 { } 119 { }
87}; 120};
88MODULE_DEVICE_TABLE(pci, dw_pci_id_table); 121MODULE_DEVICE_TABLE(pci, dw_pci_id_table);
@@ -92,6 +125,9 @@ static struct pci_driver dw_pci_driver = {
92 .id_table = dw_pci_id_table, 125 .id_table = dw_pci_id_table,
93 .probe = dw_pci_probe, 126 .probe = dw_pci_probe,
94 .remove = dw_pci_remove, 127 .remove = dw_pci_remove,
128 .driver = {
129 .pm = &dw_pci_dev_pm_ops,
130 },
95}; 131};
96 132
97module_pci_driver(dw_pci_driver); 133module_pci_driver(dw_pci_driver);
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index deb4274f80f4..bb98d3e91e8b 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -252,13 +252,13 @@ struct dw_dma {
252 struct tasklet_struct tasklet; 252 struct tasklet_struct tasklet;
253 struct clk *clk; 253 struct clk *clk;
254 254
255 /* channels */
256 struct dw_dma_chan *chan;
255 u8 all_chan_mask; 257 u8 all_chan_mask;
256 258
257 /* hardware configuration */ 259 /* hardware configuration */
258 unsigned char nr_masters; 260 unsigned char nr_masters;
259 unsigned char data_width[4]; 261 unsigned char data_width[4];
260
261 struct dw_dma_chan chan[0];
262}; 262};
263 263
264static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw) 264static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index cd8da451d199..cd04eb7b182e 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -539,6 +539,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
539 edma_alloc_slot(EDMA_CTLR(echan->ch_num), 539 edma_alloc_slot(EDMA_CTLR(echan->ch_num),
540 EDMA_SLOT_ANY); 540 EDMA_SLOT_ANY);
541 if (echan->slot[i] < 0) { 541 if (echan->slot[i] < 0) {
542 kfree(edesc);
542 dev_err(dev, "Failed to allocate slot\n"); 543 dev_err(dev, "Failed to allocate slot\n");
543 return NULL; 544 return NULL;
544 } 545 }
@@ -553,8 +554,10 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
553 ret = edma_config_pset(chan, &edesc->pset[i], src_addr, 554 ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
554 dst_addr, burst, dev_width, period_len, 555 dst_addr, burst, dev_width, period_len,
555 direction); 556 direction);
556 if (ret < 0) 557 if (ret < 0) {
558 kfree(edesc);
557 return NULL; 559 return NULL;
560 }
558 561
559 if (direction == DMA_DEV_TO_MEM) 562 if (direction == DMA_DEV_TO_MEM)
560 dst_addr += period_len; 563 dst_addr += period_len;
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
new file mode 100644
index 000000000000..381e793184ba
--- /dev/null
+++ b/drivers/dma/fsl-edma.c
@@ -0,0 +1,975 @@
1/*
2 * drivers/dma/fsl-edma.c
3 *
4 * Copyright 2013-2014 Freescale Semiconductor, Inc.
5 *
6 * Driver for the Freescale eDMA engine with flexible channel multiplexing
7 * capability for DMA request sources. The eDMA block can be found on some
8 * Vybrid and Layerscape SoCs.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/interrupt.h>
19#include <linux/clk.h>
20#include <linux/dma-mapping.h>
21#include <linux/dmapool.h>
22#include <linux/slab.h>
23#include <linux/spinlock.h>
24#include <linux/of.h>
25#include <linux/of_device.h>
26#include <linux/of_address.h>
27#include <linux/of_irq.h>
28#include <linux/of_dma.h>
29
30#include "virt-dma.h"
31
32#define EDMA_CR 0x00
33#define EDMA_ES 0x04
34#define EDMA_ERQ 0x0C
35#define EDMA_EEI 0x14
36#define EDMA_SERQ 0x1B
37#define EDMA_CERQ 0x1A
38#define EDMA_SEEI 0x19
39#define EDMA_CEEI 0x18
40#define EDMA_CINT 0x1F
41#define EDMA_CERR 0x1E
42#define EDMA_SSRT 0x1D
43#define EDMA_CDNE 0x1C
44#define EDMA_INTR 0x24
45#define EDMA_ERR 0x2C
46
47#define EDMA_TCD_SADDR(x) (0x1000 + 32 * (x))
48#define EDMA_TCD_SOFF(x) (0x1004 + 32 * (x))
49#define EDMA_TCD_ATTR(x) (0x1006 + 32 * (x))
50#define EDMA_TCD_NBYTES(x) (0x1008 + 32 * (x))
51#define EDMA_TCD_SLAST(x) (0x100C + 32 * (x))
52#define EDMA_TCD_DADDR(x) (0x1010 + 32 * (x))
53#define EDMA_TCD_DOFF(x) (0x1014 + 32 * (x))
54#define EDMA_TCD_CITER_ELINK(x) (0x1016 + 32 * (x))
55#define EDMA_TCD_CITER(x) (0x1016 + 32 * (x))
56#define EDMA_TCD_DLAST_SGA(x) (0x1018 + 32 * (x))
57#define EDMA_TCD_CSR(x) (0x101C + 32 * (x))
58#define EDMA_TCD_BITER_ELINK(x) (0x101E + 32 * (x))
59#define EDMA_TCD_BITER(x) (0x101E + 32 * (x))
60
61#define EDMA_CR_EDBG BIT(1)
62#define EDMA_CR_ERCA BIT(2)
63#define EDMA_CR_ERGA BIT(3)
64#define EDMA_CR_HOE BIT(4)
65#define EDMA_CR_HALT BIT(5)
66#define EDMA_CR_CLM BIT(6)
67#define EDMA_CR_EMLM BIT(7)
68#define EDMA_CR_ECX BIT(16)
69#define EDMA_CR_CX BIT(17)
70
71#define EDMA_SEEI_SEEI(x) ((x) & 0x1F)
72#define EDMA_CEEI_CEEI(x) ((x) & 0x1F)
73#define EDMA_CINT_CINT(x) ((x) & 0x1F)
74#define EDMA_CERR_CERR(x) ((x) & 0x1F)
75
76#define EDMA_TCD_ATTR_DSIZE(x) (((x) & 0x0007))
77#define EDMA_TCD_ATTR_DMOD(x) (((x) & 0x001F) << 3)
78#define EDMA_TCD_ATTR_SSIZE(x) (((x) & 0x0007) << 8)
79#define EDMA_TCD_ATTR_SMOD(x) (((x) & 0x001F) << 11)
80#define EDMA_TCD_ATTR_SSIZE_8BIT (0x0000)
81#define EDMA_TCD_ATTR_SSIZE_16BIT (0x0100)
82#define EDMA_TCD_ATTR_SSIZE_32BIT (0x0200)
83#define EDMA_TCD_ATTR_SSIZE_64BIT (0x0300)
84#define EDMA_TCD_ATTR_SSIZE_32BYTE (0x0500)
85#define EDMA_TCD_ATTR_DSIZE_8BIT (0x0000)
86#define EDMA_TCD_ATTR_DSIZE_16BIT (0x0001)
87#define EDMA_TCD_ATTR_DSIZE_32BIT (0x0002)
88#define EDMA_TCD_ATTR_DSIZE_64BIT (0x0003)
89#define EDMA_TCD_ATTR_DSIZE_32BYTE (0x0005)
90
91#define EDMA_TCD_SOFF_SOFF(x) (x)
92#define EDMA_TCD_NBYTES_NBYTES(x) (x)
93#define EDMA_TCD_SLAST_SLAST(x) (x)
94#define EDMA_TCD_DADDR_DADDR(x) (x)
95#define EDMA_TCD_CITER_CITER(x) ((x) & 0x7FFF)
96#define EDMA_TCD_DOFF_DOFF(x) (x)
97#define EDMA_TCD_DLAST_SGA_DLAST_SGA(x) (x)
98#define EDMA_TCD_BITER_BITER(x) ((x) & 0x7FFF)
99
100#define EDMA_TCD_CSR_START BIT(0)
101#define EDMA_TCD_CSR_INT_MAJOR BIT(1)
102#define EDMA_TCD_CSR_INT_HALF BIT(2)
103#define EDMA_TCD_CSR_D_REQ BIT(3)
104#define EDMA_TCD_CSR_E_SG BIT(4)
105#define EDMA_TCD_CSR_E_LINK BIT(5)
106#define EDMA_TCD_CSR_ACTIVE BIT(6)
107#define EDMA_TCD_CSR_DONE BIT(7)
108
109#define EDMAMUX_CHCFG_DIS 0x0
110#define EDMAMUX_CHCFG_ENBL 0x80
111#define EDMAMUX_CHCFG_SOURCE(n) ((n) & 0x3F)
112
113#define DMAMUX_NR 2
114
115#define FSL_EDMA_BUSWIDTHS BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
116 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
117 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
118 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
119
120struct fsl_edma_hw_tcd {
121 u32 saddr;
122 u16 soff;
123 u16 attr;
124 u32 nbytes;
125 u32 slast;
126 u32 daddr;
127 u16 doff;
128 u16 citer;
129 u32 dlast_sga;
130 u16 csr;
131 u16 biter;
132};
133
134struct fsl_edma_sw_tcd {
135 dma_addr_t ptcd;
136 struct fsl_edma_hw_tcd *vtcd;
137};
138
139struct fsl_edma_slave_config {
140 enum dma_transfer_direction dir;
141 enum dma_slave_buswidth addr_width;
142 u32 dev_addr;
143 u32 burst;
144 u32 attr;
145};
146
147struct fsl_edma_chan {
148 struct virt_dma_chan vchan;
149 enum dma_status status;
150 struct fsl_edma_engine *edma;
151 struct fsl_edma_desc *edesc;
152 struct fsl_edma_slave_config fsc;
153 struct dma_pool *tcd_pool;
154};
155
156struct fsl_edma_desc {
157 struct virt_dma_desc vdesc;
158 struct fsl_edma_chan *echan;
159 bool iscyclic;
160 unsigned int n_tcds;
161 struct fsl_edma_sw_tcd tcd[];
162};
163
164struct fsl_edma_engine {
165 struct dma_device dma_dev;
166 void __iomem *membase;
167 void __iomem *muxbase[DMAMUX_NR];
168 struct clk *muxclk[DMAMUX_NR];
169 struct mutex fsl_edma_mutex;
170 u32 n_chans;
171 int txirq;
172 int errirq;
173 bool big_endian;
174 struct fsl_edma_chan chans[];
175};
176
177/*
178 * R/W functions for big- or little-endian registers
179 * the eDMA controller's endian is independent of the CPU core's endian.
180 */
181
182static u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr)
183{
184 if (edma->big_endian)
185 return ioread16be(addr);
186 else
187 return ioread16(addr);
188}
189
190static u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
191{
192 if (edma->big_endian)
193 return ioread32be(addr);
194 else
195 return ioread32(addr);
196}
197
198static void edma_writeb(struct fsl_edma_engine *edma, u8 val, void __iomem *addr)
199{
200 iowrite8(val, addr);
201}
202
203static void edma_writew(struct fsl_edma_engine *edma, u16 val, void __iomem *addr)
204{
205 if (edma->big_endian)
206 iowrite16be(val, addr);
207 else
208 iowrite16(val, addr);
209}
210
211static void edma_writel(struct fsl_edma_engine *edma, u32 val, void __iomem *addr)
212{
213 if (edma->big_endian)
214 iowrite32be(val, addr);
215 else
216 iowrite32(val, addr);
217}
218
219static struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
220{
221 return container_of(chan, struct fsl_edma_chan, vchan.chan);
222}
223
224static struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
225{
226 return container_of(vd, struct fsl_edma_desc, vdesc);
227}
228
229static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
230{
231 void __iomem *addr = fsl_chan->edma->membase;
232 u32 ch = fsl_chan->vchan.chan.chan_id;
233
234 edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), addr + EDMA_SEEI);
235 edma_writeb(fsl_chan->edma, ch, addr + EDMA_SERQ);
236}
237
238static void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
239{
240 void __iomem *addr = fsl_chan->edma->membase;
241 u32 ch = fsl_chan->vchan.chan.chan_id;
242
243 edma_writeb(fsl_chan->edma, ch, addr + EDMA_CERQ);
244 edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), addr + EDMA_CEEI);
245}
246
247static void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
248 unsigned int slot, bool enable)
249{
250 u32 ch = fsl_chan->vchan.chan.chan_id;
251 void __iomem *muxaddr = fsl_chan->edma->muxbase[ch / DMAMUX_NR];
252 unsigned chans_per_mux, ch_off;
253
254 chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
255 ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
256
257 if (enable)
258 edma_writeb(fsl_chan->edma,
259 EDMAMUX_CHCFG_ENBL | EDMAMUX_CHCFG_SOURCE(slot),
260 muxaddr + ch_off);
261 else
262 edma_writeb(fsl_chan->edma, EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
263}
264
265static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
266{
267 switch (addr_width) {
268 case 1:
269 return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
270 case 2:
271 return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
272 case 4:
273 return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
274 case 8:
275 return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
276 default:
277 return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
278 }
279}
280
281static void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
282{
283 struct fsl_edma_desc *fsl_desc;
284 int i;
285
286 fsl_desc = to_fsl_edma_desc(vdesc);
287 for (i = 0; i < fsl_desc->n_tcds; i++)
288 dma_pool_free(fsl_desc->echan->tcd_pool,
289 fsl_desc->tcd[i].vtcd,
290 fsl_desc->tcd[i].ptcd);
291 kfree(fsl_desc);
292}
293
294static int fsl_edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
295 unsigned long arg)
296{
297 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
298 struct dma_slave_config *cfg = (void *)arg;
299 unsigned long flags;
300 LIST_HEAD(head);
301
302 switch (cmd) {
303 case DMA_TERMINATE_ALL:
304 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
305 fsl_edma_disable_request(fsl_chan);
306 fsl_chan->edesc = NULL;
307 vchan_get_all_descriptors(&fsl_chan->vchan, &head);
308 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
309 vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
310 return 0;
311
312 case DMA_SLAVE_CONFIG:
313 fsl_chan->fsc.dir = cfg->direction;
314 if (cfg->direction == DMA_DEV_TO_MEM) {
315 fsl_chan->fsc.dev_addr = cfg->src_addr;
316 fsl_chan->fsc.addr_width = cfg->src_addr_width;
317 fsl_chan->fsc.burst = cfg->src_maxburst;
318 fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width);
319 } else if (cfg->direction == DMA_MEM_TO_DEV) {
320 fsl_chan->fsc.dev_addr = cfg->dst_addr;
321 fsl_chan->fsc.addr_width = cfg->dst_addr_width;
322 fsl_chan->fsc.burst = cfg->dst_maxburst;
323 fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width);
324 } else {
325 return -EINVAL;
326 }
327 return 0;
328
329 case DMA_PAUSE:
330 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
331 if (fsl_chan->edesc) {
332 fsl_edma_disable_request(fsl_chan);
333 fsl_chan->status = DMA_PAUSED;
334 }
335 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
336 return 0;
337
338 case DMA_RESUME:
339 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
340 if (fsl_chan->edesc) {
341 fsl_edma_enable_request(fsl_chan);
342 fsl_chan->status = DMA_IN_PROGRESS;
343 }
344 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
345 return 0;
346
347 default:
348 return -ENXIO;
349 }
350}
351
352static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
353 struct virt_dma_desc *vdesc, bool in_progress)
354{
355 struct fsl_edma_desc *edesc = fsl_chan->edesc;
356 void __iomem *addr = fsl_chan->edma->membase;
357 u32 ch = fsl_chan->vchan.chan.chan_id;
358 enum dma_transfer_direction dir = fsl_chan->fsc.dir;
359 dma_addr_t cur_addr, dma_addr;
360 size_t len, size;
361 int i;
362
363 /* calculate the total size in this desc */
364 for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
365 len += edma_readl(fsl_chan->edma, &(edesc->tcd[i].vtcd->nbytes))
366 * edma_readw(fsl_chan->edma, &(edesc->tcd[i].vtcd->biter));
367
368 if (!in_progress)
369 return len;
370
371 if (dir == DMA_MEM_TO_DEV)
372 cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_SADDR(ch));
373 else
374 cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_DADDR(ch));
375
376 /* figure out the finished and calculate the residue */
377 for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
378 size = edma_readl(fsl_chan->edma, &(edesc->tcd[i].vtcd->nbytes))
379 * edma_readw(fsl_chan->edma, &(edesc->tcd[i].vtcd->biter));
380 if (dir == DMA_MEM_TO_DEV)
381 dma_addr = edma_readl(fsl_chan->edma,
382 &(edesc->tcd[i].vtcd->saddr));
383 else
384 dma_addr = edma_readl(fsl_chan->edma,
385 &(edesc->tcd[i].vtcd->daddr));
386
387 len -= size;
388 if (cur_addr > dma_addr && cur_addr < dma_addr + size) {
389 len += dma_addr + size - cur_addr;
390 break;
391 }
392 }
393
394 return len;
395}
396
397static enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
398 dma_cookie_t cookie, struct dma_tx_state *txstate)
399{
400 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
401 struct virt_dma_desc *vdesc;
402 enum dma_status status;
403 unsigned long flags;
404
405 status = dma_cookie_status(chan, cookie, txstate);
406 if (status == DMA_COMPLETE)
407 return status;
408
409 if (!txstate)
410 return fsl_chan->status;
411
412 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
413 vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
414 if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
415 txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, true);
416 else if (vdesc)
417 txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, false);
418 else
419 txstate->residue = 0;
420
421 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
422
423 return fsl_chan->status;
424}
425
/*
 * Program a channel's hardware TCD registers from pre-swapped values.
 *
 * TCD parameters have been swapped in fill_tcd_params(),
 * so just write them to registers in the cpu endian here.
 *
 * CSR is cleared first so the channel cannot start (e.g. via a still-set
 * E_SG/START bit) while the remaining fields are updated, and the real
 * CSR value is written last to arm the transfer atomically.
 */
static void fsl_edma_set_tcd_params(struct fsl_edma_chan *fsl_chan,
		u32 src, u32 dst, u16 attr, u16 soff, u32 nbytes,
		u32 slast, u16 citer, u16 biter, u32 doff, u32 dlast_sga,
		u16 csr)
{
	void __iomem *addr = fsl_chan->edma->membase;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	writew(0, addr + EDMA_TCD_CSR(ch));
	writel(src, addr + EDMA_TCD_SADDR(ch));
	writel(dst, addr + EDMA_TCD_DADDR(ch));
	writew(attr, addr + EDMA_TCD_ATTR(ch));
	writew(soff, addr + EDMA_TCD_SOFF(ch));
	writel(nbytes, addr + EDMA_TCD_NBYTES(ch));
	writel(slast, addr + EDMA_TCD_SLAST(ch));
	writew(citer, addr + EDMA_TCD_CITER(ch));
	writew(biter, addr + EDMA_TCD_BITER(ch));
	writew(doff, addr + EDMA_TCD_DOFF(ch));
	writel(dlast_sga, addr + EDMA_TCD_DLAST_SGA(ch));
	writew(csr, addr + EDMA_TCD_CSR(ch));
}
451
452static void fill_tcd_params(struct fsl_edma_engine *edma,
453 struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
454 u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
455 u16 biter, u16 doff, u32 dlast_sga, bool major_int,
456 bool disable_req, bool enable_sg)
457{
458 u16 csr = 0;
459
460 /*
461 * eDMA hardware SGs require the TCD parameters stored in memory
462 * the same endian as the eDMA module so that they can be loaded
463 * automatically by the engine
464 */
465 edma_writel(edma, src, &(tcd->saddr));
466 edma_writel(edma, dst, &(tcd->daddr));
467 edma_writew(edma, attr, &(tcd->attr));
468 edma_writew(edma, EDMA_TCD_SOFF_SOFF(soff), &(tcd->soff));
469 edma_writel(edma, EDMA_TCD_NBYTES_NBYTES(nbytes), &(tcd->nbytes));
470 edma_writel(edma, EDMA_TCD_SLAST_SLAST(slast), &(tcd->slast));
471 edma_writew(edma, EDMA_TCD_CITER_CITER(citer), &(tcd->citer));
472 edma_writew(edma, EDMA_TCD_DOFF_DOFF(doff), &(tcd->doff));
473 edma_writel(edma, EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga), &(tcd->dlast_sga));
474 edma_writew(edma, EDMA_TCD_BITER_BITER(biter), &(tcd->biter));
475 if (major_int)
476 csr |= EDMA_TCD_CSR_INT_MAJOR;
477
478 if (disable_req)
479 csr |= EDMA_TCD_CSR_D_REQ;
480
481 if (enable_sg)
482 csr |= EDMA_TCD_CSR_E_SG;
483
484 edma_writew(edma, csr, &(tcd->csr));
485}
486
487static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
488 int sg_len)
489{
490 struct fsl_edma_desc *fsl_desc;
491 int i;
492
493 fsl_desc = kzalloc(sizeof(*fsl_desc) + sizeof(struct fsl_edma_sw_tcd) * sg_len,
494 GFP_NOWAIT);
495 if (!fsl_desc)
496 return NULL;
497
498 fsl_desc->echan = fsl_chan;
499 fsl_desc->n_tcds = sg_len;
500 for (i = 0; i < sg_len; i++) {
501 fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
502 GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
503 if (!fsl_desc->tcd[i].vtcd)
504 goto err;
505 }
506 return fsl_desc;
507
508err:
509 while (--i >= 0)
510 dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
511 fsl_desc->tcd[i].ptcd);
512 kfree(fsl_desc);
513 return NULL;
514}
515
516static struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
517 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
518 size_t period_len, enum dma_transfer_direction direction,
519 unsigned long flags, void *context)
520{
521 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
522 struct fsl_edma_desc *fsl_desc;
523 dma_addr_t dma_buf_next;
524 int sg_len, i;
525 u32 src_addr, dst_addr, last_sg, nbytes;
526 u16 soff, doff, iter;
527
528 if (!is_slave_direction(fsl_chan->fsc.dir))
529 return NULL;
530
531 sg_len = buf_len / period_len;
532 fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
533 if (!fsl_desc)
534 return NULL;
535 fsl_desc->iscyclic = true;
536
537 dma_buf_next = dma_addr;
538 nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
539 iter = period_len / nbytes;
540
541 for (i = 0; i < sg_len; i++) {
542 if (dma_buf_next >= dma_addr + buf_len)
543 dma_buf_next = dma_addr;
544
545 /* get next sg's physical address */
546 last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
547
548 if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
549 src_addr = dma_buf_next;
550 dst_addr = fsl_chan->fsc.dev_addr;
551 soff = fsl_chan->fsc.addr_width;
552 doff = 0;
553 } else {
554 src_addr = fsl_chan->fsc.dev_addr;
555 dst_addr = dma_buf_next;
556 soff = 0;
557 doff = fsl_chan->fsc.addr_width;
558 }
559
560 fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd, src_addr,
561 dst_addr, fsl_chan->fsc.attr, soff, nbytes, 0,
562 iter, iter, doff, last_sg, true, false, true);
563 dma_buf_next += period_len;
564 }
565
566 return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
567}
568
569static struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
570 struct dma_chan *chan, struct scatterlist *sgl,
571 unsigned int sg_len, enum dma_transfer_direction direction,
572 unsigned long flags, void *context)
573{
574 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
575 struct fsl_edma_desc *fsl_desc;
576 struct scatterlist *sg;
577 u32 src_addr, dst_addr, last_sg, nbytes;
578 u16 soff, doff, iter;
579 int i;
580
581 if (!is_slave_direction(fsl_chan->fsc.dir))
582 return NULL;
583
584 fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
585 if (!fsl_desc)
586 return NULL;
587 fsl_desc->iscyclic = false;
588
589 nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
590 for_each_sg(sgl, sg, sg_len, i) {
591 /* get next sg's physical address */
592 last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
593
594 if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
595 src_addr = sg_dma_address(sg);
596 dst_addr = fsl_chan->fsc.dev_addr;
597 soff = fsl_chan->fsc.addr_width;
598 doff = 0;
599 } else {
600 src_addr = fsl_chan->fsc.dev_addr;
601 dst_addr = sg_dma_address(sg);
602 soff = 0;
603 doff = fsl_chan->fsc.addr_width;
604 }
605
606 iter = sg_dma_len(sg) / nbytes;
607 if (i < sg_len - 1) {
608 last_sg = fsl_desc->tcd[(i + 1)].ptcd;
609 fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd,
610 src_addr, dst_addr, fsl_chan->fsc.attr,
611 soff, nbytes, 0, iter, iter, doff, last_sg,
612 false, false, true);
613 } else {
614 last_sg = 0;
615 fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd,
616 src_addr, dst_addr, fsl_chan->fsc.attr,
617 soff, nbytes, 0, iter, iter, doff, last_sg,
618 true, true, false);
619 }
620 }
621
622 return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
623}
624
/*
 * Start the next queued descriptor on an idle channel.
 * Caller must hold fsl_chan->vchan.lock.
 */
static void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
	struct fsl_edma_hw_tcd *tcd;
	struct virt_dma_desc *vdesc;

	vdesc = vchan_next_desc(&fsl_chan->vchan);
	if (!vdesc)
		return;
	fsl_chan->edesc = to_fsl_edma_desc(vdesc);
	tcd = fsl_chan->edesc->tcd[0].vtcd;
	/*
	 * NOTE(review): the vtcd fields were stored in engine endianness by
	 * fill_tcd_params(); these raw reads plus the plain write*() calls
	 * in fsl_edma_set_tcd_params() are presumably intended to cancel
	 * out — verify on a little-endian CPU driving a big-endian eDMA.
	 */
	fsl_edma_set_tcd_params(fsl_chan, tcd->saddr, tcd->daddr, tcd->attr,
			tcd->soff, tcd->nbytes, tcd->slast, tcd->citer,
			tcd->biter, tcd->doff, tcd->dlast_sga, tcd->csr);
	fsl_edma_enable_request(fsl_chan);
	fsl_chan->status = DMA_IN_PROGRESS;
}
641
642static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
643{
644 struct fsl_edma_engine *fsl_edma = dev_id;
645 unsigned int intr, ch;
646 void __iomem *base_addr;
647 struct fsl_edma_chan *fsl_chan;
648
649 base_addr = fsl_edma->membase;
650
651 intr = edma_readl(fsl_edma, base_addr + EDMA_INTR);
652 if (!intr)
653 return IRQ_NONE;
654
655 for (ch = 0; ch < fsl_edma->n_chans; ch++) {
656 if (intr & (0x1 << ch)) {
657 edma_writeb(fsl_edma, EDMA_CINT_CINT(ch),
658 base_addr + EDMA_CINT);
659
660 fsl_chan = &fsl_edma->chans[ch];
661
662 spin_lock(&fsl_chan->vchan.lock);
663 if (!fsl_chan->edesc->iscyclic) {
664 list_del(&fsl_chan->edesc->vdesc.node);
665 vchan_cookie_complete(&fsl_chan->edesc->vdesc);
666 fsl_chan->edesc = NULL;
667 fsl_chan->status = DMA_COMPLETE;
668 } else {
669 vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
670 }
671
672 if (!fsl_chan->edesc)
673 fsl_edma_xfer_desc(fsl_chan);
674
675 spin_unlock(&fsl_chan->vchan.lock);
676 }
677 }
678 return IRQ_HANDLED;
679}
680
681static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
682{
683 struct fsl_edma_engine *fsl_edma = dev_id;
684 unsigned int err, ch;
685
686 err = edma_readl(fsl_edma, fsl_edma->membase + EDMA_ERR);
687 if (!err)
688 return IRQ_NONE;
689
690 for (ch = 0; ch < fsl_edma->n_chans; ch++) {
691 if (err & (0x1 << ch)) {
692 fsl_edma_disable_request(&fsl_edma->chans[ch]);
693 edma_writeb(fsl_edma, EDMA_CERR_CERR(ch),
694 fsl_edma->membase + EDMA_CERR);
695 fsl_edma->chans[ch].status = DMA_ERROR;
696 }
697 }
698 return IRQ_HANDLED;
699}
700
701static irqreturn_t fsl_edma_irq_handler(int irq, void *dev_id)
702{
703 if (fsl_edma_tx_handler(irq, dev_id) == IRQ_HANDLED)
704 return IRQ_HANDLED;
705
706 return fsl_edma_err_handler(irq, dev_id);
707}
708
/*
 * dmaengine issue_pending hook: move submitted descriptors to the issued
 * list and kick the hardware only if the channel is idle — otherwise the
 * tx interrupt handler chains the next descriptor on completion.
 */
static void fsl_edma_issue_pending(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);

	if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}
721
722static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
723 struct of_dma *ofdma)
724{
725 struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
726 struct dma_chan *chan, *_chan;
727
728 if (dma_spec->args_count != 2)
729 return NULL;
730
731 mutex_lock(&fsl_edma->fsl_edma_mutex);
732 list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) {
733 if (chan->client_count)
734 continue;
735 if ((chan->chan_id / DMAMUX_NR) == dma_spec->args[0]) {
736 chan = dma_get_slave_channel(chan);
737 if (chan) {
738 chan->device->privatecnt++;
739 fsl_edma_chan_mux(to_fsl_edma_chan(chan),
740 dma_spec->args[1], true);
741 mutex_unlock(&fsl_edma->fsl_edma_mutex);
742 return chan;
743 }
744 }
745 }
746 mutex_unlock(&fsl_edma->fsl_edma_mutex);
747 return NULL;
748}
749
750static int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
751{
752 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
753
754 fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
755 sizeof(struct fsl_edma_hw_tcd),
756 32, 0);
757 return 0;
758}
759
/*
 * Release a channel: quiesce the hardware and detach the mux under the
 * channel lock, then free descriptors and the TCD pool outside of it
 * (descriptor free callbacks must not run with the lock held).
 */
static void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	fsl_edma_chan_mux(fsl_chan, 0, false);
	fsl_chan->edesc = NULL;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	dma_pool_destroy(fsl_chan->tcd_pool);
	fsl_chan->tcd_pool = NULL;
}
777
/* Report the controller's static slave capabilities to dmaengine clients. */
static int fsl_dma_device_slave_caps(struct dma_chan *dchan,
		struct dma_slave_caps *caps)
{
	caps->src_addr_widths = FSL_EDMA_BUSWIDTHS;
	caps->dstn_addr_widths = FSL_EDMA_BUSWIDTHS;
	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	caps->cmd_pause = true;
	caps->cmd_terminate = true;

	return 0;
}
789
790static int
791fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
792{
793 int ret;
794
795 fsl_edma->txirq = platform_get_irq_byname(pdev, "edma-tx");
796 if (fsl_edma->txirq < 0) {
797 dev_err(&pdev->dev, "Can't get edma-tx irq.\n");
798 return fsl_edma->txirq;
799 }
800
801 fsl_edma->errirq = platform_get_irq_byname(pdev, "edma-err");
802 if (fsl_edma->errirq < 0) {
803 dev_err(&pdev->dev, "Can't get edma-err irq.\n");
804 return fsl_edma->errirq;
805 }
806
807 if (fsl_edma->txirq == fsl_edma->errirq) {
808 ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
809 fsl_edma_irq_handler, 0, "eDMA", fsl_edma);
810 if (ret) {
811 dev_err(&pdev->dev, "Can't register eDMA IRQ.\n");
812 return ret;
813 }
814 } else {
815 ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
816 fsl_edma_tx_handler, 0, "eDMA tx", fsl_edma);
817 if (ret) {
818 dev_err(&pdev->dev, "Can't register eDMA tx IRQ.\n");
819 return ret;
820 }
821
822 ret = devm_request_irq(&pdev->dev, fsl_edma->errirq,
823 fsl_edma_err_handler, 0, "eDMA err", fsl_edma);
824 if (ret) {
825 dev_err(&pdev->dev, "Can't register eDMA err IRQ.\n");
826 return ret;
827 }
828 }
829
830 return 0;
831}
832
833static int fsl_edma_probe(struct platform_device *pdev)
834{
835 struct device_node *np = pdev->dev.of_node;
836 struct fsl_edma_engine *fsl_edma;
837 struct fsl_edma_chan *fsl_chan;
838 struct resource *res;
839 int len, chans;
840 int ret, i;
841
842 ret = of_property_read_u32(np, "dma-channels", &chans);
843 if (ret) {
844 dev_err(&pdev->dev, "Can't get dma-channels.\n");
845 return ret;
846 }
847
848 len = sizeof(*fsl_edma) + sizeof(*fsl_chan) * chans;
849 fsl_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
850 if (!fsl_edma)
851 return -ENOMEM;
852
853 fsl_edma->n_chans = chans;
854 mutex_init(&fsl_edma->fsl_edma_mutex);
855
856 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
857 fsl_edma->membase = devm_ioremap_resource(&pdev->dev, res);
858 if (IS_ERR(fsl_edma->membase))
859 return PTR_ERR(fsl_edma->membase);
860
861 for (i = 0; i < DMAMUX_NR; i++) {
862 char clkname[32];
863
864 res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
865 fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res);
866 if (IS_ERR(fsl_edma->muxbase[i]))
867 return PTR_ERR(fsl_edma->muxbase[i]);
868
869 sprintf(clkname, "dmamux%d", i);
870 fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname);
871 if (IS_ERR(fsl_edma->muxclk[i])) {
872 dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
873 return PTR_ERR(fsl_edma->muxclk[i]);
874 }
875
876 ret = clk_prepare_enable(fsl_edma->muxclk[i]);
877 if (ret) {
878 dev_err(&pdev->dev, "DMAMUX clk block failed.\n");
879 return ret;
880 }
881
882 }
883
884 ret = fsl_edma_irq_init(pdev, fsl_edma);
885 if (ret)
886 return ret;
887
888 fsl_edma->big_endian = of_property_read_bool(np, "big-endian");
889
890 INIT_LIST_HEAD(&fsl_edma->dma_dev.channels);
891 for (i = 0; i < fsl_edma->n_chans; i++) {
892 struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
893
894 fsl_chan->edma = fsl_edma;
895
896 fsl_chan->vchan.desc_free = fsl_edma_free_desc;
897 vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
898
899 edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
900 fsl_edma_chan_mux(fsl_chan, 0, false);
901 }
902
903 dma_cap_set(DMA_PRIVATE, fsl_edma->dma_dev.cap_mask);
904 dma_cap_set(DMA_SLAVE, fsl_edma->dma_dev.cap_mask);
905 dma_cap_set(DMA_CYCLIC, fsl_edma->dma_dev.cap_mask);
906
907 fsl_edma->dma_dev.dev = &pdev->dev;
908 fsl_edma->dma_dev.device_alloc_chan_resources
909 = fsl_edma_alloc_chan_resources;
910 fsl_edma->dma_dev.device_free_chan_resources
911 = fsl_edma_free_chan_resources;
912 fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
913 fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
914 fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
915 fsl_edma->dma_dev.device_control = fsl_edma_control;
916 fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;
917 fsl_edma->dma_dev.device_slave_caps = fsl_dma_device_slave_caps;
918
919 platform_set_drvdata(pdev, fsl_edma);
920
921 ret = dma_async_device_register(&fsl_edma->dma_dev);
922 if (ret) {
923 dev_err(&pdev->dev, "Can't register Freescale eDMA engine.\n");
924 return ret;
925 }
926
927 ret = of_dma_controller_register(np, fsl_edma_xlate, fsl_edma);
928 if (ret) {
929 dev_err(&pdev->dev, "Can't register Freescale eDMA of_dma.\n");
930 dma_async_device_unregister(&fsl_edma->dma_dev);
931 return ret;
932 }
933
934 /* enable round robin arbitration */
935 edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, fsl_edma->membase + EDMA_CR);
936
937 return 0;
938}
939
940static int fsl_edma_remove(struct platform_device *pdev)
941{
942 struct device_node *np = pdev->dev.of_node;
943 struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);
944 int i;
945
946 of_dma_controller_free(np);
947 dma_async_device_unregister(&fsl_edma->dma_dev);
948
949 for (i = 0; i < DMAMUX_NR; i++)
950 clk_disable_unprepare(fsl_edma->muxclk[i]);
951
952 return 0;
953}
954
/* Devicetree match table — currently only the Vybrid VF610 eDMA block. */
static const struct of_device_id fsl_edma_dt_ids[] = {
	{ .compatible = "fsl,vf610-edma", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
960
/* Platform driver glue; probe/remove are defined above. */
static struct platform_driver fsl_edma_driver = {
	.driver = {
		.name = "fsl-edma",
		.owner = THIS_MODULE,
		.of_match_table = fsl_edma_dt_ids,
	},
	.probe = fsl_edma_probe,
	.remove = fsl_edma_remove,
};

module_platform_driver(fsl_edma_driver);

MODULE_ALIAS("platform:fsl-edma");
MODULE_DESCRIPTION("Freescale eDMA engine driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 6f9ac2022abd..286660a12cc6 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -422,12 +422,12 @@ static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
422 /* Tasklet error handler */ 422 /* Tasklet error handler */
423 tasklet_schedule(&imxdma->channel[i].dma_tasklet); 423 tasklet_schedule(&imxdma->channel[i].dma_tasklet);
424 424
425 printk(KERN_WARNING 425 dev_warn(imxdma->dev,
426 "DMA timeout on channel %d -%s%s%s%s\n", i, 426 "DMA timeout on channel %d -%s%s%s%s\n", i,
427 errcode & IMX_DMA_ERR_BURST ? " burst" : "", 427 errcode & IMX_DMA_ERR_BURST ? " burst" : "",
428 errcode & IMX_DMA_ERR_REQUEST ? " request" : "", 428 errcode & IMX_DMA_ERR_REQUEST ? " request" : "",
429 errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "", 429 errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
430 errcode & IMX_DMA_ERR_BUFFER ? " buffer" : ""); 430 errcode & IMX_DMA_ERR_BUFFER ? " buffer" : "");
431 } 431 }
432 return IRQ_HANDLED; 432 return IRQ_HANDLED;
433} 433}
@@ -1236,6 +1236,7 @@ static int imxdma_remove(struct platform_device *pdev)
1236static struct platform_driver imxdma_driver = { 1236static struct platform_driver imxdma_driver = {
1237 .driver = { 1237 .driver = {
1238 .name = "imx-dma", 1238 .name = "imx-dma",
1239 .owner = THIS_MODULE,
1239 .of_match_table = imx_dma_of_dev_id, 1240 .of_match_table = imx_dma_of_dev_id,
1240 }, 1241 },
1241 .id_table = imx_dma_devtype, 1242 .id_table = imx_dma_devtype,
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index b439679f4126..bf02e7beb51a 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -867,8 +867,8 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
867 phy->base = pdev->base; 867 phy->base = pdev->base;
868 868
869 if (irq) { 869 if (irq) {
870 ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler, 0, 870 ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler,
871 "pdma", phy); 871 IRQF_SHARED, "pdma", phy);
872 if (ret) { 872 if (ret) {
873 dev_err(pdev->dev, "channel request irq fail!\n"); 873 dev_err(pdev->dev, "channel request irq fail!\n");
874 return ret; 874 return ret;
@@ -957,8 +957,8 @@ static int mmp_pdma_probe(struct platform_device *op)
957 if (irq_num != dma_channels) { 957 if (irq_num != dma_channels) {
958 /* all chan share one irq, demux inside */ 958 /* all chan share one irq, demux inside */
959 irq = platform_get_irq(op, 0); 959 irq = platform_get_irq(op, 0);
960 ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler, 0, 960 ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler,
961 "pdma", pdev); 961 IRQF_SHARED, "pdma", pdev);
962 if (ret) 962 if (ret)
963 return ret; 963 return ret;
964 } 964 }
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 33f96aaa80c7..724f7f4c9720 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -22,6 +22,7 @@
22#include <mach/regs-icu.h> 22#include <mach/regs-icu.h>
23#include <linux/platform_data/dma-mmp_tdma.h> 23#include <linux/platform_data/dma-mmp_tdma.h>
24#include <linux/of_device.h> 24#include <linux/of_device.h>
25#include <linux/of_dma.h>
25 26
26#include "dmaengine.h" 27#include "dmaengine.h"
27 28
@@ -541,6 +542,45 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
541 return 0; 542 return 0;
542} 543}
543 544
545struct mmp_tdma_filter_param {
546 struct device_node *of_node;
547 unsigned int chan_id;
548};
549
550static bool mmp_tdma_filter_fn(struct dma_chan *chan, void *fn_param)
551{
552 struct mmp_tdma_filter_param *param = fn_param;
553 struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
554 struct dma_device *pdma_device = tdmac->chan.device;
555
556 if (pdma_device->dev->of_node != param->of_node)
557 return false;
558
559 if (chan->chan_id != param->chan_id)
560 return false;
561
562 return true;
563}
564
565struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec,
566 struct of_dma *ofdma)
567{
568 struct mmp_tdma_device *tdev = ofdma->of_dma_data;
569 dma_cap_mask_t mask = tdev->device.cap_mask;
570 struct mmp_tdma_filter_param param;
571
572 if (dma_spec->args_count != 1)
573 return NULL;
574
575 param.of_node = ofdma->of_node;
576 param.chan_id = dma_spec->args[0];
577
578 if (param.chan_id >= TDMA_CHANNEL_NUM)
579 return NULL;
580
581 return dma_request_channel(mask, mmp_tdma_filter_fn, &param);
582}
583
544static struct of_device_id mmp_tdma_dt_ids[] = { 584static struct of_device_id mmp_tdma_dt_ids[] = {
545 { .compatible = "marvell,adma-1.0", .data = (void *)MMP_AUD_TDMA}, 585 { .compatible = "marvell,adma-1.0", .data = (void *)MMP_AUD_TDMA},
546 { .compatible = "marvell,pxa910-squ", .data = (void *)PXA910_SQU}, 586 { .compatible = "marvell,pxa910-squ", .data = (void *)PXA910_SQU},
@@ -631,6 +671,16 @@ static int mmp_tdma_probe(struct platform_device *pdev)
631 return ret; 671 return ret;
632 } 672 }
633 673
674 if (pdev->dev.of_node) {
675 ret = of_dma_controller_register(pdev->dev.of_node,
676 mmp_tdma_xlate, tdev);
677 if (ret) {
678 dev_err(tdev->device.dev,
679 "failed to register controller\n");
680 dma_async_device_unregister(&tdev->device);
681 }
682 }
683
634 dev_info(tdev->device.dev, "initialized\n"); 684 dev_info(tdev->device.dev, "initialized\n");
635 return 0; 685 return 0;
636} 686}
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 362e7c49f2e1..928a0fa39f89 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -597,6 +597,23 @@ static void omap_dma_free(struct omap_dmadev *od)
597 kfree(od); 597 kfree(od);
598} 598}
599 599
600#define OMAP_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
601 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
602 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
603
604static int omap_dma_device_slave_caps(struct dma_chan *dchan,
605 struct dma_slave_caps *caps)
606{
607 caps->src_addr_widths = OMAP_DMA_BUSWIDTHS;
608 caps->dstn_addr_widths = OMAP_DMA_BUSWIDTHS;
609 caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
610 caps->cmd_pause = true;
611 caps->cmd_terminate = true;
612 caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
613
614 return 0;
615}
616
600static int omap_dma_probe(struct platform_device *pdev) 617static int omap_dma_probe(struct platform_device *pdev)
601{ 618{
602 struct omap_dmadev *od; 619 struct omap_dmadev *od;
@@ -615,6 +632,7 @@ static int omap_dma_probe(struct platform_device *pdev)
615 od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg; 632 od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
616 od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic; 633 od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
617 od->ddev.device_control = omap_dma_control; 634 od->ddev.device_control = omap_dma_control;
635 od->ddev.device_slave_caps = omap_dma_device_slave_caps;
618 od->ddev.dev = &pdev->dev; 636 od->ddev.dev = &pdev->dev;
619 INIT_LIST_HEAD(&od->ddev.channels); 637 INIT_LIST_HEAD(&od->ddev.channels);
620 INIT_LIST_HEAD(&od->pending); 638 INIT_LIST_HEAD(&od->pending);
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 61fdc54a3c88..05fa548bd659 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -964,16 +964,16 @@ static void pch_dma_remove(struct pci_dev *pdev)
964 if (pd) { 964 if (pd) {
965 dma_async_device_unregister(&pd->dma); 965 dma_async_device_unregister(&pd->dma);
966 966
967 free_irq(pdev->irq, pd);
968
967 list_for_each_entry_safe(chan, _c, &pd->dma.channels, 969 list_for_each_entry_safe(chan, _c, &pd->dma.channels,
968 device_node) { 970 device_node) {
969 pd_chan = to_pd_chan(chan); 971 pd_chan = to_pd_chan(chan);
970 972
971 tasklet_disable(&pd_chan->tasklet);
972 tasklet_kill(&pd_chan->tasklet); 973 tasklet_kill(&pd_chan->tasklet);
973 } 974 }
974 975
975 pci_pool_destroy(pd->pool); 976 pci_pool_destroy(pd->pool);
976 free_irq(pdev->irq, pd);
977 pci_iounmap(pdev, pd->membase); 977 pci_iounmap(pdev, pd->membase);
978 pci_release_regions(pdev); 978 pci_release_regions(pdev);
979 pci_disable_device(pdev); 979 pci_disable_device(pdev);
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 4eddedb6eb7d..b209a0f17344 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -192,7 +192,7 @@ struct s3c24xx_dma_phy {
192 unsigned int id; 192 unsigned int id;
193 bool valid; 193 bool valid;
194 void __iomem *base; 194 void __iomem *base;
195 unsigned int irq; 195 int irq;
196 struct clk *clk; 196 struct clk *clk;
197 spinlock_t lock; 197 spinlock_t lock;
198 struct s3c24xx_dma_chan *serving; 198 struct s3c24xx_dma_chan *serving;
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index dadd9e010c0b..b4c813831006 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -29,6 +29,12 @@ config RCAR_HPB_DMAE
29 help 29 help
30 Enable support for the Renesas R-Car series DMA controllers. 30 Enable support for the Renesas R-Car series DMA controllers.
31 31
32config RCAR_AUDMAC_PP
33 tristate "Renesas R-Car Audio DMAC Peripheral Peripheral support"
34 depends on SH_DMAE_BASE
35 help
36 Enable support for the Renesas R-Car Audio DMAC Peripheral Peripheral controllers.
37
32config SHDMA_R8A73A4 38config SHDMA_R8A73A4
33 def_bool y 39 def_bool y
34 depends on ARCH_R8A73A4 && SH_DMAE != n 40 depends on ARCH_R8A73A4 && SH_DMAE != n
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index e856af23b789..1ce88b28cfc6 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -7,3 +7,4 @@ endif
7shdma-objs := $(shdma-y) 7shdma-objs := $(shdma-y)
8obj-$(CONFIG_SUDMAC) += sudmac.o 8obj-$(CONFIG_SUDMAC) += sudmac.o
9obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o 9obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
10obj-$(CONFIG_RCAR_AUDMAC_PP) += rcar-audmapp.o
diff --git a/drivers/dma/sh/rcar-audmapp.c b/drivers/dma/sh/rcar-audmapp.c
new file mode 100644
index 000000000000..2de77289a2e9
--- /dev/null
+++ b/drivers/dma/sh/rcar-audmapp.c
@@ -0,0 +1,320 @@
1/*
2 * This is for Renesas R-Car Audio-DMAC-peri-peri.
3 *
4 * Copyright (C) 2014 Renesas Electronics Corporation
5 * Copyright (C) 2014 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
6 *
7 * based on the drivers/dma/sh/shdma.c
8 *
9 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
10 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
11 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
12 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
13 *
14 * This is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 */
20#include <linux/delay.h>
21#include <linux/init.h>
22#include <linux/module.h>
23#include <linux/slab.h>
24#include <linux/dmaengine.h>
25#include <linux/platform_data/dma-rcar-audmapp.h>
26#include <linux/platform_device.h>
27#include <linux/shdma-base.h>
28
/*
 * DMA register
 */
#define PDMASAR 0x00	/* source address register */
#define PDMADAR 0x04	/* destination address register */
#define PDMACHCR 0x0c	/* channel control register */

/* PDMACHCR */
#define PDMACHCR_DE (1 << 0)	/* transfer enable bit; ORed in at start_xfer */

#define AUDMAPP_MAX_CHANNELS 29	/* channels registered with the shdma core */

/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE 2
#define AUDMAPP_SLAVE_NUMBER 256	/* slave ids 0 .. 255 are addressable */
#define AUDMAPP_LEN_MAX (16 * 1024 * 1024)	/* per-descriptor length cap, bytes */
45
/* Per-channel state wrapped around the generic shdma channel. */
struct audmapp_chan {
	struct shdma_chan shdma_chan;	/* generic part; recovered via to_chan() */
	struct audmapp_slave_config *config;	/* set by audmapp_set_slave(); NULL until then */
	void __iomem *base;	/* this channel's register window */
};
51
/* Per-controller state wrapped around the generic shdma device. */
struct audmapp_device {
	struct shdma_dev shdma_dev;	/* generic part; recovered via to_dev() */
	struct audmapp_pdata *pdata;	/* platform data with the slave table */
	struct device *dev;	/* &pdev->dev, for logging and devm allocations */
	void __iomem *chan_reg;	/* base of the mapped register block */
};
58
/* Navigate from the embedded generic shdma objects back to our wrappers. */
#define to_chan(chan) container_of(chan, struct audmapp_chan, shdma_chan)
#define to_dev(chan) container_of(chan->shdma_chan.dma_chan.device, \
				  struct audmapp_device, shdma_dev.dma_dev)
62
63static void audmapp_write(struct audmapp_chan *auchan, u32 data, u32 reg)
64{
65 struct audmapp_device *audev = to_dev(auchan);
66 struct device *dev = audev->dev;
67
68 dev_dbg(dev, "w %p : %08x\n", auchan->base + reg, data);
69
70 iowrite32(data, auchan->base + reg);
71}
72
73static u32 audmapp_read(struct audmapp_chan *auchan, u32 reg)
74{
75 return ioread32(auchan->base + reg);
76}
77
78static void audmapp_halt(struct shdma_chan *schan)
79{
80 struct audmapp_chan *auchan = to_chan(schan);
81 int i;
82
83 audmapp_write(auchan, 0, PDMACHCR);
84
85 for (i = 0; i < 1024; i++) {
86 if (0 == audmapp_read(auchan, PDMACHCR))
87 return;
88 udelay(1);
89 }
90}
91
92static void audmapp_start_xfer(struct shdma_chan *schan,
93 struct shdma_desc *sdecs)
94{
95 struct audmapp_chan *auchan = to_chan(schan);
96 struct audmapp_device *audev = to_dev(auchan);
97 struct audmapp_slave_config *cfg = auchan->config;
98 struct device *dev = audev->dev;
99 u32 chcr = cfg->chcr | PDMACHCR_DE;
100
101 dev_dbg(dev, "src/dst/chcr = %pad/%pad/%x\n",
102 &cfg->src, &cfg->dst, cfg->chcr);
103
104 audmapp_write(auchan, cfg->src, PDMASAR);
105 audmapp_write(auchan, cfg->dst, PDMADAR);
106 audmapp_write(auchan, chcr, PDMACHCR);
107}
108
109static struct audmapp_slave_config *
110audmapp_find_slave(struct audmapp_chan *auchan, int slave_id)
111{
112 struct audmapp_device *audev = to_dev(auchan);
113 struct audmapp_pdata *pdata = audev->pdata;
114 struct audmapp_slave_config *cfg;
115 int i;
116
117 if (slave_id >= AUDMAPP_SLAVE_NUMBER)
118 return NULL;
119
120 for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
121 if (cfg->slave_id == slave_id)
122 return cfg;
123
124 return NULL;
125}
126
127static int audmapp_set_slave(struct shdma_chan *schan, int slave_id,
128 dma_addr_t slave_addr, bool try)
129{
130 struct audmapp_chan *auchan = to_chan(schan);
131 struct audmapp_slave_config *cfg =
132 audmapp_find_slave(auchan, slave_id);
133
134 if (!cfg)
135 return -ENODEV;
136 if (try)
137 return 0;
138
139 auchan->config = cfg;
140
141 return 0;
142}
143
144static int audmapp_desc_setup(struct shdma_chan *schan,
145 struct shdma_desc *sdecs,
146 dma_addr_t src, dma_addr_t dst, size_t *len)
147{
148 struct audmapp_chan *auchan = to_chan(schan);
149 struct audmapp_slave_config *cfg = auchan->config;
150
151 if (!cfg)
152 return -ENODEV;
153
154 if (*len > (size_t)AUDMAPP_LEN_MAX)
155 *len = (size_t)AUDMAPP_LEN_MAX;
156
157 return 0;
158}
159
/* Nothing to pre-configure: everything is programmed in start_xfer(). */
static void audmapp_setup_xfer(struct shdma_chan *schan,
			       int slave_id)
{
}
164
/* Slave addresses come from the platform data, not from the framework. */
static dma_addr_t audmapp_slave_addr(struct shdma_chan *schan)
{
	return 0; /* always fixed address */
}
169
/*
 * Report whether the channel still has work in flight.
 *
 * NOTE(review): this tests every CHCR bit *except* DE.  halt() waits
 * for CHCR to read back as all-zero, so a non-zero read does suggest
 * activity, but it is not obvious from this file why DE itself is
 * masked out -- confirm against the Audio-DMAC-peri-peri datasheet.
 */
static bool audmapp_channel_busy(struct shdma_chan *schan)
{
	struct audmapp_chan *auchan = to_chan(schan);
	u32 chcr = audmapp_read(auchan, PDMACHCR);

	return chcr & ~PDMACHCR_DE;
}
177
/* No per-descriptor completion status; treat every descriptor as done. */
static bool audmapp_desc_completed(struct shdma_chan *schan,
				   struct shdma_desc *sdesc)
{
	return true;
}
183
184static struct shdma_desc *audmapp_embedded_desc(void *buf, int i)
185{
186 return &((struct shdma_desc *)buf)[i];
187}
188
/* Callbacks plugged into the generic shdma engine. */
static const struct shdma_ops audmapp_shdma_ops = {
	.halt_channel = audmapp_halt,
	.desc_setup = audmapp_desc_setup,
	.set_slave = audmapp_set_slave,
	.start_xfer = audmapp_start_xfer,
	.embedded_desc = audmapp_embedded_desc,
	.setup_xfer = audmapp_setup_xfer,
	.slave_addr = audmapp_slave_addr,
	.channel_busy = audmapp_channel_busy,
	.desc_completed = audmapp_desc_completed,
};
200
201static int audmapp_chan_probe(struct platform_device *pdev,
202 struct audmapp_device *audev, int id)
203{
204 struct shdma_dev *sdev = &audev->shdma_dev;
205 struct audmapp_chan *auchan;
206 struct shdma_chan *schan;
207 struct device *dev = audev->dev;
208
209 auchan = devm_kzalloc(dev, sizeof(*auchan), GFP_KERNEL);
210 if (!auchan)
211 return -ENOMEM;
212
213 schan = &auchan->shdma_chan;
214 schan->max_xfer_len = AUDMAPP_LEN_MAX;
215
216 shdma_chan_probe(sdev, schan, id);
217
218 auchan->base = audev->chan_reg + 0x20 + (0x10 * id);
219 dev_dbg(dev, "%02d : %p / %p", id, auchan->base, audev->chan_reg);
220
221 return 0;
222}
223
/* Tear down every channel registered by audmapp_chan_probe(). */
static void audmapp_chan_remove(struct audmapp_device *audev)
{
	struct dma_device *dma_dev = &audev->shdma_dev.dma_dev;
	struct shdma_chan *schan;
	int i;

	shdma_for_each_chan(schan, &audev->shdma_dev, i) {
		BUG_ON(!schan);
		shdma_chan_remove(schan);
	}
	/* nothing is registered any more */
	dma_dev->chancnt = 0;
}
236
237static int audmapp_probe(struct platform_device *pdev)
238{
239 struct audmapp_pdata *pdata = pdev->dev.platform_data;
240 struct audmapp_device *audev;
241 struct shdma_dev *sdev;
242 struct dma_device *dma_dev;
243 struct resource *res;
244 int err, i;
245
246 if (!pdata)
247 return -ENODEV;
248
249 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
250
251 audev = devm_kzalloc(&pdev->dev, sizeof(*audev), GFP_KERNEL);
252 if (!audev)
253 return -ENOMEM;
254
255 audev->dev = &pdev->dev;
256 audev->pdata = pdata;
257 audev->chan_reg = devm_ioremap_resource(&pdev->dev, res);
258 if (IS_ERR(audev->chan_reg))
259 return PTR_ERR(audev->chan_reg);
260
261 sdev = &audev->shdma_dev;
262 sdev->ops = &audmapp_shdma_ops;
263 sdev->desc_size = sizeof(struct shdma_desc);
264
265 dma_dev = &sdev->dma_dev;
266 dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;
267 dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
268
269 err = shdma_init(&pdev->dev, sdev, AUDMAPP_MAX_CHANNELS);
270 if (err < 0)
271 return err;
272
273 platform_set_drvdata(pdev, audev);
274
275 /* Create DMA Channel */
276 for (i = 0; i < AUDMAPP_MAX_CHANNELS; i++) {
277 err = audmapp_chan_probe(pdev, audev, i);
278 if (err)
279 goto chan_probe_err;
280 }
281
282 err = dma_async_device_register(dma_dev);
283 if (err < 0)
284 goto chan_probe_err;
285
286 return err;
287
288chan_probe_err:
289 audmapp_chan_remove(audev);
290 shdma_cleanup(sdev);
291
292 return err;
293}
294
295static int audmapp_remove(struct platform_device *pdev)
296{
297 struct audmapp_device *audev = platform_get_drvdata(pdev);
298 struct dma_device *dma_dev = &audev->shdma_dev.dma_dev;
299
300 dma_async_device_unregister(dma_dev);
301
302 audmapp_chan_remove(audev);
303 shdma_cleanup(&audev->shdma_dev);
304
305 return 0;
306}
307
/* Bound against the "rcar-audmapp-engine" platform device. */
static struct platform_driver audmapp_driver = {
	.probe = audmapp_probe,
	.remove = audmapp_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = "rcar-audmapp-engine",
	},
};
module_platform_driver(audmapp_driver);

MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
MODULE_DESCRIPTION("Renesas R-Car Audio DMAC peri-peri driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 2e7b394def80..52396771acbe 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -227,7 +227,7 @@ bool shdma_chan_filter(struct dma_chan *chan, void *arg)
227 struct shdma_chan *schan = to_shdma_chan(chan); 227 struct shdma_chan *schan = to_shdma_chan(chan);
228 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); 228 struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
229 const struct shdma_ops *ops = sdev->ops; 229 const struct shdma_ops *ops = sdev->ops;
230 int match = (int)arg; 230 int match = (long)arg;
231 int ret; 231 int ret;
232 232
233 if (match < 0) 233 if (match < 0)
@@ -491,8 +491,8 @@ static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
491 } 491 }
492 492
493 dev_dbg(schan->dev, 493 dev_dbg(schan->dev,
494 "chaining (%u/%u)@%x -> %x with %p, cookie %d\n", 494 "chaining (%zu/%zu)@%pad -> %pad with %p, cookie %d\n",
495 copy_size, *len, *src, *dst, &new->async_tx, 495 copy_size, *len, src, dst, &new->async_tx,
496 new->async_tx.cookie); 496 new->async_tx.cookie);
497 497
498 new->mark = DESC_PREPARED; 498 new->mark = DESC_PREPARED;
@@ -555,8 +555,8 @@ static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
555 goto err_get_desc; 555 goto err_get_desc;
556 556
557 do { 557 do {
558 dev_dbg(schan->dev, "Add SG #%d@%p[%d], dma %llx\n", 558 dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n",
559 i, sg, len, (unsigned long long)sg_addr); 559 i, sg, len, &sg_addr);
560 560
561 if (direction == DMA_DEV_TO_MEM) 561 if (direction == DMA_DEV_TO_MEM)
562 new = shdma_add_desc(schan, flags, 562 new = shdma_add_desc(schan, flags,
diff --git a/drivers/dma/sh/shdma-of.c b/drivers/dma/sh/shdma-of.c
index 06473a05fe4e..b4ff9d3e56d1 100644
--- a/drivers/dma/sh/shdma-of.c
+++ b/drivers/dma/sh/shdma-of.c
@@ -33,7 +33,8 @@ static struct dma_chan *shdma_of_xlate(struct of_phandle_args *dma_spec,
33 /* Only slave DMA channels can be allocated via DT */ 33 /* Only slave DMA channels can be allocated via DT */
34 dma_cap_set(DMA_SLAVE, mask); 34 dma_cap_set(DMA_SLAVE, mask);
35 35
36 chan = dma_request_channel(mask, shdma_chan_filter, (void *)id); 36 chan = dma_request_channel(mask, shdma_chan_filter,
37 (void *)(uintptr_t)id);
37 if (chan) 38 if (chan)
38 to_shdma_chan(chan)->hw_req = id; 39 to_shdma_chan(chan)->hw_req = id;
39 40
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 0d765c0e21ec..dda7e7563f5d 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -443,6 +443,7 @@ static bool sh_dmae_reset(struct sh_dmae_device *shdev)
443 return ret; 443 return ret;
444} 444}
445 445
446#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
446static irqreturn_t sh_dmae_err(int irq, void *data) 447static irqreturn_t sh_dmae_err(int irq, void *data)
447{ 448{
448 struct sh_dmae_device *shdev = data; 449 struct sh_dmae_device *shdev = data;
@@ -453,6 +454,7 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
453 sh_dmae_reset(shdev); 454 sh_dmae_reset(shdev);
454 return IRQ_HANDLED; 455 return IRQ_HANDLED;
455} 456}
457#endif
456 458
457static bool sh_dmae_desc_completed(struct shdma_chan *schan, 459static bool sh_dmae_desc_completed(struct shdma_chan *schan,
458 struct shdma_desc *sdesc) 460 struct shdma_desc *sdesc)
@@ -637,7 +639,7 @@ static int sh_dmae_resume(struct device *dev)
637#define sh_dmae_resume NULL 639#define sh_dmae_resume NULL
638#endif 640#endif
639 641
640const struct dev_pm_ops sh_dmae_pm = { 642static const struct dev_pm_ops sh_dmae_pm = {
641 .suspend = sh_dmae_suspend, 643 .suspend = sh_dmae_suspend,
642 .resume = sh_dmae_resume, 644 .resume = sh_dmae_resume,
643 .runtime_suspend = sh_dmae_runtime_suspend, 645 .runtime_suspend = sh_dmae_runtime_suspend,
@@ -685,9 +687,12 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
685static int sh_dmae_probe(struct platform_device *pdev) 687static int sh_dmae_probe(struct platform_device *pdev)
686{ 688{
687 const struct sh_dmae_pdata *pdata; 689 const struct sh_dmae_pdata *pdata;
688 unsigned long irqflags = 0, 690 unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
689 chan_flag[SH_DMAE_MAX_CHANNELS] = {}; 691 int chan_irq[SH_DMAE_MAX_CHANNELS];
690 int errirq, chan_irq[SH_DMAE_MAX_CHANNELS]; 692#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
693 unsigned long irqflags = 0;
694 int errirq;
695#endif
691 int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0; 696 int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
692 struct sh_dmae_device *shdev; 697 struct sh_dmae_device *shdev;
693 struct dma_device *dma_dev; 698 struct dma_device *dma_dev;
diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c
index c7e9cdff0708..4e7df43b50d6 100644
--- a/drivers/dma/sh/sudmac.c
+++ b/drivers/dma/sh/sudmac.c
@@ -178,8 +178,8 @@ static int sudmac_desc_setup(struct shdma_chan *schan,
178 struct sudmac_chan *sc = to_chan(schan); 178 struct sudmac_chan *sc = to_chan(schan);
179 struct sudmac_desc *sd = to_desc(sdesc); 179 struct sudmac_desc *sd = to_desc(sdesc);
180 180
181 dev_dbg(sc->shdma_chan.dev, "%s: src=%x, dst=%x, len=%d\n", 181 dev_dbg(sc->shdma_chan.dev, "%s: src=%pad, dst=%pad, len=%zu\n",
182 __func__, src, dst, *len); 182 __func__, &src, &dst, *len);
183 183
184 if (*len > schan->max_xfer_len) 184 if (*len > schan->max_xfer_len)
185 *len = schan->max_xfer_len; 185 *len = schan->max_xfer_len;
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index d4d3a3109b16..a1bd8298d55f 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -18,6 +18,7 @@
18#include <linux/of_device.h> 18#include <linux/of_device.h>
19#include <linux/of_platform.h> 19#include <linux/of_platform.h>
20#include <linux/clk.h> 20#include <linux/clk.h>
21#include <linux/of_dma.h>
21#include <linux/sirfsoc_dma.h> 22#include <linux/sirfsoc_dma.h>
22 23
23#include "dmaengine.h" 24#include "dmaengine.h"
@@ -659,6 +660,18 @@ static int sirfsoc_dma_device_slave_caps(struct dma_chan *dchan,
659 return 0; 660 return 0;
660} 661}
661 662
663static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec,
664 struct of_dma *ofdma)
665{
666 struct sirfsoc_dma *sdma = ofdma->of_dma_data;
667 unsigned int request = dma_spec->args[0];
668
669 if (request > SIRFSOC_DMA_CHANNELS)
670 return NULL;
671
672 return dma_get_slave_channel(&sdma->channels[request].chan);
673}
674
662static int sirfsoc_dma_probe(struct platform_device *op) 675static int sirfsoc_dma_probe(struct platform_device *op)
663{ 676{
664 struct device_node *dn = op->dev.of_node; 677 struct device_node *dn = op->dev.of_node;
@@ -764,11 +777,20 @@ static int sirfsoc_dma_probe(struct platform_device *op)
764 if (ret) 777 if (ret)
765 goto free_irq; 778 goto free_irq;
766 779
780 /* Device-tree DMA controller registration */
781 ret = of_dma_controller_register(dn, of_dma_sirfsoc_xlate, sdma);
782 if (ret) {
783 dev_err(dev, "failed to register DMA controller\n");
784 goto unreg_dma_dev;
785 }
786
767 pm_runtime_enable(&op->dev); 787 pm_runtime_enable(&op->dev);
768 dev_info(dev, "initialized SIRFSOC DMAC driver\n"); 788 dev_info(dev, "initialized SIRFSOC DMAC driver\n");
769 789
770 return 0; 790 return 0;
771 791
792unreg_dma_dev:
793 dma_async_device_unregister(dma);
772free_irq: 794free_irq:
773 free_irq(sdma->irq, sdma); 795 free_irq(sdma->irq, sdma);
774irq_dispose: 796irq_dispose:
@@ -781,6 +803,7 @@ static int sirfsoc_dma_remove(struct platform_device *op)
781 struct device *dev = &op->dev; 803 struct device *dev = &op->dev;
782 struct sirfsoc_dma *sdma = dev_get_drvdata(dev); 804 struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
783 805
806 of_dma_controller_free(op->dev.of_node);
784 dma_async_device_unregister(&sdma->dma); 807 dma_async_device_unregister(&sdma->dma);
785 free_irq(sdma->irq, sdma); 808 free_irq(sdma->irq, sdma);
786 irq_dispose_mapping(sdma->irq); 809 irq_dispose_mapping(sdma->irq);
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index f88929609bac..c0c6281e3496 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -119,7 +119,8 @@ static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
119 struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep; 119 struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
120 struct musb *musb = hw_ep->musb; 120 struct musb *musb = hw_ep->musb;
121 121
122 if (!cppi41_channel->prog_len) { 122 if (!cppi41_channel->prog_len ||
123 (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {
123 124
124 /* done, complete */ 125 /* done, complete */
125 cppi41_channel->channel.actual_len = 126 cppi41_channel->channel.actual_len =
diff --git a/include/linux/acpi_dma.h b/include/linux/acpi_dma.h
index fb0298082916..329436d38e66 100644
--- a/include/linux/acpi_dma.h
+++ b/include/linux/acpi_dma.h
@@ -16,6 +16,7 @@
16 16
17#include <linux/list.h> 17#include <linux/list.h>
18#include <linux/device.h> 18#include <linux/device.h>
19#include <linux/err.h>
19#include <linux/dmaengine.h> 20#include <linux/dmaengine.h>
20 21
21/** 22/**
@@ -103,12 +104,12 @@ static inline void devm_acpi_dma_controller_free(struct device *dev)
103static inline struct dma_chan *acpi_dma_request_slave_chan_by_index( 104static inline struct dma_chan *acpi_dma_request_slave_chan_by_index(
104 struct device *dev, size_t index) 105 struct device *dev, size_t index)
105{ 106{
106 return NULL; 107 return ERR_PTR(-ENODEV);
107} 108}
108static inline struct dma_chan *acpi_dma_request_slave_chan_by_name( 109static inline struct dma_chan *acpi_dma_request_slave_chan_by_name(
109 struct device *dev, const char *name) 110 struct device *dev, const char *name)
110{ 111{
111 return NULL; 112 return ERR_PTR(-ENODEV);
112} 113}
113 114
114#define acpi_dma_simple_xlate NULL 115#define acpi_dma_simple_xlate NULL
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index c5c92d59e531..8300fb87b84a 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -341,15 +341,11 @@ enum dma_slave_buswidth {
341 * and this struct will then be passed in as an argument to the 341 * and this struct will then be passed in as an argument to the
342 * DMA engine device_control() function. 342 * DMA engine device_control() function.
343 * 343 *
344 * The rationale for adding configuration information to this struct 344 * The rationale for adding configuration information to this struct is as
345 * is as follows: if it is likely that most DMA slave controllers in 345 * follows: if it is likely that more than one DMA slave controllers in
346 * the world will support the configuration option, then make it 346 * the world will support the configuration option, then make it generic.
347 * generic. If not: if it is fixed so that it be sent in static from 347 * If not: if it is fixed so that it be sent in static from the platform
348 * the platform data, then prefer to do that. Else, if it is neither 348 * data, then prefer to do that.
349 * fixed at runtime, nor generic enough (such as bus mastership on
350 * some CPU family and whatnot) then create a custom slave config
351 * struct and pass that, then make this config a member of that
352 * struct, if applicable.
353 */ 349 */
354struct dma_slave_config { 350struct dma_slave_config {
355 enum dma_transfer_direction direction; 351 enum dma_transfer_direction direction;
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index 481ab2345d6b..68b4024184de 100644
--- a/include/linux/dw_dmac.h
+++ b/include/linux/dw_dmac.h
@@ -1,6 +1,5 @@
1/* 1/*
2 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on 2 * Driver for the Synopsys DesignWare DMA Controller
3 * AVR32 systems.)
4 * 3 *
5 * Copyright (C) 2007 Atmel Corporation 4 * Copyright (C) 2007 Atmel Corporation
6 * Copyright (C) 2010-2011 ST Microelectronics 5 * Copyright (C) 2010-2011 ST Microelectronics
@@ -44,8 +43,6 @@ struct dw_dma_slave {
44 * @nr_masters: Number of AHB masters supported by the controller 43 * @nr_masters: Number of AHB masters supported by the controller
45 * @data_width: Maximum data width supported by hardware per AHB master 44 * @data_width: Maximum data width supported by hardware per AHB master
46 * (0 - 8bits, 1 - 16bits, ..., 5 - 256bits) 45 * (0 - 8bits, 1 - 16bits, ..., 5 - 256bits)
47 * @sd: slave specific data. Used for configuring channels
48 * @sd_count: count of slave data structures passed.
49 */ 46 */
50struct dw_dma_platform_data { 47struct dw_dma_platform_data {
51 unsigned int nr_channels; 48 unsigned int nr_channels;
diff --git a/include/linux/platform_data/dma-rcar-audmapp.h b/include/linux/platform_data/dma-rcar-audmapp.h
new file mode 100644
index 000000000000..471fffebbeb4
--- /dev/null
+++ b/include/linux/platform_data/dma-rcar-audmapp.h
@@ -0,0 +1,34 @@
1/*
2 * This is for Renesas R-Car Audio-DMAC-peri-peri.
3 *
4 * Copyright (C) 2014 Renesas Electronics Corporation
5 * Copyright (C) 2014 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
6 *
7 * This file is based on the include/linux/sh_dma.h
8 *
9 * Header for the new SH dmaengine driver
10 *
11 * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
16 */
17#ifndef SH_AUDMAPP_H
18#define SH_AUDMAPP_H
19
20#include <linux/dmaengine.h>
21
/* One DMA slave: fixed source/destination and channel-control value. */
struct audmapp_slave_config {
	int slave_id;	/* matched against the id passed to set_slave() */
	dma_addr_t src;	/* programmed into PDMASAR */
	dma_addr_t dst;	/* programmed into PDMADAR */
	u32 chcr;	/* PDMACHCR value; PDMACHCR_DE is ORed in at start */
};

/* Platform data: the table of slaves served by this controller. */
struct audmapp_pdata {
	struct audmapp_slave_config *slave;	/* slave table */
	int slave_num;	/* number of entries in @slave */
};
33
34#endif /* SH_AUDMAPP_H */