author	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-10 11:55:08 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-10 11:55:08 -0400
commit	6c61403a446b5ee54c21cecabdc821acf06f96bf (patch)
tree	26423d750d6e0d793ac1751b92025250461e9a4e
parent	edf2377c4776ce20ae990f27f0248e88a37e25c4 (diff)
parent	8673bcef8c1b07b83e9ee02d5e7f4b66507b03cd (diff)
Merge branch 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma
Pull slave-dmaengine updates from Vinod Koul:
 - New driver for Qcom bam dma
 - New driver for RCAR peri-peri
 - New driver for FSL eDMA
 - Various odd fixes and updates thru the subsystem

* 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma: (29 commits)
  dmaengine: add Qualcomm BAM dma driver
  shdma: add R-Car Audio DMAC peri peri driver
  dmaengine: sirf: enable generic dt binding for dma channels
  dma: omap-dma: Implement device_slave_caps callback
  dmaengine: qcom_bam_dma: Add device tree binding
  dma: dw: Add suspend and resume handling for PCI mode DW_DMAC.
  dma: dw: allocate memory in two stages in probe
  Add new line to test result strings produced in verbose mode
  dmaengine: pch_dma: use tasklet_kill in teardown
  dmaengine: at_hdmac: use tasklet_kill in teardown
  dma: cppi41: start tear down only if channel is busy
  usb: musb: musb_cppi41: Dont reprogram DMA if tear down is initiated
  dmaengine: s3c24xx-dma: make phy->irq signed for error handling
  dma: imx-dma: Add missing module owner field
  dma: imx-dma: Replace printk with dev_*
  dma: fsl-edma: fix static checker warning of NULL dereference
  dma: Remove comment about embedding dma_slave_config into custom structs
  dma: mmp_tdma: move to generic device tree binding
  dma: mmp_pdma: add IRQF_SHARED when request irq
  dma: edma: Fix memory leak in edma_prep_dma_cyclic()
  ...
-rw-r--r--	Documentation/devicetree/bindings/dma/fsl-edma.txt	76
-rw-r--r--	Documentation/devicetree/bindings/dma/qcom_bam_dma.txt	41
-rw-r--r--	Documentation/devicetree/bindings/dma/sirfsoc-dma.txt	43
-rw-r--r--	arch/arm/boot/dts/atlas6.dtsi	2
-rw-r--r--	arch/arm/boot/dts/prima2.dtsi	2
-rw-r--r--	drivers/dma/Kconfig	21
-rw-r--r--	drivers/dma/Makefile	2
-rw-r--r--	drivers/dma/acpi-dma.c	17
-rw-r--r--	drivers/dma/at_hdmac.c	1
-rw-r--r--	drivers/dma/cppi41.c	7
-rw-r--r--	drivers/dma/dmaengine.c	9
-rw-r--r--	drivers/dma/dmatest.c	4
-rw-r--r--	drivers/dma/dw/core.c	21
-rw-r--r--	drivers/dma/dw/pci.c	36
-rw-r--r--	drivers/dma/dw/regs.h	4
-rw-r--r--	drivers/dma/edma.c	5
-rw-r--r--	drivers/dma/fsl-edma.c	975
-rw-r--r--	drivers/dma/imx-dma.c	13
-rw-r--r--	drivers/dma/mmp_pdma.c	8
-rw-r--r--	drivers/dma/mmp_tdma.c	50
-rw-r--r--	drivers/dma/omap-dma.c	18
-rw-r--r--	drivers/dma/pch_dma.c	4
-rw-r--r--	drivers/dma/qcom_bam_dma.c	1111
-rw-r--r--	drivers/dma/s3c24xx-dma.c	2
-rw-r--r--	drivers/dma/sh/Kconfig	6
-rw-r--r--	drivers/dma/sh/Makefile	1
-rw-r--r--	drivers/dma/sh/rcar-audmapp.c	320
-rw-r--r--	drivers/dma/sh/shdma-base.c	10
-rw-r--r--	drivers/dma/sh/shdma-of.c	3
-rw-r--r--	drivers/dma/sh/shdmac.c	13
-rw-r--r--	drivers/dma/sh/sudmac.c	4
-rw-r--r--	drivers/dma/sirf-dma.c	23
-rw-r--r--	include/linux/acpi_dma.h	5
-rw-r--r--	include/linux/dmaengine.h	14
-rw-r--r--	include/linux/dw_dmac.h	5
-rw-r--r--	include/linux/platform_data/dma-rcar-audmapp.h	34
36 files changed, 2837 insertions, 73 deletions
diff --git a/Documentation/devicetree/bindings/dma/fsl-edma.txt b/Documentation/devicetree/bindings/dma/fsl-edma.txt
new file mode 100644
index 000000000000..191d7bd8a6fe
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/fsl-edma.txt
@@ -0,0 +1,76 @@
+* Freescale enhanced Direct Memory Access (eDMA) Controller
+
+  The eDMA channels have multiplex capability configured by programmable
+memory-mapped registers. Channels are split into two groups, called DMAMUX0
+and DMAMUX1; a specific DMA request source can only be multiplexed by a
+channel of one particular group, DMAMUX0 or DMAMUX1, but not both.
+
+* eDMA Controller
+Required properties:
+- compatible :
+	- "fsl,vf610-edma" for eDMA similar to that on the Vybrid vf610 SoC
+- reg : Specifies base physical address(es) and size of the eDMA registers.
+	The 1st region is the eDMA control register's address and size.
+	The 2nd and the 3rd regions are the programmable channel multiplexing
+	control registers' addresses and sizes.
+- interrupts : A list of interrupt-specifiers, one for each entry in
+	interrupt-names.
+- interrupt-names : Should contain:
+	"edma-tx" - the transmission interrupt
+	"edma-err" - the error interrupt
+- #dma-cells : Must be <2>.
+	The 1st cell specifies the DMAMUX (0 for DMAMUX0 and 1 for DMAMUX1).
+	A specific request source can only be multiplexed by a specific
+	channel group, called a DMAMUX.
+	The 2nd cell specifies the request source (slot) ID.
+	See the SoC's reference manual for all the supported request sources.
+- dma-channels : Number of channels supported by the controller
+- clock-names : A list of channel group clock names. Should contain:
+	"dmamux0" - clock name of mux0 group
+	"dmamux1" - clock name of mux1 group
+- clocks : A list of phandle and clock-specifier pairs, one for each entry in
+	clock-names.
+
+Optional properties:
+- big-endian: If present, registers and hardware scatter/gather descriptors
+	of the eDMA are implemented in big endian mode; otherwise in little
+	endian mode.
+
+
+Examples:
+
+edma0: dma-controller@40018000 {
+	#dma-cells = <2>;
+	compatible = "fsl,vf610-edma";
+	reg = <0x40018000 0x2000>,
+		<0x40024000 0x1000>,
+		<0x40025000 0x1000>;
+	interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>,
+		<0 9 IRQ_TYPE_LEVEL_HIGH>;
+	interrupt-names = "edma-tx", "edma-err";
+	dma-channels = <32>;
+	clock-names = "dmamux0", "dmamux1";
+	clocks = <&clks VF610_CLK_DMAMUX0>,
+		<&clks VF610_CLK_DMAMUX1>;
+};
+
+
+* DMA clients
+DMA client drivers that use the DMA function must use the format described
+in the dma.txt file, using a two-cell specifier for each channel: the 1st
+specifies the channel group (DMAMUX) in which this request can be multiplexed,
+and the 2nd specifies the request source.
+
+Examples:
+
+sai2: sai@40031000 {
+	compatible = "fsl,vf610-sai";
+	reg = <0x40031000 0x1000>;
+	interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>;
+	clock-names = "sai";
+	clocks = <&clks VF610_CLK_SAI2>;
+	dma-names = "tx", "rx";
+	dmas = <&edma0 0 21>,
+		<&edma0 0 20>;
+	status = "disabled";
+};
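
For context, a minimal client-side sketch (not part of this patch; the helper name sai_request_dma() is hypothetical) of how a driver consumes the dmas/dma-names properties documented above. The dmaengine core resolves <&edma0 0 21> and <&edma0 0 20> through the controller's two-cell xlate callback:

#include <linux/dmaengine.h>
#include <linux/platform_device.h>

/* Hypothetical helper: request the "tx"/"rx" channels that the sai2
 * node's dma-names property declares. */
static int sai_request_dma(struct platform_device *pdev,
			   struct dma_chan **tx, struct dma_chan **rx)
{
	*tx = dma_request_slave_channel(&pdev->dev, "tx");
	*rx = dma_request_slave_channel(&pdev->dev, "rx");
	if (!*tx || !*rx) {
		if (*tx)
			dma_release_channel(*tx);
		if (*rx)
			dma_release_channel(*rx);
		return -ENODEV;
	}
	return 0;
}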
diff --git a/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt b/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt
new file mode 100644
index 000000000000..d75a9d767022
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt
@@ -0,0 +1,41 @@
+QCOM BAM DMA controller
+
+Required properties:
+- compatible: must contain "qcom,bam-v1.4.0" for MSM8974
+- reg: Address range for DMA registers
+- interrupts: Should contain the one interrupt shared by all channels
+- #dma-cells: must be <1>, the cell in the dmas property of the client device
+  represents the channel number
+- clocks: required clock
+- clock-names: must contain "bam_clk" entry
+- qcom,ee : indicates the active Execution Environment identifier (0-7) used in
+  the secure world.
+
+Example:
+
+	uart-bam: dma@f9984000 {
+		compatible = "qcom,bam-v1.4.0";
+		reg = <0xf9984000 0x15000>;
+		interrupts = <0 94 0>;
+		clocks = <&gcc GCC_BAM_DMA_AHB_CLK>;
+		clock-names = "bam_clk";
+		#dma-cells = <1>;
+		qcom,ee = <0>;
+	};
+
+DMA clients must use the format described in the dma.txt file, using a two cell
+specifier for each channel.
+
+Example:
+	serial@f991e000 {
+		compatible = "qcom,msm-uart";
+		reg = <0xf991e000 0x1000>,
+		      <0xf9944000 0x19000>;
+		interrupts = <0 108 0>;
+		clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>,
+			 <&gcc GCC_BLSP1_AHB_CLK>;
+		clock-names = "core", "iface";
+
+		dmas = <&uart-bam 0>, <&uart-bam 1>;
+		dma-names = "rx", "tx";
+	};
diff --git a/Documentation/devicetree/bindings/dma/sirfsoc-dma.txt b/Documentation/devicetree/bindings/dma/sirfsoc-dma.txt
new file mode 100644
index 000000000000..ecbc96ad36f8
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/sirfsoc-dma.txt
@@ -0,0 +1,43 @@
+* CSR SiRFSoC DMA controller
+
+See dma.txt first
+
+Required properties:
+- compatible: Should be "sirf,prima2-dmac" or "sirf,marco-dmac"
+- reg: Should contain DMA registers location and length.
+- interrupts: Should contain one interrupt shared by all channels
+- #dma-cells: must be <1>; used to represent the number of integer
+  cells in the dmas property of a client device.
+- clocks: clock required
+
+Example:
+
+Controller:
+dmac0: dma-controller@b00b0000 {
+	compatible = "sirf,prima2-dmac";
+	reg = <0xb00b0000 0x10000>;
+	interrupts = <12>;
+	clocks = <&clks 24>;
+	#dma-cells = <1>;
+};
+
+
+Client:
+Fill in the specific dma request line in dmas. In the example below, the
+spi0 read channel request line is 9 of the 2nd dma controller, while the
+write channel uses 4 of the 2nd dma controller; the spi1 read channel request
+line is 12 of the 1st dma controller, while the write channel uses 13 of the
+1st dma controller:
+
+spi0: spi@b00d0000 {
+	compatible = "sirf,prima2-spi";
+	dmas = <&dmac1 9>,
+		<&dmac1 4>;
+	dma-names = "rx", "tx";
+};
+
+spi1: spi@b0170000 {
+	compatible = "sirf,prima2-spi";
+	dmas = <&dmac0 12>,
+		<&dmac0 13>;
+	dma-names = "rx", "tx";
+};
diff --git a/arch/arm/boot/dts/atlas6.dtsi b/arch/arm/boot/dts/atlas6.dtsi
index 55d3f79c2ef5..9d72674049d6 100644
--- a/arch/arm/boot/dts/atlas6.dtsi
+++ b/arch/arm/boot/dts/atlas6.dtsi
@@ -271,6 +271,7 @@
 			reg = <0xb00b0000 0x10000>;
 			interrupts = <12>;
 			clocks = <&clks 24>;
+			#dma-cells = <1>;
 		};
 
 		dmac1: dma-controller@b0160000 {
@@ -279,6 +280,7 @@
 			reg = <0xb0160000 0x10000>;
 			interrupts = <13>;
 			clocks = <&clks 25>;
+			#dma-cells = <1>;
 		};
 
 		vip@b00C0000 {
diff --git a/arch/arm/boot/dts/prima2.dtsi b/arch/arm/boot/dts/prima2.dtsi
index 20145526cd7b..1e82571d6823 100644
--- a/arch/arm/boot/dts/prima2.dtsi
+++ b/arch/arm/boot/dts/prima2.dtsi
@@ -287,6 +287,7 @@
 			reg = <0xb00b0000 0x10000>;
 			interrupts = <12>;
 			clocks = <&clks 24>;
+			#dma-cells = <1>;
 		};
 
 		dmac1: dma-controller@b0160000 {
@@ -295,6 +296,7 @@
 			reg = <0xb0160000 0x10000>;
 			interrupts = <13>;
 			clocks = <&clks 25>;
+			#dma-cells = <1>;
 		};
 
 		vip@b00C0000 {
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 605b016bcea4..ba06d1d2f99e 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -308,7 +308,7 @@ config DMA_OMAP
 
 config DMA_BCM2835
 	tristate "BCM2835 DMA engine support"
-	depends on (ARCH_BCM2835 || MACH_BCM2708)
+	depends on ARCH_BCM2835
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 
@@ -350,6 +350,16 @@ config MOXART_DMA
 	select DMA_VIRTUAL_CHANNELS
 	help
 	  Enable support for the MOXA ART SoC DMA controller.
+
+config FSL_EDMA
+	tristate "Freescale eDMA engine support"
+	depends on OF
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Support the Freescale eDMA engine with programmable channel
+	  multiplexing capability for DMA request sources(slot).
+	  This module can be found on Freescale Vybrid and LS-1 SoCs.
 
 config DMA_ENGINE
 	bool
@@ -401,4 +411,13 @@ config DMATEST
 config DMA_ENGINE_RAID
 	bool
 
+config QCOM_BAM_DMA
+	tristate "QCOM BAM DMA support"
+	depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	---help---
+	  Enable support for the QCOM BAM DMA controller.  This controller
+	  provides DMA capabilities for a variety of on-chip devices.
+
 endif
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index a029d0f4a1be..5150c82c9caf 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -44,3 +44,5 @@ obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
 obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
+obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
+obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index 1e506afa33f5..de361a156b34 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -13,6 +13,7 @@
  */
 
 #include <linux/device.h>
+#include <linux/err.h>
 #include <linux/module.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
@@ -265,7 +266,7 @@ EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_register);
  */
 void devm_acpi_dma_controller_free(struct device *dev)
 {
-	WARN_ON(devres_destroy(dev, devm_acpi_dma_release, NULL, NULL));
+	WARN_ON(devres_release(dev, devm_acpi_dma_release, NULL, NULL));
 }
 EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
 
@@ -343,7 +344,7 @@ static int acpi_dma_parse_fixed_dma(struct acpi_resource *res, void *data)
  * @index: index of FixedDMA descriptor for @dev
  *
  * Return:
- * Pointer to appropriate dma channel on success or NULL on error.
+ * Pointer to appropriate dma channel on success or an error pointer.
  */
 struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
 		size_t index)
@@ -358,10 +359,10 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
 
 	/* Check if the device was enumerated by ACPI */
 	if (!dev || !ACPI_HANDLE(dev))
-		return NULL;
+		return ERR_PTR(-ENODEV);
 
 	if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev))
-		return NULL;
+		return ERR_PTR(-ENODEV);
 
 	memset(&pdata, 0, sizeof(pdata));
 	pdata.index = index;
@@ -376,7 +377,7 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
 	acpi_dev_free_resource_list(&resource_list);
 
 	if (dma_spec->slave_id < 0 || dma_spec->chan_id < 0)
-		return NULL;
+		return ERR_PTR(-ENODEV);
 
 	mutex_lock(&acpi_dma_lock);
 
@@ -399,7 +400,7 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
 	}
 
 	mutex_unlock(&acpi_dma_lock);
-	return chan;
+	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
 }
 EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index);
 
@@ -413,7 +414,7 @@ EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index);
 * the first FixedDMA descriptor is TX and second is RX.
 *
 * Return:
- * Pointer to appropriate dma channel on success or NULL on error.
+ * Pointer to appropriate dma channel on success or an error pointer.
 */
 struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
 		const char *name)
@@ -425,7 +426,7 @@ struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
 	else if (!strcmp(name, "rx"))
 		index = 1;
 	else
-		return NULL;
+		return ERR_PTR(-ENODEV);
 
 	return acpi_dma_request_slave_chan_by_index(dev, index);
 }
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index e2c04dc81e2a..c13a3bb0f594 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1569,7 +1569,6 @@ static int at_dma_remove(struct platform_device *pdev)
 
 		/* Disable interrupts */
 		atc_disable_chan_irq(atdma, chan->chan_id);
-		tasklet_disable(&atchan->tasklet);
 
 		tasklet_kill(&atchan->tasklet);
 		list_del(&chan->device_node);
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index c18aebf7d5aa..d028f36ae655 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -620,12 +620,15 @@ static int cppi41_stop_chan(struct dma_chan *chan)
 	u32 desc_phys;
 	int ret;
 
+	desc_phys = lower_32_bits(c->desc_phys);
+	desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
+	if (!cdd->chan_busy[desc_num])
+		return 0;
+
 	ret = cppi41_tear_down_chan(c);
 	if (ret)
 		return ret;
 
-	desc_phys = lower_32_bits(c->desc_phys);
-	desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
 	WARN_ON(!cdd->chan_busy[desc_num]);
 	cdd->chan_busy[desc_num] = NULL;
 
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index ed610b497518..a886713937fd 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -627,18 +627,13 @@ EXPORT_SYMBOL_GPL(__dma_request_channel);
 struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
 						  const char *name)
 {
-	struct dma_chan *chan;
-
 	/* If device-tree is present get slave info from here */
 	if (dev->of_node)
 		return of_dma_request_slave_channel(dev->of_node, name);
 
 	/* If device was enumerated by ACPI get slave info from here */
-	if (ACPI_HANDLE(dev)) {
-		chan = acpi_dma_request_slave_chan_by_name(dev, name);
-		if (chan)
-			return chan;
-	}
+	if (ACPI_HANDLE(dev))
+		return acpi_dma_request_slave_chan_by_name(dev, name);
 
 	return ERR_PTR(-ENODEV);
 }
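
The NULL-to-ERR_PTR conversion above (here and in acpi-dma.c) lets callers of the _reason variant distinguish "defer and retry" from "no channel ever". A minimal sketch of the intended caller pattern; the consumer function name is illustrative, not from this patch:

#include <linux/dmaengine.h>
#include <linux/err.h>

/* Illustrative caller: with the ERR_PTR-based API, -EPROBE_DEFER can be
 * propagated so the client probes again once the DMA controller is up. */
static int example_get_tx_chan(struct device *dev, struct dma_chan **out)
{
	struct dma_chan *chan;

	chan = dma_request_slave_channel_reason(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER or -ENODEV */

	*out = chan;
	return 0;
}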
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 05b6dea770a4..e27cec25c59e 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -340,7 +340,7 @@ static unsigned int min_odd(unsigned int x, unsigned int y)
 static void result(const char *err, unsigned int n, unsigned int src_off,
 		   unsigned int dst_off, unsigned int len, unsigned long data)
 {
-	pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)",
+	pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
 		current->comm, n, err, src_off, dst_off, len, data);
 }
 
@@ -348,7 +348,7 @@ static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
 		       unsigned int dst_off, unsigned int len,
 		       unsigned long data)
 {
-	pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)",
+	pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
 		 current->comm, n, err, src_off, dst_off, len, data);
 }
 
354 354
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 13ac3f240e79..cfdbb92aae1d 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -33,8 +33,8 @@
  * of which use ARM any more).  See the "Databook" from Synopsys for
  * information beyond what licensees probably provide.
  *
- * The driver has currently been tested only with the Atmel AT32AP7000,
- * which does not support descriptor writeback.
+ * The driver has been tested with the Atmel AT32AP7000, which does not
+ * support descriptor writeback.
  */
 
 static inline bool is_request_line_unset(struct dw_dma_chan *dwc)
@@ -1479,7 +1479,6 @@ static void dw_dma_off(struct dw_dma *dw)
 int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 {
 	struct dw_dma *dw;
-	size_t size;
 	bool autocfg;
 	unsigned int dw_params;
 	unsigned int nr_channels;
@@ -1487,6 +1486,13 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 	int err;
 	int i;
 
+	dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
+	if (!dw)
+		return -ENOMEM;
+
+	dw->regs = chip->regs;
+	chip->dw = dw;
+
 	dw_params = dma_read_byaddr(chip->regs, DW_PARAMS);
 	autocfg = dw_params >> DW_PARAMS_EN & 0x1;
 
@@ -1509,9 +1515,9 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 	else
 		nr_channels = pdata->nr_channels;
 
-	size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan);
-	dw = devm_kzalloc(chip->dev, size, GFP_KERNEL);
-	if (!dw)
+	dw->chan = devm_kcalloc(chip->dev, nr_channels, sizeof(*dw->chan),
+				GFP_KERNEL);
+	if (!dw->chan)
 		return -ENOMEM;
 
 	dw->clk = devm_clk_get(chip->dev, "hclk");
@@ -1519,9 +1525,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 		return PTR_ERR(dw->clk);
 	clk_prepare_enable(dw->clk);
 
-	dw->regs = chip->regs;
-	chip->dw = dw;
-
 	/* Get hardware configuration parameters */
 	if (autocfg) {
 		max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index e89fc24b8293..fec59f1a77bb 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -75,6 +75,36 @@ static void dw_pci_remove(struct pci_dev *pdev)
 		dev_warn(&pdev->dev, "can't remove device properly: %d\n", ret);
 }
 
+#ifdef CONFIG_PM_SLEEP
+
+static int dw_pci_suspend_late(struct device *dev)
+{
+	struct pci_dev *pci = to_pci_dev(dev);
+	struct dw_dma_chip *chip = pci_get_drvdata(pci);
+
+	return dw_dma_suspend(chip);
+};
+
+static int dw_pci_resume_early(struct device *dev)
+{
+	struct pci_dev *pci = to_pci_dev(dev);
+	struct dw_dma_chip *chip = pci_get_drvdata(pci);
+
+	return dw_dma_resume(chip);
+};
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define dw_pci_suspend_late	NULL
+#define dw_pci_resume_early	NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops dw_pci_dev_pm_ops = {
+	.suspend_late = dw_pci_suspend_late,
+	.resume_early = dw_pci_resume_early,
+};
+
 static DEFINE_PCI_DEVICE_TABLE(dw_pci_id_table) = {
 	/* Medfield */
 	{ PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_pci_pdata },
@@ -83,6 +113,9 @@ static DEFINE_PCI_DEVICE_TABLE(dw_pci_id_table) = {
 	/* BayTrail */
 	{ PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_pci_pdata },
 	{ PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_pci_pdata },
+
+	/* Haswell */
+	{ PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_pdata },
 	{ }
 };
 MODULE_DEVICE_TABLE(pci, dw_pci_id_table);
@@ -92,6 +125,9 @@ static struct pci_driver dw_pci_driver = {
 	.id_table = dw_pci_id_table,
 	.probe = dw_pci_probe,
 	.remove = dw_pci_remove,
+	.driver = {
+		.pm = &dw_pci_dev_pm_ops,
+	},
 };
 
 module_pci_driver(dw_pci_driver);
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index deb4274f80f4..bb98d3e91e8b 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -252,13 +252,13 @@ struct dw_dma {
 	struct tasklet_struct	tasklet;
 	struct clk		*clk;
 
+	/* channels */
+	struct dw_dma_chan	*chan;
 	u8			all_chan_mask;
 
 	/* hardware configuration */
 	unsigned char		nr_masters;
 	unsigned char		data_width[4];
-
-	struct dw_dma_chan	chan[0];
 };
 
 static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
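
Replacing the chan[0] flexible array member with a pointer is what enables the two-stage allocation in dw_dma_probe() above: the dw_dma struct can be allocated (and chip->dw set) before nr_channels is known from autoconfiguration. A self-contained sketch of the general pattern; all names here are illustrative, not the driver's:

#include <linux/device.h>
#include <linux/slab.h>

struct example_chan { int id; };

struct example_engine {
	void __iomem *regs;
	unsigned int nr_chan;
	struct example_chan *chan;	/* second-stage array, was chan[0] */
};

/* Hypothetical stand-in for reading an autoconfiguration register. */
static unsigned int example_read_nr_channels(void __iomem *regs)
{
	return 8;
}

/* Illustrative two-stage probe: allocate the fixed part first so it can
 * be used while discovering the channel count, then size the array. */
static struct example_engine *example_alloc(struct device *dev,
					    void __iomem *regs)
{
	struct example_engine *e;

	e = devm_kzalloc(dev, sizeof(*e), GFP_KERNEL);
	if (!e)
		return NULL;

	e->regs = regs;
	e->nr_chan = example_read_nr_channels(regs);

	e->chan = devm_kcalloc(dev, e->nr_chan, sizeof(*e->chan), GFP_KERNEL);
	if (!e->chan)
		return NULL;

	return e;
}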
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index cd8da451d199..cd04eb7b182e 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -539,6 +539,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 				edma_alloc_slot(EDMA_CTLR(echan->ch_num),
 						EDMA_SLOT_ANY);
 			if (echan->slot[i] < 0) {
+				kfree(edesc);
 				dev_err(dev, "Failed to allocate slot\n");
 				return NULL;
 			}
@@ -553,8 +554,10 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
 				       dst_addr, burst, dev_width, period_len,
 				       direction);
-		if (ret < 0)
+		if (ret < 0) {
+			kfree(edesc);
 			return NULL;
+		}
 
 		if (direction == DMA_DEV_TO_MEM)
 			dst_addr += period_len;
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
new file mode 100644
index 000000000000..381e793184ba
--- /dev/null
+++ b/drivers/dma/fsl-edma.c
@@ -0,0 +1,975 @@
1/*
2 * drivers/dma/fsl-edma.c
3 *
4 * Copyright 2013-2014 Freescale Semiconductor, Inc.
5 *
6 * Driver for the Freescale eDMA engine with flexible channel multiplexing
7 * capability for DMA request sources. The eDMA block can be found on some
8 * Vybrid and Layerscape SoCs.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/interrupt.h>
19#include <linux/clk.h>
20#include <linux/dma-mapping.h>
21#include <linux/dmapool.h>
22#include <linux/slab.h>
23#include <linux/spinlock.h>
24#include <linux/of.h>
25#include <linux/of_device.h>
26#include <linux/of_address.h>
27#include <linux/of_irq.h>
28#include <linux/of_dma.h>
29
30#include "virt-dma.h"
31
32#define EDMA_CR 0x00
33#define EDMA_ES 0x04
34#define EDMA_ERQ 0x0C
35#define EDMA_EEI 0x14
36#define EDMA_SERQ 0x1B
37#define EDMA_CERQ 0x1A
38#define EDMA_SEEI 0x19
39#define EDMA_CEEI 0x18
40#define EDMA_CINT 0x1F
41#define EDMA_CERR 0x1E
42#define EDMA_SSRT 0x1D
43#define EDMA_CDNE 0x1C
44#define EDMA_INTR 0x24
45#define EDMA_ERR 0x2C
46
47#define EDMA_TCD_SADDR(x) (0x1000 + 32 * (x))
48#define EDMA_TCD_SOFF(x) (0x1004 + 32 * (x))
49#define EDMA_TCD_ATTR(x) (0x1006 + 32 * (x))
50#define EDMA_TCD_NBYTES(x) (0x1008 + 32 * (x))
51#define EDMA_TCD_SLAST(x) (0x100C + 32 * (x))
52#define EDMA_TCD_DADDR(x) (0x1010 + 32 * (x))
53#define EDMA_TCD_DOFF(x) (0x1014 + 32 * (x))
54#define EDMA_TCD_CITER_ELINK(x) (0x1016 + 32 * (x))
55#define EDMA_TCD_CITER(x) (0x1016 + 32 * (x))
56#define EDMA_TCD_DLAST_SGA(x) (0x1018 + 32 * (x))
57#define EDMA_TCD_CSR(x) (0x101C + 32 * (x))
58#define EDMA_TCD_BITER_ELINK(x) (0x101E + 32 * (x))
59#define EDMA_TCD_BITER(x) (0x101E + 32 * (x))
60
61#define EDMA_CR_EDBG BIT(1)
62#define EDMA_CR_ERCA BIT(2)
63#define EDMA_CR_ERGA BIT(3)
64#define EDMA_CR_HOE BIT(4)
65#define EDMA_CR_HALT BIT(5)
66#define EDMA_CR_CLM BIT(6)
67#define EDMA_CR_EMLM BIT(7)
68#define EDMA_CR_ECX BIT(16)
69#define EDMA_CR_CX BIT(17)
70
71#define EDMA_SEEI_SEEI(x) ((x) & 0x1F)
72#define EDMA_CEEI_CEEI(x) ((x) & 0x1F)
73#define EDMA_CINT_CINT(x) ((x) & 0x1F)
74#define EDMA_CERR_CERR(x) ((x) & 0x1F)
75
76#define EDMA_TCD_ATTR_DSIZE(x) (((x) & 0x0007))
77#define EDMA_TCD_ATTR_DMOD(x) (((x) & 0x001F) << 3)
78#define EDMA_TCD_ATTR_SSIZE(x) (((x) & 0x0007) << 8)
79#define EDMA_TCD_ATTR_SMOD(x) (((x) & 0x001F) << 11)
80#define EDMA_TCD_ATTR_SSIZE_8BIT (0x0000)
81#define EDMA_TCD_ATTR_SSIZE_16BIT (0x0100)
82#define EDMA_TCD_ATTR_SSIZE_32BIT (0x0200)
83#define EDMA_TCD_ATTR_SSIZE_64BIT (0x0300)
84#define EDMA_TCD_ATTR_SSIZE_32BYTE (0x0500)
85#define EDMA_TCD_ATTR_DSIZE_8BIT (0x0000)
86#define EDMA_TCD_ATTR_DSIZE_16BIT (0x0001)
87#define EDMA_TCD_ATTR_DSIZE_32BIT (0x0002)
88#define EDMA_TCD_ATTR_DSIZE_64BIT (0x0003)
89#define EDMA_TCD_ATTR_DSIZE_32BYTE (0x0005)
90
91#define EDMA_TCD_SOFF_SOFF(x) (x)
92#define EDMA_TCD_NBYTES_NBYTES(x) (x)
93#define EDMA_TCD_SLAST_SLAST(x) (x)
94#define EDMA_TCD_DADDR_DADDR(x) (x)
95#define EDMA_TCD_CITER_CITER(x) ((x) & 0x7FFF)
96#define EDMA_TCD_DOFF_DOFF(x) (x)
97#define EDMA_TCD_DLAST_SGA_DLAST_SGA(x) (x)
98#define EDMA_TCD_BITER_BITER(x) ((x) & 0x7FFF)
99
100#define EDMA_TCD_CSR_START BIT(0)
101#define EDMA_TCD_CSR_INT_MAJOR BIT(1)
102#define EDMA_TCD_CSR_INT_HALF BIT(2)
103#define EDMA_TCD_CSR_D_REQ BIT(3)
104#define EDMA_TCD_CSR_E_SG BIT(4)
105#define EDMA_TCD_CSR_E_LINK BIT(5)
106#define EDMA_TCD_CSR_ACTIVE BIT(6)
107#define EDMA_TCD_CSR_DONE BIT(7)
108
109#define EDMAMUX_CHCFG_DIS 0x0
110#define EDMAMUX_CHCFG_ENBL 0x80
111#define EDMAMUX_CHCFG_SOURCE(n) ((n) & 0x3F)
112
113#define DMAMUX_NR 2
114
115#define FSL_EDMA_BUSWIDTHS BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
116 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
117 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
118 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
119
120struct fsl_edma_hw_tcd {
121 u32 saddr;
122 u16 soff;
123 u16 attr;
124 u32 nbytes;
125 u32 slast;
126 u32 daddr;
127 u16 doff;
128 u16 citer;
129 u32 dlast_sga;
130 u16 csr;
131 u16 biter;
132};
133
134struct fsl_edma_sw_tcd {
135 dma_addr_t ptcd;
136 struct fsl_edma_hw_tcd *vtcd;
137};
138
139struct fsl_edma_slave_config {
140 enum dma_transfer_direction dir;
141 enum dma_slave_buswidth addr_width;
142 u32 dev_addr;
143 u32 burst;
144 u32 attr;
145};
146
147struct fsl_edma_chan {
148 struct virt_dma_chan vchan;
149 enum dma_status status;
150 struct fsl_edma_engine *edma;
151 struct fsl_edma_desc *edesc;
152 struct fsl_edma_slave_config fsc;
153 struct dma_pool *tcd_pool;
154};
155
156struct fsl_edma_desc {
157 struct virt_dma_desc vdesc;
158 struct fsl_edma_chan *echan;
159 bool iscyclic;
160 unsigned int n_tcds;
161 struct fsl_edma_sw_tcd tcd[];
162};
163
164struct fsl_edma_engine {
165 struct dma_device dma_dev;
166 void __iomem *membase;
167 void __iomem *muxbase[DMAMUX_NR];
168 struct clk *muxclk[DMAMUX_NR];
169 struct mutex fsl_edma_mutex;
170 u32 n_chans;
171 int txirq;
172 int errirq;
173 bool big_endian;
174 struct fsl_edma_chan chans[];
175};
176
177/*
178 * R/W functions for big- or little-endian registers
179 * the eDMA controller's endian is independent of the CPU core's endian.
180 */
181
182static u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr)
183{
184 if (edma->big_endian)
185 return ioread16be(addr);
186 else
187 return ioread16(addr);
188}
189
190static u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
191{
192 if (edma->big_endian)
193 return ioread32be(addr);
194 else
195 return ioread32(addr);
196}
197
198static void edma_writeb(struct fsl_edma_engine *edma, u8 val, void __iomem *addr)
199{
200 iowrite8(val, addr);
201}
202
203static void edma_writew(struct fsl_edma_engine *edma, u16 val, void __iomem *addr)
204{
205 if (edma->big_endian)
206 iowrite16be(val, addr);
207 else
208 iowrite16(val, addr);
209}
210
211static void edma_writel(struct fsl_edma_engine *edma, u32 val, void __iomem *addr)
212{
213 if (edma->big_endian)
214 iowrite32be(val, addr);
215 else
216 iowrite32(val, addr);
217}
218
219static struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
220{
221 return container_of(chan, struct fsl_edma_chan, vchan.chan);
222}
223
224static struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
225{
226 return container_of(vd, struct fsl_edma_desc, vdesc);
227}
228
229static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
230{
231 void __iomem *addr = fsl_chan->edma->membase;
232 u32 ch = fsl_chan->vchan.chan.chan_id;
233
234 edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), addr + EDMA_SEEI);
235 edma_writeb(fsl_chan->edma, ch, addr + EDMA_SERQ);
236}
237
238static void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
239{
240 void __iomem *addr = fsl_chan->edma->membase;
241 u32 ch = fsl_chan->vchan.chan.chan_id;
242
243 edma_writeb(fsl_chan->edma, ch, addr + EDMA_CERQ);
244 edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), addr + EDMA_CEEI);
245}
246
247static void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
248 unsigned int slot, bool enable)
249{
250 u32 ch = fsl_chan->vchan.chan.chan_id;
251 void __iomem *muxaddr = fsl_chan->edma->muxbase[ch / DMAMUX_NR];
252 unsigned chans_per_mux, ch_off;
253
254 chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
255 ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
256
257 if (enable)
258 edma_writeb(fsl_chan->edma,
259 EDMAMUX_CHCFG_ENBL | EDMAMUX_CHCFG_SOURCE(slot),
260 muxaddr + ch_off);
261 else
262 edma_writeb(fsl_chan->edma, EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
263}
264
265static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
266{
267 switch (addr_width) {
268 case 1:
269 return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
270 case 2:
271 return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
272 case 4:
273 return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
274 case 8:
275 return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
276 default:
277 return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
278 }
279}
280
281static void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
282{
283 struct fsl_edma_desc *fsl_desc;
284 int i;
285
286 fsl_desc = to_fsl_edma_desc(vdesc);
287 for (i = 0; i < fsl_desc->n_tcds; i++)
288 dma_pool_free(fsl_desc->echan->tcd_pool,
289 fsl_desc->tcd[i].vtcd,
290 fsl_desc->tcd[i].ptcd);
291 kfree(fsl_desc);
292}
293
294static int fsl_edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
295 unsigned long arg)
296{
297 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
298 struct dma_slave_config *cfg = (void *)arg;
299 unsigned long flags;
300 LIST_HEAD(head);
301
302 switch (cmd) {
303 case DMA_TERMINATE_ALL:
304 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
305 fsl_edma_disable_request(fsl_chan);
306 fsl_chan->edesc = NULL;
307 vchan_get_all_descriptors(&fsl_chan->vchan, &head);
308 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
309 vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
310 return 0;
311
312 case DMA_SLAVE_CONFIG:
313 fsl_chan->fsc.dir = cfg->direction;
314 if (cfg->direction == DMA_DEV_TO_MEM) {
315 fsl_chan->fsc.dev_addr = cfg->src_addr;
316 fsl_chan->fsc.addr_width = cfg->src_addr_width;
317 fsl_chan->fsc.burst = cfg->src_maxburst;
318 fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width);
319 } else if (cfg->direction == DMA_MEM_TO_DEV) {
320 fsl_chan->fsc.dev_addr = cfg->dst_addr;
321 fsl_chan->fsc.addr_width = cfg->dst_addr_width;
322 fsl_chan->fsc.burst = cfg->dst_maxburst;
323 fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width);
324 } else {
325 return -EINVAL;
326 }
327 return 0;
328
329 case DMA_PAUSE:
330 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
331 if (fsl_chan->edesc) {
332 fsl_edma_disable_request(fsl_chan);
333 fsl_chan->status = DMA_PAUSED;
334 }
335 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
336 return 0;
337
338 case DMA_RESUME:
339 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
340 if (fsl_chan->edesc) {
341 fsl_edma_enable_request(fsl_chan);
342 fsl_chan->status = DMA_IN_PROGRESS;
343 }
344 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
345 return 0;
346
347 default:
348 return -ENXIO;
349 }
350}
351
352static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
353 struct virt_dma_desc *vdesc, bool in_progress)
354{
355 struct fsl_edma_desc *edesc = fsl_chan->edesc;
356 void __iomem *addr = fsl_chan->edma->membase;
357 u32 ch = fsl_chan->vchan.chan.chan_id;
358 enum dma_transfer_direction dir = fsl_chan->fsc.dir;
359 dma_addr_t cur_addr, dma_addr;
360 size_t len, size;
361 int i;
362
363 /* calculate the total size in this desc */
364 for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
365 len += edma_readl(fsl_chan->edma, &(edesc->tcd[i].vtcd->nbytes))
366 * edma_readw(fsl_chan->edma, &(edesc->tcd[i].vtcd->biter));
367
368 if (!in_progress)
369 return len;
370
371 if (dir == DMA_MEM_TO_DEV)
372 cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_SADDR(ch));
373 else
374 cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_DADDR(ch));
375
376 /* figure out the finished and calculate the residue */
377 for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
378 size = edma_readl(fsl_chan->edma, &(edesc->tcd[i].vtcd->nbytes))
379 * edma_readw(fsl_chan->edma, &(edesc->tcd[i].vtcd->biter));
380 if (dir == DMA_MEM_TO_DEV)
381 dma_addr = edma_readl(fsl_chan->edma,
382 &(edesc->tcd[i].vtcd->saddr));
383 else
384 dma_addr = edma_readl(fsl_chan->edma,
385 &(edesc->tcd[i].vtcd->daddr));
386
387 len -= size;
388 if (cur_addr > dma_addr && cur_addr < dma_addr + size) {
389 len += dma_addr + size - cur_addr;
390 break;
391 }
392 }
393
394 return len;
395}
396
397static enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
398 dma_cookie_t cookie, struct dma_tx_state *txstate)
399{
400 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
401 struct virt_dma_desc *vdesc;
402 enum dma_status status;
403 unsigned long flags;
404
405 status = dma_cookie_status(chan, cookie, txstate);
406 if (status == DMA_COMPLETE)
407 return status;
408
409 if (!txstate)
410 return fsl_chan->status;
411
412 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
413 vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
414 if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
415 txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, true);
416 else if (vdesc)
417 txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, false);
418 else
419 txstate->residue = 0;
420
421 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
422
423 return fsl_chan->status;
424}
425
426static void fsl_edma_set_tcd_params(struct fsl_edma_chan *fsl_chan,
427 u32 src, u32 dst, u16 attr, u16 soff, u32 nbytes,
428 u32 slast, u16 citer, u16 biter, u32 doff, u32 dlast_sga,
429 u16 csr)
430{
431 void __iomem *addr = fsl_chan->edma->membase;
432 u32 ch = fsl_chan->vchan.chan.chan_id;
433
434 /*
435 * TCD parameters have been swapped in fill_tcd_params(),
436 * so just write them to registers in the cpu endian here
437 */
438 writew(0, addr + EDMA_TCD_CSR(ch));
439 writel(src, addr + EDMA_TCD_SADDR(ch));
440 writel(dst, addr + EDMA_TCD_DADDR(ch));
441 writew(attr, addr + EDMA_TCD_ATTR(ch));
442 writew(soff, addr + EDMA_TCD_SOFF(ch));
443 writel(nbytes, addr + EDMA_TCD_NBYTES(ch));
444 writel(slast, addr + EDMA_TCD_SLAST(ch));
445 writew(citer, addr + EDMA_TCD_CITER(ch));
446 writew(biter, addr + EDMA_TCD_BITER(ch));
447 writew(doff, addr + EDMA_TCD_DOFF(ch));
448 writel(dlast_sga, addr + EDMA_TCD_DLAST_SGA(ch));
449 writew(csr, addr + EDMA_TCD_CSR(ch));
450}
451
452static void fill_tcd_params(struct fsl_edma_engine *edma,
453 struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
454 u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
455 u16 biter, u16 doff, u32 dlast_sga, bool major_int,
456 bool disable_req, bool enable_sg)
457{
458 u16 csr = 0;
459
460 /*
461 * eDMA hardware SGs require the TCD parameters stored in memory
462 * the same endian as the eDMA module so that they can be loaded
463 * automatically by the engine
464 */
465 edma_writel(edma, src, &(tcd->saddr));
466 edma_writel(edma, dst, &(tcd->daddr));
467 edma_writew(edma, attr, &(tcd->attr));
468 edma_writew(edma, EDMA_TCD_SOFF_SOFF(soff), &(tcd->soff));
469 edma_writel(edma, EDMA_TCD_NBYTES_NBYTES(nbytes), &(tcd->nbytes));
470 edma_writel(edma, EDMA_TCD_SLAST_SLAST(slast), &(tcd->slast));
471 edma_writew(edma, EDMA_TCD_CITER_CITER(citer), &(tcd->citer));
472 edma_writew(edma, EDMA_TCD_DOFF_DOFF(doff), &(tcd->doff));
473 edma_writel(edma, EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga), &(tcd->dlast_sga));
474 edma_writew(edma, EDMA_TCD_BITER_BITER(biter), &(tcd->biter));
475 if (major_int)
476 csr |= EDMA_TCD_CSR_INT_MAJOR;
477
478 if (disable_req)
479 csr |= EDMA_TCD_CSR_D_REQ;
480
481 if (enable_sg)
482 csr |= EDMA_TCD_CSR_E_SG;
483
484 edma_writew(edma, csr, &(tcd->csr));
485}
486
487static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
488 int sg_len)
489{
490 struct fsl_edma_desc *fsl_desc;
491 int i;
492
493 fsl_desc = kzalloc(sizeof(*fsl_desc) + sizeof(struct fsl_edma_sw_tcd) * sg_len,
494 GFP_NOWAIT);
495 if (!fsl_desc)
496 return NULL;
497
498 fsl_desc->echan = fsl_chan;
499 fsl_desc->n_tcds = sg_len;
500 for (i = 0; i < sg_len; i++) {
501 fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
502 GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
503 if (!fsl_desc->tcd[i].vtcd)
504 goto err;
505 }
506 return fsl_desc;
507
508err:
509 while (--i >= 0)
510 dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
511 fsl_desc->tcd[i].ptcd);
512 kfree(fsl_desc);
513 return NULL;
514}
515
516static struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
517 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
518 size_t period_len, enum dma_transfer_direction direction,
519 unsigned long flags, void *context)
520{
521 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
522 struct fsl_edma_desc *fsl_desc;
523 dma_addr_t dma_buf_next;
524 int sg_len, i;
525 u32 src_addr, dst_addr, last_sg, nbytes;
526 u16 soff, doff, iter;
527
528 if (!is_slave_direction(fsl_chan->fsc.dir))
529 return NULL;
530
531 sg_len = buf_len / period_len;
532 fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
533 if (!fsl_desc)
534 return NULL;
535 fsl_desc->iscyclic = true;
536
537 dma_buf_next = dma_addr;
538 nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
539 iter = period_len / nbytes;
540
541 for (i = 0; i < sg_len; i++) {
542 if (dma_buf_next >= dma_addr + buf_len)
543 dma_buf_next = dma_addr;
544
545 /* get next sg's physical address */
546 last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
547
548 if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
549 src_addr = dma_buf_next;
550 dst_addr = fsl_chan->fsc.dev_addr;
551 soff = fsl_chan->fsc.addr_width;
552 doff = 0;
553 } else {
554 src_addr = fsl_chan->fsc.dev_addr;
555 dst_addr = dma_buf_next;
556 soff = 0;
557 doff = fsl_chan->fsc.addr_width;
558 }
559
560 fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd, src_addr,
561 dst_addr, fsl_chan->fsc.attr, soff, nbytes, 0,
562 iter, iter, doff, last_sg, true, false, true);
563 dma_buf_next += period_len;
564 }
565
566 return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
567}
568
569static struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
570 struct dma_chan *chan, struct scatterlist *sgl,
571 unsigned int sg_len, enum dma_transfer_direction direction,
572 unsigned long flags, void *context)
573{
574 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
575 struct fsl_edma_desc *fsl_desc;
576 struct scatterlist *sg;
577 u32 src_addr, dst_addr, last_sg, nbytes;
578 u16 soff, doff, iter;
579 int i;
580
581 if (!is_slave_direction(fsl_chan->fsc.dir))
582 return NULL;
583
584 fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
585 if (!fsl_desc)
586 return NULL;
587 fsl_desc->iscyclic = false;
588
589 nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
590 for_each_sg(sgl, sg, sg_len, i) {
591 /* get next sg's physical address */
592 last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
593
594 if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
595 src_addr = sg_dma_address(sg);
596 dst_addr = fsl_chan->fsc.dev_addr;
597 soff = fsl_chan->fsc.addr_width;
598 doff = 0;
599 } else {
600 src_addr = fsl_chan->fsc.dev_addr;
601 dst_addr = sg_dma_address(sg);
602 soff = 0;
603 doff = fsl_chan->fsc.addr_width;
604 }
605
606 iter = sg_dma_len(sg) / nbytes;
607 if (i < sg_len - 1) {
608 last_sg = fsl_desc->tcd[(i + 1)].ptcd;
609 fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd,
610 src_addr, dst_addr, fsl_chan->fsc.attr,
611 soff, nbytes, 0, iter, iter, doff, last_sg,
612 false, false, true);
613 } else {
614 last_sg = 0;
615 fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd,
616 src_addr, dst_addr, fsl_chan->fsc.attr,
617 soff, nbytes, 0, iter, iter, doff, last_sg,
618 true, true, false);
619 }
620 }
621
622 return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
623}
624
625static void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
626{
627 struct fsl_edma_hw_tcd *tcd;
628 struct virt_dma_desc *vdesc;
629
630 vdesc = vchan_next_desc(&fsl_chan->vchan);
631 if (!vdesc)
632 return;
633 fsl_chan->edesc = to_fsl_edma_desc(vdesc);
634 tcd = fsl_chan->edesc->tcd[0].vtcd;
635 fsl_edma_set_tcd_params(fsl_chan, tcd->saddr, tcd->daddr, tcd->attr,
636 tcd->soff, tcd->nbytes, tcd->slast, tcd->citer,
637 tcd->biter, tcd->doff, tcd->dlast_sga, tcd->csr);
638 fsl_edma_enable_request(fsl_chan);
639 fsl_chan->status = DMA_IN_PROGRESS;
640}
641
642static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
643{
644 struct fsl_edma_engine *fsl_edma = dev_id;
645 unsigned int intr, ch;
646 void __iomem *base_addr;
647 struct fsl_edma_chan *fsl_chan;
648
649 base_addr = fsl_edma->membase;
650
651 intr = edma_readl(fsl_edma, base_addr + EDMA_INTR);
652 if (!intr)
653 return IRQ_NONE;
654
655 for (ch = 0; ch < fsl_edma->n_chans; ch++) {
656 if (intr & (0x1 << ch)) {
657 edma_writeb(fsl_edma, EDMA_CINT_CINT(ch),
658 base_addr + EDMA_CINT);
659
660 fsl_chan = &fsl_edma->chans[ch];
661
662 spin_lock(&fsl_chan->vchan.lock);
663 if (!fsl_chan->edesc->iscyclic) {
664 list_del(&fsl_chan->edesc->vdesc.node);
665 vchan_cookie_complete(&fsl_chan->edesc->vdesc);
666 fsl_chan->edesc = NULL;
667 fsl_chan->status = DMA_COMPLETE;
668 } else {
669 vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
670 }
671
672 if (!fsl_chan->edesc)
673 fsl_edma_xfer_desc(fsl_chan);
674
675 spin_unlock(&fsl_chan->vchan.lock);
676 }
677 }
678 return IRQ_HANDLED;
679}
680
681static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
682{
683 struct fsl_edma_engine *fsl_edma = dev_id;
684 unsigned int err, ch;
685
686 err = edma_readl(fsl_edma, fsl_edma->membase + EDMA_ERR);
687 if (!err)
688 return IRQ_NONE;
689
690 for (ch = 0; ch < fsl_edma->n_chans; ch++) {
691 if (err & (0x1 << ch)) {
692 fsl_edma_disable_request(&fsl_edma->chans[ch]);
693 edma_writeb(fsl_edma, EDMA_CERR_CERR(ch),
694 fsl_edma->membase + EDMA_CERR);
695 fsl_edma->chans[ch].status = DMA_ERROR;
696 }
697 }
698 return IRQ_HANDLED;
699}
700
701static irqreturn_t fsl_edma_irq_handler(int irq, void *dev_id)
702{
703 if (fsl_edma_tx_handler(irq, dev_id) == IRQ_HANDLED)
704 return IRQ_HANDLED;
705
706 return fsl_edma_err_handler(irq, dev_id);
707}
708
709static void fsl_edma_issue_pending(struct dma_chan *chan)
710{
711 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
712 unsigned long flags;
713
714 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
715
716 if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
717 fsl_edma_xfer_desc(fsl_chan);
718
719 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
720}
721
722static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
723 struct of_dma *ofdma)
724{
725 struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
726 struct dma_chan *chan, *_chan;
727
728 if (dma_spec->args_count != 2)
729 return NULL;
730
731 mutex_lock(&fsl_edma->fsl_edma_mutex);
732 list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) {
733 if (chan->client_count)
734 continue;
735 if ((chan->chan_id / DMAMUX_NR) == dma_spec->args[0]) {
736 chan = dma_get_slave_channel(chan);
737 if (chan) {
738 chan->device->privatecnt++;
739 fsl_edma_chan_mux(to_fsl_edma_chan(chan),
740 dma_spec->args[1], true);
741 mutex_unlock(&fsl_edma->fsl_edma_mutex);
742 return chan;
743 }
744 }
745 }
746 mutex_unlock(&fsl_edma->fsl_edma_mutex);
747 return NULL;
748}
749
750static int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
751{
752 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
753
754 fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
755 sizeof(struct fsl_edma_hw_tcd),
756 32, 0);
757 return 0;
758}
759
760static void fsl_edma_free_chan_resources(struct dma_chan *chan)
761{
762 struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
763 unsigned long flags;
764 LIST_HEAD(head);
765
766 spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
767 fsl_edma_disable_request(fsl_chan);
768 fsl_edma_chan_mux(fsl_chan, 0, false);
769 fsl_chan->edesc = NULL;
770 vchan_get_all_descriptors(&fsl_chan->vchan, &head);
771 spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
772
773 vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
774 dma_pool_destroy(fsl_chan->tcd_pool);
775 fsl_chan->tcd_pool = NULL;
776}
777
778static int fsl_dma_device_slave_caps(struct dma_chan *dchan,
779 struct dma_slave_caps *caps)
780{
781 caps->src_addr_widths = FSL_EDMA_BUSWIDTHS;
782 caps->dstn_addr_widths = FSL_EDMA_BUSWIDTHS;
783 caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
784 caps->cmd_pause = true;
785 caps->cmd_terminate = true;
786
787 return 0;
788}
789
790static int
791fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
792{
793 int ret;
794
795 fsl_edma->txirq = platform_get_irq_byname(pdev, "edma-tx");
796 if (fsl_edma->txirq < 0) {
797 dev_err(&pdev->dev, "Can't get edma-tx irq.\n");
798 return fsl_edma->txirq;
799 }
800
801 fsl_edma->errirq = platform_get_irq_byname(pdev, "edma-err");
802 if (fsl_edma->errirq < 0) {
803 dev_err(&pdev->dev, "Can't get edma-err irq.\n");
804 return fsl_edma->errirq;
805 }
806
807 if (fsl_edma->txirq == fsl_edma->errirq) {
808 ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
809 fsl_edma_irq_handler, 0, "eDMA", fsl_edma);
810 if (ret) {
811 dev_err(&pdev->dev, "Can't register eDMA IRQ.\n");
812 return ret;
813 }
814 } else {
815 ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
816 fsl_edma_tx_handler, 0, "eDMA tx", fsl_edma);
817 if (ret) {
818 dev_err(&pdev->dev, "Can't register eDMA tx IRQ.\n");
819 return ret;
820 }
821
822 ret = devm_request_irq(&pdev->dev, fsl_edma->errirq,
823 fsl_edma_err_handler, 0, "eDMA err", fsl_edma);
824 if (ret) {
825 dev_err(&pdev->dev, "Can't register eDMA err IRQ.\n");
826 return ret;
827 }
828 }
829
830 return 0;
831}
832
833static int fsl_edma_probe(struct platform_device *pdev)
834{
835 struct device_node *np = pdev->dev.of_node;
836 struct fsl_edma_engine *fsl_edma;
837 struct fsl_edma_chan *fsl_chan;
838 struct resource *res;
839 int len, chans;
840 int ret, i;
841
842 ret = of_property_read_u32(np, "dma-channels", &chans);
843 if (ret) {
844 dev_err(&pdev->dev, "Can't get dma-channels.\n");
845 return ret;
846 }
847
848 len = sizeof(*fsl_edma) + sizeof(*fsl_chan) * chans;
849 fsl_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
850 if (!fsl_edma)
851 return -ENOMEM;
852
853 fsl_edma->n_chans = chans;
854 mutex_init(&fsl_edma->fsl_edma_mutex);
855
856 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
857 fsl_edma->membase = devm_ioremap_resource(&pdev->dev, res);
858 if (IS_ERR(fsl_edma->membase))
859 return PTR_ERR(fsl_edma->membase);
860
861 for (i = 0; i < DMAMUX_NR; i++) {
862 char clkname[32];
863
864 res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
865 fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res);
866 if (IS_ERR(fsl_edma->muxbase[i]))
867 return PTR_ERR(fsl_edma->muxbase[i]);
868
869 sprintf(clkname, "dmamux%d", i);
870 fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname);
871 if (IS_ERR(fsl_edma->muxclk[i])) {
872 dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
873 return PTR_ERR(fsl_edma->muxclk[i]);
874 }
875
876 ret = clk_prepare_enable(fsl_edma->muxclk[i]);
877 if (ret) {
878 dev_err(&pdev->dev, "DMAMUX clk block failed.\n");
879 return ret;
880 }
881
882 }
883
884 ret = fsl_edma_irq_init(pdev, fsl_edma);
885 if (ret)
886 return ret;
887
888 fsl_edma->big_endian = of_property_read_bool(np, "big-endian");
889
890 INIT_LIST_HEAD(&fsl_edma->dma_dev.channels);
891 for (i = 0; i < fsl_edma->n_chans; i++) {
892 struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
893
894 fsl_chan->edma = fsl_edma;
895
896 fsl_chan->vchan.desc_free = fsl_edma_free_desc;
897 vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
898
899 edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
900 fsl_edma_chan_mux(fsl_chan, 0, false);
901 }
902
903 dma_cap_set(DMA_PRIVATE, fsl_edma->dma_dev.cap_mask);
904 dma_cap_set(DMA_SLAVE, fsl_edma->dma_dev.cap_mask);
905 dma_cap_set(DMA_CYCLIC, fsl_edma->dma_dev.cap_mask);
906
907 fsl_edma->dma_dev.dev = &pdev->dev;
908 fsl_edma->dma_dev.device_alloc_chan_resources
909 = fsl_edma_alloc_chan_resources;
910 fsl_edma->dma_dev.device_free_chan_resources
911 = fsl_edma_free_chan_resources;
912 fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
913 fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
914 fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
915 fsl_edma->dma_dev.device_control = fsl_edma_control;
916 fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;
917 fsl_edma->dma_dev.device_slave_caps = fsl_dma_device_slave_caps;
918
919 platform_set_drvdata(pdev, fsl_edma);
920
921 ret = dma_async_device_register(&fsl_edma->dma_dev);
922 if (ret) {
923 dev_err(&pdev->dev, "Can't register Freescale eDMA engine.\n");
924 return ret;
925 }
926
927 ret = of_dma_controller_register(np, fsl_edma_xlate, fsl_edma);
928 if (ret) {
929 dev_err(&pdev->dev, "Can't register Freescale eDMA of_dma.\n");
930 dma_async_device_unregister(&fsl_edma->dma_dev);
931 return ret;
932 }
933
934 /* enable round robin arbitration */
935 edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, fsl_edma->membase + EDMA_CR);
936
937 return 0;
938}
939
940static int fsl_edma_remove(struct platform_device *pdev)
941{
942 struct device_node *np = pdev->dev.of_node;
943 struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);
944 int i;
945
946 of_dma_controller_free(np);
947 dma_async_device_unregister(&fsl_edma->dma_dev);
948
949 for (i = 0; i < DMAMUX_NR; i++)
950 clk_disable_unprepare(fsl_edma->muxclk[i]);
951
952 return 0;
953}
954
955static const struct of_device_id fsl_edma_dt_ids[] = {
956 { .compatible = "fsl,vf610-edma", },
957 { /* sentinel */ }
958};
959MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
960
961static struct platform_driver fsl_edma_driver = {
962 .driver = {
963 .name = "fsl-edma",
964 .owner = THIS_MODULE,
965 .of_match_table = fsl_edma_dt_ids,
966 },
967 .probe = fsl_edma_probe,
968 .remove = fsl_edma_remove,
969};
970
971module_platform_driver(fsl_edma_driver);
972
973MODULE_ALIAS("platform:fsl-edma");
974MODULE_DESCRIPTION("Freescale eDMA engine driver");
975MODULE_LICENSE("GPL v2");
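
The fsl-edma probe path registers the engine with both the dmaengine core and the OF translation layer, so consumers resolve channels purely through the "dmas"/"dma-names" properties of their own device-tree node. A minimal consumer-side sketch, not part of this patch (the "rx" name and the example_ identifier are hypothetical):

#include <linux/dmaengine.h>

static int example_attach_edma(struct device *dev)
{
	struct dma_chan *chan;

	/* resolved through fsl_edma_xlate() via the consumer's dmas property */
	chan = dma_request_slave_channel(dev, "rx");
	if (!chan)
		return -ENODEV;

	/* ...dmaengine_slave_config(), prep, submit, issue pending... */

	dma_release_channel(chan);
	return 0;
}
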
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 6f9ac2022abd..286660a12cc6 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -422,12 +422,12 @@ static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
 		/* Tasklet error handler */
 		tasklet_schedule(&imxdma->channel[i].dma_tasklet);
 
-		printk(KERN_WARNING
+		dev_warn(imxdma->dev,
 			"DMA timeout on channel %d -%s%s%s%s\n", i,
 			errcode & IMX_DMA_ERR_BURST ? " burst" : "",
 			errcode & IMX_DMA_ERR_REQUEST ? " request" : "",
 			errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
 			errcode & IMX_DMA_ERR_BUFFER ? " buffer" : "");
 	}
 	return IRQ_HANDLED;
 }
@@ -1236,6 +1236,7 @@ static int imxdma_remove(struct platform_device *pdev)
 static struct platform_driver imxdma_driver = {
 	.driver		= {
 		.name	= "imx-dma",
+		.owner	= THIS_MODULE,
 		.of_match_table = imx_dma_of_dev_id,
 	},
 	.id_table	= imx_dma_devtype,
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index b439679f4126..bf02e7beb51a 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -867,8 +867,8 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
 	phy->base = pdev->base;
 
 	if (irq) {
-		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler, 0,
-				       "pdma", phy);
+		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler,
+				       IRQF_SHARED, "pdma", phy);
 		if (ret) {
 			dev_err(pdev->dev, "channel request irq fail!\n");
 			return ret;
@@ -957,8 +957,8 @@ static int mmp_pdma_probe(struct platform_device *op)
 	if (irq_num != dma_channels) {
 		/* all chan share one irq, demux inside */
 		irq = platform_get_irq(op, 0);
-		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler, 0,
-				       "pdma", pdev);
+		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler,
+				       IRQF_SHARED, "pdma", pdev);
 		if (ret)
 			return ret;
 	}
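
Passing IRQF_SHARED is only safe because a handler on a shared line must check its own hardware first and return IRQ_NONE for interrupts raised by the other devices sharing the line. A generic sketch of that contract (illustrative only; struct example_dev and the EXAMPLE_* registers are hypothetical):

static irqreturn_t example_shared_handler(int irq, void *dev_id)
{
	struct example_dev *ed = dev_id;
	u32 status = readl(ed->regs + EXAMPLE_STATUS);

	if (!status)
		return IRQ_NONE;	/* not ours; let the next handler run */

	writel(status, ed->regs + EXAMPLE_STATUS_CLR);	/* ack what we saw */
	return IRQ_HANDLED;
}
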
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 33f96aaa80c7..724f7f4c9720 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -22,6 +22,7 @@
 #include <mach/regs-icu.h>
 #include <linux/platform_data/dma-mmp_tdma.h>
 #include <linux/of_device.h>
+#include <linux/of_dma.h>
 
 #include "dmaengine.h"
 
@@ -541,6 +542,45 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
 	return 0;
 }
 
+struct mmp_tdma_filter_param {
+	struct device_node *of_node;
+	unsigned int chan_id;
+};
+
+static bool mmp_tdma_filter_fn(struct dma_chan *chan, void *fn_param)
+{
+	struct mmp_tdma_filter_param *param = fn_param;
+	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+	struct dma_device *pdma_device = tdmac->chan.device;
+
+	if (pdma_device->dev->of_node != param->of_node)
+		return false;
+
+	if (chan->chan_id != param->chan_id)
+		return false;
+
+	return true;
+}
+
+struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec,
+				struct of_dma *ofdma)
+{
+	struct mmp_tdma_device *tdev = ofdma->of_dma_data;
+	dma_cap_mask_t mask = tdev->device.cap_mask;
+	struct mmp_tdma_filter_param param;
+
+	if (dma_spec->args_count != 1)
+		return NULL;
+
+	param.of_node = ofdma->of_node;
+	param.chan_id = dma_spec->args[0];
+
+	if (param.chan_id >= TDMA_CHANNEL_NUM)
+		return NULL;
+
+	return dma_request_channel(mask, mmp_tdma_filter_fn, &param);
+}
+
 static struct of_device_id mmp_tdma_dt_ids[] = {
 	{ .compatible = "marvell,adma-1.0", .data = (void *)MMP_AUD_TDMA},
 	{ .compatible = "marvell,pxa910-squ", .data = (void *)PXA910_SQU},
@@ -631,6 +671,16 @@ static int mmp_tdma_probe(struct platform_device *pdev)
 		return ret;
 	}
 
+	if (pdev->dev.of_node) {
+		ret = of_dma_controller_register(pdev->dev.of_node,
+						 mmp_tdma_xlate, tdev);
+		if (ret) {
+			dev_err(tdev->device.dev,
+				"failed to register controller\n");
+			dma_async_device_unregister(&tdev->device);
+		}
+	}
+
 	dev_info(tdev->device.dev, "initialized\n");
 	return 0;
 }
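
The two hunks above form the usual of_dma pairing: mmp_tdma_xlate() turns the single #dma-cells argument into a channel index, and the filter function matches both the controller node and that index during dma_request_channel(). A consumer-side sketch, inside a consumer's probe() with dev its struct device, assuming a device-tree node with dmas = <&adma 1> and dma-names = "tx" (both values are illustrative):

	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, "tx");
	if (!chan)
		return -ENODEV;	/* mmp_tdma_xlate() rejected the request */
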
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 64ceca2920b8..b19f04f4390b 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -1088,6 +1088,23 @@ static void omap_dma_free(struct omap_dmadev *od)
 	}
 }
 
+#define OMAP_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+static int omap_dma_device_slave_caps(struct dma_chan *dchan,
+				      struct dma_slave_caps *caps)
+{
+	caps->src_addr_widths = OMAP_DMA_BUSWIDTHS;
+	caps->dstn_addr_widths = OMAP_DMA_BUSWIDTHS;
+	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	caps->cmd_pause = true;
+	caps->cmd_terminate = true;
+	caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+	return 0;
+}
+
 static int omap_dma_probe(struct platform_device *pdev)
 {
 	struct omap_dmadev *od;
@@ -1118,6 +1135,7 @@ static int omap_dma_probe(struct platform_device *pdev)
 	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
 	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
 	od->ddev.device_control = omap_dma_control;
+	od->ddev.device_slave_caps = omap_dma_device_slave_caps;
 	od->ddev.dev = &pdev->dev;
 	INIT_LIST_HEAD(&od->ddev.channels);
 	INIT_LIST_HEAD(&od->pending);
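
With device_slave_caps wired up, generic client code can query what a channel supports instead of hard-coding it; the core helper dma_get_slave_caps() invokes the callback added above. A hedged sketch (the example_ name is hypothetical):

static bool example_supports_dev_to_mem(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps) < 0)
		return false;

	return (caps.directions & BIT(DMA_DEV_TO_MEM)) &&
	       (caps.src_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES));
}
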
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 61fdc54a3c88..05fa548bd659 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -964,16 +964,16 @@ static void pch_dma_remove(struct pci_dev *pdev)
 	if (pd) {
 		dma_async_device_unregister(&pd->dma);
 
+		free_irq(pdev->irq, pd);
+
 		list_for_each_entry_safe(chan, _c, &pd->dma.channels,
 					 device_node) {
 			pd_chan = to_pd_chan(chan);
 
-			tasklet_disable(&pd_chan->tasklet);
 			tasklet_kill(&pd_chan->tasklet);
 		}
 
 		pci_pool_destroy(pd->pool);
-		free_irq(pdev->irq, pd);
 		pci_iounmap(pdev, pd->membase);
 		pci_release_regions(pdev);
 		pci_disable_device(pdev);
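
The reordering matters: free_irq() now runs before the tasklets are killed, so no interrupt can re-schedule a tasklet once tasklet_kill() has returned, and the dropped tasklet_disable() (which only raises the disable count and can leave a pending tasklet blocked forever) is no longer needed. The generic shape of that teardown order, as a sketch (struct example_dev is hypothetical):

static void example_teardown(struct example_dev *ed)
{
	/* 1. cut off the source of new work first */
	free_irq(ed->irq, ed);

	/*
	 * 2. tasklet_kill() waits for a running tasklet and flushes a
	 * pending one; with the irq gone nothing can re-schedule it.
	 */
	tasklet_kill(&ed->tasklet);
}
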
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c
new file mode 100644
index 000000000000..82c923146e49
--- /dev/null
+++ b/drivers/dma/qcom_bam_dma.c
@@ -0,0 +1,1111 @@
1/*
2 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14/*
15 * QCOM BAM DMA engine driver
16 *
17 * QCOM BAM DMA blocks are distributed amongst a number of the on-chip
18 * peripherals on the MSM 8x74. The configuration of each channel is dependent
19 * on the way it is hard wired to that specific peripheral. The peripheral
20 * device tree entries specify the configuration of each channel.
21 *
22 * The DMA controller requires the use of external memory for storage of the
23 * hardware descriptors for each channel. The descriptor FIFO is accessed as a
24 * circular buffer and operations are managed according to the offset within the
25 * FIFO. After pipe/channel reset, all of the pipe registers and internal state
26 * are back to defaults.
27 *
28 * During DMA operations, we write descriptors to the FIFO, being careful to
29 * handle wrapping and then write the last FIFO offset to that channel's
30 * P_EVNT_REG register to kick off the transaction. The P_SW_OFSTS register
31 * indicates the current FIFO offset that is being processed, so there is some
32 * indication of where the hardware is currently working.
33 */
34
35#include <linux/kernel.h>
36#include <linux/io.h>
37#include <linux/init.h>
38#include <linux/slab.h>
39#include <linux/module.h>
40#include <linux/interrupt.h>
41#include <linux/dma-mapping.h>
42#include <linux/scatterlist.h>
43#include <linux/device.h>
44#include <linux/platform_device.h>
45#include <linux/of.h>
46#include <linux/of_address.h>
47#include <linux/of_irq.h>
48#include <linux/of_dma.h>
49#include <linux/clk.h>
50#include <linux/dmaengine.h>
51
52#include "dmaengine.h"
53#include "virt-dma.h"
54
55struct bam_desc_hw {
56 u32 addr; /* Buffer physical address */
57 u16 size; /* Buffer size in bytes */
58 u16 flags;
59};
60
61#define DESC_FLAG_INT BIT(15)
62#define DESC_FLAG_EOT BIT(14)
63#define DESC_FLAG_EOB BIT(13)
64
65struct bam_async_desc {
66 struct virt_dma_desc vd;
67
68 u32 num_desc;
69 u32 xfer_len;
70 struct bam_desc_hw *curr_desc;
71
72 enum dma_transfer_direction dir;
73 size_t length;
74 struct bam_desc_hw desc[0];
75};
76
77#define BAM_CTRL 0x0000
78#define BAM_REVISION 0x0004
79#define BAM_SW_REVISION 0x0080
80#define BAM_NUM_PIPES 0x003C
81#define BAM_TIMER 0x0040
82#define BAM_TIMER_CTRL 0x0044
83#define BAM_DESC_CNT_TRSHLD 0x0008
84#define BAM_IRQ_SRCS 0x000C
85#define BAM_IRQ_SRCS_MSK 0x0010
86#define BAM_IRQ_SRCS_UNMASKED 0x0030
87#define BAM_IRQ_STTS 0x0014
88#define BAM_IRQ_CLR 0x0018
89#define BAM_IRQ_EN 0x001C
90#define BAM_CNFG_BITS 0x007C
91#define BAM_IRQ_SRCS_EE(ee) (0x0800 + ((ee) * 0x80))
92#define BAM_IRQ_SRCS_MSK_EE(ee) (0x0804 + ((ee) * 0x80))
93#define BAM_P_CTRL(pipe) (0x1000 + ((pipe) * 0x1000))
94#define BAM_P_RST(pipe) (0x1004 + ((pipe) * 0x1000))
95#define BAM_P_HALT(pipe) (0x1008 + ((pipe) * 0x1000))
96#define BAM_P_IRQ_STTS(pipe) (0x1010 + ((pipe) * 0x1000))
97#define BAM_P_IRQ_CLR(pipe) (0x1014 + ((pipe) * 0x1000))
98#define BAM_P_IRQ_EN(pipe) (0x1018 + ((pipe) * 0x1000))
99#define BAM_P_EVNT_DEST_ADDR(pipe) (0x182C + ((pipe) * 0x1000))
100#define BAM_P_EVNT_REG(pipe) (0x1818 + ((pipe) * 0x1000))
101#define BAM_P_SW_OFSTS(pipe) (0x1800 + ((pipe) * 0x1000))
102#define BAM_P_DATA_FIFO_ADDR(pipe) (0x1824 + ((pipe) * 0x1000))
103#define BAM_P_DESC_FIFO_ADDR(pipe) (0x181C + ((pipe) * 0x1000))
104#define BAM_P_EVNT_TRSHLD(pipe) (0x1828 + ((pipe) * 0x1000))
105#define BAM_P_FIFO_SIZES(pipe) (0x1820 + ((pipe) * 0x1000))
106
107/* BAM CTRL */
108#define BAM_SW_RST BIT(0)
109#define BAM_EN BIT(1)
110#define BAM_EN_ACCUM BIT(4)
111#define BAM_TESTBUS_SEL_SHIFT 5
112#define BAM_TESTBUS_SEL_MASK 0x3F
113#define BAM_DESC_CACHE_SEL_SHIFT 13
114#define BAM_DESC_CACHE_SEL_MASK 0x3
115#define BAM_CACHED_DESC_STORE BIT(15)
116#define IBC_DISABLE BIT(16)
117
118/* BAM REVISION */
119#define REVISION_SHIFT 0
120#define REVISION_MASK 0xFF
121#define NUM_EES_SHIFT 8
122#define NUM_EES_MASK 0xF
123#define CE_BUFFER_SIZE BIT(13)
124#define AXI_ACTIVE BIT(14)
125#define USE_VMIDMT BIT(15)
126#define SECURED BIT(16)
127#define BAM_HAS_NO_BYPASS BIT(17)
128#define HIGH_FREQUENCY_BAM BIT(18)
129#define INACTIV_TMRS_EXST BIT(19)
130#define NUM_INACTIV_TMRS BIT(20)
131#define DESC_CACHE_DEPTH_SHIFT 21
132#define DESC_CACHE_DEPTH_1 (0 << DESC_CACHE_DEPTH_SHIFT)
133#define DESC_CACHE_DEPTH_2 (1 << DESC_CACHE_DEPTH_SHIFT)
134#define DESC_CACHE_DEPTH_3 (2 << DESC_CACHE_DEPTH_SHIFT)
135#define DESC_CACHE_DEPTH_4 (3 << DESC_CACHE_DEPTH_SHIFT)
136#define CMD_DESC_EN BIT(23)
137#define INACTIV_TMR_BASE_SHIFT 24
138#define INACTIV_TMR_BASE_MASK 0xFF
139
140/* BAM NUM PIPES */
141#define BAM_NUM_PIPES_SHIFT 0
142#define BAM_NUM_PIPES_MASK 0xFF
143#define PERIPH_NON_PIPE_GRP_SHIFT 16
144#define PERIPH_NON_PIP_GRP_MASK 0xFF
145#define BAM_NON_PIPE_GRP_SHIFT 24
146#define BAM_NON_PIPE_GRP_MASK 0xFF
147
148/* BAM CNFG BITS */
149#define BAM_PIPE_CNFG BIT(2)
150#define BAM_FULL_PIPE BIT(11)
151#define BAM_NO_EXT_P_RST BIT(12)
152#define BAM_IBC_DISABLE BIT(13)
153#define BAM_SB_CLK_REQ BIT(14)
154#define BAM_PSM_CSW_REQ BIT(15)
155#define BAM_PSM_P_RES BIT(16)
156#define BAM_AU_P_RES BIT(17)
157#define BAM_SI_P_RES BIT(18)
158#define BAM_WB_P_RES BIT(19)
159#define BAM_WB_BLK_CSW BIT(20)
160#define BAM_WB_CSW_ACK_IDL BIT(21)
161#define BAM_WB_RETR_SVPNT BIT(22)
162#define BAM_WB_DSC_AVL_P_RST BIT(23)
163#define BAM_REG_P_EN BIT(24)
164#define BAM_PSM_P_HD_DATA BIT(25)
165#define BAM_AU_ACCUMED BIT(26)
166#define BAM_CMD_ENABLE BIT(27)
167
168#define BAM_CNFG_BITS_DEFAULT (BAM_PIPE_CNFG | \
169 BAM_NO_EXT_P_RST | \
170 BAM_IBC_DISABLE | \
171 BAM_SB_CLK_REQ | \
172 BAM_PSM_CSW_REQ | \
173 BAM_PSM_P_RES | \
174 BAM_AU_P_RES | \
175 BAM_SI_P_RES | \
176 BAM_WB_P_RES | \
177 BAM_WB_BLK_CSW | \
178 BAM_WB_CSW_ACK_IDL | \
179 BAM_WB_RETR_SVPNT | \
180 BAM_WB_DSC_AVL_P_RST | \
181 BAM_REG_P_EN | \
182 BAM_PSM_P_HD_DATA | \
183 BAM_AU_ACCUMED | \
184 BAM_CMD_ENABLE)
185
186/* PIPE CTRL */
187#define P_EN BIT(1)
188#define P_DIRECTION BIT(3)
189#define P_SYS_STRM BIT(4)
190#define P_SYS_MODE BIT(5)
191#define P_AUTO_EOB BIT(6)
192#define P_AUTO_EOB_SEL_SHIFT 7
193#define P_AUTO_EOB_SEL_512 (0 << P_AUTO_EOB_SEL_SHIFT)
194#define P_AUTO_EOB_SEL_256 (1 << P_AUTO_EOB_SEL_SHIFT)
195#define P_AUTO_EOB_SEL_128 (2 << P_AUTO_EOB_SEL_SHIFT)
196#define P_AUTO_EOB_SEL_64 (3 << P_AUTO_EOB_SEL_SHIFT)
197#define P_PREFETCH_LIMIT_SHIFT 9
198#define P_PREFETCH_LIMIT_32 (0 << P_PREFETCH_LIMIT_SHIFT)
199#define P_PREFETCH_LIMIT_16 (1 << P_PREFETCH_LIMIT_SHIFT)
200#define P_PREFETCH_LIMIT_4 (2 << P_PREFETCH_LIMIT_SHIFT)
201#define P_WRITE_NWD BIT(11)
202#define P_LOCK_GROUP_SHIFT 16
203#define P_LOCK_GROUP_MASK 0x1F
204
205/* BAM_DESC_CNT_TRSHLD */
206#define CNT_TRSHLD 0xffff
207#define DEFAULT_CNT_THRSHLD 0x4
208
209/* BAM_IRQ_SRCS */
210#define BAM_IRQ BIT(31)
211#define P_IRQ 0x7fffffff
212
213/* BAM_IRQ_SRCS_MSK */
214#define BAM_IRQ_MSK BAM_IRQ
215#define P_IRQ_MSK P_IRQ
216
217/* BAM_IRQ_STTS */
218#define BAM_TIMER_IRQ BIT(4)
219#define BAM_EMPTY_IRQ BIT(3)
220#define BAM_ERROR_IRQ BIT(2)
221#define BAM_HRESP_ERR_IRQ BIT(1)
222
223/* BAM_IRQ_CLR */
224#define BAM_TIMER_CLR BIT(4)
225#define BAM_EMPTY_CLR BIT(3)
226#define BAM_ERROR_CLR BIT(2)
227#define BAM_HRESP_ERR_CLR BIT(1)
228
229/* BAM_IRQ_EN */
230#define BAM_TIMER_EN BIT(4)
231#define BAM_EMPTY_EN BIT(3)
232#define BAM_ERROR_EN BIT(2)
233#define BAM_HRESP_ERR_EN BIT(1)
234
235/* BAM_P_IRQ_EN */
236#define P_PRCSD_DESC_EN BIT(0)
237#define P_TIMER_EN BIT(1)
238#define P_WAKE_EN BIT(2)
239#define P_OUT_OF_DESC_EN BIT(3)
240#define P_ERR_EN BIT(4)
241#define P_TRNSFR_END_EN BIT(5)
242#define P_DEFAULT_IRQS_EN (P_PRCSD_DESC_EN | P_ERR_EN | P_TRNSFR_END_EN)
243
244/* BAM_P_SW_OFSTS */
245#define P_SW_OFSTS_MASK 0xffff
246
247#define BAM_DESC_FIFO_SIZE SZ_32K
248#define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1)
249#define BAM_MAX_DATA_SIZE (SZ_32K - 8)
250
251struct bam_chan {
252 struct virt_dma_chan vc;
253
254 struct bam_device *bdev;
255
256 /* configuration from device tree */
257 u32 id;
258
259 struct bam_async_desc *curr_txd; /* current running dma */
260
261 /* runtime configuration */
262 struct dma_slave_config slave;
263
264 /* fifo storage */
265 struct bam_desc_hw *fifo_virt;
266 dma_addr_t fifo_phys;
267
268 /* fifo markers */
269 unsigned short head; /* start of active descriptor entries */
270 unsigned short tail; /* end of active descriptor entries */
271
272 unsigned int initialized; /* is the channel hw initialized? */
273 unsigned int paused; /* is the channel paused? */
274 unsigned int reconfigure; /* new slave config? */
275
276 struct list_head node;
277};
278
279static inline struct bam_chan *to_bam_chan(struct dma_chan *common)
280{
281 return container_of(common, struct bam_chan, vc.chan);
282}
283
284struct bam_device {
285 void __iomem *regs;
286 struct device *dev;
287 struct dma_device common;
288 struct device_dma_parameters dma_parms;
289 struct bam_chan *channels;
290 u32 num_channels;
291
292 /* execution environment ID, from DT */
293 u32 ee;
294
295 struct clk *bamclk;
296 int irq;
297
298 /* dma start transaction tasklet */
299 struct tasklet_struct task;
300};
301
302/**
303 * bam_reset_channel - Reset individual BAM DMA channel
304 * @bchan: bam channel
305 *
306 * This function resets a specific BAM channel
307 */
308static void bam_reset_channel(struct bam_chan *bchan)
309{
310 struct bam_device *bdev = bchan->bdev;
311
312 lockdep_assert_held(&bchan->vc.lock);
313
314 /* reset channel */
315 writel_relaxed(1, bdev->regs + BAM_P_RST(bchan->id));
316 writel_relaxed(0, bdev->regs + BAM_P_RST(bchan->id));
317
318 /* don't allow cpu to reorder BAM register accesses done after this */
319 wmb();
320
321 /* make sure hw is initialized when channel is used the first time */
322 bchan->initialized = 0;
323}
324
325/**
326 * bam_chan_init_hw - Initialize channel hardware
327 * @bchan: bam channel
328 *
329 * This function resets and initializes the BAM channel
330 */
331static void bam_chan_init_hw(struct bam_chan *bchan,
332 enum dma_transfer_direction dir)
333{
334 struct bam_device *bdev = bchan->bdev;
335 u32 val;
336
337 /* Reset the channel to clear internal state of the FIFO */
338 bam_reset_channel(bchan);
339
340 /*
341 * write out 8 byte aligned address. We have enough space for this
342 * because we allocated 1 more descriptor (8 bytes) than we can use
343 */
344 writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
345 bdev->regs + BAM_P_DESC_FIFO_ADDR(bchan->id));
346 writel_relaxed(BAM_DESC_FIFO_SIZE, bdev->regs +
347 BAM_P_FIFO_SIZES(bchan->id));
348
349 /* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */
350 writel_relaxed(P_DEFAULT_IRQS_EN, bdev->regs + BAM_P_IRQ_EN(bchan->id));
351
352 /* unmask the specific pipe and EE combo */
353 val = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
354 val |= BIT(bchan->id);
355 writel_relaxed(val, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
356
357 /* don't allow cpu to reorder the channel enable done below */
358 wmb();
359
360 /* set fixed direction and mode, then enable channel */
361 val = P_EN | P_SYS_MODE;
362 if (dir == DMA_DEV_TO_MEM)
363 val |= P_DIRECTION;
364
365 writel_relaxed(val, bdev->regs + BAM_P_CTRL(bchan->id));
366
367 bchan->initialized = 1;
368
369 /* init FIFO pointers */
370 bchan->head = 0;
371 bchan->tail = 0;
372}
373
374/**
375 * bam_alloc_chan - Allocate channel resources for DMA channel.
376 * @chan: specified channel
377 *
378 * This function allocates the FIFO descriptor memory
379 */
380static int bam_alloc_chan(struct dma_chan *chan)
381{
382 struct bam_chan *bchan = to_bam_chan(chan);
383 struct bam_device *bdev = bchan->bdev;
384
385 if (bchan->fifo_virt)
386 return 0;
387
388 /* allocate FIFO descriptor space, but only if necessary */
389 bchan->fifo_virt = dma_alloc_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
390 &bchan->fifo_phys, GFP_KERNEL);
391
392 if (!bchan->fifo_virt) {
393 dev_err(bdev->dev, "Failed to allocate desc fifo\n");
394 return -ENOMEM;
395 }
396
397 return 0;
398}
399
400/**
401 * bam_free_chan - Frees dma resources associated with specific channel
402 * @chan: specified channel
403 *
404 * Free the allocated fifo descriptor memory and channel resources
405 *
406 */
407static void bam_free_chan(struct dma_chan *chan)
408{
409 struct bam_chan *bchan = to_bam_chan(chan);
410 struct bam_device *bdev = bchan->bdev;
411 u32 val;
412 unsigned long flags;
413
414 vchan_free_chan_resources(to_virt_chan(chan));
415
416 if (bchan->curr_txd) {
417 dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
418 return;
419 }
420
421 spin_lock_irqsave(&bchan->vc.lock, flags);
422 bam_reset_channel(bchan);
423 spin_unlock_irqrestore(&bchan->vc.lock, flags);
424
425 dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
426 bchan->fifo_phys);
427 bchan->fifo_virt = NULL;
428
429 /* mask irq for pipe/channel */
430 val = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
431 val &= ~BIT(bchan->id);
432 writel_relaxed(val, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
433
434 /* disable irq */
435 writel_relaxed(0, bdev->regs + BAM_P_IRQ_EN(bchan->id));
436}
437
438/**
439 * bam_slave_config - set slave configuration for channel
440 * @chan: dma channel
441 * @cfg: slave configuration
442 *
443 * Sets slave configuration for channel
444 *
445 */
446static void bam_slave_config(struct bam_chan *bchan,
447 struct dma_slave_config *cfg)
448{
449 memcpy(&bchan->slave, cfg, sizeof(*cfg));
450 bchan->reconfigure = 1;
451}
452
453/**
454 * bam_prep_slave_sg - Prep slave sg transaction
455 *
456 * @chan: dma channel
457 * @sgl: scatter gather list
458 * @sg_len: length of sg
459 * @direction: DMA transfer direction
460 * @flags: DMA flags
461 * @context: transfer context (unused)
462 */
463static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
464 struct scatterlist *sgl, unsigned int sg_len,
465 enum dma_transfer_direction direction, unsigned long flags,
466 void *context)
467{
468 struct bam_chan *bchan = to_bam_chan(chan);
469 struct bam_device *bdev = bchan->bdev;
470 struct bam_async_desc *async_desc;
471 struct scatterlist *sg;
472 u32 i;
473 struct bam_desc_hw *desc;
474 unsigned int num_alloc = 0;
475
476
477 if (!is_slave_direction(direction)) {
478 dev_err(bdev->dev, "invalid dma direction\n");
479 return NULL;
480 }
481
482 /* calculate number of required entries */
483 for_each_sg(sgl, sg, sg_len, i)
484 num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_MAX_DATA_SIZE);
485
486	/* allocate enough room to accommodate the number of entries */
487 async_desc = kzalloc(sizeof(*async_desc) +
488 (num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT);
489
490 if (!async_desc)
491 goto err_out;
492
493 async_desc->num_desc = num_alloc;
494 async_desc->curr_desc = async_desc->desc;
495 async_desc->dir = direction;
496
497 /* fill in temporary descriptors */
498 desc = async_desc->desc;
499 for_each_sg(sgl, sg, sg_len, i) {
500 unsigned int remainder = sg_dma_len(sg);
501 unsigned int curr_offset = 0;
502
503 do {
504 desc->addr = sg_dma_address(sg) + curr_offset;
505
506 if (remainder > BAM_MAX_DATA_SIZE) {
507 desc->size = BAM_MAX_DATA_SIZE;
508 remainder -= BAM_MAX_DATA_SIZE;
509 curr_offset += BAM_MAX_DATA_SIZE;
510 } else {
511 desc->size = remainder;
512 remainder = 0;
513 }
514
515 async_desc->length += desc->size;
516 desc++;
517 } while (remainder > 0);
518 }
519
520 return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
521
522err_out:
523 kfree(async_desc);
524 return NULL;
525}
526
527/**
528 * bam_dma_terminate_all - terminate all transactions on a channel
529 * @bchan: bam dma channel
530 *
531 * Dequeues and frees all transactions
532 * No callbacks are done
533 *
534 */
535static void bam_dma_terminate_all(struct bam_chan *bchan)
536{
537 unsigned long flag;
538 LIST_HEAD(head);
539
540 /* remove all transactions, including active transaction */
541 spin_lock_irqsave(&bchan->vc.lock, flag);
542 if (bchan->curr_txd) {
543 list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued);
544 bchan->curr_txd = NULL;
545 }
546
547 vchan_get_all_descriptors(&bchan->vc, &head);
548 spin_unlock_irqrestore(&bchan->vc.lock, flag);
549
550 vchan_dma_desc_free_list(&bchan->vc, &head);
551}
552
553/**
554 * bam_control - DMA device control
555 * @chan: dma channel
556 * @cmd: control cmd
557 * @arg: cmd argument
558 *
559 * Perform DMA control command
560 *
561 */
562static int bam_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
563 unsigned long arg)
564{
565 struct bam_chan *bchan = to_bam_chan(chan);
566 struct bam_device *bdev = bchan->bdev;
567 int ret = 0;
568 unsigned long flag;
569
570 switch (cmd) {
571 case DMA_PAUSE:
572 spin_lock_irqsave(&bchan->vc.lock, flag);
573 writel_relaxed(1, bdev->regs + BAM_P_HALT(bchan->id));
574 bchan->paused = 1;
575 spin_unlock_irqrestore(&bchan->vc.lock, flag);
576 break;
577
578 case DMA_RESUME:
579 spin_lock_irqsave(&bchan->vc.lock, flag);
580 writel_relaxed(0, bdev->regs + BAM_P_HALT(bchan->id));
581 bchan->paused = 0;
582 spin_unlock_irqrestore(&bchan->vc.lock, flag);
583 break;
584
585 case DMA_TERMINATE_ALL:
586 bam_dma_terminate_all(bchan);
587 break;
588
589 case DMA_SLAVE_CONFIG:
590 spin_lock_irqsave(&bchan->vc.lock, flag);
591 bam_slave_config(bchan, (struct dma_slave_config *)arg);
592 spin_unlock_irqrestore(&bchan->vc.lock, flag);
593 break;
594
595 default:
596 ret = -ENXIO;
597 break;
598 }
599
600 return ret;
601}
602
603/**
604 * process_channel_irqs - processes the channel interrupts
605 * @bdev: bam controller
606 *
607 * This function processes the channel interrupts
608 *
609 */
610static u32 process_channel_irqs(struct bam_device *bdev)
611{
612 u32 i, srcs, pipe_stts;
613 unsigned long flags;
614 struct bam_async_desc *async_desc;
615
616 srcs = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_EE(bdev->ee));
617
618 /* return early if no pipe/channel interrupts are present */
619 if (!(srcs & P_IRQ))
620 return srcs;
621
622 for (i = 0; i < bdev->num_channels; i++) {
623 struct bam_chan *bchan = &bdev->channels[i];
624
625 if (!(srcs & BIT(i)))
626 continue;
627
628 /* clear pipe irq */
629 pipe_stts = readl_relaxed(bdev->regs +
630 BAM_P_IRQ_STTS(i));
631
632 writel_relaxed(pipe_stts, bdev->regs +
633 BAM_P_IRQ_CLR(i));
634
635 spin_lock_irqsave(&bchan->vc.lock, flags);
636 async_desc = bchan->curr_txd;
637
638 if (async_desc) {
639 async_desc->num_desc -= async_desc->xfer_len;
640 async_desc->curr_desc += async_desc->xfer_len;
641 bchan->curr_txd = NULL;
642
643 /* manage FIFO */
644 bchan->head += async_desc->xfer_len;
645 bchan->head %= MAX_DESCRIPTORS;
646
647 /*
648 * if complete, process cookie. Otherwise
649 * push back to front of desc_issued so that
650 * it gets restarted by the tasklet
651 */
652 if (!async_desc->num_desc)
653 vchan_cookie_complete(&async_desc->vd);
654 else
655 list_add(&async_desc->vd.node,
656 &bchan->vc.desc_issued);
657 }
658
659 spin_unlock_irqrestore(&bchan->vc.lock, flags);
660 }
661
662 return srcs;
663}
664
665/**
666 * bam_dma_irq - irq handler for bam controller
667 * @irq: IRQ of interrupt
668 * @data: callback data
669 *
670 * IRQ handler for the bam controller
671 */
672static irqreturn_t bam_dma_irq(int irq, void *data)
673{
674 struct bam_device *bdev = data;
675 u32 clr_mask = 0, srcs = 0;
676
677 srcs |= process_channel_irqs(bdev);
678
679 /* kick off tasklet to start next dma transfer */
680 if (srcs & P_IRQ)
681 tasklet_schedule(&bdev->task);
682
683 if (srcs & BAM_IRQ)
684 clr_mask = readl_relaxed(bdev->regs + BAM_IRQ_STTS);
685
686 /* don't allow reorder of the various accesses to the BAM registers */
687 mb();
688
689 writel_relaxed(clr_mask, bdev->regs + BAM_IRQ_CLR);
690
691 return IRQ_HANDLED;
692}
693
694/**
695 * bam_tx_status - returns status of transaction
696 * @chan: dma channel
697 * @cookie: transaction cookie
698 * @txstate: DMA transaction state
699 *
700 * Return status of dma transaction
701 */
702static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
703 struct dma_tx_state *txstate)
704{
705 struct bam_chan *bchan = to_bam_chan(chan);
706 struct virt_dma_desc *vd;
707 int ret;
708 size_t residue = 0;
709 unsigned int i;
710 unsigned long flags;
711
712 ret = dma_cookie_status(chan, cookie, txstate);
713 if (ret == DMA_COMPLETE)
714 return ret;
715
716 if (!txstate)
717 return bchan->paused ? DMA_PAUSED : ret;
718
719 spin_lock_irqsave(&bchan->vc.lock, flags);
720 vd = vchan_find_desc(&bchan->vc, cookie);
721 if (vd)
722 residue = container_of(vd, struct bam_async_desc, vd)->length;
723 else if (bchan->curr_txd && bchan->curr_txd->vd.tx.cookie == cookie)
724 for (i = 0; i < bchan->curr_txd->num_desc; i++)
725 residue += bchan->curr_txd->curr_desc[i].size;
726
727 spin_unlock_irqrestore(&bchan->vc.lock, flags);
728
729 dma_set_residue(txstate, residue);
730
731 if (ret == DMA_IN_PROGRESS && bchan->paused)
732 ret = DMA_PAUSED;
733
734 return ret;
735}
736
737/**
738 * bam_apply_new_config
739 * @bchan: bam dma channel
740 * @dir: DMA direction
741 */
742static void bam_apply_new_config(struct bam_chan *bchan,
743 enum dma_transfer_direction dir)
744{
745 struct bam_device *bdev = bchan->bdev;
746 u32 maxburst;
747
748 if (dir == DMA_DEV_TO_MEM)
749 maxburst = bchan->slave.src_maxburst;
750 else
751 maxburst = bchan->slave.dst_maxburst;
752
753 writel_relaxed(maxburst, bdev->regs + BAM_DESC_CNT_TRSHLD);
754
755 bchan->reconfigure = 0;
756}
757
758/**
759 * bam_start_dma - start next transaction
760 * @bchan: bam dma channel
761 */
762static void bam_start_dma(struct bam_chan *bchan)
763{
764 struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc);
765 struct bam_device *bdev = bchan->bdev;
766 struct bam_async_desc *async_desc;
767 struct bam_desc_hw *desc;
768 struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
769 sizeof(struct bam_desc_hw));
770
771 lockdep_assert_held(&bchan->vc.lock);
772
773 if (!vd)
774 return;
775
776 list_del(&vd->node);
777
778 async_desc = container_of(vd, struct bam_async_desc, vd);
779 bchan->curr_txd = async_desc;
780
781 /* on first use, initialize the channel hardware */
782 if (!bchan->initialized)
783 bam_chan_init_hw(bchan, async_desc->dir);
784
785 /* apply new slave config changes, if necessary */
786 if (bchan->reconfigure)
787 bam_apply_new_config(bchan, async_desc->dir);
788
789 desc = bchan->curr_txd->curr_desc;
790
791 if (async_desc->num_desc > MAX_DESCRIPTORS)
792 async_desc->xfer_len = MAX_DESCRIPTORS;
793 else
794 async_desc->xfer_len = async_desc->num_desc;
795
796 /* set INT on last descriptor */
797 desc[async_desc->xfer_len - 1].flags |= DESC_FLAG_INT;
798
799 if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
800 u32 partial = MAX_DESCRIPTORS - bchan->tail;
801
802 memcpy(&fifo[bchan->tail], desc,
803 partial * sizeof(struct bam_desc_hw));
804 memcpy(fifo, &desc[partial], (async_desc->xfer_len - partial) *
805 sizeof(struct bam_desc_hw));
806 } else {
807 memcpy(&fifo[bchan->tail], desc,
808 async_desc->xfer_len * sizeof(struct bam_desc_hw));
809 }
810
811 bchan->tail += async_desc->xfer_len;
812 bchan->tail %= MAX_DESCRIPTORS;
813
814 /* ensure descriptor writes and dma start not reordered */
815 wmb();
816 writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
817 bdev->regs + BAM_P_EVNT_REG(bchan->id));
818}
819
820/**
821 * dma_tasklet - DMA IRQ tasklet
822 * @data: tasklet argument (bam controller structure)
823 *
824 * Sets up next DMA operation and then processes all completed transactions
825 */
826static void dma_tasklet(unsigned long data)
827{
828 struct bam_device *bdev = (struct bam_device *)data;
829 struct bam_chan *bchan;
830 unsigned long flags;
831 unsigned int i;
832
833 /* go through the channels and kick off transactions */
834 for (i = 0; i < bdev->num_channels; i++) {
835 bchan = &bdev->channels[i];
836 spin_lock_irqsave(&bchan->vc.lock, flags);
837
838 if (!list_empty(&bchan->vc.desc_issued) && !bchan->curr_txd)
839 bam_start_dma(bchan);
840 spin_unlock_irqrestore(&bchan->vc.lock, flags);
841 }
842}
843
844/**
845 * bam_issue_pending - starts pending transactions
846 * @chan: dma channel
847 *
848 * Calls tasklet directly which in turn starts any pending transactions
849 */
850static void bam_issue_pending(struct dma_chan *chan)
851{
852 struct bam_chan *bchan = to_bam_chan(chan);
853 unsigned long flags;
854
855 spin_lock_irqsave(&bchan->vc.lock, flags);
856
857 /* if work pending and idle, start a transaction */
858 if (vchan_issue_pending(&bchan->vc) && !bchan->curr_txd)
859 bam_start_dma(bchan);
860
861 spin_unlock_irqrestore(&bchan->vc.lock, flags);
862}
863
864/**
865 * bam_dma_free_desc - free descriptor memory
866 * @vd: virtual descriptor
867 *
868 */
869static void bam_dma_free_desc(struct virt_dma_desc *vd)
870{
871 struct bam_async_desc *async_desc = container_of(vd,
872 struct bam_async_desc, vd);
873
874 kfree(async_desc);
875}
876
877static struct dma_chan *bam_dma_xlate(struct of_phandle_args *dma_spec,
878 struct of_dma *of)
879{
880 struct bam_device *bdev = container_of(of->of_dma_data,
881 struct bam_device, common);
882 unsigned int request;
883
884 if (dma_spec->args_count != 1)
885 return NULL;
886
887 request = dma_spec->args[0];
888 if (request >= bdev->num_channels)
889 return NULL;
890
891 return dma_get_slave_channel(&(bdev->channels[request].vc.chan));
892}
893
894/**
895 * bam_init
896 * @bdev: bam device
897 *
898 * Initialization helper for global bam registers
899 */
900static int bam_init(struct bam_device *bdev)
901{
902 u32 val;
903
904 /* read revision and configuration information */
905 val = readl_relaxed(bdev->regs + BAM_REVISION) >> NUM_EES_SHIFT;
906 val &= NUM_EES_MASK;
907
908 /* check that configured EE is within range */
909 if (bdev->ee >= val)
910 return -EINVAL;
911
912 val = readl_relaxed(bdev->regs + BAM_NUM_PIPES);
913 bdev->num_channels = val & BAM_NUM_PIPES_MASK;
914
915 /* s/w reset bam */
916 /* after reset all pipes are disabled and idle */
917 val = readl_relaxed(bdev->regs + BAM_CTRL);
918 val |= BAM_SW_RST;
919 writel_relaxed(val, bdev->regs + BAM_CTRL);
920 val &= ~BAM_SW_RST;
921 writel_relaxed(val, bdev->regs + BAM_CTRL);
922
923 /* make sure previous stores are visible before enabling BAM */
924 wmb();
925
926 /* enable bam */
927 val |= BAM_EN;
928 writel_relaxed(val, bdev->regs + BAM_CTRL);
929
930	/* set descriptor threshold, start with 4 bytes */
931 writel_relaxed(DEFAULT_CNT_THRSHLD, bdev->regs + BAM_DESC_CNT_TRSHLD);
932
933 /* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */
934 writel_relaxed(BAM_CNFG_BITS_DEFAULT, bdev->regs + BAM_CNFG_BITS);
935
936 /* enable irqs for errors */
937 writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN,
938 bdev->regs + BAM_IRQ_EN);
939
940 /* unmask global bam interrupt */
941 writel_relaxed(BAM_IRQ_MSK, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
942
943 return 0;
944}
945
946static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
947 u32 index)
948{
949 bchan->id = index;
950 bchan->bdev = bdev;
951
952 vchan_init(&bchan->vc, &bdev->common);
953 bchan->vc.desc_free = bam_dma_free_desc;
954}
955
956static int bam_dma_probe(struct platform_device *pdev)
957{
958 struct bam_device *bdev;
959 struct resource *iores;
960 int ret, i;
961
962 bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL);
963 if (!bdev)
964 return -ENOMEM;
965
966 bdev->dev = &pdev->dev;
967
968 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
969 bdev->regs = devm_ioremap_resource(&pdev->dev, iores);
970 if (IS_ERR(bdev->regs))
971 return PTR_ERR(bdev->regs);
972
973 bdev->irq = platform_get_irq(pdev, 0);
974 if (bdev->irq < 0)
975 return bdev->irq;
976
977 ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &bdev->ee);
978 if (ret) {
979 dev_err(bdev->dev, "Execution environment unspecified\n");
980 return ret;
981 }
982
983 bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
984 if (IS_ERR(bdev->bamclk))
985 return PTR_ERR(bdev->bamclk);
986
987 ret = clk_prepare_enable(bdev->bamclk);
988 if (ret) {
989 dev_err(bdev->dev, "failed to prepare/enable clock\n");
990 return ret;
991 }
992
993 ret = bam_init(bdev);
994 if (ret)
995 goto err_disable_clk;
996
997 tasklet_init(&bdev->task, dma_tasklet, (unsigned long)bdev);
998
999 bdev->channels = devm_kcalloc(bdev->dev, bdev->num_channels,
1000 sizeof(*bdev->channels), GFP_KERNEL);
1001
1002 if (!bdev->channels) {
1003 ret = -ENOMEM;
1004 goto err_disable_clk;
1005 }
1006
1007 /* allocate and initialize channels */
1008 INIT_LIST_HEAD(&bdev->common.channels);
1009
1010 for (i = 0; i < bdev->num_channels; i++)
1011 bam_channel_init(bdev, &bdev->channels[i], i);
1012
1013 ret = devm_request_irq(bdev->dev, bdev->irq, bam_dma_irq,
1014 IRQF_TRIGGER_HIGH, "bam_dma", bdev);
1015 if (ret)
1016 goto err_disable_clk;
1017
1018 /* set max dma segment size */
1019 bdev->common.dev = bdev->dev;
1020 bdev->common.dev->dma_parms = &bdev->dma_parms;
1021 ret = dma_set_max_seg_size(bdev->common.dev, BAM_MAX_DATA_SIZE);
1022 if (ret) {
1023 dev_err(bdev->dev, "cannot set maximum segment size\n");
1024 goto err_disable_clk;
1025 }
1026
1027 platform_set_drvdata(pdev, bdev);
1028
1029 /* set capabilities */
1030 dma_cap_zero(bdev->common.cap_mask);
1031 dma_cap_set(DMA_SLAVE, bdev->common.cap_mask);
1032
1033 /* initialize dmaengine apis */
1034 bdev->common.device_alloc_chan_resources = bam_alloc_chan;
1035 bdev->common.device_free_chan_resources = bam_free_chan;
1036 bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
1037 bdev->common.device_control = bam_control;
1038 bdev->common.device_issue_pending = bam_issue_pending;
1039 bdev->common.device_tx_status = bam_tx_status;
1040 bdev->common.dev = bdev->dev;
1041
1042 ret = dma_async_device_register(&bdev->common);
1043 if (ret) {
1044 dev_err(bdev->dev, "failed to register dma async device\n");
1045 goto err_disable_clk;
1046 }
1047
1048 ret = of_dma_controller_register(pdev->dev.of_node, bam_dma_xlate,
1049 &bdev->common);
1050 if (ret)
1051 goto err_unregister_dma;
1052
1053 return 0;
1054
1055err_unregister_dma:
1056 dma_async_device_unregister(&bdev->common);
1057err_disable_clk:
1058 clk_disable_unprepare(bdev->bamclk);
1059 return ret;
1060}
1061
1062static int bam_dma_remove(struct platform_device *pdev)
1063{
1064 struct bam_device *bdev = platform_get_drvdata(pdev);
1065 u32 i;
1066
1067 of_dma_controller_free(pdev->dev.of_node);
1068 dma_async_device_unregister(&bdev->common);
1069
1070 /* mask all interrupts for this execution environment */
1071 writel_relaxed(0, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
1072
1073 devm_free_irq(bdev->dev, bdev->irq, bdev);
1074
1075 for (i = 0; i < bdev->num_channels; i++) {
1076 bam_dma_terminate_all(&bdev->channels[i]);
1077 tasklet_kill(&bdev->channels[i].vc.task);
1078
1079 dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
1080 bdev->channels[i].fifo_virt,
1081 bdev->channels[i].fifo_phys);
1082 }
1083
1084 tasklet_kill(&bdev->task);
1085
1086 clk_disable_unprepare(bdev->bamclk);
1087
1088 return 0;
1089}
1090
1091static const struct of_device_id bam_of_match[] = {
1092 { .compatible = "qcom,bam-v1.4.0", },
1093 {}
1094};
1095MODULE_DEVICE_TABLE(of, bam_of_match);
1096
1097static struct platform_driver bam_dma_driver = {
1098 .probe = bam_dma_probe,
1099 .remove = bam_dma_remove,
1100 .driver = {
1101 .name = "bam-dma-engine",
1102 .owner = THIS_MODULE,
1103 .of_match_table = bam_of_match,
1104 },
1105};
1106
1107module_platform_driver(bam_dma_driver);
1108
1109MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
1110MODULE_DESCRIPTION("QCOM BAM DMA engine driver");
1111MODULE_LICENSE("GPL v2");
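
From the client side the BAM driver is driven through the standard dmaengine slave flow: bam_prep_slave_sg() above splits each scatterlist entry into BAM_MAX_DATA_SIZE hardware descriptors, and dma_async_issue_pending() ends up in bam_issue_pending(). A hedged consumer sketch (the example_ name and the burst value are illustrative):

static int example_bam_rx(struct dma_chan *chan, struct scatterlist *sgl,
			  unsigned int sg_len)
{
	struct dma_slave_config cfg = {
		.direction = DMA_DEV_TO_MEM,
		.src_maxburst = 16,	/* written to BAM_DESC_CNT_TRSHLD */
	};
	struct dma_async_tx_descriptor *txd;

	if (dmaengine_slave_config(chan, &cfg))
		return -EINVAL;

	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
				      DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}
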
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 4eddedb6eb7d..b209a0f17344 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -192,7 +192,7 @@ struct s3c24xx_dma_phy {
 	unsigned int			id;
 	bool				valid;
 	void __iomem			*base;
-	unsigned int			irq;
+	int				irq;
 	struct clk			*clk;
 	spinlock_t			lock;
 	struct s3c24xx_dma_chan	*serving;
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index dadd9e010c0b..b4c813831006 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -29,6 +29,12 @@ config RCAR_HPB_DMAE
 	help
 	  Enable support for the Renesas R-Car series DMA controllers.
 
+config RCAR_AUDMAC_PP
+	tristate "Renesas R-Car Audio DMAC Peripheral Peripheral support"
+	depends on SH_DMAE_BASE
+	help
+	  Enable support for the Renesas R-Car Audio DMAC Peripheral Peripheral controllers.
+
 config SHDMA_R8A73A4
 	def_bool y
 	depends on ARCH_R8A73A4 && SH_DMAE != n
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index e856af23b789..1ce88b28cfc6 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -7,3 +7,4 @@ endif
 shdma-objs := $(shdma-y)
 obj-$(CONFIG_SUDMAC) += sudmac.o
 obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
+obj-$(CONFIG_RCAR_AUDMAC_PP) += rcar-audmapp.o
diff --git a/drivers/dma/sh/rcar-audmapp.c b/drivers/dma/sh/rcar-audmapp.c
new file mode 100644
index 000000000000..2de77289a2e9
--- /dev/null
+++ b/drivers/dma/sh/rcar-audmapp.c
@@ -0,0 +1,320 @@
1/*
2 * Driver for the Renesas R-Car Audio-DMAC-peri-peri.
3 *
4 * Copyright (C) 2014 Renesas Electronics Corporation
5 * Copyright (C) 2014 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
6 *
7 * based on the drivers/dma/sh/shdma.c
8 *
9 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
10 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
11 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
12 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
13 *
14 * This is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 */
20#include <linux/delay.h>
21#include <linux/init.h>
22#include <linux/module.h>
23#include <linux/slab.h>
24#include <linux/dmaengine.h>
25#include <linux/platform_data/dma-rcar-audmapp.h>
26#include <linux/platform_device.h>
27#include <linux/shdma-base.h>
28
29/*
30 * DMA register
31 */
32#define PDMASAR 0x00
33#define PDMADAR 0x04
34#define PDMACHCR 0x0c
35
36/* PDMACHCR */
37#define PDMACHCR_DE (1 << 0)
38
39#define AUDMAPP_MAX_CHANNELS 29
40
41/* Default MEMCPY transfer size = 2^2 = 4 bytes */
42#define LOG2_DEFAULT_XFER_SIZE 2
43#define AUDMAPP_SLAVE_NUMBER 256
44#define AUDMAPP_LEN_MAX (16 * 1024 * 1024)
45
46struct audmapp_chan {
47 struct shdma_chan shdma_chan;
48 struct audmapp_slave_config *config;
49 void __iomem *base;
50};
51
52struct audmapp_device {
53 struct shdma_dev shdma_dev;
54 struct audmapp_pdata *pdata;
55 struct device *dev;
56 void __iomem *chan_reg;
57};
58
59#define to_chan(chan) container_of(chan, struct audmapp_chan, shdma_chan)
60#define to_dev(chan) container_of(chan->shdma_chan.dma_chan.device, \
61 struct audmapp_device, shdma_dev.dma_dev)
62
63static void audmapp_write(struct audmapp_chan *auchan, u32 data, u32 reg)
64{
65 struct audmapp_device *audev = to_dev(auchan);
66 struct device *dev = audev->dev;
67
68 dev_dbg(dev, "w %p : %08x\n", auchan->base + reg, data);
69
70 iowrite32(data, auchan->base + reg);
71}
72
73static u32 audmapp_read(struct audmapp_chan *auchan, u32 reg)
74{
75 return ioread32(auchan->base + reg);
76}
77
78static void audmapp_halt(struct shdma_chan *schan)
79{
80 struct audmapp_chan *auchan = to_chan(schan);
81 int i;
82
83 audmapp_write(auchan, 0, PDMACHCR);
84
85 for (i = 0; i < 1024; i++) {
86		if (audmapp_read(auchan, PDMACHCR) == 0)
87 return;
88 udelay(1);
89 }
90}
91
92static void audmapp_start_xfer(struct shdma_chan *schan,
93			       struct shdma_desc *sdesc)
94{
95 struct audmapp_chan *auchan = to_chan(schan);
96 struct audmapp_device *audev = to_dev(auchan);
97 struct audmapp_slave_config *cfg = auchan->config;
98 struct device *dev = audev->dev;
99 u32 chcr = cfg->chcr | PDMACHCR_DE;
100
101 dev_dbg(dev, "src/dst/chcr = %pad/%pad/%x\n",
102 &cfg->src, &cfg->dst, cfg->chcr);
103
104 audmapp_write(auchan, cfg->src, PDMASAR);
105 audmapp_write(auchan, cfg->dst, PDMADAR);
106 audmapp_write(auchan, chcr, PDMACHCR);
107}
108
109static struct audmapp_slave_config *
110audmapp_find_slave(struct audmapp_chan *auchan, int slave_id)
111{
112 struct audmapp_device *audev = to_dev(auchan);
113 struct audmapp_pdata *pdata = audev->pdata;
114 struct audmapp_slave_config *cfg;
115 int i;
116
117 if (slave_id >= AUDMAPP_SLAVE_NUMBER)
118 return NULL;
119
120 for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
121 if (cfg->slave_id == slave_id)
122 return cfg;
123
124 return NULL;
125}
126
127static int audmapp_set_slave(struct shdma_chan *schan, int slave_id,
128 dma_addr_t slave_addr, bool try)
129{
130 struct audmapp_chan *auchan = to_chan(schan);
131 struct audmapp_slave_config *cfg =
132 audmapp_find_slave(auchan, slave_id);
133
134 if (!cfg)
135 return -ENODEV;
136 if (try)
137 return 0;
138
139 auchan->config = cfg;
140
141 return 0;
142}
143
144static int audmapp_desc_setup(struct shdma_chan *schan,
145			      struct shdma_desc *sdesc,
146 dma_addr_t src, dma_addr_t dst, size_t *len)
147{
148 struct audmapp_chan *auchan = to_chan(schan);
149 struct audmapp_slave_config *cfg = auchan->config;
150
151 if (!cfg)
152 return -ENODEV;
153
154 if (*len > (size_t)AUDMAPP_LEN_MAX)
155 *len = (size_t)AUDMAPP_LEN_MAX;
156
157 return 0;
158}
159
160static void audmapp_setup_xfer(struct shdma_chan *schan,
161 int slave_id)
162{
163}
164
165static dma_addr_t audmapp_slave_addr(struct shdma_chan *schan)
166{
167 return 0; /* always fixed address */
168}
169
170static bool audmapp_channel_busy(struct shdma_chan *schan)
171{
172 struct audmapp_chan *auchan = to_chan(schan);
173 u32 chcr = audmapp_read(auchan, PDMACHCR);
174
175 return chcr & ~PDMACHCR_DE;
176}
177
178static bool audmapp_desc_completed(struct shdma_chan *schan,
179 struct shdma_desc *sdesc)
180{
181 return true;
182}
183
184static struct shdma_desc *audmapp_embedded_desc(void *buf, int i)
185{
186 return &((struct shdma_desc *)buf)[i];
187}
188
189static const struct shdma_ops audmapp_shdma_ops = {
190 .halt_channel = audmapp_halt,
191 .desc_setup = audmapp_desc_setup,
192 .set_slave = audmapp_set_slave,
193 .start_xfer = audmapp_start_xfer,
194 .embedded_desc = audmapp_embedded_desc,
195 .setup_xfer = audmapp_setup_xfer,
196 .slave_addr = audmapp_slave_addr,
197 .channel_busy = audmapp_channel_busy,
198 .desc_completed = audmapp_desc_completed,
199};
200
201static int audmapp_chan_probe(struct platform_device *pdev,
202 struct audmapp_device *audev, int id)
203{
204 struct shdma_dev *sdev = &audev->shdma_dev;
205 struct audmapp_chan *auchan;
206 struct shdma_chan *schan;
207 struct device *dev = audev->dev;
208
209 auchan = devm_kzalloc(dev, sizeof(*auchan), GFP_KERNEL);
210 if (!auchan)
211 return -ENOMEM;
212
213 schan = &auchan->shdma_chan;
214 schan->max_xfer_len = AUDMAPP_LEN_MAX;
215
216 shdma_chan_probe(sdev, schan, id);
217
218 auchan->base = audev->chan_reg + 0x20 + (0x10 * id);
219 dev_dbg(dev, "%02d : %p / %p", id, auchan->base, audev->chan_reg);
220
221 return 0;
222}
223
224static void audmapp_chan_remove(struct audmapp_device *audev)
225{
226 struct dma_device *dma_dev = &audev->shdma_dev.dma_dev;
227 struct shdma_chan *schan;
228 int i;
229
230 shdma_for_each_chan(schan, &audev->shdma_dev, i) {
231 BUG_ON(!schan);
232 shdma_chan_remove(schan);
233 }
234 dma_dev->chancnt = 0;
235}
236
237static int audmapp_probe(struct platform_device *pdev)
238{
239 struct audmapp_pdata *pdata = pdev->dev.platform_data;
240 struct audmapp_device *audev;
241 struct shdma_dev *sdev;
242 struct dma_device *dma_dev;
243 struct resource *res;
244 int err, i;
245
246 if (!pdata)
247 return -ENODEV;
248
249 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
250
251 audev = devm_kzalloc(&pdev->dev, sizeof(*audev), GFP_KERNEL);
252 if (!audev)
253 return -ENOMEM;
254
255 audev->dev = &pdev->dev;
256 audev->pdata = pdata;
257 audev->chan_reg = devm_ioremap_resource(&pdev->dev, res);
258 if (IS_ERR(audev->chan_reg))
259 return PTR_ERR(audev->chan_reg);
260
261 sdev = &audev->shdma_dev;
262 sdev->ops = &audmapp_shdma_ops;
263 sdev->desc_size = sizeof(struct shdma_desc);
264
265 dma_dev = &sdev->dma_dev;
266 dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;
267 dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
268
269 err = shdma_init(&pdev->dev, sdev, AUDMAPP_MAX_CHANNELS);
270 if (err < 0)
271 return err;
272
273 platform_set_drvdata(pdev, audev);
274
275 /* Create DMA Channel */
276 for (i = 0; i < AUDMAPP_MAX_CHANNELS; i++) {
277 err = audmapp_chan_probe(pdev, audev, i);
278 if (err)
279 goto chan_probe_err;
280 }
281
282 err = dma_async_device_register(dma_dev);
283 if (err < 0)
284 goto chan_probe_err;
285
286 return err;
287
288chan_probe_err:
289 audmapp_chan_remove(audev);
290 shdma_cleanup(sdev);
291
292 return err;
293}
294
295static int audmapp_remove(struct platform_device *pdev)
296{
297 struct audmapp_device *audev = platform_get_drvdata(pdev);
298 struct dma_device *dma_dev = &audev->shdma_dev.dma_dev;
299
300 dma_async_device_unregister(dma_dev);
301
302 audmapp_chan_remove(audev);
303 shdma_cleanup(&audev->shdma_dev);
304
305 return 0;
306}
307
308static struct platform_driver audmapp_driver = {
309 .probe = audmapp_probe,
310 .remove = audmapp_remove,
311 .driver = {
312 .owner = THIS_MODULE,
313 .name = "rcar-audmapp-engine",
314 },
315};
316module_platform_driver(audmapp_driver);
317
318MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
319MODULE_DESCRIPTION("Renesas R-Car Audio DMAC peri-peri driver");
320MODULE_LICENSE("GPL");
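
The driver is configured entirely through platform data: audmapp_find_slave() matches on slave_id, and audmapp_start_xfer() writes the entry's fixed src/dst/chcr values out verbatim. A board-side sketch, assuming the audmapp_pdata/audmapp_slave_config layout implied by those accesses (every value below is a placeholder):

#include <linux/platform_data/dma-rcar-audmapp.h>

static struct audmapp_slave_config example_audmapp_slaves[] = {
	{
		.slave_id	= 0x2d,		/* matched by audmapp_find_slave() */
		.src		= 0xec304000,	/* placeholder FIFO address */
		.dst		= 0xec400000,	/* placeholder FIFO address */
		.chcr		= 0x12345678,	/* placeholder; DE bit is ORed in */
	},
};

static struct audmapp_pdata example_audmapp_pdata = {
	.slave		= example_audmapp_slaves,
	.slave_num	= ARRAY_SIZE(example_audmapp_slaves),
};
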
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 2e7b394def80..52396771acbe 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -227,7 +227,7 @@ bool shdma_chan_filter(struct dma_chan *chan, void *arg)
 	struct shdma_chan *schan = to_shdma_chan(chan);
 	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
 	const struct shdma_ops *ops = sdev->ops;
-	int match = (int)arg;
+	int match = (long)arg;
 	int ret;
 
 	if (match < 0)
@@ -491,8 +491,8 @@ static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
 	}
 
 	dev_dbg(schan->dev,
-		"chaining (%u/%u)@%x -> %x with %p, cookie %d\n",
-		copy_size, *len, *src, *dst, &new->async_tx,
+		"chaining (%zu/%zu)@%pad -> %pad with %p, cookie %d\n",
+		copy_size, *len, src, dst, &new->async_tx,
 		new->async_tx.cookie);
 
 	new->mark = DESC_PREPARED;
@@ -555,8 +555,8 @@ static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
 		goto err_get_desc;
 
 	do {
-		dev_dbg(schan->dev, "Add SG #%d@%p[%d], dma %llx\n",
-			i, sg, len, (unsigned long long)sg_addr);
+		dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n",
+			i, sg, len, &sg_addr);
 
 		if (direction == DMA_DEV_TO_MEM)
 			new = shdma_add_desc(schan, flags,
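
Both format fixes follow the same rule: %zu for size_t, and %pad for DMA addresses, where the specifier takes a pointer to the dma_addr_t so a single format string stays correct whether dma_addr_t is 32 or 64 bits wide. A minimal sketch:

static void example_log_mapping(struct device *dev, dma_addr_t addr,
				size_t len)
{
	/* %pad dereferences a dma_addr_t pointer; %zu matches size_t */
	dev_dbg(dev, "mapped %zu bytes at %pad\n", len, &addr);
}
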
diff --git a/drivers/dma/sh/shdma-of.c b/drivers/dma/sh/shdma-of.c
index 06473a05fe4e..b4ff9d3e56d1 100644
--- a/drivers/dma/sh/shdma-of.c
+++ b/drivers/dma/sh/shdma-of.c
@@ -33,7 +33,8 @@ static struct dma_chan *shdma_of_xlate(struct of_phandle_args *dma_spec,
 	/* Only slave DMA channels can be allocated via DT */
 	dma_cap_set(DMA_SLAVE, mask);
 
-	chan = dma_request_channel(mask, shdma_chan_filter, (void *)id);
+	chan = dma_request_channel(mask, shdma_chan_filter,
+				   (void *)(uintptr_t)id);
 	if (chan)
 		to_shdma_chan(chan)->hw_req = id;
 
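
Casting the int straight to void * triggers a size-mismatch warning on 64-bit builds; the round trip instead widens through uintptr_t when packing and narrows through long when unpacking, the latter being the matching shdma_chan_filter() change above. Sketch:

static void example_pack_unpack(int id)
{
	void *cookie = (void *)(uintptr_t)id;	/* pack without a warning */
	int match = (long)cookie;		/* unpack, as the filter does */

	(void)match;
}
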
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 0d765c0e21ec..dda7e7563f5d 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -443,6 +443,7 @@ static bool sh_dmae_reset(struct sh_dmae_device *shdev)
 	return ret;
 }
 
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
 static irqreturn_t sh_dmae_err(int irq, void *data)
 {
 	struct sh_dmae_device *shdev = data;
@@ -453,6 +454,7 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
 	sh_dmae_reset(shdev);
 	return IRQ_HANDLED;
 }
+#endif
 
 static bool sh_dmae_desc_completed(struct shdma_chan *schan,
 				   struct shdma_desc *sdesc)
@@ -637,7 +639,7 @@ static int sh_dmae_resume(struct device *dev)
 #define sh_dmae_resume NULL
 #endif
 
-const struct dev_pm_ops sh_dmae_pm = {
+static const struct dev_pm_ops sh_dmae_pm = {
 	.suspend = sh_dmae_suspend,
 	.resume = sh_dmae_resume,
 	.runtime_suspend = sh_dmae_runtime_suspend,
@@ -685,9 +687,12 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
 static int sh_dmae_probe(struct platform_device *pdev)
 {
 	const struct sh_dmae_pdata *pdata;
-	unsigned long irqflags = 0,
-		chan_flag[SH_DMAE_MAX_CHANNELS] = {};
-	int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
+	unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
+	int chan_irq[SH_DMAE_MAX_CHANNELS];
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
+	unsigned long irqflags = 0;
+	int errirq;
+#endif
 	int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
 	struct sh_dmae_device *shdev;
 	struct dma_device *dma_dev;
diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c
index c7e9cdff0708..4e7df43b50d6 100644
--- a/drivers/dma/sh/sudmac.c
+++ b/drivers/dma/sh/sudmac.c
@@ -178,8 +178,8 @@ static int sudmac_desc_setup(struct shdma_chan *schan,
 	struct sudmac_chan *sc = to_chan(schan);
 	struct sudmac_desc *sd = to_desc(sdesc);
 
-	dev_dbg(sc->shdma_chan.dev, "%s: src=%x, dst=%x, len=%d\n",
-		__func__, src, dst, *len);
+	dev_dbg(sc->shdma_chan.dev, "%s: src=%pad, dst=%pad, len=%zu\n",
+		__func__, &src, &dst, *len);
 
 	if (*len > schan->max_xfer_len)
 		*len = schan->max_xfer_len;
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index d4d3a3109b16..a1bd8298d55f 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -18,6 +18,7 @@
 #include <linux/of_device.h>
 #include <linux/of_platform.h>
 #include <linux/clk.h>
+#include <linux/of_dma.h>
 #include <linux/sirfsoc_dma.h>
 
 #include "dmaengine.h"
@@ -659,6 +660,18 @@ static int sirfsoc_dma_device_slave_caps(struct dma_chan *dchan,
 	return 0;
 }
 
+static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec,
+	struct of_dma *ofdma)
+{
+	struct sirfsoc_dma *sdma = ofdma->of_dma_data;
+	unsigned int request = dma_spec->args[0];
+
+	if (request > SIRFSOC_DMA_CHANNELS)
+		return NULL;
+
+	return dma_get_slave_channel(&sdma->channels[request].chan);
+}
+
 static int sirfsoc_dma_probe(struct platform_device *op)
 {
 	struct device_node *dn = op->dev.of_node;
@@ -764,11 +777,20 @@ static int sirfsoc_dma_probe(struct platform_device *op)
 	if (ret)
 		goto free_irq;
 
+	/* Device-tree DMA controller registration */
+	ret = of_dma_controller_register(dn, of_dma_sirfsoc_xlate, sdma);
+	if (ret) {
+		dev_err(dev, "failed to register DMA controller\n");
+		goto unreg_dma_dev;
+	}
+
 	pm_runtime_enable(&op->dev);
 	dev_info(dev, "initialized SIRFSOC DMAC driver\n");
 
 	return 0;
 
+unreg_dma_dev:
+	dma_async_device_unregister(dma);
 free_irq:
 	free_irq(sdma->irq, sdma);
 irq_dispose:
@@ -781,6 +803,7 @@ static int sirfsoc_dma_remove(struct platform_device *op)
 	struct device *dev = &op->dev;
 	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
 
+	of_dma_controller_free(op->dev.of_node);
 	dma_async_device_unregister(&sdma->dma);
 	free_irq(sdma->irq, sdma);
 	irq_dispose_mapping(sdma->irq);
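
With of_dma_controller_register() wired up, SiRFSoC peripherals can use the generic dmas/dma-names device-tree properties instead of a driver-private filter. A hedged sketch of the consumer side, assuming a node that carries dmas = <&dmac0 12> and dma-names = "rx" (the phandle and request line are invented):

#include <linux/dmaengine.h>

/* illustrative probe fragment for a DMA consumer */
static int example_request_rx(struct device *dev, struct dma_chan **chan)
{
	*chan = dma_request_slave_channel(dev, "rx");
	if (!*chan)
		return -EPROBE_DEFER;	/* controller may not be bound yet */

	return 0;
}
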
diff --git a/include/linux/acpi_dma.h b/include/linux/acpi_dma.h
index fb0298082916..329436d38e66 100644
--- a/include/linux/acpi_dma.h
+++ b/include/linux/acpi_dma.h
@@ -16,6 +16,7 @@
 
 #include <linux/list.h>
 #include <linux/device.h>
+#include <linux/err.h>
 #include <linux/dmaengine.h>
 
 /**
@@ -103,12 +104,12 @@ static inline void devm_acpi_dma_controller_free(struct device *dev)
 static inline struct dma_chan *acpi_dma_request_slave_chan_by_index(
 		struct device *dev, size_t index)
 {
-	return NULL;
+	return ERR_PTR(-ENODEV);
 }
 static inline struct dma_chan *acpi_dma_request_slave_chan_by_name(
 		struct device *dev, const char *name)
 {
-	return NULL;
+	return ERR_PTR(-ENODEV);
 }
 
 #define acpi_dma_simple_xlate NULL
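
Because the !CONFIG_ACPI stubs now return ERR_PTR(-ENODEV) rather than NULL, callers can apply a single IS_ERR() test regardless of configuration. A sketch of the calling convention (the "tx" name is illustrative):

#include <linux/acpi_dma.h>
#include <linux/err.h>

static struct dma_chan *example_acpi_chan(struct device *dev)
{
	struct dma_chan *chan;

	chan = acpi_dma_request_slave_chan_by_name(dev, "tx");
	if (IS_ERR(chan))	/* also catches the stub's -ENODEV */
		return NULL;

	return chan;
}
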
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index c5c92d59e531..8300fb87b84a 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -341,15 +341,11 @@ enum dma_slave_buswidth {
  * and this struct will then be passed in as an argument to the
  * DMA engine device_control() function.
  *
- * The rationale for adding configuration information to this struct
- * is as follows: if it is likely that most DMA slave controllers in
- * the world will support the configuration option, then make it
- * generic. If not: if it is fixed so that it be sent in static from
- * the platform data, then prefer to do that. Else, if it is neither
- * fixed at runtime, nor generic enough (such as bus mastership on
- * some CPU family and whatnot) then create a custom slave config
- * struct and pass that, then make this config a member of that
- * struct, if applicable.
+ * The rationale for adding configuration information to this struct is as
+ * follows: if it is likely that more than one DMA slave controllers in
+ * the world will support the configuration option, then make it generic.
+ * If not: if it is fixed so that it be sent in static from the platform
+ * data, then prefer to do that.
  */
 struct dma_slave_config {
 	enum dma_transfer_direction direction;
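
For context on the comment being trimmed: a slave driver fills struct dma_slave_config with the per-transfer parameters it only knows at runtime and hands it to the engine before preparing transfers. A minimal sketch (the FIFO address and burst size are invented):

#include <linux/dmaengine.h>

static int example_config_rx(struct dma_chan *chan, dma_addr_t fifo)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo,	/* invented device FIFO address */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 4,	/* invented burst size */
	};

	return dmaengine_slave_config(chan, &cfg);
}
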
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index 481ab2345d6b..68b4024184de 100644
--- a/include/linux/dw_dmac.h
+++ b/include/linux/dw_dmac.h
@@ -1,6 +1,5 @@
 /*
- * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
- * AVR32 systems.)
+ * Driver for the Synopsys DesignWare DMA Controller
  *
  * Copyright (C) 2007 Atmel Corporation
  * Copyright (C) 2010-2011 ST Microelectronics
@@ -44,8 +43,6 @@ struct dw_dma_slave {
  * @nr_masters: Number of AHB masters supported by the controller
  * @data_width: Maximum data width supported by hardware per AHB master
  *		(0 - 8bits, 1 - 16bits, ..., 5 - 256bits)
- * @sd: slave specific data. Used for configuring channels
- * @sd_count: count of slave data structures passed.
  */
 struct dw_dma_platform_data {
 	unsigned int nr_channels;
diff --git a/include/linux/platform_data/dma-rcar-audmapp.h b/include/linux/platform_data/dma-rcar-audmapp.h
new file mode 100644
index 000000000000..471fffebbeb4
--- /dev/null
+++ b/include/linux/platform_data/dma-rcar-audmapp.h
@@ -0,0 +1,34 @@
+/*
+ * This is for Renesas R-Car Audio-DMAC-peri-peri.
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ * Copyright (C) 2014 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This file is based on the include/linux/sh_dma.h
+ *
+ * Header for the new SH dmaengine driver
+ *
+ * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef SH_AUDMAPP_H
+#define SH_AUDMAPP_H
+
+#include <linux/dmaengine.h>
+
+struct audmapp_slave_config {
+	int		slave_id;
+	dma_addr_t	src;
+	dma_addr_t	dst;
+	u32		chcr;
+};
+
+struct audmapp_pdata {
+	struct audmapp_slave_config *slave;
+	int slave_num;
+};
+
+#endif /* SH_AUDMAPP_H */
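
A board would hand this header's structures to the audmapp driver as ordinary platform data; a hedged sketch of such a fragment (the slave ID, FIFO addresses and CHCR value are all invented):

#include <linux/kernel.h>
#include <linux/platform_data/dma-rcar-audmapp.h>

static struct audmapp_slave_config example_audmapp_slaves[] = {
	{
		.slave_id	= 0x2d,		/* invented slave ID */
		.src		= 0xec740000,	/* invented FIFO addresses */
		.dst		= 0xec740020,
		.chcr		= 0x00001000,	/* invented CHCR setting */
	},
};

static struct audmapp_pdata example_audmapp_pdata = {
	.slave		= example_audmapp_slaves,
	.slave_num	= ARRAY_SIZE(example_audmapp_slaves),
};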