diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-07-17 12:55:43 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-07-17 12:55:43 -0400 |
commit | 47ebe00b684c2bc183a766bc33c8b5943bc0df85 (patch) | |
tree | c0f155acc5623f6990d20b7a623f48f5e7aa0f61 | |
parent | fa121bb3fed6313b1f0af23952301e06cf6d32ed (diff) | |
parent | 5c274ca4cfb22a455e880f61536b1894fa29fd17 (diff) |
Merge tag 'dmaengine-5.3-rc1' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine updates from Vinod Koul:
- Add support in dmaengine core to do device node checks for DT devices
and update bunch of drivers to use that and remove open coding from
drivers
- New driver/driver support for new hardware, namely:
- MediaTek UART APDMA
- Freescale i.mx7ulp edma2
- Synopsys eDMA IP core version 0
- Allwinner H6 DMA
- Updates to axi-dma and support for interleaved cyclic transfers
- Greg's debugfs return value check removals on drivers
- Updates to stm32-dma, hsu, dw, pl330, tegra drivers
* tag 'dmaengine-5.3-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (68 commits)
dmaengine: Revert "dmaengine: fsl-edma: add i.mx7ulp edma2 version support"
dmaengine: at_xdmac: check for non-empty xfers_list before invoking callback
Documentation: dmaengine: clean up description of dmatest usage
dmaengine: tegra210-adma: remove PM_CLK dependency
dmaengine: fsl-edma: add i.mx7ulp edma2 version support
dt-bindings: dma: fsl-edma: add new i.mx7ulp-edma
dmaengine: fsl-edma-common: version check for v2 instead
dmaengine: fsl-edma-common: move dmamux register to another single function
dmaengine: fsl-edma: add drvdata for fsl-edma
dmaengine: Revert "dmaengine: fsl-edma: support little endian for edma driver"
dmaengine: rcar-dmac: Reject zero-length slave DMA requests
dmaengine: dw: Enable iDMA 32-bit on Intel Elkhart Lake
dmaengine: dw-edma: fix semicolon.cocci warnings
dmaengine: sh: usb-dmac: Use [] to denote a flexible array member
dmaengine: dmatest: timeout value of -1 should specify infinite wait
dmaengine: dw: Distinguish ->remove() between DW and iDMA 32-bit
dmaengine: fsl-edma: support little endian for edma driver
dmaengine: hsu: Revert "set HSU_CH_MTSR to memory width"
dmagengine: pl330: add code to get reset property
dt-bindings: pl330: document the optional resets property
...
66 files changed, 3668 insertions, 795 deletions
diff --git a/Documentation/devicetree/bindings/dma/8250_mtk_dma.txt b/Documentation/devicetree/bindings/dma/8250_mtk_dma.txt deleted file mode 100644 index 3fe0961bcf64..000000000000 --- a/Documentation/devicetree/bindings/dma/8250_mtk_dma.txt +++ /dev/null | |||
@@ -1,33 +0,0 @@ | |||
1 | * Mediatek UART APDMA Controller | ||
2 | |||
3 | Required properties: | ||
4 | - compatible should contain: | ||
5 | * "mediatek,mt2712-uart-dma" for MT2712 compatible APDMA | ||
6 | * "mediatek,mt6577-uart-dma" for MT6577 and all of the above | ||
7 | |||
8 | - reg: The base address of the APDMA register bank. | ||
9 | |||
10 | - interrupts: A single interrupt specifier. | ||
11 | |||
12 | - clocks : Must contain an entry for each entry in clock-names. | ||
13 | See ../clocks/clock-bindings.txt for details. | ||
14 | - clock-names: The APDMA clock for register accesses | ||
15 | |||
16 | Examples: | ||
17 | |||
18 | apdma: dma-controller@11000380 { | ||
19 | compatible = "mediatek,mt2712-uart-dma"; | ||
20 | reg = <0 0x11000380 0 0x400>; | ||
21 | interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_LOW>, | ||
22 | <GIC_SPI 64 IRQ_TYPE_LEVEL_LOW>, | ||
23 | <GIC_SPI 65 IRQ_TYPE_LEVEL_LOW>, | ||
24 | <GIC_SPI 66 IRQ_TYPE_LEVEL_LOW>, | ||
25 | <GIC_SPI 67 IRQ_TYPE_LEVEL_LOW>, | ||
26 | <GIC_SPI 68 IRQ_TYPE_LEVEL_LOW>, | ||
27 | <GIC_SPI 69 IRQ_TYPE_LEVEL_LOW>, | ||
28 | <GIC_SPI 70 IRQ_TYPE_LEVEL_LOW>; | ||
29 | clocks = <&pericfg CLK_PERI_AP_DMA>; | ||
30 | clock-names = "apdma"; | ||
31 | #dma-cells = <1>; | ||
32 | }; | ||
33 | |||
diff --git a/Documentation/devicetree/bindings/dma/arm-pl330.txt b/Documentation/devicetree/bindings/dma/arm-pl330.txt index db7e2260f9c5..2c7fd1941abb 100644 --- a/Documentation/devicetree/bindings/dma/arm-pl330.txt +++ b/Documentation/devicetree/bindings/dma/arm-pl330.txt | |||
@@ -16,6 +16,9 @@ Optional properties: | |||
16 | - dma-channels: contains the total number of DMA channels supported by the DMAC | 16 | - dma-channels: contains the total number of DMA channels supported by the DMAC |
17 | - dma-requests: contains the total number of DMA requests supported by the DMAC | 17 | - dma-requests: contains the total number of DMA requests supported by the DMAC |
18 | - arm,pl330-broken-no-flushp: quirk for avoiding to execute DMAFLUSHP | 18 | - arm,pl330-broken-no-flushp: quirk for avoiding to execute DMAFLUSHP |
19 | - resets: contains an entry for each entry in reset-names. | ||
20 | See ../reset/reset.txt for details. | ||
21 | - reset-names: must contain at least "dma", and optional is "dma-ocp". | ||
19 | 22 | ||
20 | Example: | 23 | Example: |
21 | 24 | ||
diff --git a/Documentation/devicetree/bindings/dma/fsl-edma.txt b/Documentation/devicetree/bindings/dma/fsl-edma.txt index 97e213e07660..29dd3ccb1235 100644 --- a/Documentation/devicetree/bindings/dma/fsl-edma.txt +++ b/Documentation/devicetree/bindings/dma/fsl-edma.txt | |||
@@ -9,15 +9,16 @@ group, DMAMUX0 or DMAMUX1, but not both. | |||
9 | Required properties: | 9 | Required properties: |
10 | - compatible : | 10 | - compatible : |
11 | - "fsl,vf610-edma" for eDMA used similar to that on Vybrid vf610 SoC | 11 | - "fsl,vf610-edma" for eDMA used similar to that on Vybrid vf610 SoC |
12 | - "fsl,imx7ulp-edma" for eDMA2 used similar to that on i.mx7ulp | ||
12 | - reg : Specifies base physical address(s) and size of the eDMA registers. | 13 | - reg : Specifies base physical address(s) and size of the eDMA registers. |
13 | The 1st region is eDMA control register's address and size. | 14 | The 1st region is eDMA control register's address and size. |
14 | The 2nd and the 3rd regions are programmable channel multiplexing | 15 | The 2nd and the 3rd regions are programmable channel multiplexing |
15 | control register's address and size. | 16 | control register's address and size. |
16 | - interrupts : A list of interrupt-specifiers, one for each entry in | 17 | - interrupts : A list of interrupt-specifiers, one for each entry in |
17 | interrupt-names. | 18 | interrupt-names on vf610 similar SoC. But for i.mx7ulp per channel |
18 | - interrupt-names : Should contain: | 19 | per transmission interrupt, total 16 channel interrupt and 1 |
19 | "edma-tx" - the transmission interrupt | 20 | error interrupt(located in the last), no interrupt-names list on |
20 | "edma-err" - the error interrupt | 21 | i.mx7ulp for clean on dts. |
21 | - #dma-cells : Must be <2>. | 22 | - #dma-cells : Must be <2>. |
22 | The 1st cell specifies the DMAMUX(0 for DMAMUX0 and 1 for DMAMUX1). | 23 | The 1st cell specifies the DMAMUX(0 for DMAMUX0 and 1 for DMAMUX1). |
23 | Specific request source can only be multiplexed by specific channels | 24 | Specific request source can only be multiplexed by specific channels |
@@ -28,6 +29,7 @@ Required properties: | |||
28 | - clock-names : A list of channel group clock names. Should contain: | 29 | - clock-names : A list of channel group clock names. Should contain: |
29 | "dmamux0" - clock name of mux0 group | 30 | "dmamux0" - clock name of mux0 group |
30 | "dmamux1" - clock name of mux1 group | 31 | "dmamux1" - clock name of mux1 group |
32 | Note: No dmamux0 on i.mx7ulp, but another 'dma' clk added on i.mx7ulp. | ||
31 | - clocks : A list of phandle and clock-specifier pairs, one for each entry in | 33 | - clocks : A list of phandle and clock-specifier pairs, one for each entry in |
32 | clock-names. | 34 | clock-names. |
33 | 35 | ||
@@ -35,6 +37,10 @@ Optional properties: | |||
35 | - big-endian: If present registers and hardware scatter/gather descriptors | 37 | - big-endian: If present registers and hardware scatter/gather descriptors |
36 | of the eDMA are implemented in big endian mode, otherwise in little | 38 | of the eDMA are implemented in big endian mode, otherwise in little |
37 | mode. | 39 | mode. |
40 | - interrupt-names : Should contain the below on vf610 similar SoC but not used | ||
41 | on i.mx7ulp similar SoC: | ||
42 | "edma-tx" - the transmission interrupt | ||
43 | "edma-err" - the error interrupt | ||
38 | 44 | ||
39 | 45 | ||
40 | Examples: | 46 | Examples: |
@@ -52,8 +58,36 @@ edma0: dma-controller@40018000 { | |||
52 | clock-names = "dmamux0", "dmamux1"; | 58 | clock-names = "dmamux0", "dmamux1"; |
53 | clocks = <&clks VF610_CLK_DMAMUX0>, | 59 | clocks = <&clks VF610_CLK_DMAMUX0>, |
54 | <&clks VF610_CLK_DMAMUX1>; | 60 | <&clks VF610_CLK_DMAMUX1>; |
55 | }; | 61 | }; /* vf610 */ |
56 | 62 | ||
63 | edma1: dma-controller@40080000 { | ||
64 | #dma-cells = <2>; | ||
65 | compatible = "fsl,imx7ulp-edma"; | ||
66 | reg = <0x40080000 0x2000>, | ||
67 | <0x40210000 0x1000>; | ||
68 | dma-channels = <32>; | ||
69 | interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>, | ||
70 | <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>, | ||
71 | <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>, | ||
72 | <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>, | ||
73 | <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>, | ||
74 | <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>, | ||
75 | <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>, | ||
76 | <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>, | ||
77 | <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>, | ||
78 | <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, | ||
79 | <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>, | ||
80 | <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>, | ||
81 | <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>, | ||
82 | <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>, | ||
83 | <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, | ||
84 | <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>, | ||
85 | /* last is eDMA2-ERR interrupt */ | ||
86 | <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>; | ||
87 | clock-names = "dma", "dmamux0"; | ||
88 | clocks = <&pcc2 IMX7ULP_CLK_DMA1>, | ||
89 | <&pcc2 IMX7ULP_CLK_DMA_MUX1>; | ||
90 | }; /* i.mx7ulp */ | ||
57 | 91 | ||
58 | * DMA clients | 92 | * DMA clients |
59 | DMA client drivers that uses the DMA function must use the format described | 93 | DMA client drivers that uses the DMA function must use the format described |
diff --git a/Documentation/devicetree/bindings/dma/mtk-uart-apdma.txt b/Documentation/devicetree/bindings/dma/mtk-uart-apdma.txt new file mode 100644 index 000000000000..5d6f98c43e3d --- /dev/null +++ b/Documentation/devicetree/bindings/dma/mtk-uart-apdma.txt | |||
@@ -0,0 +1,54 @@ | |||
1 | * Mediatek UART APDMA Controller | ||
2 | |||
3 | Required properties: | ||
4 | - compatible should contain: | ||
5 | * "mediatek,mt2712-uart-dma" for MT2712 compatible APDMA | ||
6 | * "mediatek,mt6577-uart-dma" for MT6577 and all of the above | ||
7 | |||
8 | - reg: The base address of the APDMA register bank. | ||
9 | |||
10 | - interrupts: A single interrupt specifier. | ||
11 | One interrupt per dma-requests, or 8 if no dma-requests property is present | ||
12 | |||
13 | - dma-requests: The number of DMA channels | ||
14 | |||
15 | - clocks : Must contain an entry for each entry in clock-names. | ||
16 | See ../clocks/clock-bindings.txt for details. | ||
17 | - clock-names: The APDMA clock for register accesses | ||
18 | |||
19 | - mediatek,dma-33bits: Present if the DMA requires support | ||
20 | |||
21 | Examples: | ||
22 | |||
23 | apdma: dma-controller@11000400 { | ||
24 | compatible = "mediatek,mt2712-uart-dma"; | ||
25 | reg = <0 0x11000400 0 0x80>, | ||
26 | <0 0x11000480 0 0x80>, | ||
27 | <0 0x11000500 0 0x80>, | ||
28 | <0 0x11000580 0 0x80>, | ||
29 | <0 0x11000600 0 0x80>, | ||
30 | <0 0x11000680 0 0x80>, | ||
31 | <0 0x11000700 0 0x80>, | ||
32 | <0 0x11000780 0 0x80>, | ||
33 | <0 0x11000800 0 0x80>, | ||
34 | <0 0x11000880 0 0x80>, | ||
35 | <0 0x11000900 0 0x80>, | ||
36 | <0 0x11000980 0 0x80>; | ||
37 | interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_LOW>, | ||
38 | <GIC_SPI 104 IRQ_TYPE_LEVEL_LOW>, | ||
39 | <GIC_SPI 105 IRQ_TYPE_LEVEL_LOW>, | ||
40 | <GIC_SPI 106 IRQ_TYPE_LEVEL_LOW>, | ||
41 | <GIC_SPI 107 IRQ_TYPE_LEVEL_LOW>, | ||
42 | <GIC_SPI 108 IRQ_TYPE_LEVEL_LOW>, | ||
43 | <GIC_SPI 109 IRQ_TYPE_LEVEL_LOW>, | ||
44 | <GIC_SPI 110 IRQ_TYPE_LEVEL_LOW>, | ||
45 | <GIC_SPI 111 IRQ_TYPE_LEVEL_LOW>, | ||
46 | <GIC_SPI 112 IRQ_TYPE_LEVEL_LOW>, | ||
47 | <GIC_SPI 113 IRQ_TYPE_LEVEL_LOW>, | ||
48 | <GIC_SPI 114 IRQ_TYPE_LEVEL_LOW>; | ||
49 | dma-requests = <12>; | ||
50 | clocks = <&pericfg CLK_PERI_AP_DMA>; | ||
51 | clock-names = "apdma"; | ||
52 | mediatek,dma-33bits; | ||
53 | #dma-cells = <1>; | ||
54 | }; | ||
diff --git a/Documentation/devicetree/bindings/dma/sun6i-dma.txt b/Documentation/devicetree/bindings/dma/sun6i-dma.txt index 7fccc20d8331..cae31f4e77ba 100644 --- a/Documentation/devicetree/bindings/dma/sun6i-dma.txt +++ b/Documentation/devicetree/bindings/dma/sun6i-dma.txt | |||
@@ -28,12 +28,17 @@ Example: | |||
28 | }; | 28 | }; |
29 | 29 | ||
30 | ------------------------------------------------------------------------------ | 30 | ------------------------------------------------------------------------------ |
31 | For A64 DMA controller: | 31 | For A64 and H6 DMA controller: |
32 | 32 | ||
33 | Required properties: | 33 | Required properties: |
34 | - compatible: "allwinner,sun50i-a64-dma" | 34 | - compatible: Must be one of |
35 | "allwinner,sun50i-a64-dma" | ||
36 | "allwinner,sun50i-h6-dma" | ||
35 | - dma-channels: Number of DMA channels supported by the controller. | 37 | - dma-channels: Number of DMA channels supported by the controller. |
36 | Refer to Documentation/devicetree/bindings/dma/dma.txt | 38 | Refer to Documentation/devicetree/bindings/dma/dma.txt |
39 | - clocks: In addition to parent AHB clock, it should also contain mbus | ||
40 | clock (H6 only) | ||
41 | - clock-names: Should contain "bus" and "mbus" (H6 only) | ||
37 | - all properties above, i.e. reg, interrupts, clocks, resets and #dma-cells | 42 | - all properties above, i.e. reg, interrupts, clocks, resets and #dma-cells |
38 | 43 | ||
39 | Optional properties: | 44 | Optional properties: |
diff --git a/Documentation/driver-api/dmaengine/dmatest.rst b/Documentation/driver-api/dmaengine/dmatest.rst index e78d070bb468..ee268d445d38 100644 --- a/Documentation/driver-api/dmaengine/dmatest.rst +++ b/Documentation/driver-api/dmaengine/dmatest.rst | |||
@@ -44,7 +44,8 @@ Example of usage:: | |||
44 | 44 | ||
45 | dmatest.timeout=2000 dmatest.iterations=1 dmatest.channel=dma0chan0 dmatest.run=1 | 45 | dmatest.timeout=2000 dmatest.iterations=1 dmatest.channel=dma0chan0 dmatest.run=1 |
46 | 46 | ||
47 | Example of multi-channel test usage: | 47 | Example of multi-channel test usage (new in the 5.0 kernel):: |
48 | |||
48 | % modprobe dmatest | 49 | % modprobe dmatest |
49 | % echo 2000 > /sys/module/dmatest/parameters/timeout | 50 | % echo 2000 > /sys/module/dmatest/parameters/timeout |
50 | % echo 1 > /sys/module/dmatest/parameters/iterations | 51 | % echo 1 > /sys/module/dmatest/parameters/iterations |
@@ -53,15 +54,18 @@ Example of multi-channel test usage: | |||
53 | % echo dma0chan2 > /sys/module/dmatest/parameters/channel | 54 | % echo dma0chan2 > /sys/module/dmatest/parameters/channel |
54 | % echo 1 > /sys/module/dmatest/parameters/run | 55 | % echo 1 > /sys/module/dmatest/parameters/run |
55 | 56 | ||
56 | Note: the channel parameter should always be the last parameter set prior to | 57 | .. note:: |
57 | running the test (setting run=1), this is because upon setting the channel | 58 | For all tests, starting in the 5.0 kernel, either single- or multi-channel, |
58 | parameter, that specific channel is requested using the dmaengine and a thread | 59 | the channel parameter(s) must be set after all other parameters. It is at |
59 | is created with the existing parameters. This thread is set as pending | 60 | that time that the existing parameter values are acquired for use by the |
60 | and will be executed once run is set to 1. Any parameters set after the thread | 61 | thread(s). All other parameters are shared. Therefore, if changes are made |
61 | is created are not applied. | 62 | to any of the other parameters, and an additional channel specified, the |
63 | (shared) parameters used for all threads will use the new values. | ||
64 | After the channels are specified, each thread is set as pending. All threads | ||
65 | begin execution when the run parameter is set to 1. | ||
62 | 66 | ||
63 | .. hint:: | 67 | .. hint:: |
64 | available channel list could be extracted by running the following command:: | 68 | A list of available channels can be found by running the following command:: |
65 | 69 | ||
66 | % ls -1 /sys/class/dma/ | 70 | % ls -1 /sys/class/dma/ |
67 | 71 | ||
@@ -204,6 +208,7 @@ Releasing Channels | |||
204 | Channels can be freed by setting run to 0. | 208 | Channels can be freed by setting run to 0. |
205 | 209 | ||
206 | Example:: | 210 | Example:: |
211 | |||
207 | % echo dma0chan1 > /sys/module/dmatest/parameters/channel | 212 | % echo dma0chan1 > /sys/module/dmatest/parameters/channel |
208 | dmatest: Added 1 threads using dma0chan1 | 213 | dmatest: Added 1 threads using dma0chan1 |
209 | % cat /sys/class/dma/dma0chan1/in_use | 214 | % cat /sys/class/dma/dma0chan1/in_use |
diff --git a/MAINTAINERS b/MAINTAINERS index 51ef58f15cdd..d452d7bbbaad 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -4683,6 +4683,13 @@ L: linux-mtd@lists.infradead.org | |||
4683 | S: Supported | 4683 | S: Supported |
4684 | F: drivers/mtd/nand/raw/denali* | 4684 | F: drivers/mtd/nand/raw/denali* |
4685 | 4685 | ||
4686 | DESIGNWARE EDMA CORE IP DRIVER | ||
4687 | M: Gustavo Pimentel <gustavo.pimentel@synopsys.com> | ||
4688 | L: dmaengine@vger.kernel.org | ||
4689 | S: Maintained | ||
4690 | F: drivers/dma/dw-edma/ | ||
4691 | F: include/linux/dma/edma.h | ||
4692 | |||
4686 | DESIGNWARE USB2 DRD IP DRIVER | 4693 | DESIGNWARE USB2 DRD IP DRIVER |
4687 | M: Minas Harutyunyan <hminas@synopsys.com> | 4694 | M: Minas Harutyunyan <hminas@synopsys.com> |
4688 | L: linux-usb@vger.kernel.org | 4695 | L: linux-usb@vger.kernel.org |
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 703275cc29de..03fa0c58cef3 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
@@ -103,6 +103,7 @@ config AXI_DMAC | |||
103 | depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_SOCFPGA || COMPILE_TEST | 103 | depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_SOCFPGA || COMPILE_TEST |
104 | select DMA_ENGINE | 104 | select DMA_ENGINE |
105 | select DMA_VIRTUAL_CHANNELS | 105 | select DMA_VIRTUAL_CHANNELS |
106 | select REGMAP_MMIO | ||
106 | help | 107 | help |
107 | Enable support for the Analog Devices AXI-DMAC peripheral. This DMA | 108 | Enable support for the Analog Devices AXI-DMAC peripheral. This DMA |
108 | controller is often used in Analog Device's reference designs for FPGA | 109 | controller is often used in Analog Device's reference designs for FPGA |
@@ -584,7 +585,7 @@ config TEGRA20_APB_DMA | |||
584 | 585 | ||
585 | config TEGRA210_ADMA | 586 | config TEGRA210_ADMA |
586 | tristate "NVIDIA Tegra210 ADMA support" | 587 | tristate "NVIDIA Tegra210 ADMA support" |
587 | depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST) && PM_CLK | 588 | depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST) |
588 | select DMA_ENGINE | 589 | select DMA_ENGINE |
589 | select DMA_VIRTUAL_CHANNELS | 590 | select DMA_VIRTUAL_CHANNELS |
590 | help | 591 | help |
@@ -666,6 +667,8 @@ source "drivers/dma/qcom/Kconfig" | |||
666 | 667 | ||
667 | source "drivers/dma/dw/Kconfig" | 668 | source "drivers/dma/dw/Kconfig" |
668 | 669 | ||
670 | source "drivers/dma/dw-edma/Kconfig" | ||
671 | |||
669 | source "drivers/dma/hsu/Kconfig" | 672 | source "drivers/dma/hsu/Kconfig" |
670 | 673 | ||
671 | source "drivers/dma/sh/Kconfig" | 674 | source "drivers/dma/sh/Kconfig" |
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 6126e1c3a875..5bddf6f8790f 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile | |||
@@ -29,6 +29,7 @@ obj-$(CONFIG_DMA_SUN4I) += sun4i-dma.o | |||
29 | obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o | 29 | obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o |
30 | obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac/ | 30 | obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac/ |
31 | obj-$(CONFIG_DW_DMAC_CORE) += dw/ | 31 | obj-$(CONFIG_DW_DMAC_CORE) += dw/ |
32 | obj-$(CONFIG_DW_EDMA) += dw-edma/ | ||
32 | obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o | 33 | obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o |
33 | obj-$(CONFIG_FSL_DMA) += fsldma.o | 34 | obj-$(CONFIG_FSL_DMA) += fsldma.o |
34 | obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o | 35 | obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o |
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 464725dcad00..9adc7a2fa3d3 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c | |||
@@ -2508,9 +2508,8 @@ DEFINE_SHOW_ATTRIBUTE(pl08x_debugfs); | |||
2508 | static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) | 2508 | static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) |
2509 | { | 2509 | { |
2510 | /* Expose a simple debugfs interface to view all clocks */ | 2510 | /* Expose a simple debugfs interface to view all clocks */ |
2511 | (void) debugfs_create_file(dev_name(&pl08x->adev->dev), | 2511 | debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO, |
2512 | S_IFREG | S_IRUGO, NULL, pl08x, | 2512 | NULL, pl08x, &pl08x_debugfs_fops); |
2513 | &pl08x_debugfs_fops); | ||
2514 | } | 2513 | } |
2515 | 2514 | ||
2516 | #else | 2515 | #else |
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 627ef3e5b312..b58ac720d9a1 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c | |||
@@ -1568,11 +1568,14 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan) | |||
1568 | struct at_xdmac_desc *desc; | 1568 | struct at_xdmac_desc *desc; |
1569 | struct dma_async_tx_descriptor *txd; | 1569 | struct dma_async_tx_descriptor *txd; |
1570 | 1570 | ||
1571 | desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node); | 1571 | if (!list_empty(&atchan->xfers_list)) { |
1572 | txd = &desc->tx_dma_desc; | 1572 | desc = list_first_entry(&atchan->xfers_list, |
1573 | struct at_xdmac_desc, xfer_node); | ||
1574 | txd = &desc->tx_dma_desc; | ||
1573 | 1575 | ||
1574 | if (txd->flags & DMA_PREP_INTERRUPT) | 1576 | if (txd->flags & DMA_PREP_INTERRUPT) |
1575 | dmaengine_desc_get_callback_invoke(txd, NULL); | 1577 | dmaengine_desc_get_callback_invoke(txd, NULL); |
1578 | } | ||
1576 | } | 1579 | } |
1577 | 1580 | ||
1578 | static void at_xdmac_handle_error(struct at_xdmac_chan *atchan) | 1581 | static void at_xdmac_handle_error(struct at_xdmac_chan *atchan) |
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c index fa81d0177765..275e90fa829d 100644 --- a/drivers/dma/bcm-sba-raid.c +++ b/drivers/dma/bcm-sba-raid.c | |||
@@ -164,7 +164,6 @@ struct sba_device { | |||
164 | struct list_head reqs_free_list; | 164 | struct list_head reqs_free_list; |
165 | /* DebugFS directory entries */ | 165 | /* DebugFS directory entries */ |
166 | struct dentry *root; | 166 | struct dentry *root; |
167 | struct dentry *stats; | ||
168 | }; | 167 | }; |
169 | 168 | ||
170 | /* ====== Command helper routines ===== */ | 169 | /* ====== Command helper routines ===== */ |
@@ -1716,17 +1715,11 @@ static int sba_probe(struct platform_device *pdev) | |||
1716 | 1715 | ||
1717 | /* Create debugfs root entry */ | 1716 | /* Create debugfs root entry */ |
1718 | sba->root = debugfs_create_dir(dev_name(sba->dev), NULL); | 1717 | sba->root = debugfs_create_dir(dev_name(sba->dev), NULL); |
1719 | if (IS_ERR_OR_NULL(sba->root)) { | ||
1720 | dev_err(sba->dev, "failed to create debugfs root entry\n"); | ||
1721 | sba->root = NULL; | ||
1722 | goto skip_debugfs; | ||
1723 | } | ||
1724 | 1718 | ||
1725 | /* Create debugfs stats entry */ | 1719 | /* Create debugfs stats entry */ |
1726 | sba->stats = debugfs_create_devm_seqfile(sba->dev, "stats", sba->root, | 1720 | debugfs_create_devm_seqfile(sba->dev, "stats", sba->root, |
1727 | sba_debugfs_stats_show); | 1721 | sba_debugfs_stats_show); |
1728 | if (IS_ERR_OR_NULL(sba->stats)) | 1722 | |
1729 | dev_err(sba->dev, "failed to create debugfs stats file\n"); | ||
1730 | skip_debugfs: | 1723 | skip_debugfs: |
1731 | 1724 | ||
1732 | /* Register DMA device with Linux async framework */ | 1725 | /* Register DMA device with Linux async framework */ |
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index 547786ac342b..e51d836afcc7 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c | |||
@@ -1378,10 +1378,8 @@ static int __init init_coh901318_debugfs(void) | |||
1378 | 1378 | ||
1379 | dma_dentry = debugfs_create_dir("dma", NULL); | 1379 | dma_dentry = debugfs_create_dir("dma", NULL); |
1380 | 1380 | ||
1381 | (void) debugfs_create_file("status", | 1381 | debugfs_create_file("status", S_IFREG | S_IRUGO, dma_dentry, NULL, |
1382 | S_IFREG | S_IRUGO, | 1382 | &coh901318_debugfs_status_operations); |
1383 | dma_dentry, NULL, | ||
1384 | &coh901318_debugfs_status_operations); | ||
1385 | return 0; | 1383 | return 0; |
1386 | } | 1384 | } |
1387 | 1385 | ||
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index 8a3f1043917b..a0ee404b736e 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c | |||
@@ -2,7 +2,7 @@ | |||
2 | /* | 2 | /* |
3 | * Driver for the Analog Devices AXI-DMAC core | 3 | * Driver for the Analog Devices AXI-DMAC core |
4 | * | 4 | * |
5 | * Copyright 2013-2015 Analog Devices Inc. | 5 | * Copyright 2013-2019 Analog Devices Inc. |
6 | * Author: Lars-Peter Clausen <lars@metafoo.de> | 6 | * Author: Lars-Peter Clausen <lars@metafoo.de> |
7 | */ | 7 | */ |
8 | 8 | ||
@@ -18,7 +18,9 @@ | |||
18 | #include <linux/of.h> | 18 | #include <linux/of.h> |
19 | #include <linux/of_dma.h> | 19 | #include <linux/of_dma.h> |
20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
21 | #include <linux/regmap.h> | ||
21 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | #include <linux/fpga/adi-axi-common.h> | ||
22 | 24 | ||
23 | #include <dt-bindings/dma/axi-dmac.h> | 25 | #include <dt-bindings/dma/axi-dmac.h> |
24 | 26 | ||
@@ -62,6 +64,8 @@ | |||
62 | #define AXI_DMAC_REG_STATUS 0x430 | 64 | #define AXI_DMAC_REG_STATUS 0x430 |
63 | #define AXI_DMAC_REG_CURRENT_SRC_ADDR 0x434 | 65 | #define AXI_DMAC_REG_CURRENT_SRC_ADDR 0x434 |
64 | #define AXI_DMAC_REG_CURRENT_DEST_ADDR 0x438 | 66 | #define AXI_DMAC_REG_CURRENT_DEST_ADDR 0x438 |
67 | #define AXI_DMAC_REG_PARTIAL_XFER_LEN 0x44c | ||
68 | #define AXI_DMAC_REG_PARTIAL_XFER_ID 0x450 | ||
65 | 69 | ||
66 | #define AXI_DMAC_CTRL_ENABLE BIT(0) | 70 | #define AXI_DMAC_CTRL_ENABLE BIT(0) |
67 | #define AXI_DMAC_CTRL_PAUSE BIT(1) | 71 | #define AXI_DMAC_CTRL_PAUSE BIT(1) |
@@ -70,6 +74,10 @@ | |||
70 | #define AXI_DMAC_IRQ_EOT BIT(1) | 74 | #define AXI_DMAC_IRQ_EOT BIT(1) |
71 | 75 | ||
72 | #define AXI_DMAC_FLAG_CYCLIC BIT(0) | 76 | #define AXI_DMAC_FLAG_CYCLIC BIT(0) |
77 | #define AXI_DMAC_FLAG_LAST BIT(1) | ||
78 | #define AXI_DMAC_FLAG_PARTIAL_REPORT BIT(2) | ||
79 | |||
80 | #define AXI_DMAC_FLAG_PARTIAL_XFER_DONE BIT(31) | ||
73 | 81 | ||
74 | /* The maximum ID allocated by the hardware is 31 */ | 82 | /* The maximum ID allocated by the hardware is 31 */ |
75 | #define AXI_DMAC_SG_UNUSED 32U | 83 | #define AXI_DMAC_SG_UNUSED 32U |
@@ -82,12 +90,14 @@ struct axi_dmac_sg { | |||
82 | unsigned int dest_stride; | 90 | unsigned int dest_stride; |
83 | unsigned int src_stride; | 91 | unsigned int src_stride; |
84 | unsigned int id; | 92 | unsigned int id; |
93 | unsigned int partial_len; | ||
85 | bool schedule_when_free; | 94 | bool schedule_when_free; |
86 | }; | 95 | }; |
87 | 96 | ||
88 | struct axi_dmac_desc { | 97 | struct axi_dmac_desc { |
89 | struct virt_dma_desc vdesc; | 98 | struct virt_dma_desc vdesc; |
90 | bool cyclic; | 99 | bool cyclic; |
100 | bool have_partial_xfer; | ||
91 | 101 | ||
92 | unsigned int num_submitted; | 102 | unsigned int num_submitted; |
93 | unsigned int num_completed; | 103 | unsigned int num_completed; |
@@ -108,8 +118,10 @@ struct axi_dmac_chan { | |||
108 | unsigned int dest_type; | 118 | unsigned int dest_type; |
109 | 119 | ||
110 | unsigned int max_length; | 120 | unsigned int max_length; |
111 | unsigned int align_mask; | 121 | unsigned int address_align_mask; |
122 | unsigned int length_align_mask; | ||
112 | 123 | ||
124 | bool hw_partial_xfer; | ||
113 | bool hw_cyclic; | 125 | bool hw_cyclic; |
114 | bool hw_2d; | 126 | bool hw_2d; |
115 | }; | 127 | }; |
@@ -167,14 +179,14 @@ static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len) | |||
167 | { | 179 | { |
168 | if (len == 0) | 180 | if (len == 0) |
169 | return false; | 181 | return false; |
170 | if ((len & chan->align_mask) != 0) /* Not aligned */ | 182 | if ((len & chan->length_align_mask) != 0) /* Not aligned */ |
171 | return false; | 183 | return false; |
172 | return true; | 184 | return true; |
173 | } | 185 | } |
174 | 186 | ||
175 | static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr) | 187 | static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr) |
176 | { | 188 | { |
177 | if ((addr & chan->align_mask) != 0) /* Not aligned */ | 189 | if ((addr & chan->address_align_mask) != 0) /* Not aligned */ |
178 | return false; | 190 | return false; |
179 | return true; | 191 | return true; |
180 | } | 192 | } |
@@ -210,11 +222,13 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan) | |||
210 | } | 222 | } |
211 | 223 | ||
212 | desc->num_submitted++; | 224 | desc->num_submitted++; |
213 | if (desc->num_submitted == desc->num_sgs) { | 225 | if (desc->num_submitted == desc->num_sgs || |
226 | desc->have_partial_xfer) { | ||
214 | if (desc->cyclic) | 227 | if (desc->cyclic) |
215 | desc->num_submitted = 0; /* Start again */ | 228 | desc->num_submitted = 0; /* Start again */ |
216 | else | 229 | else |
217 | chan->next_desc = NULL; | 230 | chan->next_desc = NULL; |
231 | flags |= AXI_DMAC_FLAG_LAST; | ||
218 | } else { | 232 | } else { |
219 | chan->next_desc = desc; | 233 | chan->next_desc = desc; |
220 | } | 234 | } |
@@ -240,6 +254,9 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan) | |||
240 | desc->num_sgs == 1) | 254 | desc->num_sgs == 1) |
241 | flags |= AXI_DMAC_FLAG_CYCLIC; | 255 | flags |= AXI_DMAC_FLAG_CYCLIC; |
242 | 256 | ||
257 | if (chan->hw_partial_xfer) | ||
258 | flags |= AXI_DMAC_FLAG_PARTIAL_REPORT; | ||
259 | |||
243 | axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1); | 260 | axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1); |
244 | axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1); | 261 | axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1); |
245 | axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags); | 262 | axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags); |
@@ -252,6 +269,83 @@ static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan) | |||
252 | struct axi_dmac_desc, vdesc.node); | 269 | struct axi_dmac_desc, vdesc.node); |
253 | } | 270 | } |
254 | 271 | ||
272 | static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan, | ||
273 | struct axi_dmac_sg *sg) | ||
274 | { | ||
275 | if (chan->hw_2d) | ||
276 | return sg->x_len * sg->y_len; | ||
277 | else | ||
278 | return sg->x_len; | ||
279 | } | ||
280 | |||
281 | static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan) | ||
282 | { | ||
283 | struct axi_dmac *dmac = chan_to_axi_dmac(chan); | ||
284 | struct axi_dmac_desc *desc; | ||
285 | struct axi_dmac_sg *sg; | ||
286 | u32 xfer_done, len, id, i; | ||
287 | bool found_sg; | ||
288 | |||
289 | do { | ||
290 | len = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_LEN); | ||
291 | id = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_ID); | ||
292 | |||
293 | found_sg = false; | ||
294 | list_for_each_entry(desc, &chan->active_descs, vdesc.node) { | ||
295 | for (i = 0; i < desc->num_sgs; i++) { | ||
296 | sg = &desc->sg[i]; | ||
297 | if (sg->id == AXI_DMAC_SG_UNUSED) | ||
298 | continue; | ||
299 | if (sg->id == id) { | ||
300 | desc->have_partial_xfer = true; | ||
301 | sg->partial_len = len; | ||
302 | found_sg = true; | ||
303 | break; | ||
304 | } | ||
305 | } | ||
306 | if (found_sg) | ||
307 | break; | ||
308 | } | ||
309 | |||
310 | if (found_sg) { | ||
311 | dev_dbg(dmac->dma_dev.dev, | ||
312 | "Found partial segment id=%u, len=%u\n", | ||
313 | id, len); | ||
314 | } else { | ||
315 | dev_warn(dmac->dma_dev.dev, | ||
316 | "Not found partial segment id=%u, len=%u\n", | ||
317 | id, len); | ||
318 | } | ||
319 | |||
320 | /* Check if we have any more partial transfers */ | ||
321 | xfer_done = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE); | ||
322 | xfer_done = !(xfer_done & AXI_DMAC_FLAG_PARTIAL_XFER_DONE); | ||
323 | |||
324 | } while (!xfer_done); | ||
325 | } | ||
326 | |||
327 | static void axi_dmac_compute_residue(struct axi_dmac_chan *chan, | ||
328 | struct axi_dmac_desc *active) | ||
329 | { | ||
330 | struct dmaengine_result *rslt = &active->vdesc.tx_result; | ||
331 | unsigned int start = active->num_completed - 1; | ||
332 | struct axi_dmac_sg *sg; | ||
333 | unsigned int i, total; | ||
334 | |||
335 | rslt->result = DMA_TRANS_NOERROR; | ||
336 | rslt->residue = 0; | ||
337 | |||
338 | /* | ||
339 | * We get here if the last completed segment is partial, which | ||
340 | * means we can compute the residue from that segment onwards | ||
341 | */ | ||
342 | for (i = start; i < active->num_sgs; i++) { | ||
343 | sg = &active->sg[i]; | ||
344 | total = axi_dmac_total_sg_bytes(chan, sg); | ||
345 | rslt->residue += (total - sg->partial_len); | ||
346 | } | ||
347 | } | ||
348 | |||
255 | static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan, | 349 | static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan, |
256 | unsigned int completed_transfers) | 350 | unsigned int completed_transfers) |
257 | { | 351 | { |
@@ -263,6 +357,10 @@ static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan, | |||
263 | if (!active) | 357 | if (!active) |
264 | return false; | 358 | return false; |
265 | 359 | ||
360 | if (chan->hw_partial_xfer && | ||
361 | (completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE)) | ||
362 | axi_dmac_dequeue_partial_xfers(chan); | ||
363 | |||
266 | do { | 364 | do { |
267 | sg = &active->sg[active->num_completed]; | 365 | sg = &active->sg[active->num_completed]; |
268 | if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */ | 366 | if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */ |
@@ -276,10 +374,14 @@ static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan, | |||
276 | start_next = true; | 374 | start_next = true; |
277 | } | 375 | } |
278 | 376 | ||
377 | if (sg->partial_len) | ||
378 | axi_dmac_compute_residue(chan, active); | ||
379 | |||
279 | if (active->cyclic) | 380 | if (active->cyclic) |
280 | vchan_cyclic_callback(&active->vdesc); | 381 | vchan_cyclic_callback(&active->vdesc); |
281 | 382 | ||
282 | if (active->num_completed == active->num_sgs) { | 383 | if (active->num_completed == active->num_sgs || |
384 | sg->partial_len) { | ||
283 | if (active->cyclic) { | 385 | if (active->cyclic) { |
284 | active->num_completed = 0; /* wrap around */ | 386 | active->num_completed = 0; /* wrap around */ |
285 | } else { | 387 | } else { |
@@ -391,7 +493,7 @@ static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan, | |||
391 | num_segments = DIV_ROUND_UP(period_len, chan->max_length); | 493 | num_segments = DIV_ROUND_UP(period_len, chan->max_length); |
392 | segment_size = DIV_ROUND_UP(period_len, num_segments); | 494 | segment_size = DIV_ROUND_UP(period_len, num_segments); |
393 | /* Take care of alignment */ | 495 | /* Take care of alignment */ |
394 | segment_size = ((segment_size - 1) | chan->align_mask) + 1; | 496 | segment_size = ((segment_size - 1) | chan->length_align_mask) + 1; |
395 | 497 | ||
396 | for (i = 0; i < num_periods; i++) { | 498 | for (i = 0; i < num_periods; i++) { |
397 | len = period_len; | 499 | len = period_len; |
@@ -561,6 +663,9 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved( | |||
561 | desc->sg[0].y_len = 1; | 663 | desc->sg[0].y_len = 1; |
562 | } | 664 | } |
563 | 665 | ||
666 | if (flags & DMA_CYCLIC) | ||
667 | desc->cyclic = true; | ||
668 | |||
564 | return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); | 669 | return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); |
565 | } | 670 | } |
566 | 671 | ||
@@ -574,6 +679,44 @@ static void axi_dmac_desc_free(struct virt_dma_desc *vdesc) | |||
574 | kfree(container_of(vdesc, struct axi_dmac_desc, vdesc)); | 679 | kfree(container_of(vdesc, struct axi_dmac_desc, vdesc)); |
575 | } | 680 | } |
576 | 681 | ||
682 | static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg) | ||
683 | { | ||
684 | switch (reg) { | ||
685 | case AXI_DMAC_REG_IRQ_MASK: | ||
686 | case AXI_DMAC_REG_IRQ_SOURCE: | ||
687 | case AXI_DMAC_REG_IRQ_PENDING: | ||
688 | case AXI_DMAC_REG_CTRL: | ||
689 | case AXI_DMAC_REG_TRANSFER_ID: | ||
690 | case AXI_DMAC_REG_START_TRANSFER: | ||
691 | case AXI_DMAC_REG_FLAGS: | ||
692 | case AXI_DMAC_REG_DEST_ADDRESS: | ||
693 | case AXI_DMAC_REG_SRC_ADDRESS: | ||
694 | case AXI_DMAC_REG_X_LENGTH: | ||
695 | case AXI_DMAC_REG_Y_LENGTH: | ||
696 | case AXI_DMAC_REG_DEST_STRIDE: | ||
697 | case AXI_DMAC_REG_SRC_STRIDE: | ||
698 | case AXI_DMAC_REG_TRANSFER_DONE: | ||
699 | case AXI_DMAC_REG_ACTIVE_TRANSFER_ID: | ||
700 | case AXI_DMAC_REG_STATUS: | ||
701 | case AXI_DMAC_REG_CURRENT_SRC_ADDR: | ||
702 | case AXI_DMAC_REG_CURRENT_DEST_ADDR: | ||
703 | case AXI_DMAC_REG_PARTIAL_XFER_LEN: | ||
704 | case AXI_DMAC_REG_PARTIAL_XFER_ID: | ||
705 | return true; | ||
706 | default: | ||
707 | return false; | ||
708 | } | ||
709 | } | ||
710 | |||
711 | static const struct regmap_config axi_dmac_regmap_config = { | ||
712 | .reg_bits = 32, | ||
713 | .val_bits = 32, | ||
714 | .reg_stride = 4, | ||
715 | .max_register = AXI_DMAC_REG_PARTIAL_XFER_ID, | ||
716 | .readable_reg = axi_dmac_regmap_rdwr, | ||
717 | .writeable_reg = axi_dmac_regmap_rdwr, | ||
718 | }; | ||
719 | |||
577 | /* | 720 | /* |
578 | * The configuration stored in the devicetree matches the configuration | 721 | * The configuration stored in the devicetree matches the configuration |
579 | * parameters of the peripheral instance and allows the driver to know which | 722 | * parameters of the peripheral instance and allows the driver to know which |
@@ -617,7 +760,7 @@ static int axi_dmac_parse_chan_dt(struct device_node *of_chan, | |||
617 | return ret; | 760 | return ret; |
618 | chan->dest_width = val / 8; | 761 | chan->dest_width = val / 8; |
619 | 762 | ||
620 | chan->align_mask = max(chan->dest_width, chan->src_width) - 1; | 763 | chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1; |
621 | 764 | ||
622 | if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan)) | 765 | if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan)) |
623 | chan->direction = DMA_MEM_TO_MEM; | 766 | chan->direction = DMA_MEM_TO_MEM; |
@@ -631,9 +774,12 @@ static int axi_dmac_parse_chan_dt(struct device_node *of_chan, | |||
631 | return 0; | 774 | return 0; |
632 | } | 775 | } |
633 | 776 | ||
634 | static void axi_dmac_detect_caps(struct axi_dmac *dmac) | 777 | static int axi_dmac_detect_caps(struct axi_dmac *dmac) |
635 | { | 778 | { |
636 | struct axi_dmac_chan *chan = &dmac->chan; | 779 | struct axi_dmac_chan *chan = &dmac->chan; |
780 | unsigned int version; | ||
781 | |||
782 | version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION); | ||
637 | 783 | ||
638 | axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC); | 784 | axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC); |
639 | if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC) | 785 | if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC) |
@@ -647,6 +793,35 @@ static void axi_dmac_detect_caps(struct axi_dmac *dmac) | |||
647 | chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH); | 793 | chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH); |
648 | if (chan->max_length != UINT_MAX) | 794 | if (chan->max_length != UINT_MAX) |
649 | chan->max_length++; | 795 | chan->max_length++; |
796 | |||
797 | axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, 0xffffffff); | ||
798 | if (axi_dmac_read(dmac, AXI_DMAC_REG_DEST_ADDRESS) == 0 && | ||
799 | chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM) { | ||
800 | dev_err(dmac->dma_dev.dev, | ||
801 | "Destination memory-mapped interface not supported."); | ||
802 | return -ENODEV; | ||
803 | } | ||
804 | |||
805 | axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, 0xffffffff); | ||
806 | if (axi_dmac_read(dmac, AXI_DMAC_REG_SRC_ADDRESS) == 0 && | ||
807 | chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM) { | ||
808 | dev_err(dmac->dma_dev.dev, | ||
809 | "Source memory-mapped interface not supported."); | ||
810 | return -ENODEV; | ||
811 | } | ||
812 | |||
813 | if (version >= ADI_AXI_PCORE_VER(4, 2, 'a')) | ||
814 | chan->hw_partial_xfer = true; | ||
815 | |||
816 | if (version >= ADI_AXI_PCORE_VER(4, 1, 'a')) { | ||
817 | axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0x00); | ||
818 | chan->length_align_mask = | ||
819 | axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH); | ||
820 | } else { | ||
821 | chan->length_align_mask = chan->address_align_mask; | ||
822 | } | ||
823 | |||
824 | return 0; | ||
650 | } | 825 | } |
651 | 826 | ||
652 | static int axi_dmac_probe(struct platform_device *pdev) | 827 | static int axi_dmac_probe(struct platform_device *pdev) |
@@ -722,7 +897,11 @@ static int axi_dmac_probe(struct platform_device *pdev) | |||
722 | if (ret < 0) | 897 | if (ret < 0) |
723 | return ret; | 898 | return ret; |
724 | 899 | ||
725 | axi_dmac_detect_caps(dmac); | 900 | ret = axi_dmac_detect_caps(dmac); |
901 | if (ret) | ||
902 | goto err_clk_disable; | ||
903 | |||
904 | dma_dev->copy_align = (dmac->chan.address_align_mask + 1); | ||
726 | 905 | ||
727 | axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00); | 906 | axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00); |
728 | 907 | ||
@@ -742,6 +921,8 @@ static int axi_dmac_probe(struct platform_device *pdev) | |||
742 | 921 | ||
743 | platform_set_drvdata(pdev, dmac); | 922 | platform_set_drvdata(pdev, dmac); |
744 | 923 | ||
924 | devm_regmap_init_mmio(&pdev->dev, dmac->base, &axi_dmac_regmap_config); | ||
925 | |||
745 | return 0; | 926 | return 0; |
746 | 927 | ||
747 | err_unregister_of: | 928 | err_unregister_of: |
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index 6b8c4c458e8a..7fe9309a876b 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c | |||
@@ -156,7 +156,6 @@ struct jz4780_dma_dev { | |||
156 | }; | 156 | }; |
157 | 157 | ||
158 | struct jz4780_dma_filter_data { | 158 | struct jz4780_dma_filter_data { |
159 | struct device_node *of_node; | ||
160 | uint32_t transfer_type; | 159 | uint32_t transfer_type; |
161 | int channel; | 160 | int channel; |
162 | }; | 161 | }; |
@@ -772,8 +771,6 @@ static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param) | |||
772 | struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan); | 771 | struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan); |
773 | struct jz4780_dma_filter_data *data = param; | 772 | struct jz4780_dma_filter_data *data = param; |
774 | 773 | ||
775 | if (jzdma->dma_device.dev->of_node != data->of_node) | ||
776 | return false; | ||
777 | 774 | ||
778 | if (data->channel > -1) { | 775 | if (data->channel > -1) { |
779 | if (data->channel != jzchan->id) | 776 | if (data->channel != jzchan->id) |
@@ -797,7 +794,6 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec, | |||
797 | if (dma_spec->args_count != 2) | 794 | if (dma_spec->args_count != 2) |
798 | return NULL; | 795 | return NULL; |
799 | 796 | ||
800 | data.of_node = ofdma->of_node; | ||
801 | data.transfer_type = dma_spec->args[0]; | 797 | data.transfer_type = dma_spec->args[0]; |
802 | data.channel = dma_spec->args[1]; | 798 | data.channel = dma_spec->args[1]; |
803 | 799 | ||
@@ -822,7 +818,8 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec, | |||
822 | return dma_get_slave_channel( | 818 | return dma_get_slave_channel( |
823 | &jzdma->chan[data.channel].vchan.chan); | 819 | &jzdma->chan[data.channel].vchan.chan); |
824 | } else { | 820 | } else { |
825 | return dma_request_channel(mask, jz4780_dma_filter_fn, &data); | 821 | return __dma_request_channel(&mask, jz4780_dma_filter_fn, &data, |
822 | ofdma->of_node); | ||
826 | } | 823 | } |
827 | } | 824 | } |
828 | 825 | ||
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 58cbf9fd5a46..03ac4b96117c 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
@@ -61,7 +61,7 @@ static long dmaengine_ref_count; | |||
61 | /* --- sysfs implementation --- */ | 61 | /* --- sysfs implementation --- */ |
62 | 62 | ||
63 | /** | 63 | /** |
64 | * dev_to_dma_chan - convert a device pointer to the its sysfs container object | 64 | * dev_to_dma_chan - convert a device pointer to its sysfs container object |
65 | * @dev - device node | 65 | * @dev - device node |
66 | * | 66 | * |
67 | * Must be called under dma_list_mutex | 67 | * Must be called under dma_list_mutex |
@@ -629,11 +629,13 @@ EXPORT_SYMBOL_GPL(dma_get_any_slave_channel); | |||
629 | * @mask: capabilities that the channel must satisfy | 629 | * @mask: capabilities that the channel must satisfy |
630 | * @fn: optional callback to disposition available channels | 630 | * @fn: optional callback to disposition available channels |
631 | * @fn_param: opaque parameter to pass to dma_filter_fn | 631 | * @fn_param: opaque parameter to pass to dma_filter_fn |
632 | * @np: device node to look for DMA channels | ||
632 | * | 633 | * |
633 | * Returns pointer to appropriate DMA channel on success or NULL. | 634 | * Returns pointer to appropriate DMA channel on success or NULL. |
634 | */ | 635 | */ |
635 | struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, | 636 | struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, |
636 | dma_filter_fn fn, void *fn_param) | 637 | dma_filter_fn fn, void *fn_param, |
638 | struct device_node *np) | ||
637 | { | 639 | { |
638 | struct dma_device *device, *_d; | 640 | struct dma_device *device, *_d; |
639 | struct dma_chan *chan = NULL; | 641 | struct dma_chan *chan = NULL; |
@@ -641,6 +643,10 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, | |||
641 | /* Find a channel */ | 643 | /* Find a channel */ |
642 | mutex_lock(&dma_list_mutex); | 644 | mutex_lock(&dma_list_mutex); |
643 | list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { | 645 | list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { |
646 | /* Finds a DMA controller with matching device node */ | ||
647 | if (np && device->dev->of_node && np != device->dev->of_node) | ||
648 | continue; | ||
649 | |||
644 | chan = find_candidate(device, mask, fn, fn_param); | 650 | chan = find_candidate(device, mask, fn, fn_param); |
645 | if (!IS_ERR(chan)) | 651 | if (!IS_ERR(chan)) |
646 | break; | 652 | break; |
@@ -699,7 +705,7 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name) | |||
699 | chan = acpi_dma_request_slave_chan_by_name(dev, name); | 705 | chan = acpi_dma_request_slave_chan_by_name(dev, name); |
700 | 706 | ||
701 | if (chan) { | 707 | if (chan) { |
702 | /* Valid channel found or requester need to be deferred */ | 708 | /* Valid channel found or requester needs to be deferred */ |
703 | if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER) | 709 | if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER) |
704 | return chan; | 710 | return chan; |
705 | } | 711 | } |
@@ -757,7 +763,7 @@ struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask) | |||
757 | if (!mask) | 763 | if (!mask) |
758 | return ERR_PTR(-ENODEV); | 764 | return ERR_PTR(-ENODEV); |
759 | 765 | ||
760 | chan = __dma_request_channel(mask, NULL, NULL); | 766 | chan = __dma_request_channel(mask, NULL, NULL, NULL); |
761 | if (!chan) { | 767 | if (!chan) { |
762 | mutex_lock(&dma_list_mutex); | 768 | mutex_lock(&dma_list_mutex); |
763 | if (list_empty(&dma_device_list)) | 769 | if (list_empty(&dma_device_list)) |
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index d0ad46e916a6..3d22ae8dca72 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c | |||
@@ -62,7 +62,7 @@ MODULE_PARM_DESC(pq_sources, | |||
62 | static int timeout = 3000; | 62 | static int timeout = 3000; |
63 | module_param(timeout, uint, S_IRUGO | S_IWUSR); | 63 | module_param(timeout, uint, S_IRUGO | S_IWUSR); |
64 | MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), " | 64 | MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), " |
65 | "Pass -1 for infinite timeout"); | 65 | "Pass 0xFFFFFFFF (4294967295) for maximum timeout"); |
66 | 66 | ||
67 | static bool noverify; | 67 | static bool noverify; |
68 | module_param(noverify, bool, S_IRUGO | S_IWUSR); | 68 | module_param(noverify, bool, S_IRUGO | S_IWUSR); |
@@ -94,7 +94,7 @@ MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default | |||
94 | * @iterations: iterations before stopping test | 94 | * @iterations: iterations before stopping test |
95 | * @xor_sources: number of xor source buffers | 95 | * @xor_sources: number of xor source buffers |
96 | * @pq_sources: number of p+q source buffers | 96 | * @pq_sources: number of p+q source buffers |
97 | * @timeout: transfer timeout in msec, -1 for infinite timeout | 97 | * @timeout: transfer timeout in msec, 0 - 0xFFFFFFFF (4294967295) |
98 | */ | 98 | */ |
99 | struct dmatest_params { | 99 | struct dmatest_params { |
100 | unsigned int buf_size; | 100 | unsigned int buf_size; |
@@ -105,7 +105,7 @@ struct dmatest_params { | |||
105 | unsigned int iterations; | 105 | unsigned int iterations; |
106 | unsigned int xor_sources; | 106 | unsigned int xor_sources; |
107 | unsigned int pq_sources; | 107 | unsigned int pq_sources; |
108 | int timeout; | 108 | unsigned int timeout; |
109 | bool noverify; | 109 | bool noverify; |
110 | bool norandom; | 110 | bool norandom; |
111 | int alignment; | 111 | int alignment; |
diff --git a/drivers/dma/dw-edma/Kconfig b/drivers/dma/dw-edma/Kconfig new file mode 100644 index 000000000000..7ff17b2db6a1 --- /dev/null +++ b/drivers/dma/dw-edma/Kconfig | |||
@@ -0,0 +1,19 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
2 | |||
3 | config DW_EDMA | ||
4 | tristate "Synopsys DesignWare eDMA controller driver" | ||
5 | depends on PCI && PCI_MSI | ||
6 | select DMA_ENGINE | ||
7 | select DMA_VIRTUAL_CHANNELS | ||
8 | help | ||
9 | Support the Synopsys DesignWare eDMA controller, normally | ||
10 | implemented on endpoints SoCs. | ||
11 | |||
12 | config DW_EDMA_PCIE | ||
13 | tristate "Synopsys DesignWare eDMA PCIe driver" | ||
14 | depends on PCI && PCI_MSI | ||
15 | select DW_EDMA | ||
16 | help | ||
17 | Provides a glue-logic between the Synopsys DesignWare | ||
18 | eDMA controller and an endpoint PCIe device. This also serves | ||
19 | as a reference design to whom desires to use this IP. | ||
diff --git a/drivers/dma/dw-edma/Makefile b/drivers/dma/dw-edma/Makefile new file mode 100644 index 000000000000..8d45c0d5689d --- /dev/null +++ b/drivers/dma/dw-edma/Makefile | |||
@@ -0,0 +1,7 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
2 | |||
3 | obj-$(CONFIG_DW_EDMA) += dw-edma.o | ||
4 | dw-edma-$(CONFIG_DEBUG_FS) := dw-edma-v0-debugfs.o | ||
5 | dw-edma-objs := dw-edma-core.o \ | ||
6 | dw-edma-v0-core.o $(dw-edma-y) | ||
7 | obj-$(CONFIG_DW_EDMA_PCIE) += dw-edma-pcie.o | ||
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c new file mode 100644 index 000000000000..ff392c01bad1 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-core.c | |||
@@ -0,0 +1,937 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. | ||
4 | * Synopsys DesignWare eDMA core driver | ||
5 | * | ||
6 | * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> | ||
7 | */ | ||
8 | |||
9 | #include <linux/module.h> | ||
10 | #include <linux/device.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/pm_runtime.h> | ||
13 | #include <linux/dmaengine.h> | ||
14 | #include <linux/err.h> | ||
15 | #include <linux/interrupt.h> | ||
16 | #include <linux/dma/edma.h> | ||
17 | #include <linux/pci.h> | ||
18 | |||
19 | #include "dw-edma-core.h" | ||
20 | #include "dw-edma-v0-core.h" | ||
21 | #include "../dmaengine.h" | ||
22 | #include "../virt-dma.h" | ||
23 | |||
24 | static inline | ||
25 | struct device *dchan2dev(struct dma_chan *dchan) | ||
26 | { | ||
27 | return &dchan->dev->device; | ||
28 | } | ||
29 | |||
30 | static inline | ||
31 | struct device *chan2dev(struct dw_edma_chan *chan) | ||
32 | { | ||
33 | return &chan->vc.chan.dev->device; | ||
34 | } | ||
35 | |||
36 | static inline | ||
37 | struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd) | ||
38 | { | ||
39 | return container_of(vd, struct dw_edma_desc, vd); | ||
40 | } | ||
41 | |||
42 | static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk) | ||
43 | { | ||
44 | struct dw_edma_burst *burst; | ||
45 | |||
46 | burst = kzalloc(sizeof(*burst), GFP_NOWAIT); | ||
47 | if (unlikely(!burst)) | ||
48 | return NULL; | ||
49 | |||
50 | INIT_LIST_HEAD(&burst->list); | ||
51 | if (chunk->burst) { | ||
52 | /* Create and add new element into the linked list */ | ||
53 | chunk->bursts_alloc++; | ||
54 | list_add_tail(&burst->list, &chunk->burst->list); | ||
55 | } else { | ||
56 | /* List head */ | ||
57 | chunk->bursts_alloc = 0; | ||
58 | chunk->burst = burst; | ||
59 | } | ||
60 | |||
61 | return burst; | ||
62 | } | ||
63 | |||
64 | static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc) | ||
65 | { | ||
66 | struct dw_edma_chan *chan = desc->chan; | ||
67 | struct dw_edma *dw = chan->chip->dw; | ||
68 | struct dw_edma_chunk *chunk; | ||
69 | |||
70 | chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT); | ||
71 | if (unlikely(!chunk)) | ||
72 | return NULL; | ||
73 | |||
74 | INIT_LIST_HEAD(&chunk->list); | ||
75 | chunk->chan = chan; | ||
76 | /* Toggling change bit (CB) in each chunk, this is a mechanism to | ||
77 | * inform the eDMA HW block that this is a new linked list ready | ||
78 | * to be consumed. | ||
79 | * - Odd chunks originate CB equal to 0 | ||
80 | * - Even chunks originate CB equal to 1 | ||
81 | */ | ||
82 | chunk->cb = !(desc->chunks_alloc % 2); | ||
83 | chunk->ll_region.paddr = dw->ll_region.paddr + chan->ll_off; | ||
84 | chunk->ll_region.vaddr = dw->ll_region.vaddr + chan->ll_off; | ||
85 | |||
86 | if (desc->chunk) { | ||
87 | /* Create and add new element into the linked list */ | ||
88 | desc->chunks_alloc++; | ||
89 | list_add_tail(&chunk->list, &desc->chunk->list); | ||
90 | if (!dw_edma_alloc_burst(chunk)) { | ||
91 | kfree(chunk); | ||
92 | return NULL; | ||
93 | } | ||
94 | } else { | ||
95 | /* List head */ | ||
96 | chunk->burst = NULL; | ||
97 | desc->chunks_alloc = 0; | ||
98 | desc->chunk = chunk; | ||
99 | } | ||
100 | |||
101 | return chunk; | ||
102 | } | ||
103 | |||
104 | static struct dw_edma_desc *dw_edma_alloc_desc(struct dw_edma_chan *chan) | ||
105 | { | ||
106 | struct dw_edma_desc *desc; | ||
107 | |||
108 | desc = kzalloc(sizeof(*desc), GFP_NOWAIT); | ||
109 | if (unlikely(!desc)) | ||
110 | return NULL; | ||
111 | |||
112 | desc->chan = chan; | ||
113 | if (!dw_edma_alloc_chunk(desc)) { | ||
114 | kfree(desc); | ||
115 | return NULL; | ||
116 | } | ||
117 | |||
118 | return desc; | ||
119 | } | ||
120 | |||
121 | static void dw_edma_free_burst(struct dw_edma_chunk *chunk) | ||
122 | { | ||
123 | struct dw_edma_burst *child, *_next; | ||
124 | |||
125 | /* Remove all the list elements */ | ||
126 | list_for_each_entry_safe(child, _next, &chunk->burst->list, list) { | ||
127 | list_del(&child->list); | ||
128 | kfree(child); | ||
129 | chunk->bursts_alloc--; | ||
130 | } | ||
131 | |||
132 | /* Remove the list head */ | ||
133 | kfree(child); | ||
134 | chunk->burst = NULL; | ||
135 | } | ||
136 | |||
137 | static void dw_edma_free_chunk(struct dw_edma_desc *desc) | ||
138 | { | ||
139 | struct dw_edma_chunk *child, *_next; | ||
140 | |||
141 | if (!desc->chunk) | ||
142 | return; | ||
143 | |||
144 | /* Remove all the list elements */ | ||
145 | list_for_each_entry_safe(child, _next, &desc->chunk->list, list) { | ||
146 | dw_edma_free_burst(child); | ||
147 | list_del(&child->list); | ||
148 | kfree(child); | ||
149 | desc->chunks_alloc--; | ||
150 | } | ||
151 | |||
152 | /* Remove the list head */ | ||
153 | kfree(child); | ||
154 | desc->chunk = NULL; | ||
155 | } | ||
156 | |||
157 | static void dw_edma_free_desc(struct dw_edma_desc *desc) | ||
158 | { | ||
159 | dw_edma_free_chunk(desc); | ||
160 | kfree(desc); | ||
161 | } | ||
162 | |||
163 | static void vchan_free_desc(struct virt_dma_desc *vdesc) | ||
164 | { | ||
165 | dw_edma_free_desc(vd2dw_edma_desc(vdesc)); | ||
166 | } | ||
167 | |||
168 | static void dw_edma_start_transfer(struct dw_edma_chan *chan) | ||
169 | { | ||
170 | struct dw_edma_chunk *child; | ||
171 | struct dw_edma_desc *desc; | ||
172 | struct virt_dma_desc *vd; | ||
173 | |||
174 | vd = vchan_next_desc(&chan->vc); | ||
175 | if (!vd) | ||
176 | return; | ||
177 | |||
178 | desc = vd2dw_edma_desc(vd); | ||
179 | if (!desc) | ||
180 | return; | ||
181 | |||
182 | child = list_first_entry_or_null(&desc->chunk->list, | ||
183 | struct dw_edma_chunk, list); | ||
184 | if (!child) | ||
185 | return; | ||
186 | |||
187 | dw_edma_v0_core_start(child, !desc->xfer_sz); | ||
188 | desc->xfer_sz += child->ll_region.sz; | ||
189 | dw_edma_free_burst(child); | ||
190 | list_del(&child->list); | ||
191 | kfree(child); | ||
192 | desc->chunks_alloc--; | ||
193 | } | ||
194 | |||
195 | static int dw_edma_device_config(struct dma_chan *dchan, | ||
196 | struct dma_slave_config *config) | ||
197 | { | ||
198 | struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); | ||
199 | |||
200 | memcpy(&chan->config, config, sizeof(*config)); | ||
201 | chan->configured = true; | ||
202 | |||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | static int dw_edma_device_pause(struct dma_chan *dchan) | ||
207 | { | ||
208 | struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); | ||
209 | int err = 0; | ||
210 | |||
211 | if (!chan->configured) | ||
212 | err = -EPERM; | ||
213 | else if (chan->status != EDMA_ST_BUSY) | ||
214 | err = -EPERM; | ||
215 | else if (chan->request != EDMA_REQ_NONE) | ||
216 | err = -EPERM; | ||
217 | else | ||
218 | chan->request = EDMA_REQ_PAUSE; | ||
219 | |||
220 | return err; | ||
221 | } | ||
222 | |||
223 | static int dw_edma_device_resume(struct dma_chan *dchan) | ||
224 | { | ||
225 | struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); | ||
226 | int err = 0; | ||
227 | |||
228 | if (!chan->configured) { | ||
229 | err = -EPERM; | ||
230 | } else if (chan->status != EDMA_ST_PAUSE) { | ||
231 | err = -EPERM; | ||
232 | } else if (chan->request != EDMA_REQ_NONE) { | ||
233 | err = -EPERM; | ||
234 | } else { | ||
235 | chan->status = EDMA_ST_BUSY; | ||
236 | dw_edma_start_transfer(chan); | ||
237 | } | ||
238 | |||
239 | return err; | ||
240 | } | ||
241 | |||
242 | static int dw_edma_device_terminate_all(struct dma_chan *dchan) | ||
243 | { | ||
244 | struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); | ||
245 | int err = 0; | ||
246 | LIST_HEAD(head); | ||
247 | |||
248 | if (!chan->configured) { | ||
249 | /* Do nothing */ | ||
250 | } else if (chan->status == EDMA_ST_PAUSE) { | ||
251 | chan->status = EDMA_ST_IDLE; | ||
252 | chan->configured = false; | ||
253 | } else if (chan->status == EDMA_ST_IDLE) { | ||
254 | chan->configured = false; | ||
255 | } else if (dw_edma_v0_core_ch_status(chan) == DMA_COMPLETE) { | ||
256 | /* | ||
257 | * The channel is in a false BUSY state, probably didn't | ||
258 | * receive or lost an interrupt | ||
259 | */ | ||
260 | chan->status = EDMA_ST_IDLE; | ||
261 | chan->configured = false; | ||
262 | } else if (chan->request > EDMA_REQ_PAUSE) { | ||
263 | err = -EPERM; | ||
264 | } else { | ||
265 | chan->request = EDMA_REQ_STOP; | ||
266 | } | ||
267 | |||
268 | return err; | ||
269 | } | ||
270 | |||
271 | static void dw_edma_device_issue_pending(struct dma_chan *dchan) | ||
272 | { | ||
273 | struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); | ||
274 | unsigned long flags; | ||
275 | |||
276 | spin_lock_irqsave(&chan->vc.lock, flags); | ||
277 | if (chan->configured && chan->request == EDMA_REQ_NONE && | ||
278 | chan->status == EDMA_ST_IDLE && vchan_issue_pending(&chan->vc)) { | ||
279 | chan->status = EDMA_ST_BUSY; | ||
280 | dw_edma_start_transfer(chan); | ||
281 | } | ||
282 | spin_unlock_irqrestore(&chan->vc.lock, flags); | ||
283 | } | ||
284 | |||
285 | static enum dma_status | ||
286 | dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, | ||
287 | struct dma_tx_state *txstate) | ||
288 | { | ||
289 | struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); | ||
290 | struct dw_edma_desc *desc; | ||
291 | struct virt_dma_desc *vd; | ||
292 | unsigned long flags; | ||
293 | enum dma_status ret; | ||
294 | u32 residue = 0; | ||
295 | |||
296 | ret = dma_cookie_status(dchan, cookie, txstate); | ||
297 | if (ret == DMA_COMPLETE) | ||
298 | return ret; | ||
299 | |||
300 | if (ret == DMA_IN_PROGRESS && chan->status == EDMA_ST_PAUSE) | ||
301 | ret = DMA_PAUSED; | ||
302 | |||
303 | if (!txstate) | ||
304 | goto ret_residue; | ||
305 | |||
306 | spin_lock_irqsave(&chan->vc.lock, flags); | ||
307 | vd = vchan_find_desc(&chan->vc, cookie); | ||
308 | if (vd) { | ||
309 | desc = vd2dw_edma_desc(vd); | ||
310 | if (desc) | ||
311 | residue = desc->alloc_sz - desc->xfer_sz; | ||
312 | } | ||
313 | spin_unlock_irqrestore(&chan->vc.lock, flags); | ||
314 | |||
315 | ret_residue: | ||
316 | dma_set_residue(txstate, residue); | ||
317 | |||
318 | return ret; | ||
319 | } | ||
320 | |||
/*
 * Build a descriptor (chunks + bursts) for either a scatter-gather or a
 * cyclic request and hand it to the virt-dma layer.  Returns NULL on any
 * validation or allocation failure.
 */
static struct dma_async_tx_descriptor *
dw_edma_device_transfer(struct dw_edma_transfer *xfer)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
	enum dma_transfer_direction direction = xfer->direction;
	phys_addr_t src_addr, dst_addr;
	struct scatterlist *sg = NULL;
	struct dw_edma_chunk *chunk;
	struct dw_edma_burst *burst;
	struct dw_edma_desc *desc;
	u32 cnt;
	int i;

	/*
	 * NOTE(review): a WRITE channel rejects MEM_TO_DEV and a READ
	 * channel rejects DEV_TO_MEM here, i.e. directions are interpreted
	 * from the remote (endpoint) point of view — confirm against the
	 * intended topology.
	 */
	if ((direction == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE) ||
	    (direction == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ))
		return NULL;

	/* Reject empty requests */
	if (xfer->cyclic) {
		if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt)
			return NULL;
	} else {
		if (xfer->xfer.sg.len < 1)
			return NULL;
	}

	/* The channel must have received a slave config first */
	if (!chan->configured)
		return NULL;

	desc = dw_edma_alloc_desc(chan);
	if (unlikely(!desc))
		goto err_alloc;

	chunk = dw_edma_alloc_chunk(desc);
	if (unlikely(!chunk))
		goto err_alloc;

	src_addr = chan->config.src_addr;
	dst_addr = chan->config.dst_addr;

	/* Burst count: periods for cyclic, SG entries otherwise */
	if (xfer->cyclic) {
		cnt = xfer->xfer.cyclic.cnt;
	} else {
		cnt = xfer->xfer.sg.len;
		sg = xfer->xfer.sg.sgl;
	}

	for (i = 0; i < cnt; i++) {
		if (!xfer->cyclic && !sg)
			break;

		/* Open a new chunk once the current linked list is full */
		if (chunk->bursts_alloc == chan->ll_max) {
			chunk = dw_edma_alloc_chunk(desc);
			if (unlikely(!chunk))
				goto err_alloc;
		}

		burst = dw_edma_alloc_burst(chunk);
		if (unlikely(!burst))
			goto err_alloc;

		if (xfer->cyclic)
			burst->sz = xfer->xfer.cyclic.len;
		else
			burst->sz = sg_dma_len(sg);

		chunk->ll_region.sz += burst->sz;
		desc->alloc_sz += burst->sz;

		if (direction == DMA_DEV_TO_MEM) {
			burst->sar = src_addr;
			if (xfer->cyclic) {
				burst->dar = xfer->xfer.cyclic.paddr;
			} else {
				burst->dar = sg_dma_address(sg);
				/* Unlike the typical assumption by other
				 * drivers/IPs the peripheral memory isn't
				 * a FIFO memory, in this case, it's a
				 * linear memory and that why the source
				 * and destination addresses are increased
				 * by the same portion (data length)
				 */
				src_addr += sg_dma_len(sg);
			}
		} else {
			burst->dar = dst_addr;
			if (xfer->cyclic) {
				burst->sar = xfer->xfer.cyclic.paddr;
			} else {
				burst->sar = sg_dma_address(sg);
				/* Unlike the typical assumption by other
				 * drivers/IPs the peripheral memory isn't
				 * a FIFO memory, in this case, it's a
				 * linear memory and that why the source
				 * and destination addresses are increased
				 * by the same portion (data length)
				 */
				dst_addr += sg_dma_len(sg);
			}
		}

		if (!xfer->cyclic)
			sg = sg_next(sg);
	}

	return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags);

err_alloc:
	/* Frees the descriptor together with all chunks/bursts added so far */
	if (desc)
		dw_edma_free_desc(desc);

	return NULL;
}
433 | |||
434 | static struct dma_async_tx_descriptor * | ||
435 | dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, | ||
436 | unsigned int len, | ||
437 | enum dma_transfer_direction direction, | ||
438 | unsigned long flags, void *context) | ||
439 | { | ||
440 | struct dw_edma_transfer xfer; | ||
441 | |||
442 | xfer.dchan = dchan; | ||
443 | xfer.direction = direction; | ||
444 | xfer.xfer.sg.sgl = sgl; | ||
445 | xfer.xfer.sg.len = len; | ||
446 | xfer.flags = flags; | ||
447 | xfer.cyclic = false; | ||
448 | |||
449 | return dw_edma_device_transfer(&xfer); | ||
450 | } | ||
451 | |||
452 | static struct dma_async_tx_descriptor * | ||
453 | dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr, | ||
454 | size_t len, size_t count, | ||
455 | enum dma_transfer_direction direction, | ||
456 | unsigned long flags) | ||
457 | { | ||
458 | struct dw_edma_transfer xfer; | ||
459 | |||
460 | xfer.dchan = dchan; | ||
461 | xfer.direction = direction; | ||
462 | xfer.xfer.cyclic.paddr = paddr; | ||
463 | xfer.xfer.cyclic.len = len; | ||
464 | xfer.xfer.cyclic.cnt = count; | ||
465 | xfer.flags = flags; | ||
466 | xfer.cyclic = true; | ||
467 | |||
468 | return dw_edma_device_transfer(&xfer); | ||
469 | } | ||
470 | |||
/*
 * "Done" interrupt handler for one channel: acknowledge the interrupt and
 * either kick the next chunk, retire the descriptor, or honour a pending
 * stop/pause request.
 */
static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;

	dw_edma_v0_core_clear_done_int(chan);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		switch (chan->request) {
		case EDMA_REQ_NONE:
			desc = vd2dw_edma_desc(vd);
			if (desc->chunks_alloc) {
				/* More chunks queued: keep the channel busy */
				chan->status = EDMA_ST_BUSY;
				dw_edma_start_transfer(chan);
			} else {
				/* Last chunk done: complete the descriptor */
				list_del(&vd->node);
				vchan_cookie_complete(vd);
				chan->status = EDMA_ST_IDLE;
			}
			break;

		case EDMA_REQ_STOP:
			/* Stop requested: complete and go idle */
			list_del(&vd->node);
			vchan_cookie_complete(vd);
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_IDLE;
			break;

		case EDMA_REQ_PAUSE:
			/* Pause requested: keep the descriptor queued */
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_PAUSE;
			break;

		default:
			break;
		}
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}
513 | |||
/*
 * "Abort" interrupt handler for one channel: acknowledge the interrupt,
 * complete the in-flight descriptor (if any) and force the channel idle.
 */
static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	dw_edma_v0_core_clear_abort_int(chan);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		list_del(&vd->node);
		vchan_cookie_complete(vd);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	/*
	 * NOTE(review): request/status are updated outside the vc lock,
	 * unlike dw_edma_done_interrupt() — verify there is no race.
	 */
	chan->request = EDMA_REQ_NONE;
	chan->status = EDMA_ST_IDLE;
}
531 | |||
/*
 * Common IRQ worker: scan the done and abort interrupt status for the
 * write (@write = true) or read direction, mask it down to the channels
 * owned by this vector, and dispatch the per-channel handlers.
 */
static irqreturn_t dw_edma_interrupt(int irq, void *data, bool write)
{
	struct dw_edma_irq *dw_irq = data;
	struct dw_edma *dw = dw_irq->dw;
	unsigned long total, pos, val;
	unsigned long off;
	u32 mask;

	if (write) {
		total = dw->wr_ch_cnt;
		off = 0;
		mask = dw_irq->wr_mask;
	} else {
		/* Read channels are stored after the write channels */
		total = dw->rd_ch_cnt;
		off = dw->wr_ch_cnt;
		mask = dw_irq->rd_mask;
	}

	/* Channels whose transfer completed */
	val = dw_edma_v0_core_status_done_int(dw, write ?
						  EDMA_DIR_WRITE :
						  EDMA_DIR_READ);
	val &= mask;
	for_each_set_bit(pos, &val, total) {
		struct dw_edma_chan *chan = &dw->chan[pos + off];

		dw_edma_done_interrupt(chan);
	}

	/* Channels whose transfer was aborted */
	val = dw_edma_v0_core_status_abort_int(dw, write ?
						   EDMA_DIR_WRITE :
						   EDMA_DIR_READ);
	val &= mask;
	for_each_set_bit(pos, &val, total) {
		struct dw_edma_chan *chan = &dw->chan[pos + off];

		dw_edma_abort_interrupt(chan);
	}

	return IRQ_HANDLED;
}
572 | |||
/* IRQ handler for a vector dedicated to write channels */
static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data)
{
	return dw_edma_interrupt(irq, data, true);
}
577 | |||
/* IRQ handler for a vector dedicated to read channels */
static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data)
{
	return dw_edma_interrupt(irq, data, false);
}
582 | |||
583 | static irqreturn_t dw_edma_interrupt_common(int irq, void *data) | ||
584 | { | ||
585 | dw_edma_interrupt(irq, data, true); | ||
586 | dw_edma_interrupt(irq, data, false); | ||
587 | |||
588 | return IRQ_HANDLED; | ||
589 | } | ||
590 | |||
591 | static int dw_edma_alloc_chan_resources(struct dma_chan *dchan) | ||
592 | { | ||
593 | struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan); | ||
594 | |||
595 | if (chan->status != EDMA_ST_IDLE) | ||
596 | return -EBUSY; | ||
597 | |||
598 | pm_runtime_get(chan->chip->dev); | ||
599 | |||
600 | return 0; | ||
601 | } | ||
602 | |||
/*
 * dmaengine free_chan_resources callback: keep trying to terminate the
 * channel for up to 5 seconds, then drop the PM reference taken at
 * allocation time.
 */
static void dw_edma_free_chan_resources(struct dma_chan *dchan)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int ret;

	while (time_before(jiffies, timeout)) {
		ret = dw_edma_device_terminate_all(dchan);
		if (!ret)
			break;

		/*
		 * NOTE(review): returning here skips pm_runtime_put(), so a
		 * channel that never terminates leaks a PM reference —
		 * confirm this is intentional (device kept powered while
		 * still busy?).
		 */
		if (time_after_eq(jiffies, timeout))
			return;

		cpu_relax();
	}

	pm_runtime_put(chan->chip->dev);
}
622 | |||
/*
 * Initialise and register one dma_device covering either all write
 * channels (@write = true) or all read channels.  The linked-list and
 * data regions are split evenly across all channels, and each channel is
 * wired to the IRQ vector that will service it.
 */
static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
				 u32 wr_alloc, u32 rd_alloc)
{
	struct dw_edma_region *dt_region;
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	struct dw_edma_chan *chan;
	size_t ll_chunk, dt_chunk;
	struct dw_edma_irq *irq;
	struct dma_device *dma;
	u32 i, j, cnt, ch_cnt;
	u32 alloc, off_alloc;
	int err = 0;
	u32 pos;

	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
	ll_chunk = dw->ll_region.sz;
	dt_chunk = dw->dt_region.sz;

	/* Calculate linked list chunk for each channel */
	ll_chunk /= roundup_pow_of_two(ch_cnt);

	/* Calculate data chunk for each channel */
	dt_chunk /= roundup_pow_of_two(ch_cnt);

	if (write) {
		i = 0;
		cnt = dw->wr_ch_cnt;
		dma = &dw->wr_edma;
		alloc = wr_alloc;
		off_alloc = 0;
	} else {
		/* Read channels follow the write channels in dw->chan[] */
		i = dw->wr_ch_cnt;
		cnt = dw->rd_ch_cnt;
		dma = &dw->rd_edma;
		alloc = rd_alloc;
		off_alloc = wr_alloc;
	}

	INIT_LIST_HEAD(&dma->channels);
	for (j = 0; (alloc || dw->nr_irqs == 1) && j < cnt; j++, i++) {
		chan = &dw->chan[i];

		dt_region = devm_kzalloc(dev, sizeof(*dt_region), GFP_KERNEL);
		if (!dt_region)
			return -ENOMEM;

		/* Expose this channel's data window via chan.private */
		chan->vc.chan.private = dt_region;

		chan->chip = chip;
		chan->id = j;
		chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ;
		chan->configured = false;
		chan->request = EDMA_REQ_NONE;
		chan->status = EDMA_ST_IDLE;

		/* Per-channel slice of the linked-list region */
		chan->ll_off = (ll_chunk * i);
		chan->ll_max = (ll_chunk / EDMA_LL_SZ) - 1;

		/* Per-channel slice of the data region */
		chan->dt_off = (dt_chunk * i);

		dev_vdbg(dev, "L. List:\tChannel %s[%u] off=0x%.8lx, max_cnt=%u\n",
			 write ? "write" : "read", j,
			 chan->ll_off, chan->ll_max);

		/* With a single shared IRQ every channel uses vector 0 */
		if (dw->nr_irqs == 1)
			pos = 0;
		else
			pos = off_alloc + (j % alloc);

		irq = &dw->irq[pos];

		if (write)
			irq->wr_mask |= BIT(j);
		else
			irq->rd_mask |= BIT(j);

		irq->dw = dw;
		memcpy(&chan->msi, &irq->msi, sizeof(chan->msi));

		dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n",
			 write ? "write" : "read", j,
			 chan->msi.address_hi, chan->msi.address_lo,
			 chan->msi.data);

		chan->vc.desc_free = vchan_free_desc;
		vchan_init(&chan->vc, dma);

		dt_region->paddr = dw->dt_region.paddr + chan->dt_off;
		dt_region->vaddr = dw->dt_region.vaddr + chan->dt_off;
		dt_region->sz = dt_chunk;

		dev_vdbg(dev, "Data:\tChannel %s[%u] off=0x%.8lx\n",
			 write ? "write" : "read", j, chan->dt_off);

		dw_edma_v0_core_device_config(chan);
	}

	/* Set DMA channel capabilities */
	dma_cap_zero(dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV);
	dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma->chancnt = cnt;

	/* Set DMA channel callbacks */
	dma->dev = chip->dev;
	dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources;
	dma->device_free_chan_resources = dw_edma_free_chan_resources;
	dma->device_config = dw_edma_device_config;
	dma->device_pause = dw_edma_device_pause;
	dma->device_resume = dw_edma_device_resume;
	dma->device_terminate_all = dw_edma_device_terminate_all;
	dma->device_issue_pending = dw_edma_device_issue_pending;
	dma->device_tx_status = dw_edma_device_tx_status;
	dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg;
	dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic;

	dma_set_max_seg_size(dma->dev, U32_MAX);

	/* Register DMA device */
	err = dma_async_device_register(dma);

	return err;
}
752 | |||
753 | static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt) | ||
754 | { | ||
755 | if (*nr_irqs && *alloc < cnt) { | ||
756 | (*alloc)++; | ||
757 | (*nr_irqs)--; | ||
758 | } | ||
759 | } | ||
760 | |||
761 | static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt) | ||
762 | { | ||
763 | while (*mask * alloc < cnt) | ||
764 | (*mask)++; | ||
765 | } | ||
766 | |||
/*
 * Request the MSI/MSI-X vectors for the device.  With a single vector one
 * shared handler services both directions; otherwise the vectors are
 * split between write and read channels and *wr_alloc/*rd_alloc report
 * how many vectors each direction received.
 */
static int dw_edma_irq_request(struct dw_edma_chip *chip,
			       u32 *wr_alloc, u32 *rd_alloc)
{
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	u32 wr_mask = 1;
	u32 rd_mask = 1;
	int i, err = 0;
	u32 ch_cnt;

	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;

	if (dw->nr_irqs < 1)
		return -EINVAL;

	if (dw->nr_irqs == 1) {
		/* Common IRQ shared among all channels */
		err = request_irq(pci_irq_vector(to_pci_dev(dev), 0),
				  dw_edma_interrupt_common,
				  IRQF_SHARED, dw->name, &dw->irq[0]);
		if (err) {
			dw->nr_irqs = 0;
			return err;
		}

		get_cached_msi_msg(pci_irq_vector(to_pci_dev(dev), 0),
				   &dw->irq[0].msi);
	} else {
		/* Distribute IRQs equally among all channels */
		int tmp = dw->nr_irqs;

		while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) {
			dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt);
			dw_edma_dec_irq_alloc(&tmp, rd_alloc, dw->rd_ch_cnt);
		}

		/*
		 * NOTE(review): wr_mask/rd_mask are computed here but never
		 * read afterwards in this function — confirm whether this
		 * is dead code.
		 */
		dw_edma_add_irq_mask(&wr_mask, *wr_alloc, dw->wr_ch_cnt);
		dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);

		/* Write vectors come first, then read vectors */
		for (i = 0; i < (*wr_alloc + *rd_alloc); i++) {
			err = request_irq(pci_irq_vector(to_pci_dev(dev), i),
					  i < *wr_alloc ?
						dw_edma_interrupt_write :
						dw_edma_interrupt_read,
					  IRQF_SHARED, dw->name,
					  &dw->irq[i]);
			if (err) {
				/* Vectors 0..i-1 remain requested; the
				 * caller's error path frees them.
				 */
				dw->nr_irqs = i;
				return err;
			}

			get_cached_msi_msg(pci_irq_vector(to_pci_dev(dev), i),
					   &dw->irq[i].msi);
		}

		dw->nr_irqs = i;
	}

	return err;
}
827 | |||
/**
 * dw_edma_probe - discover channels, request IRQs and register the
 *                 write/read DMA devices for one eDMA instance
 * @chip: chip data filled in by the glue driver (dev, dw and id set)
 *
 * Return: 0 on success, a negative errno otherwise.
 */
int dw_edma_probe(struct dw_edma_chip *chip)
{
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	u32 wr_alloc = 0;
	u32 rd_alloc = 0;
	int i, err;

	raw_spin_lock_init(&dw->lock);

	/* Find out how many write channels are supported by hardware */
	dw->wr_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE);
	if (!dw->wr_ch_cnt)
		return -EINVAL;

	/* Find out how many read channels are supported by hardware */
	dw->rd_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ);
	if (!dw->rd_ch_cnt)
		return -EINVAL;

	dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n",
		 dw->wr_ch_cnt, dw->rd_ch_cnt);

	/* Allocate channels (write channels first, then read channels) */
	dw->chan = devm_kcalloc(dev, dw->wr_ch_cnt + dw->rd_ch_cnt,
				sizeof(*dw->chan), GFP_KERNEL);
	if (!dw->chan)
		return -ENOMEM;

	snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%d", chip->id);

	/* Disable eDMA, only to establish the ideal initial conditions */
	dw_edma_v0_core_off(dw);

	/* Request IRQs */
	err = dw_edma_irq_request(chip, &wr_alloc, &rd_alloc);
	if (err)
		return err;

	/* Setup write channels */
	err = dw_edma_channel_setup(chip, true, wr_alloc, rd_alloc);
	if (err)
		goto err_irq_free;

	/*
	 * Setup read channels.
	 * NOTE(review): if this fails, the write dma_device registered just
	 * above is not unregistered on the error path — verify.
	 */
	err = dw_edma_channel_setup(chip, false, wr_alloc, rd_alloc);
	if (err)
		goto err_irq_free;

	/* Power management */
	pm_runtime_enable(dev);

	/* Turn debugfs on */
	dw_edma_v0_core_debugfs_on(chip);

	return 0;

err_irq_free:
	/* Release every IRQ vector requested by dw_edma_irq_request() */
	for (i = (dw->nr_irqs - 1); i >= 0; i--)
		free_irq(pci_irq_vector(to_pci_dev(dev), i), &dw->irq[i]);

	dw->nr_irqs = 0;

	return err;
}
EXPORT_SYMBOL_GPL(dw_edma_probe);
894 | |||
/**
 * dw_edma_remove - tear down an eDMA instance set up by dw_edma_probe()
 * @chip: chip data used at probe time
 *
 * Disables the hardware, frees the IRQs, kills the per-channel tasklets
 * and unregisters both dma_devices.  Always returns 0.
 */
int dw_edma_remove(struct dw_edma_chip *chip)
{
	struct dw_edma_chan *chan, *_chan;
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	int i;

	/* Disable eDMA */
	dw_edma_v0_core_off(dw);

	/* Free irqs */
	for (i = (dw->nr_irqs - 1); i >= 0; i--)
		free_irq(pci_irq_vector(to_pci_dev(dev), i), &dw->irq[i]);

	/* Power management */
	pm_runtime_disable(dev);

	/* Drop channels from both device lists and stop their tasklets */
	list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
				 vc.chan.device_node) {
		list_del(&chan->vc.chan.device_node);
		tasklet_kill(&chan->vc.task);
	}

	list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
				 vc.chan.device_node) {
		list_del(&chan->vc.chan.device_node);
		tasklet_kill(&chan->vc.task);
	}

	/* Deregister eDMA device */
	dma_async_device_unregister(&dw->wr_edma);
	dma_async_device_unregister(&dw->rd_edma);

	/* Turn debugfs off */
	dw_edma_v0_core_debugfs_off();

	return 0;
}
EXPORT_SYMBOL_GPL(dw_edma_remove);
934 | |||
935 | MODULE_LICENSE("GPL v2"); | ||
936 | MODULE_DESCRIPTION("Synopsys DesignWare eDMA controller core driver"); | ||
937 | MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>"); | ||
diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h new file mode 100644 index 000000000000..b6cc90cbc9dc --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-core.h | |||
@@ -0,0 +1,165 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | /* | ||
3 | * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. | ||
4 | * Synopsys DesignWare eDMA core driver | ||
5 | * | ||
6 | * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> | ||
7 | */ | ||
8 | |||
9 | #ifndef _DW_EDMA_CORE_H | ||
10 | #define _DW_EDMA_CORE_H | ||
11 | |||
12 | #include <linux/msi.h> | ||
13 | #include <linux/dma/edma.h> | ||
14 | |||
15 | #include "../virt-dma.h" | ||
16 | |||
/* Bytes per linked-list element (the ll region is divided by this) */
#define EDMA_LL_SZ 24

/* Transfer direction, named from the eDMA controller's point of view */
enum dw_edma_dir {
	EDMA_DIR_WRITE = 0,
	EDMA_DIR_READ
};

/* Register map access mode: single legacy window vs. unrolled per-channel */
enum dw_edma_mode {
	EDMA_MODE_LEGACY = 0,
	EDMA_MODE_UNROLL
};

/* Request issued against a channel by the dmaengine callbacks */
enum dw_edma_request {
	EDMA_REQ_NONE = 0,
	EDMA_REQ_STOP,
	EDMA_REQ_PAUSE
};

/* Current channel state */
enum dw_edma_status {
	EDMA_ST_IDLE = 0,
	EDMA_ST_PAUSE,
	EDMA_ST_BUSY
};
40 | |||
41 | struct dw_edma_chan; | ||
42 | struct dw_edma_chunk; | ||
43 | |||
/* One hardware burst: a single contiguous source-to-destination copy */
struct dw_edma_burst {
	struct list_head list;	/* Node in the parent chunk's burst list */
	u64 sar;		/* Source address */
	u64 dar;		/* Destination address */
	u32 sz;			/* Transfer size in bytes */
};
50 | |||
/* A slice of device memory/BAR space used by the driver */
struct dw_edma_region {
	phys_addr_t paddr;	/* Physical/bus address of the region */
	dma_addr_t vaddr;	/* CPU-side address (NOTE(review): an __iomem
				 * pointer stored as dma_addr_t — verify) */
	size_t sz;		/* Region size in bytes */
};
56 | |||
/* A chunk groups up to ll_max bursts sharing one linked-list slice */
struct dw_edma_chunk {
	struct list_head list;		/* Node in the descriptor's chunk list */
	struct dw_edma_chan *chan;	/* Owning channel */
	struct dw_edma_burst *burst;	/* Head of this chunk's burst list */

	u32 bursts_alloc;		/* Bursts allocated in this chunk */

	u8 cb;				/* NOTE(review): presumably the hardware
					 * cycle/change bit — confirm in core */
	struct dw_edma_region ll_region; /* Linked list */
};
67 | |||
/* One dmaengine transaction: a chunk list built by device_transfer() */
struct dw_edma_desc {
	struct virt_dma_desc vd;	/* Embedded virt-dma descriptor */
	struct dw_edma_chan *chan;	/* Owning channel */
	struct dw_edma_chunk *chunk;	/* Head of the chunk list */

	u32 chunks_alloc;		/* Chunks currently allocated; the done
					 * IRQ restarts while this is nonzero */

	u32 alloc_sz;			/* Total bytes queued */
	u32 xfer_sz;			/* Bytes already transferred (residue) */
};
78 | |||
/* Driver state for one eDMA channel (write or read direction) */
struct dw_edma_chan {
	struct virt_dma_chan vc;	/* Embedded virt-dma channel */
	struct dw_edma_chip *chip;	/* Owning chip (glue driver data) */
	int id;				/* Index within its direction */
	enum dw_edma_dir dir;		/* Write or read channel */

	off_t ll_off;			/* Offset of this channel's ll slice */
	u32 ll_max;			/* Max bursts per chunk
					 * ((ll slice / EDMA_LL_SZ) - 1) */

	off_t dt_off;			/* Offset of this channel's data slice */

	struct msi_msg msi;		/* Cached MSI message of its IRQ vector */

	enum dw_edma_request request;	/* Pending stop/pause request */
	enum dw_edma_status status;	/* Current channel state */
	u8 configured;			/* Nonzero once a slave config applied */

	struct dma_slave_config config;	/* Last slave config (src/dst addr) */
};
98 | |||
/* Book-keeping for one IRQ vector */
struct dw_edma_irq {
	struct msi_msg msi;	/* Cached MSI message for this vector */
	u32 wr_mask;		/* Write channels (BIT(id)) served by vector */
	u32 rd_mask;		/* Read channels (BIT(id)) served by vector */
	struct dw_edma *dw;	/* Back pointer used by the IRQ handlers */
};
105 | |||
/* Per-instance driver state shared by the write and read dma_devices */
struct dw_edma {
	char name[20];			/* Instance name used for IRQs */

	struct dma_device wr_edma;	/* dmaengine device, write channels */
	u16 wr_ch_cnt;			/* Number of write channels */

	struct dma_device rd_edma;	/* dmaengine device, read channels */
	u16 rd_ch_cnt;			/* Number of read channels */

	struct dw_edma_region rg_region;	/* Registers */
	struct dw_edma_region ll_region;	/* Linked list */
	struct dw_edma_region dt_region;	/* Data */

	struct dw_edma_irq *irq;	/* One entry per allocated IRQ vector */
	int nr_irqs;			/* Number of allocated IRQ vectors */

	u32 version;			/* eDMA IP core version */
	enum dw_edma_mode mode;		/* Register map mode (legacy/unroll) */

	struct dw_edma_chan *chan;	/* All channels: writes, then reads */
	const struct dw_edma_core_ops *ops;	/* NOTE(review): not referenced
						 * in this file — verify use */

	raw_spinlock_t lock;		/* Only for legacy */
};
130 | |||
/* Scatter-gather request parameters (prep_slave_sg path) */
struct dw_edma_sg {
	struct scatterlist *sgl;	/* First SG entry */
	unsigned int len;		/* Number of SG entries */
};

/* Cyclic request parameters (prep_dma_cyclic path) */
struct dw_edma_cyclic {
	dma_addr_t paddr;	/* Fixed buffer address reused every period */
	size_t len;		/* Period length in bytes */
	size_t cnt;		/* Number of periods */
};

/* Unified request handed to dw_edma_device_transfer() */
struct dw_edma_transfer {
	struct dma_chan *dchan;
	union dw_edma_xfer {
		struct dw_edma_sg sg;		/* Valid when !cyclic */
		struct dw_edma_cyclic cyclic;	/* Valid when cyclic */
	} xfer;
	enum dma_transfer_direction direction;
	unsigned long flags;
	bool cyclic;		/* Selects the active union member */
};
152 | |||
/* container_of helper: virt_dma_chan -> enclosing dw_edma_chan */
static inline
struct dw_edma_chan *vc2dw_edma_chan(struct virt_dma_chan *vc)
{
	return container_of(vc, struct dw_edma_chan, vc);
}
158 | |||
/* Convert a generic dma_chan to its dw_edma_chan container */
static inline
struct dw_edma_chan *dchan2dw_edma_chan(struct dma_chan *dchan)
{
	return vc2dw_edma_chan(to_virt_chan(dchan));
}
164 | |||
165 | #endif /* _DW_EDMA_CORE_H */ | ||
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c new file mode 100644 index 000000000000..4c96e1c948f2 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-pcie.c | |||
@@ -0,0 +1,229 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. | ||
4 | * Synopsys DesignWare eDMA PCIe driver | ||
5 | * | ||
6 | * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/pci.h> | ||
12 | #include <linux/device.h> | ||
13 | #include <linux/dma/edma.h> | ||
14 | #include <linux/pci-epf.h> | ||
15 | #include <linux/msi.h> | ||
16 | |||
17 | #include "dw-edma-core.h" | ||
18 | |||
/* Static per-device description of where the eDMA resources live */
struct dw_edma_pcie_data {
	/* eDMA registers location */
	enum pci_barno rg_bar;
	off_t rg_off;
	size_t rg_sz;
	/* eDMA memory linked list location */
	enum pci_barno ll_bar;
	off_t ll_off;
	size_t ll_sz;
	/* eDMA memory data location */
	enum pci_barno dt_bar;
	off_t dt_off;
	size_t dt_sz;
	/* Other */
	u32 version;		/* eDMA IP core version */
	enum dw_edma_mode mode;	/* Register map mode (legacy/unroll) */
	u8 irqs;		/* Maximum IRQ vectors to allocate */
};
37 | |||
/*
 * Resource layout of the Synopsys EDDA reference endpoint: registers in
 * BAR0; linked lists and data buffers share BAR2.
 */
static const struct dw_edma_pcie_data snps_edda_data = {
	/* eDMA registers location */
	.rg_bar = BAR_0,
	.rg_off = 0x00001000,	/* 4 Kbytes */
	.rg_sz = 0x00002000,	/* 8 Kbytes */
	/* eDMA memory linked list location */
	.ll_bar = BAR_2,
	.ll_off = 0x00000000,	/* 0 Kbytes */
	.ll_sz = 0x00800000,	/* 8 Mbytes */
	/* eDMA memory data location */
	.dt_bar = BAR_2,
	.dt_off = 0x00800000,	/* 8 Mbytes */
	.dt_sz = 0x03800000,	/* 56 Mbytes */
	/* Other */
	.version = 0,
	.mode = EDMA_MODE_UNROLL,
	.irqs = 1,
};
56 | |||
57 | static int dw_edma_pcie_probe(struct pci_dev *pdev, | ||
58 | const struct pci_device_id *pid) | ||
59 | { | ||
60 | const struct dw_edma_pcie_data *pdata = (void *)pid->driver_data; | ||
61 | struct device *dev = &pdev->dev; | ||
62 | struct dw_edma_chip *chip; | ||
63 | int err, nr_irqs; | ||
64 | struct dw_edma *dw; | ||
65 | |||
66 | /* Enable PCI device */ | ||
67 | err = pcim_enable_device(pdev); | ||
68 | if (err) { | ||
69 | pci_err(pdev, "enabling device failed\n"); | ||
70 | return err; | ||
71 | } | ||
72 | |||
73 | /* Mapping PCI BAR regions */ | ||
74 | err = pcim_iomap_regions(pdev, BIT(pdata->rg_bar) | | ||
75 | BIT(pdata->ll_bar) | | ||
76 | BIT(pdata->dt_bar), | ||
77 | pci_name(pdev)); | ||
78 | if (err) { | ||
79 | pci_err(pdev, "eDMA BAR I/O remapping failed\n"); | ||
80 | return err; | ||
81 | } | ||
82 | |||
83 | pci_set_master(pdev); | ||
84 | |||
85 | /* DMA configuration */ | ||
86 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
87 | if (!err) { | ||
88 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
89 | if (err) { | ||
90 | pci_err(pdev, "consistent DMA mask 64 set failed\n"); | ||
91 | return err; | ||
92 | } | ||
93 | } else { | ||
94 | pci_err(pdev, "DMA mask 64 set failed\n"); | ||
95 | |||
96 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
97 | if (err) { | ||
98 | pci_err(pdev, "DMA mask 32 set failed\n"); | ||
99 | return err; | ||
100 | } | ||
101 | |||
102 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
103 | if (err) { | ||
104 | pci_err(pdev, "consistent DMA mask 32 set failed\n"); | ||
105 | return err; | ||
106 | } | ||
107 | } | ||
108 | |||
109 | /* Data structure allocation */ | ||
110 | chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); | ||
111 | if (!chip) | ||
112 | return -ENOMEM; | ||
113 | |||
114 | dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL); | ||
115 | if (!dw) | ||
116 | return -ENOMEM; | ||
117 | |||
118 | /* IRQs allocation */ | ||
119 | nr_irqs = pci_alloc_irq_vectors(pdev, 1, pdata->irqs, | ||
120 | PCI_IRQ_MSI | PCI_IRQ_MSIX); | ||
121 | if (nr_irqs < 1) { | ||
122 | pci_err(pdev, "fail to alloc IRQ vector (number of IRQs=%u)\n", | ||
123 | nr_irqs); | ||
124 | return -EPERM; | ||
125 | } | ||
126 | |||
127 | /* Data structure initialization */ | ||
128 | chip->dw = dw; | ||
129 | chip->dev = dev; | ||
130 | chip->id = pdev->devfn; | ||
131 | chip->irq = pdev->irq; | ||
132 | |||
133 | dw->rg_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->rg_bar]; | ||
134 | dw->rg_region.vaddr += pdata->rg_off; | ||
135 | dw->rg_region.paddr = pdev->resource[pdata->rg_bar].start; | ||
136 | dw->rg_region.paddr += pdata->rg_off; | ||
137 | dw->rg_region.sz = pdata->rg_sz; | ||
138 | |||
139 | dw->ll_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->ll_bar]; | ||
140 | dw->ll_region.vaddr += pdata->ll_off; | ||
141 | dw->ll_region.paddr = pdev->resource[pdata->ll_bar].start; | ||
142 | dw->ll_region.paddr += pdata->ll_off; | ||
143 | dw->ll_region.sz = pdata->ll_sz; | ||
144 | |||
145 | dw->dt_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->dt_bar]; | ||
146 | dw->dt_region.vaddr += pdata->dt_off; | ||
147 | dw->dt_region.paddr = pdev->resource[pdata->dt_bar].start; | ||
148 | dw->dt_region.paddr += pdata->dt_off; | ||
149 | dw->dt_region.sz = pdata->dt_sz; | ||
150 | |||
151 | dw->version = pdata->version; | ||
152 | dw->mode = pdata->mode; | ||
153 | dw->nr_irqs = nr_irqs; | ||
154 | |||
155 | /* Debug info */ | ||
156 | pci_dbg(pdev, "Version:\t%u\n", dw->version); | ||
157 | |||
158 | pci_dbg(pdev, "Mode:\t%s\n", | ||
159 | dw->mode == EDMA_MODE_LEGACY ? "Legacy" : "Unroll"); | ||
160 | |||
161 | pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n", | ||
162 | pdata->rg_bar, pdata->rg_off, pdata->rg_sz, | ||
163 | &dw->rg_region.vaddr, &dw->rg_region.paddr); | ||
164 | |||
165 | pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n", | ||
166 | pdata->ll_bar, pdata->ll_off, pdata->ll_sz, | ||
167 | &dw->ll_region.vaddr, &dw->ll_region.paddr); | ||
168 | |||
169 | pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n", | ||
170 | pdata->dt_bar, pdata->dt_off, pdata->dt_sz, | ||
171 | &dw->dt_region.vaddr, &dw->dt_region.paddr); | ||
172 | |||
173 | pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs); | ||
174 | |||
175 | /* Validating if PCI interrupts were enabled */ | ||
176 | if (!pci_dev_msi_enabled(pdev)) { | ||
177 | pci_err(pdev, "enable interrupt failed\n"); | ||
178 | return -EPERM; | ||
179 | } | ||
180 | |||
181 | dw->irq = devm_kcalloc(dev, nr_irqs, sizeof(*dw->irq), GFP_KERNEL); | ||
182 | if (!dw->irq) | ||
183 | return -ENOMEM; | ||
184 | |||
185 | /* Starting eDMA driver */ | ||
186 | err = dw_edma_probe(chip); | ||
187 | if (err) { | ||
188 | pci_err(pdev, "eDMA probe failed\n"); | ||
189 | return err; | ||
190 | } | ||
191 | |||
192 | /* Saving data structure reference */ | ||
193 | pci_set_drvdata(pdev, chip); | ||
194 | |||
195 | return 0; | ||
196 | } | ||
197 | |||
/* PCI remove: stop the eDMA core, then release the IRQ vectors. */
static void dw_edma_pcie_remove(struct pci_dev *pdev)
{
	struct dw_edma_chip *chip = pci_get_drvdata(pdev);
	int ret;

	/* Tear down the eDMA core first; warn but continue on failure */
	ret = dw_edma_remove(chip);
	if (ret)
		pci_warn(pdev, "can't remove device properly: %d\n", ret);

	/* Release the MSI/MSI-X vectors acquired at probe time */
	pci_free_irq_vectors(pdev);
}
211 | |||
/* PCI devices this driver binds to: the Synopsys EDDA reference endpoint */
static const struct pci_device_id dw_edma_pcie_id_table[] = {
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, &snps_edda_data) },
	{ }	/* Sentinel */
};
MODULE_DEVICE_TABLE(pci, dw_edma_pcie_id_table);

static struct pci_driver dw_edma_pcie_driver = {
	.name = "dw-edma-pcie",
	.id_table = dw_edma_pcie_id_table,
	.probe = dw_edma_pcie_probe,
	.remove = dw_edma_pcie_remove,
};

module_pci_driver(dw_edma_pcie_driver);
226 | |||
227 | MODULE_LICENSE("GPL v2"); | ||
228 | MODULE_DESCRIPTION("Synopsys DesignWare eDMA PCIe driver"); | ||
229 | MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>"); | ||
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c new file mode 100644 index 000000000000..8a3180ed49a6 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-v0-core.c | |||
@@ -0,0 +1,354 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. | ||
4 | * Synopsys DesignWare eDMA v0 core | ||
5 | * | ||
6 | * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> | ||
7 | */ | ||
8 | |||
9 | #include <linux/bitfield.h> | ||
10 | |||
11 | #include "dw-edma-core.h" | ||
12 | #include "dw-edma-v0-core.h" | ||
13 | #include "dw-edma-v0-regs.h" | ||
14 | #include "dw-edma-v0-debugfs.h" | ||
15 | |||
/*
 * Control-word bits used in linked-list elements and in the channel
 * control register (bit positions per the eDMA v0 register layout;
 * acronym expansions follow the DesignWare eDMA naming — confirm
 * against the databook).
 */
enum dw_edma_control {
	DW_EDMA_V0_CB = BIT(0),		/* Cycle Bit */
	DW_EDMA_V0_TCB = BIT(1),	/* Toggle Cycle Bit */
	DW_EDMA_V0_LLP = BIT(2),	/* Load Link Pointer */
	DW_EDMA_V0_LIE = BIT(3),	/* Local Interrupt Enable */
	DW_EDMA_V0_RIE = BIT(4),	/* Remote Interrupt Enable */
	DW_EDMA_V0_CCS = BIT(8),	/* Consumer Cycle State */
	DW_EDMA_V0_LLE = BIT(9),	/* Linked List Enable */
};
25 | |||
/* Base of the memory-mapped eDMA v0 register file for this device */
static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
{
	return (struct dw_edma_v0_regs __iomem *)dw->rg_region.vaddr;
}

/* 32-bit write/read of a named global register */
#define SET(dw, name, value)				\
	writel(value, &(__dw_regs(dw)->name))

#define GET(dw, name)					\
	readl(&(__dw_regs(dw)->name))

/* Access the wr_/rd_ variant of a register, chosen by transfer direction */
#define SET_RW(dw, dir, name, value)			\
	do {						\
		if ((dir) == EDMA_DIR_WRITE)		\
			SET(dw, wr_##name, value);	\
		else					\
			SET(dw, rd_##name, value);	\
	} while (0)

#define GET_RW(dw, dir, name)				\
	((dir) == EDMA_DIR_WRITE			\
	  ? GET(dw, wr_##name)				\
	  : GET(dw, rd_##name))

/* Write the same value to both the wr_ and rd_ variant of a register */
#define SET_BOTH(dw, name, value)			\
	do {						\
		SET(dw, wr_##name, value);		\
		SET(dw, rd_##name, value);		\
	} while (0)
56 | static inline struct dw_edma_v0_ch_regs __iomem * | ||
57 | __dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch) | ||
58 | { | ||
59 | if (dw->mode == EDMA_MODE_LEGACY) | ||
60 | return &(__dw_regs(dw)->type.legacy.ch); | ||
61 | |||
62 | if (dir == EDMA_DIR_WRITE) | ||
63 | return &__dw_regs(dw)->type.unroll.ch[ch].wr; | ||
64 | |||
65 | return &__dw_regs(dw)->type.unroll.ch[ch].rd; | ||
66 | } | ||
67 | |||
/*
 * Write a per-channel register.  In legacy mode the single channel
 * window must first be pointed at the right channel via viewport_sel,
 * so the select+write pair is done under dw->lock to keep it atomic
 * with respect to other channel accesses.
 */
static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
			     u32 value, void __iomem *addr)
{
	if (dw->mode == EDMA_MODE_LEGACY) {
		u32 viewport_sel;
		unsigned long flags;

		raw_spin_lock_irqsave(&dw->lock, flags);

		viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
		if (dir == EDMA_DIR_READ)
			viewport_sel |= BIT(31);	/* BIT(31) selects the read side */

		writel(viewport_sel,
		       &(__dw_regs(dw)->type.legacy.viewport_sel));
		writel(value, addr);

		raw_spin_unlock_irqrestore(&dw->lock, flags);
	} else {
		/* Unrolled mode: channel registers are directly addressable */
		writel(value, addr);
	}
}
90 | |||
/*
 * Read a per-channel register; legacy-mode counterpart of writel_ch(),
 * same viewport-select-under-lock protocol.
 */
static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
			   const void __iomem *addr)
{
	u32 value;

	if (dw->mode == EDMA_MODE_LEGACY) {
		u32 viewport_sel;
		unsigned long flags;

		raw_spin_lock_irqsave(&dw->lock, flags);

		viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
		if (dir == EDMA_DIR_READ)
			viewport_sel |= BIT(31);	/* BIT(31) selects the read side */

		writel(viewport_sel,
		       &(__dw_regs(dw)->type.legacy.viewport_sel));
		value = readl(addr);

		raw_spin_unlock_irqrestore(&dw->lock, flags);
	} else {
		/* Unrolled mode: channel registers are directly addressable */
		value = readl(addr);
	}

	return value;
}
117 | |||
/* Per-channel register accessors (viewport-aware in legacy mode) */
#define SET_CH(dw, dir, ch, name, value) \
	writel_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name))

#define GET_CH(dw, dir, ch, name) \
	readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))

/* Plain 32-bit store into a linked-list element in DMA memory */
#define SET_LL(ll, value) \
	writel(value, ll)
/* eDMA management callbacks */

/* Quiesce the controller: mask and clear all done/abort interrupts on
 * both directions, then disable the write and read engines.
 */
void dw_edma_v0_core_off(struct dw_edma *dw)
{
	SET_BOTH(dw, int_mask, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
	SET_BOTH(dw, int_clear, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
	SET_BOTH(dw, engine_en, 0);
}
134 | |||
135 | u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir) | ||
136 | { | ||
137 | u32 num_ch; | ||
138 | |||
139 | if (dir == EDMA_DIR_WRITE) | ||
140 | num_ch = FIELD_GET(EDMA_V0_WRITE_CH_COUNT_MASK, GET(dw, ctrl)); | ||
141 | else | ||
142 | num_ch = FIELD_GET(EDMA_V0_READ_CH_COUNT_MASK, GET(dw, ctrl)); | ||
143 | |||
144 | if (num_ch > EDMA_V0_MAX_NR_CH) | ||
145 | num_ch = EDMA_V0_MAX_NR_CH; | ||
146 | |||
147 | return (u16)num_ch; | ||
148 | } | ||
149 | |||
150 | enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan) | ||
151 | { | ||
152 | struct dw_edma *dw = chan->chip->dw; | ||
153 | u32 tmp; | ||
154 | |||
155 | tmp = FIELD_GET(EDMA_V0_CH_STATUS_MASK, | ||
156 | GET_CH(dw, chan->dir, chan->id, ch_control1)); | ||
157 | |||
158 | if (tmp == 1) | ||
159 | return DMA_IN_PROGRESS; | ||
160 | else if (tmp == 3) | ||
161 | return DMA_COMPLETE; | ||
162 | else | ||
163 | return DMA_ERROR; | ||
164 | } | ||
165 | |||
/* Acknowledge (clear) the "done" interrupt bit of this channel */
void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->chip->dw;

	SET_RW(dw, chan->dir, int_clear,
	       FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)));
}
173 | |||
/* Acknowledge (clear) the "abort" interrupt bit of this channel */
void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->chip->dw;

	SET_RW(dw, chan->dir, int_clear,
	       FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)));
}
181 | |||
/* Bitmask of channels with a pending "done" interrupt in @dir */
u32 dw_edma_v0_core_status_done_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
	return FIELD_GET(EDMA_V0_DONE_INT_MASK, GET_RW(dw, dir, int_status));
}
186 | |||
/* Bitmask of channels with a pending "abort" interrupt in @dir */
u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
	return FIELD_GET(EDMA_V0_ABORT_INT_MASK, GET_RW(dw, dir, int_status));
}
191 | |||
192 | static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk) | ||
193 | { | ||
194 | struct dw_edma_burst *child; | ||
195 | struct dw_edma_v0_lli *lli; | ||
196 | struct dw_edma_v0_llp *llp; | ||
197 | u32 control = 0, i = 0; | ||
198 | u64 sar, dar, addr; | ||
199 | int j; | ||
200 | |||
201 | lli = (struct dw_edma_v0_lli *)chunk->ll_region.vaddr; | ||
202 | |||
203 | if (chunk->cb) | ||
204 | control = DW_EDMA_V0_CB; | ||
205 | |||
206 | j = chunk->bursts_alloc; | ||
207 | list_for_each_entry(child, &chunk->burst->list, list) { | ||
208 | j--; | ||
209 | if (!j) | ||
210 | control |= (DW_EDMA_V0_LIE | DW_EDMA_V0_RIE); | ||
211 | |||
212 | /* Channel control */ | ||
213 | SET_LL(&lli[i].control, control); | ||
214 | /* Transfer size */ | ||
215 | SET_LL(&lli[i].transfer_size, child->sz); | ||
216 | /* SAR - low, high */ | ||
217 | sar = cpu_to_le64(child->sar); | ||
218 | SET_LL(&lli[i].sar_low, lower_32_bits(sar)); | ||
219 | SET_LL(&lli[i].sar_high, upper_32_bits(sar)); | ||
220 | /* DAR - low, high */ | ||
221 | dar = cpu_to_le64(child->dar); | ||
222 | SET_LL(&lli[i].dar_low, lower_32_bits(dar)); | ||
223 | SET_LL(&lli[i].dar_high, upper_32_bits(dar)); | ||
224 | i++; | ||
225 | } | ||
226 | |||
227 | llp = (struct dw_edma_v0_llp *)&lli[i]; | ||
228 | control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB; | ||
229 | if (!chunk->cb) | ||
230 | control |= DW_EDMA_V0_CB; | ||
231 | |||
232 | /* Channel control */ | ||
233 | SET_LL(&llp->control, control); | ||
234 | /* Linked list - low, high */ | ||
235 | addr = cpu_to_le64(chunk->ll_region.paddr); | ||
236 | SET_LL(&llp->llp_low, lower_32_bits(addr)); | ||
237 | SET_LL(&llp->llp_high, upper_32_bits(addr)); | ||
238 | } | ||
239 | |||
240 | void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first) | ||
241 | { | ||
242 | struct dw_edma_chan *chan = chunk->chan; | ||
243 | struct dw_edma *dw = chan->chip->dw; | ||
244 | u32 tmp; | ||
245 | u64 llp; | ||
246 | |||
247 | dw_edma_v0_core_write_chunk(chunk); | ||
248 | |||
249 | if (first) { | ||
250 | /* Enable engine */ | ||
251 | SET_RW(dw, chan->dir, engine_en, BIT(0)); | ||
252 | /* Interrupt unmask - done, abort */ | ||
253 | tmp = GET_RW(dw, chan->dir, int_mask); | ||
254 | tmp &= ~FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)); | ||
255 | tmp &= ~FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)); | ||
256 | SET_RW(dw, chan->dir, int_mask, tmp); | ||
257 | /* Linked list error */ | ||
258 | tmp = GET_RW(dw, chan->dir, linked_list_err_en); | ||
259 | tmp |= FIELD_PREP(EDMA_V0_LINKED_LIST_ERR_MASK, BIT(chan->id)); | ||
260 | SET_RW(dw, chan->dir, linked_list_err_en, tmp); | ||
261 | /* Channel control */ | ||
262 | SET_CH(dw, chan->dir, chan->id, ch_control1, | ||
263 | (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE)); | ||
264 | /* Linked list - low, high */ | ||
265 | llp = cpu_to_le64(chunk->ll_region.paddr); | ||
266 | SET_CH(dw, chan->dir, chan->id, llp_low, lower_32_bits(llp)); | ||
267 | SET_CH(dw, chan->dir, chan->id, llp_high, upper_32_bits(llp)); | ||
268 | } | ||
269 | /* Doorbell */ | ||
270 | SET_RW(dw, chan->dir, doorbell, | ||
271 | FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id)); | ||
272 | } | ||
273 | |||
/*
 * Program the IMWr (interrupt message write) registers so the hardware
 * raises this channel's MSI: done and abort messages both target the
 * channel's MSI address, and the MSI data is patched into the half of
 * the shared chXY_imwr_data register belonging to this channel (even
 * channels use the low 16 bits, odd channels the high 16 bits).
 * Always returns 0.
 */
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->chip->dw;
	u32 tmp = 0;

	/* MSI done addr - low, high */
	SET_RW(dw, chan->dir, done_imwr_low, chan->msi.address_lo);
	SET_RW(dw, chan->dir, done_imwr_high, chan->msi.address_hi);
	/* MSI abort addr - low, high */
	SET_RW(dw, chan->dir, abort_imwr_low, chan->msi.address_lo);
	SET_RW(dw, chan->dir, abort_imwr_high, chan->msi.address_hi);
	/* MSI data - low, high: read the register shared by this channel pair */
	switch (chan->id) {
	case 0:
	case 1:
		tmp = GET_RW(dw, chan->dir, ch01_imwr_data);
		break;

	case 2:
	case 3:
		tmp = GET_RW(dw, chan->dir, ch23_imwr_data);
		break;

	case 4:
	case 5:
		tmp = GET_RW(dw, chan->dir, ch45_imwr_data);
		break;

	case 6:
	case 7:
		tmp = GET_RW(dw, chan->dir, ch67_imwr_data);
		break;
	}

	/* Patch only this channel's 16-bit half, preserving its neighbour's */
	if (chan->id & BIT(0)) {
		/* Channel odd {1, 3, 5, 7} */
		tmp &= EDMA_V0_CH_EVEN_MSI_DATA_MASK;
		tmp |= FIELD_PREP(EDMA_V0_CH_ODD_MSI_DATA_MASK,
				  chan->msi.data);
	} else {
		/* Channel even {0, 2, 4, 6} */
		tmp &= EDMA_V0_CH_ODD_MSI_DATA_MASK;
		tmp |= FIELD_PREP(EDMA_V0_CH_EVEN_MSI_DATA_MASK,
				  chan->msi.data);
	}

	/* Write the merged value back to the pair's register */
	switch (chan->id) {
	case 0:
	case 1:
		SET_RW(dw, chan->dir, ch01_imwr_data, tmp);
		break;

	case 2:
	case 3:
		SET_RW(dw, chan->dir, ch23_imwr_data, tmp);
		break;

	case 4:
	case 5:
		SET_RW(dw, chan->dir, ch45_imwr_data, tmp);
		break;

	case 6:
	case 7:
		SET_RW(dw, chan->dir, ch67_imwr_data, tmp);
		break;
	}

	return 0;
}
344 | |||
/* eDMA debugfs callbacks - thin wrappers over the v0 debugfs backend */
void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip)
{
	dw_edma_v0_debugfs_on(chip);
}

void dw_edma_v0_core_debugfs_off(void)
{
	dw_edma_v0_debugfs_off();
}
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.h b/drivers/dma/dw-edma/dw-edma-v0-core.h new file mode 100644 index 000000000000..abae1527f1f9 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-v0-core.h | |||
@@ -0,0 +1,28 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | /* | ||
3 | * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. | ||
4 | * Synopsys DesignWare eDMA v0 core | ||
5 | * | ||
6 | * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> | ||
7 | */ | ||
8 | |||
9 | #ifndef _DW_EDMA_V0_CORE_H | ||
10 | #define _DW_EDMA_V0_CORE_H | ||
11 | |||
12 | #include <linux/dma/edma.h> | ||
13 | |||
14 | /* eDMA management callbacks */ | ||
15 | void dw_edma_v0_core_off(struct dw_edma *chan); | ||
16 | u16 dw_edma_v0_core_ch_count(struct dw_edma *chan, enum dw_edma_dir dir); | ||
17 | enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan); | ||
18 | void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan); | ||
19 | void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan); | ||
20 | u32 dw_edma_v0_core_status_done_int(struct dw_edma *chan, enum dw_edma_dir dir); | ||
21 | u32 dw_edma_v0_core_status_abort_int(struct dw_edma *chan, enum dw_edma_dir dir); | ||
22 | void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first); | ||
23 | int dw_edma_v0_core_device_config(struct dw_edma_chan *chan); | ||
24 | /* eDMA debug fs callbacks */ | ||
25 | void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip); | ||
26 | void dw_edma_v0_core_debugfs_off(void); | ||
27 | |||
28 | #endif /* _DW_EDMA_V0_CORE_H */ | ||
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c new file mode 100644 index 000000000000..3226f528cc11 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c | |||
@@ -0,0 +1,310 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. | ||
4 | * Synopsys DesignWare eDMA v0 core | ||
5 | * | ||
6 | * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> | ||
7 | */ | ||
8 | |||
9 | #include <linux/debugfs.h> | ||
10 | #include <linux/bitfield.h> | ||
11 | |||
12 | #include "dw-edma-v0-debugfs.h" | ||
13 | #include "dw-edma-v0-regs.h" | ||
14 | #include "dw-edma-core.h" | ||
15 | |||
/* Build a { "name", address } debugfs table entry for a register field */
#define REGS_ADDR(name) \
	((dma_addr_t *)&regs->name)
#define REGISTER(name) \
	{ #name, REGS_ADDR(name) }

#define WR_REGISTER(name) \
	{ #name, REGS_ADDR(wr_##name) }
#define RD_REGISTER(name) \
	{ #name, REGS_ADDR(rd_##name) }

#define WR_REGISTER_LEGACY(name) \
	{ #name, REGS_ADDR(type.legacy.wr_##name) }
#define RD_REGISTER_LEGACY(name) \
	{ #name, REGS_ADDR(type.legacy.rd_##name) }

#define WR_REGISTER_UNROLL(name) \
	{ #name, REGS_ADDR(type.unroll.wr_##name) }
#define RD_REGISTER_UNROLL(name) \
	{ #name, REGS_ADDR(type.unroll.rd_##name) }

#define WRITE_STR			"write"
#define READ_STR			"read"
#define CHANNEL_STR			"channel"
#define REGISTERS_STR			"registers"

/* Single-instance state: this debugfs backend supports one device at a time */
static struct dentry				*base_dir;
static struct dw_edma				*dw;
static struct dw_edma_v0_regs			*regs;

/* Per-direction ([0]=write, [1]=read), per-channel address ranges of the
 * unrolled-layout channel register blocks; used to translate a debugfs
 * file's register address back to the legacy viewport window.
 */
static struct {
	void					*start;
	void					*end;
} lim[2][EDMA_V0_MAX_NR_CH];

/* Name + register address pair backing one debugfs file */
struct debugfs_entries {
	char					name[24];
	dma_addr_t				*reg;
};
54 | |||
55 | static int dw_edma_debugfs_u32_get(void *data, u64 *val) | ||
56 | { | ||
57 | if (dw->mode == EDMA_MODE_LEGACY && | ||
58 | data >= (void *)®s->type.legacy.ch) { | ||
59 | void *ptr = (void *)®s->type.legacy.ch; | ||
60 | u32 viewport_sel = 0; | ||
61 | unsigned long flags; | ||
62 | u16 ch; | ||
63 | |||
64 | for (ch = 0; ch < dw->wr_ch_cnt; ch++) | ||
65 | if (lim[0][ch].start >= data && data < lim[0][ch].end) { | ||
66 | ptr += (data - lim[0][ch].start); | ||
67 | goto legacy_sel_wr; | ||
68 | } | ||
69 | |||
70 | for (ch = 0; ch < dw->rd_ch_cnt; ch++) | ||
71 | if (lim[1][ch].start >= data && data < lim[1][ch].end) { | ||
72 | ptr += (data - lim[1][ch].start); | ||
73 | goto legacy_sel_rd; | ||
74 | } | ||
75 | |||
76 | return 0; | ||
77 | legacy_sel_rd: | ||
78 | viewport_sel = BIT(31); | ||
79 | legacy_sel_wr: | ||
80 | viewport_sel |= FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch); | ||
81 | |||
82 | raw_spin_lock_irqsave(&dw->lock, flags); | ||
83 | |||
84 | writel(viewport_sel, ®s->type.legacy.viewport_sel); | ||
85 | *val = readl(ptr); | ||
86 | |||
87 | raw_spin_unlock_irqrestore(&dw->lock, flags); | ||
88 | } else { | ||
89 | *val = readl(data); | ||
90 | } | ||
91 | |||
92 | return 0; | ||
93 | } | ||
94 | DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, dw_edma_debugfs_u32_get, NULL, "0x%08llx\n"); | ||
95 | |||
96 | static void dw_edma_debugfs_create_x32(const struct debugfs_entries entries[], | ||
97 | int nr_entries, struct dentry *dir) | ||
98 | { | ||
99 | int i; | ||
100 | |||
101 | for (i = 0; i < nr_entries; i++) { | ||
102 | if (!debugfs_create_file_unsafe(entries[i].name, 0444, dir, | ||
103 | entries[i].reg, &fops_x32)) | ||
104 | break; | ||
105 | } | ||
106 | } | ||
107 | |||
108 | static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs *regs, | ||
109 | struct dentry *dir) | ||
110 | { | ||
111 | int nr_entries; | ||
112 | const struct debugfs_entries debugfs_regs[] = { | ||
113 | REGISTER(ch_control1), | ||
114 | REGISTER(ch_control2), | ||
115 | REGISTER(transfer_size), | ||
116 | REGISTER(sar_low), | ||
117 | REGISTER(sar_high), | ||
118 | REGISTER(dar_low), | ||
119 | REGISTER(dar_high), | ||
120 | REGISTER(llp_low), | ||
121 | REGISTER(llp_high), | ||
122 | }; | ||
123 | |||
124 | nr_entries = ARRAY_SIZE(debugfs_regs); | ||
125 | dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, dir); | ||
126 | } | ||
127 | |||
/*
 * Populate the "write" direction debugfs subtree: global and interrupt
 * registers, unroll-only registers when applicable, and one directory
 * per write channel.  Also records each channel block's address range
 * in lim[0][] for the legacy-mode lookup in dw_edma_debugfs_u32_get().
 */
static void dw_edma_debugfs_regs_wr(struct dentry *dir)
{
	const struct debugfs_entries debugfs_regs[] = {
		/* eDMA global registers */
		WR_REGISTER(engine_en),
		WR_REGISTER(doorbell),
		WR_REGISTER(ch_arb_weight_low),
		WR_REGISTER(ch_arb_weight_high),
		/* eDMA interrupts registers */
		WR_REGISTER(int_status),
		WR_REGISTER(int_mask),
		WR_REGISTER(int_clear),
		WR_REGISTER(err_status),
		WR_REGISTER(done_imwr_low),
		WR_REGISTER(done_imwr_high),
		WR_REGISTER(abort_imwr_low),
		WR_REGISTER(abort_imwr_high),
		WR_REGISTER(ch01_imwr_data),
		WR_REGISTER(ch23_imwr_data),
		WR_REGISTER(ch45_imwr_data),
		WR_REGISTER(ch67_imwr_data),
		WR_REGISTER(linked_list_err_en),
	};
	const struct debugfs_entries debugfs_unroll_regs[] = {
		/* eDMA channel context grouping */
		WR_REGISTER_UNROLL(engine_chgroup),
		WR_REGISTER_UNROLL(engine_hshake_cnt_low),
		WR_REGISTER_UNROLL(engine_hshake_cnt_high),
		WR_REGISTER_UNROLL(ch0_pwr_en),
		WR_REGISTER_UNROLL(ch1_pwr_en),
		WR_REGISTER_UNROLL(ch2_pwr_en),
		WR_REGISTER_UNROLL(ch3_pwr_en),
		WR_REGISTER_UNROLL(ch4_pwr_en),
		WR_REGISTER_UNROLL(ch5_pwr_en),
		WR_REGISTER_UNROLL(ch6_pwr_en),
		WR_REGISTER_UNROLL(ch7_pwr_en),
	};
	struct dentry *regs_dir, *ch_dir;
	int nr_entries, i;
	char name[16];

	/* NOTE(review): debugfs_create_dir() returns an ERR_PTR on
	 * failure, never NULL, so this check is ineffective — candidate
	 * for removal per the debugfs no-error-checking convention.
	 */
	regs_dir = debugfs_create_dir(WRITE_STR, dir);
	if (!regs_dir)
		return;

	nr_entries = ARRAY_SIZE(debugfs_regs);
	dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);

	if (dw->mode == EDMA_MODE_UNROLL) {
		nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
		dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
					   regs_dir);
	}

	for (i = 0; i < dw->wr_ch_cnt; i++) {
		snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i);

		ch_dir = debugfs_create_dir(name, regs_dir);
		if (!ch_dir)
			return;

		dw_edma_debugfs_regs_ch(&regs->type.unroll.ch[i].wr, ch_dir);

		/* Remember this block's span for legacy-mode remapping */
		lim[0][i].start = &regs->type.unroll.ch[i].wr;
		lim[0][i].end = &regs->type.unroll.ch[i].padding_1[0];
	}
}
195 | |||
/*
 * Populate the "read" direction debugfs subtree; mirror image of
 * dw_edma_debugfs_regs_wr(), recording channel spans in lim[1][].
 */
static void dw_edma_debugfs_regs_rd(struct dentry *dir)
{
	const struct debugfs_entries debugfs_regs[] = {
		/* eDMA global registers */
		RD_REGISTER(engine_en),
		RD_REGISTER(doorbell),
		RD_REGISTER(ch_arb_weight_low),
		RD_REGISTER(ch_arb_weight_high),
		/* eDMA interrupts registers */
		RD_REGISTER(int_status),
		RD_REGISTER(int_mask),
		RD_REGISTER(int_clear),
		RD_REGISTER(err_status_low),
		RD_REGISTER(err_status_high),
		RD_REGISTER(linked_list_err_en),
		RD_REGISTER(done_imwr_low),
		RD_REGISTER(done_imwr_high),
		RD_REGISTER(abort_imwr_low),
		RD_REGISTER(abort_imwr_high),
		RD_REGISTER(ch01_imwr_data),
		RD_REGISTER(ch23_imwr_data),
		RD_REGISTER(ch45_imwr_data),
		RD_REGISTER(ch67_imwr_data),
	};
	const struct debugfs_entries debugfs_unroll_regs[] = {
		/* eDMA channel context grouping */
		RD_REGISTER_UNROLL(engine_chgroup),
		RD_REGISTER_UNROLL(engine_hshake_cnt_low),
		RD_REGISTER_UNROLL(engine_hshake_cnt_high),
		RD_REGISTER_UNROLL(ch0_pwr_en),
		RD_REGISTER_UNROLL(ch1_pwr_en),
		RD_REGISTER_UNROLL(ch2_pwr_en),
		RD_REGISTER_UNROLL(ch3_pwr_en),
		RD_REGISTER_UNROLL(ch4_pwr_en),
		RD_REGISTER_UNROLL(ch5_pwr_en),
		RD_REGISTER_UNROLL(ch6_pwr_en),
		RD_REGISTER_UNROLL(ch7_pwr_en),
	};
	struct dentry *regs_dir, *ch_dir;
	int nr_entries, i;
	char name[16];

	/* NOTE(review): debugfs_create_dir() returns an ERR_PTR on
	 * failure, never NULL, so this check is ineffective — candidate
	 * for removal per the debugfs no-error-checking convention.
	 */
	regs_dir = debugfs_create_dir(READ_STR, dir);
	if (!regs_dir)
		return;

	nr_entries = ARRAY_SIZE(debugfs_regs);
	dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);

	if (dw->mode == EDMA_MODE_UNROLL) {
		nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
		dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
					   regs_dir);
	}

	for (i = 0; i < dw->rd_ch_cnt; i++) {
		snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i);

		ch_dir = debugfs_create_dir(name, regs_dir);
		if (!ch_dir)
			return;

		dw_edma_debugfs_regs_ch(&regs->type.unroll.ch[i].rd, ch_dir);

		/* Remember this block's span for legacy-mode remapping */
		lim[1][i].start = &regs->type.unroll.ch[i].rd;
		lim[1][i].end = &regs->type.unroll.ch[i].padding_2[0];
	}
}
264 | |||
/* Build the "registers" subtree: global control registers plus the
 * per-direction write/read subtrees.
 */
static void dw_edma_debugfs_regs(void)
{
	const struct debugfs_entries debugfs_regs[] = {
		REGISTER(ctrl_data_arb_prior),
		REGISTER(ctrl),
	};
	struct dentry *regs_dir;
	int nr_entries;

	/* NOTE(review): debugfs_create_dir() returns an ERR_PTR on
	 * failure, never NULL, so this check is ineffective.
	 */
	regs_dir = debugfs_create_dir(REGISTERS_STR, base_dir);
	if (!regs_dir)
		return;

	nr_entries = ARRAY_SIZE(debugfs_regs);
	dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);

	dw_edma_debugfs_regs_wr(regs_dir);
	dw_edma_debugfs_regs_rd(regs_dir);
}
284 | |||
285 | void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip) | ||
286 | { | ||
287 | dw = chip->dw; | ||
288 | if (!dw) | ||
289 | return; | ||
290 | |||
291 | regs = (struct dw_edma_v0_regs *)dw->rg_region.vaddr; | ||
292 | if (!regs) | ||
293 | return; | ||
294 | |||
295 | base_dir = debugfs_create_dir(dw->name, 0); | ||
296 | if (!base_dir) | ||
297 | return; | ||
298 | |||
299 | debugfs_create_u32("version", 0444, base_dir, &dw->version); | ||
300 | debugfs_create_u32("mode", 0444, base_dir, &dw->mode); | ||
301 | debugfs_create_u16("wr_ch_cnt", 0444, base_dir, &dw->wr_ch_cnt); | ||
302 | debugfs_create_u16("rd_ch_cnt", 0444, base_dir, &dw->rd_ch_cnt); | ||
303 | |||
304 | dw_edma_debugfs_regs(); | ||
305 | } | ||
306 | |||
/* Tear down the entire debugfs hierarchy created by dw_edma_v0_debugfs_on() */
void dw_edma_v0_debugfs_off(void)
{
	debugfs_remove_recursive(base_dir);
}
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h new file mode 100644 index 000000000000..5450a0a94193 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h | |||
@@ -0,0 +1,27 @@ | |||
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA v0 core
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#ifndef _DW_EDMA_V0_DEBUG_FS_H
#define _DW_EDMA_V0_DEBUG_FS_H

#include <linux/dma/edma.h>

#ifdef CONFIG_DEBUG_FS
void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip);
void dw_edma_v0_debugfs_off(void);
#else
/* No-op stubs so callers need no CONFIG_DEBUG_FS conditionals */
static inline void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
{
}

static inline void dw_edma_v0_debugfs_off(void)
{
}
#endif /* CONFIG_DEBUG_FS */

#endif /* _DW_EDMA_V0_DEBUG_FS_H */
diff --git a/drivers/dma/dw-edma/dw-edma-v0-regs.h b/drivers/dma/dw-edma/dw-edma-v0-regs.h new file mode 100644 index 000000000000..cd6476884507 --- /dev/null +++ b/drivers/dma/dw-edma/dw-edma-v0-regs.h | |||
@@ -0,0 +1,158 @@ | |||
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA v0 core
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#ifndef _DW_EDMA_V0_REGS_H
#define _DW_EDMA_V0_REGS_H

#include <linux/dmaengine.h>

#define EDMA_V0_MAX_NR_CH				8
#define EDMA_V0_VIEWPORT_MASK				GENMASK(2, 0)
#define EDMA_V0_DONE_INT_MASK				GENMASK(7, 0)
#define EDMA_V0_ABORT_INT_MASK				GENMASK(23, 16)
#define EDMA_V0_WRITE_CH_COUNT_MASK			GENMASK(3, 0)
#define EDMA_V0_READ_CH_COUNT_MASK			GENMASK(19, 16)
#define EDMA_V0_CH_STATUS_MASK				GENMASK(6, 5)
#define EDMA_V0_DOORBELL_CH_MASK			GENMASK(2, 0)
#define EDMA_V0_LINKED_LIST_ERR_MASK			GENMASK(7, 0)

#define EDMA_V0_CH_ODD_MSI_DATA_MASK			GENMASK(31, 16)
#define EDMA_V0_CH_EVEN_MSI_DATA_MASK			GENMASK(15, 0)

/* Per-channel register block (offsets relative to the block base) */
struct dw_edma_v0_ch_regs {
	u32 ch_control1;				/* 0x000 */
	u32 ch_control2;				/* 0x004 */
	u32 transfer_size;				/* 0x008 */
	u32 sar_low;					/* 0x00c */
	u32 sar_high;					/* 0x010 */
	u32 dar_low;					/* 0x014 */
	u32 dar_high;					/* 0x018 */
	u32 llp_low;					/* 0x01c */
	u32 llp_high;					/* 0x020 */
};

/* One channel's write/read register pair in the unrolled layout */
struct dw_edma_v0_ch {
	struct dw_edma_v0_ch_regs wr;			/* 0x200 */
	u32 padding_1[55];				/* [0x224..0x2fc] */
	struct dw_edma_v0_ch_regs rd;			/* 0x300 */
	u32 padding_2[55];				/* [0x324..0x3fc] (was mislabelled 0x224..0x2fc) */
};

/* Unrolled register layout: all channels directly addressable */
struct dw_edma_v0_unroll {
	u32 padding_1;					/* 0x0f8 */
	u32 wr_engine_chgroup;				/* 0x100 */
	u32 rd_engine_chgroup;				/* 0x104 */
	u32 wr_engine_hshake_cnt_low;			/* 0x108 */
	u32 wr_engine_hshake_cnt_high;			/* 0x10c */
	u32 padding_2[2];				/* [0x110..0x114] */
	u32 rd_engine_hshake_cnt_low;			/* 0x118 */
	u32 rd_engine_hshake_cnt_high;			/* 0x11c */
	u32 padding_3[2];				/* [0x120..0x124] */
	u32 wr_ch0_pwr_en;				/* 0x128 */
	u32 wr_ch1_pwr_en;				/* 0x12c */
	u32 wr_ch2_pwr_en;				/* 0x130 */
	u32 wr_ch3_pwr_en;				/* 0x134 */
	u32 wr_ch4_pwr_en;				/* 0x138 */
	u32 wr_ch5_pwr_en;				/* 0x13c */
	u32 wr_ch6_pwr_en;				/* 0x140 */
	u32 wr_ch7_pwr_en;				/* 0x144 */
	u32 padding_4[8];				/* [0x148..0x164] */
	u32 rd_ch0_pwr_en;				/* 0x168 */
	u32 rd_ch1_pwr_en;				/* 0x16c */
	u32 rd_ch2_pwr_en;				/* 0x170 */
	u32 rd_ch3_pwr_en;				/* 0x174 */
	u32 rd_ch4_pwr_en;				/* 0x178 */
	u32 rd_ch5_pwr_en;				/* 0x17c (was mislabelled 0x18c) */
	u32 rd_ch6_pwr_en;				/* 0x180 */
	u32 rd_ch7_pwr_en;				/* 0x184 */
	u32 padding_5[30];				/* [0x188..0x1fc] */
	struct dw_edma_v0_ch ch[EDMA_V0_MAX_NR_CH];	/* [0x200..0x1120] */
};

/* Legacy register layout: one channel window behind a viewport selector */
struct dw_edma_v0_legacy {
	u32 viewport_sel;				/* 0x0f8 */
	struct dw_edma_v0_ch_regs ch;			/* [0x100..0x120] */
};

/* Full eDMA v0 register file (global + per-direction + channel layout) */
struct dw_edma_v0_regs {
	/* eDMA global registers */
	u32 ctrl_data_arb_prior;			/* 0x000 */
	u32 padding_1;					/* 0x004 */
	u32 ctrl;					/* 0x008 */
	u32 wr_engine_en;				/* 0x00c */
	u32 wr_doorbell;				/* 0x010 */
	u32 padding_2;					/* 0x014 */
	u32 wr_ch_arb_weight_low;			/* 0x018 */
	u32 wr_ch_arb_weight_high;			/* 0x01c */
	u32 padding_3[3];				/* [0x020..0x028] */
	u32 rd_engine_en;				/* 0x02c */
	u32 rd_doorbell;				/* 0x030 */
	u32 padding_4;					/* 0x034 */
	u32 rd_ch_arb_weight_low;			/* 0x038 */
	u32 rd_ch_arb_weight_high;			/* 0x03c */
	u32 padding_5[3];				/* [0x040..0x048] */
	/* eDMA interrupts registers */
	u32 wr_int_status;				/* 0x04c */
	u32 padding_6;					/* 0x050 */
	u32 wr_int_mask;				/* 0x054 */
	u32 wr_int_clear;				/* 0x058 */
	u32 wr_err_status;				/* 0x05c */
	u32 wr_done_imwr_low;				/* 0x060 */
	u32 wr_done_imwr_high;				/* 0x064 */
	u32 wr_abort_imwr_low;				/* 0x068 */
	u32 wr_abort_imwr_high;				/* 0x06c */
	u32 wr_ch01_imwr_data;				/* 0x070 */
	u32 wr_ch23_imwr_data;				/* 0x074 */
	u32 wr_ch45_imwr_data;				/* 0x078 */
	u32 wr_ch67_imwr_data;				/* 0x07c */
	u32 padding_7[4];				/* [0x080..0x08c] */
	u32 wr_linked_list_err_en;			/* 0x090 */
	u32 padding_8[3];				/* [0x094..0x09c] */
	u32 rd_int_status;				/* 0x0a0 */
	u32 padding_9;					/* 0x0a4 */
	u32 rd_int_mask;				/* 0x0a8 */
	u32 rd_int_clear;				/* 0x0ac */
	u32 padding_10;					/* 0x0b0 */
	u32 rd_err_status_low;				/* 0x0b4 */
	u32 rd_err_status_high;				/* 0x0b8 */
	u32 padding_11[2];				/* [0x0bc..0x0c0] */
	u32 rd_linked_list_err_en;			/* 0x0c4 */
	u32 padding_12;					/* 0x0c8 */
	u32 rd_done_imwr_low;				/* 0x0cc */
	u32 rd_done_imwr_high;				/* 0x0d0 */
	u32 rd_abort_imwr_low;				/* 0x0d4 */
	u32 rd_abort_imwr_high;				/* 0x0d8 */
	u32 rd_ch01_imwr_data;				/* 0x0dc */
	u32 rd_ch23_imwr_data;				/* 0x0e0 */
	u32 rd_ch45_imwr_data;				/* 0x0e4 */
	u32 rd_ch67_imwr_data;				/* 0x0e8 */
	u32 padding_13[4];				/* [0x0ec..0x0f8] */
	/* eDMA channel context grouping */
	union dw_edma_v0_type {
		struct dw_edma_v0_legacy legacy;	/* [0x0f8..0x120] */
		struct dw_edma_v0_unroll unroll;	/* [0x0f8..0x1120] */
	} type;
};

/* Linked-list data element written into the DMA descriptor region */
struct dw_edma_v0_lli {
	u32 control;
	u32 transfer_size;
	u32 sar_low;
	u32 sar_high;
	u32 dar_low;
	u32 dar_high;
};

/* Linked-list link element terminating a descriptor list */
struct dw_edma_v0_llp {
	u32 control;
	u32 reserved;
	u32 llp_low;
	u32 llp_high;
};

#endif /* _DW_EDMA_V0_REGS_H */
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c index e79a75db0852..8de87b15a988 100644 --- a/drivers/dma/dw/pci.c +++ b/drivers/dma/dw/pci.c | |||
@@ -15,10 +15,13 @@ | |||
15 | struct dw_dma_pci_data { | 15 | struct dw_dma_pci_data { |
16 | const struct dw_dma_platform_data *pdata; | 16 | const struct dw_dma_platform_data *pdata; |
17 | int (*probe)(struct dw_dma_chip *chip); | 17 | int (*probe)(struct dw_dma_chip *chip); |
18 | int (*remove)(struct dw_dma_chip *chip); | ||
19 | struct dw_dma_chip *chip; | ||
18 | }; | 20 | }; |
19 | 21 | ||
20 | static const struct dw_dma_pci_data dw_pci_data = { | 22 | static const struct dw_dma_pci_data dw_pci_data = { |
21 | .probe = dw_dma_probe, | 23 | .probe = dw_dma_probe, |
24 | .remove = dw_dma_remove, | ||
22 | }; | 25 | }; |
23 | 26 | ||
24 | static const struct dw_dma_platform_data idma32_pdata = { | 27 | static const struct dw_dma_platform_data idma32_pdata = { |
@@ -34,11 +37,13 @@ static const struct dw_dma_platform_data idma32_pdata = { | |||
34 | static const struct dw_dma_pci_data idma32_pci_data = { | 37 | static const struct dw_dma_pci_data idma32_pci_data = { |
35 | .pdata = &idma32_pdata, | 38 | .pdata = &idma32_pdata, |
36 | .probe = idma32_dma_probe, | 39 | .probe = idma32_dma_probe, |
40 | .remove = idma32_dma_remove, | ||
37 | }; | 41 | }; |
38 | 42 | ||
39 | static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) | 43 | static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) |
40 | { | 44 | { |
41 | const struct dw_dma_pci_data *data = (void *)pid->driver_data; | 45 | const struct dw_dma_pci_data *drv_data = (void *)pid->driver_data; |
46 | struct dw_dma_pci_data *data; | ||
42 | struct dw_dma_chip *chip; | 47 | struct dw_dma_chip *chip; |
43 | int ret; | 48 | int ret; |
44 | 49 | ||
@@ -63,6 +68,10 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
63 | if (ret) | 68 | if (ret) |
64 | return ret; | 69 | return ret; |
65 | 70 | ||
71 | data = devm_kmemdup(&pdev->dev, drv_data, sizeof(*drv_data), GFP_KERNEL); | ||
72 | if (!data) | ||
73 | return -ENOMEM; | ||
74 | |||
66 | chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); | 75 | chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); |
67 | if (!chip) | 76 | if (!chip) |
68 | return -ENOMEM; | 77 | return -ENOMEM; |
@@ -73,21 +82,24 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
73 | chip->irq = pdev->irq; | 82 | chip->irq = pdev->irq; |
74 | chip->pdata = data->pdata; | 83 | chip->pdata = data->pdata; |
75 | 84 | ||
85 | data->chip = chip; | ||
86 | |||
76 | ret = data->probe(chip); | 87 | ret = data->probe(chip); |
77 | if (ret) | 88 | if (ret) |
78 | return ret; | 89 | return ret; |
79 | 90 | ||
80 | pci_set_drvdata(pdev, chip); | 91 | pci_set_drvdata(pdev, data); |
81 | 92 | ||
82 | return 0; | 93 | return 0; |
83 | } | 94 | } |
84 | 95 | ||
85 | static void dw_pci_remove(struct pci_dev *pdev) | 96 | static void dw_pci_remove(struct pci_dev *pdev) |
86 | { | 97 | { |
87 | struct dw_dma_chip *chip = pci_get_drvdata(pdev); | 98 | struct dw_dma_pci_data *data = pci_get_drvdata(pdev); |
99 | struct dw_dma_chip *chip = data->chip; | ||
88 | int ret; | 100 | int ret; |
89 | 101 | ||
90 | ret = dw_dma_remove(chip); | 102 | ret = data->remove(chip); |
91 | if (ret) | 103 | if (ret) |
92 | dev_warn(&pdev->dev, "can't remove device properly: %d\n", ret); | 104 | dev_warn(&pdev->dev, "can't remove device properly: %d\n", ret); |
93 | } | 105 | } |
@@ -96,16 +108,16 @@ static void dw_pci_remove(struct pci_dev *pdev) | |||
96 | 108 | ||
97 | static int dw_pci_suspend_late(struct device *dev) | 109 | static int dw_pci_suspend_late(struct device *dev) |
98 | { | 110 | { |
99 | struct pci_dev *pci = to_pci_dev(dev); | 111 | struct dw_dma_pci_data *data = dev_get_drvdata(dev); |
100 | struct dw_dma_chip *chip = pci_get_drvdata(pci); | 112 | struct dw_dma_chip *chip = data->chip; |
101 | 113 | ||
102 | return do_dw_dma_disable(chip); | 114 | return do_dw_dma_disable(chip); |
103 | }; | 115 | }; |
104 | 116 | ||
105 | static int dw_pci_resume_early(struct device *dev) | 117 | static int dw_pci_resume_early(struct device *dev) |
106 | { | 118 | { |
107 | struct pci_dev *pci = to_pci_dev(dev); | 119 | struct dw_dma_pci_data *data = dev_get_drvdata(dev); |
108 | struct dw_dma_chip *chip = pci_get_drvdata(pci); | 120 | struct dw_dma_chip *chip = data->chip; |
109 | 121 | ||
110 | return do_dw_dma_enable(chip); | 122 | return do_dw_dma_enable(chip); |
111 | }; | 123 | }; |
@@ -131,6 +143,11 @@ static const struct pci_device_id dw_pci_id_table[] = { | |||
131 | { PCI_VDEVICE(INTEL, 0x2286), (kernel_ulong_t)&dw_pci_data }, | 143 | { PCI_VDEVICE(INTEL, 0x2286), (kernel_ulong_t)&dw_pci_data }, |
132 | { PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_pci_data }, | 144 | { PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_pci_data }, |
133 | 145 | ||
146 | /* Elkhart Lake iDMA 32-bit (OSE DMA) */ | ||
147 | { PCI_VDEVICE(INTEL, 0x4bb4), (kernel_ulong_t)&idma32_pci_data }, | ||
148 | { PCI_VDEVICE(INTEL, 0x4bb5), (kernel_ulong_t)&idma32_pci_data }, | ||
149 | { PCI_VDEVICE(INTEL, 0x4bb6), (kernel_ulong_t)&idma32_pci_data }, | ||
150 | |||
134 | /* Haswell */ | 151 | /* Haswell */ |
135 | { PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_data }, | 152 | { PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_data }, |
136 | 153 | ||
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c index 680b2a00a953..44d92c34dec3 100644 --- a/drivers/dma/fsl-edma-common.c +++ b/drivers/dma/fsl-edma-common.c | |||
@@ -47,7 +47,7 @@ static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan) | |||
47 | struct edma_regs *regs = &fsl_chan->edma->regs; | 47 | struct edma_regs *regs = &fsl_chan->edma->regs; |
48 | u32 ch = fsl_chan->vchan.chan.chan_id; | 48 | u32 ch = fsl_chan->vchan.chan.chan_id; |
49 | 49 | ||
50 | if (fsl_chan->edma->version == v1) { | 50 | if (fsl_chan->edma->drvdata->version == v1) { |
51 | edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei); | 51 | edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei); |
52 | edma_writeb(fsl_chan->edma, ch, regs->serq); | 52 | edma_writeb(fsl_chan->edma, ch, regs->serq); |
53 | } else { | 53 | } else { |
@@ -64,7 +64,7 @@ void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan) | |||
64 | struct edma_regs *regs = &fsl_chan->edma->regs; | 64 | struct edma_regs *regs = &fsl_chan->edma->regs; |
65 | u32 ch = fsl_chan->vchan.chan.chan_id; | 65 | u32 ch = fsl_chan->vchan.chan.chan_id; |
66 | 66 | ||
67 | if (fsl_chan->edma->version == v1) { | 67 | if (fsl_chan->edma->drvdata->version == v1) { |
68 | edma_writeb(fsl_chan->edma, ch, regs->cerq); | 68 | edma_writeb(fsl_chan->edma, ch, regs->cerq); |
69 | edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei); | 69 | edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei); |
70 | } else { | 70 | } else { |
@@ -77,22 +77,33 @@ void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan) | |||
77 | } | 77 | } |
78 | EXPORT_SYMBOL_GPL(fsl_edma_disable_request); | 78 | EXPORT_SYMBOL_GPL(fsl_edma_disable_request); |
79 | 79 | ||
80 | static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr, | ||
81 | u32 off, u32 slot, bool enable) | ||
82 | { | ||
83 | u8 val8; | ||
84 | |||
85 | if (enable) | ||
86 | val8 = EDMAMUX_CHCFG_ENBL | slot; | ||
87 | else | ||
88 | val8 = EDMAMUX_CHCFG_DIS; | ||
89 | |||
90 | iowrite8(val8, addr + off); | ||
91 | } | ||
92 | |||
80 | void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan, | 93 | void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan, |
81 | unsigned int slot, bool enable) | 94 | unsigned int slot, bool enable) |
82 | { | 95 | { |
83 | u32 ch = fsl_chan->vchan.chan.chan_id; | 96 | u32 ch = fsl_chan->vchan.chan.chan_id; |
84 | void __iomem *muxaddr; | 97 | void __iomem *muxaddr; |
85 | unsigned int chans_per_mux, ch_off; | 98 | unsigned int chans_per_mux, ch_off; |
99 | u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs; | ||
86 | 100 | ||
87 | chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR; | 101 | chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr; |
88 | ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux; | 102 | ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux; |
89 | muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux]; | 103 | muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux]; |
90 | slot = EDMAMUX_CHCFG_SOURCE(slot); | 104 | slot = EDMAMUX_CHCFG_SOURCE(slot); |
91 | 105 | ||
92 | if (enable) | 106 | mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable); |
93 | iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off); | ||
94 | else | ||
95 | iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off); | ||
96 | } | 107 | } |
97 | EXPORT_SYMBOL_GPL(fsl_edma_chan_mux); | 108 | EXPORT_SYMBOL_GPL(fsl_edma_chan_mux); |
98 | 109 | ||
@@ -647,28 +658,28 @@ void fsl_edma_setup_regs(struct fsl_edma_engine *edma) | |||
647 | edma->regs.erql = edma->membase + EDMA_ERQ; | 658 | edma->regs.erql = edma->membase + EDMA_ERQ; |
648 | edma->regs.eeil = edma->membase + EDMA_EEI; | 659 | edma->regs.eeil = edma->membase + EDMA_EEI; |
649 | 660 | ||
650 | edma->regs.serq = edma->membase + ((edma->version == v1) ? | 661 | edma->regs.serq = edma->membase + ((edma->drvdata->version == v2) ? |
651 | EDMA_SERQ : EDMA64_SERQ); | 662 | EDMA64_SERQ : EDMA_SERQ); |
652 | edma->regs.cerq = edma->membase + ((edma->version == v1) ? | 663 | edma->regs.cerq = edma->membase + ((edma->drvdata->version == v2) ? |
653 | EDMA_CERQ : EDMA64_CERQ); | 664 | EDMA64_CERQ : EDMA_CERQ); |
654 | edma->regs.seei = edma->membase + ((edma->version == v1) ? | 665 | edma->regs.seei = edma->membase + ((edma->drvdata->version == v2) ? |
655 | EDMA_SEEI : EDMA64_SEEI); | 666 | EDMA64_SEEI : EDMA_SEEI); |
656 | edma->regs.ceei = edma->membase + ((edma->version == v1) ? | 667 | edma->regs.ceei = edma->membase + ((edma->drvdata->version == v2) ? |
657 | EDMA_CEEI : EDMA64_CEEI); | 668 | EDMA64_CEEI : EDMA_CEEI); |
658 | edma->regs.cint = edma->membase + ((edma->version == v1) ? | 669 | edma->regs.cint = edma->membase + ((edma->drvdata->version == v2) ? |
659 | EDMA_CINT : EDMA64_CINT); | 670 | EDMA64_CINT : EDMA_CINT); |
660 | edma->regs.cerr = edma->membase + ((edma->version == v1) ? | 671 | edma->regs.cerr = edma->membase + ((edma->drvdata->version == v2) ? |
661 | EDMA_CERR : EDMA64_CERR); | 672 | EDMA64_CERR : EDMA_CERR); |
662 | edma->regs.ssrt = edma->membase + ((edma->version == v1) ? | 673 | edma->regs.ssrt = edma->membase + ((edma->drvdata->version == v2) ? |
663 | EDMA_SSRT : EDMA64_SSRT); | 674 | EDMA64_SSRT : EDMA_SSRT); |
664 | edma->regs.cdne = edma->membase + ((edma->version == v1) ? | 675 | edma->regs.cdne = edma->membase + ((edma->drvdata->version == v2) ? |
665 | EDMA_CDNE : EDMA64_CDNE); | 676 | EDMA64_CDNE : EDMA_CDNE); |
666 | edma->regs.intl = edma->membase + ((edma->version == v1) ? | 677 | edma->regs.intl = edma->membase + ((edma->drvdata->version == v2) ? |
667 | EDMA_INTR : EDMA64_INTL); | 678 | EDMA64_INTL : EDMA_INTR); |
668 | edma->regs.errl = edma->membase + ((edma->version == v1) ? | 679 | edma->regs.errl = edma->membase + ((edma->drvdata->version == v2) ? |
669 | EDMA_ERR : EDMA64_ERRL); | 680 | EDMA64_ERRL : EDMA_ERR); |
670 | 681 | ||
671 | if (edma->version == v2) { | 682 | if (edma->drvdata->version == v2) { |
672 | edma->regs.erqh = edma->membase + EDMA64_ERQH; | 683 | edma->regs.erqh = edma->membase + EDMA64_ERQH; |
673 | edma->regs.eeih = edma->membase + EDMA64_EEIH; | 684 | edma->regs.eeih = edma->membase + EDMA64_EEIH; |
674 | edma->regs.errh = edma->membase + EDMA64_ERRH; | 685 | edma->regs.errh = edma->membase + EDMA64_ERRH; |
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h index c53f76eeb4d3..4e175560292c 100644 --- a/drivers/dma/fsl-edma-common.h +++ b/drivers/dma/fsl-edma-common.h | |||
@@ -7,6 +7,7 @@ | |||
7 | #define _FSL_EDMA_COMMON_H_ | 7 | #define _FSL_EDMA_COMMON_H_ |
8 | 8 | ||
9 | #include <linux/dma-direction.h> | 9 | #include <linux/dma-direction.h> |
10 | #include <linux/platform_device.h> | ||
10 | #include "virt-dma.h" | 11 | #include "virt-dma.h" |
11 | 12 | ||
12 | #define EDMA_CR_EDBG BIT(1) | 13 | #define EDMA_CR_EDBG BIT(1) |
@@ -140,17 +141,24 @@ enum edma_version { | |||
140 | v2, /* 64ch Coldfire */ | 141 | v2, /* 64ch Coldfire */ |
141 | }; | 142 | }; |
142 | 143 | ||
144 | struct fsl_edma_drvdata { | ||
145 | enum edma_version version; | ||
146 | u32 dmamuxs; | ||
147 | int (*setup_irq)(struct platform_device *pdev, | ||
148 | struct fsl_edma_engine *fsl_edma); | ||
149 | }; | ||
150 | |||
143 | struct fsl_edma_engine { | 151 | struct fsl_edma_engine { |
144 | struct dma_device dma_dev; | 152 | struct dma_device dma_dev; |
145 | void __iomem *membase; | 153 | void __iomem *membase; |
146 | void __iomem *muxbase[DMAMUX_NR]; | 154 | void __iomem *muxbase[DMAMUX_NR]; |
147 | struct clk *muxclk[DMAMUX_NR]; | 155 | struct clk *muxclk[DMAMUX_NR]; |
148 | struct mutex fsl_edma_mutex; | 156 | struct mutex fsl_edma_mutex; |
157 | const struct fsl_edma_drvdata *drvdata; | ||
149 | u32 n_chans; | 158 | u32 n_chans; |
150 | int txirq; | 159 | int txirq; |
151 | int errirq; | 160 | int errirq; |
152 | bool big_endian; | 161 | bool big_endian; |
153 | enum edma_version version; | ||
154 | struct edma_regs regs; | 162 | struct edma_regs regs; |
155 | struct fsl_edma_chan chans[]; | 163 | struct fsl_edma_chan chans[]; |
156 | }; | 164 | }; |
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index 0ddad3adb761..fcbad6ae954a 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c | |||
@@ -92,7 +92,8 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec, | |||
92 | struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data; | 92 | struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data; |
93 | struct dma_chan *chan, *_chan; | 93 | struct dma_chan *chan, *_chan; |
94 | struct fsl_edma_chan *fsl_chan; | 94 | struct fsl_edma_chan *fsl_chan; |
95 | unsigned long chans_per_mux = fsl_edma->n_chans / DMAMUX_NR; | 95 | u32 dmamux_nr = fsl_edma->drvdata->dmamuxs; |
96 | unsigned long chans_per_mux = fsl_edma->n_chans / dmamux_nr; | ||
96 | 97 | ||
97 | if (dma_spec->args_count != 2) | 98 | if (dma_spec->args_count != 2) |
98 | return NULL; | 99 | return NULL; |
@@ -180,16 +181,38 @@ static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks) | |||
180 | clk_disable_unprepare(fsl_edma->muxclk[i]); | 181 | clk_disable_unprepare(fsl_edma->muxclk[i]); |
181 | } | 182 | } |
182 | 183 | ||
184 | static struct fsl_edma_drvdata vf610_data = { | ||
185 | .version = v1, | ||
186 | .dmamuxs = DMAMUX_NR, | ||
187 | .setup_irq = fsl_edma_irq_init, | ||
188 | }; | ||
189 | |||
190 | static const struct of_device_id fsl_edma_dt_ids[] = { | ||
191 | { .compatible = "fsl,vf610-edma", .data = &vf610_data}, | ||
192 | { /* sentinel */ } | ||
193 | }; | ||
194 | MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids); | ||
195 | |||
183 | static int fsl_edma_probe(struct platform_device *pdev) | 196 | static int fsl_edma_probe(struct platform_device *pdev) |
184 | { | 197 | { |
198 | const struct of_device_id *of_id = | ||
199 | of_match_device(fsl_edma_dt_ids, &pdev->dev); | ||
185 | struct device_node *np = pdev->dev.of_node; | 200 | struct device_node *np = pdev->dev.of_node; |
186 | struct fsl_edma_engine *fsl_edma; | 201 | struct fsl_edma_engine *fsl_edma; |
202 | const struct fsl_edma_drvdata *drvdata = NULL; | ||
187 | struct fsl_edma_chan *fsl_chan; | 203 | struct fsl_edma_chan *fsl_chan; |
188 | struct edma_regs *regs; | 204 | struct edma_regs *regs; |
189 | struct resource *res; | 205 | struct resource *res; |
190 | int len, chans; | 206 | int len, chans; |
191 | int ret, i; | 207 | int ret, i; |
192 | 208 | ||
209 | if (of_id) | ||
210 | drvdata = of_id->data; | ||
211 | if (!drvdata) { | ||
212 | dev_err(&pdev->dev, "unable to find driver data\n"); | ||
213 | return -EINVAL; | ||
214 | } | ||
215 | |||
193 | ret = of_property_read_u32(np, "dma-channels", &chans); | 216 | ret = of_property_read_u32(np, "dma-channels", &chans); |
194 | if (ret) { | 217 | if (ret) { |
195 | dev_err(&pdev->dev, "Can't get dma-channels.\n"); | 218 | dev_err(&pdev->dev, "Can't get dma-channels.\n"); |
@@ -201,7 +224,7 @@ static int fsl_edma_probe(struct platform_device *pdev) | |||
201 | if (!fsl_edma) | 224 | if (!fsl_edma) |
202 | return -ENOMEM; | 225 | return -ENOMEM; |
203 | 226 | ||
204 | fsl_edma->version = v1; | 227 | fsl_edma->drvdata = drvdata; |
205 | fsl_edma->n_chans = chans; | 228 | fsl_edma->n_chans = chans; |
206 | mutex_init(&fsl_edma->fsl_edma_mutex); | 229 | mutex_init(&fsl_edma->fsl_edma_mutex); |
207 | 230 | ||
@@ -213,7 +236,7 @@ static int fsl_edma_probe(struct platform_device *pdev) | |||
213 | fsl_edma_setup_regs(fsl_edma); | 236 | fsl_edma_setup_regs(fsl_edma); |
214 | regs = &fsl_edma->regs; | 237 | regs = &fsl_edma->regs; |
215 | 238 | ||
216 | for (i = 0; i < DMAMUX_NR; i++) { | 239 | for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) { |
217 | char clkname[32]; | 240 | char clkname[32]; |
218 | 241 | ||
219 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i); | 242 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i); |
@@ -259,7 +282,7 @@ static int fsl_edma_probe(struct platform_device *pdev) | |||
259 | } | 282 | } |
260 | 283 | ||
261 | edma_writel(fsl_edma, ~0, regs->intl); | 284 | edma_writel(fsl_edma, ~0, regs->intl); |
262 | ret = fsl_edma_irq_init(pdev, fsl_edma); | 285 | ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma); |
263 | if (ret) | 286 | if (ret) |
264 | return ret; | 287 | return ret; |
265 | 288 | ||
@@ -291,7 +314,7 @@ static int fsl_edma_probe(struct platform_device *pdev) | |||
291 | if (ret) { | 314 | if (ret) { |
292 | dev_err(&pdev->dev, | 315 | dev_err(&pdev->dev, |
293 | "Can't register Freescale eDMA engine. (%d)\n", ret); | 316 | "Can't register Freescale eDMA engine. (%d)\n", ret); |
294 | fsl_disable_clocks(fsl_edma, DMAMUX_NR); | 317 | fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs); |
295 | return ret; | 318 | return ret; |
296 | } | 319 | } |
297 | 320 | ||
@@ -300,7 +323,7 @@ static int fsl_edma_probe(struct platform_device *pdev) | |||
300 | dev_err(&pdev->dev, | 323 | dev_err(&pdev->dev, |
301 | "Can't register Freescale eDMA of_dma. (%d)\n", ret); | 324 | "Can't register Freescale eDMA of_dma. (%d)\n", ret); |
302 | dma_async_device_unregister(&fsl_edma->dma_dev); | 325 | dma_async_device_unregister(&fsl_edma->dma_dev); |
303 | fsl_disable_clocks(fsl_edma, DMAMUX_NR); | 326 | fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs); |
304 | return ret; | 327 | return ret; |
305 | } | 328 | } |
306 | 329 | ||
@@ -319,7 +342,7 @@ static int fsl_edma_remove(struct platform_device *pdev) | |||
319 | fsl_edma_cleanup_vchan(&fsl_edma->dma_dev); | 342 | fsl_edma_cleanup_vchan(&fsl_edma->dma_dev); |
320 | of_dma_controller_free(np); | 343 | of_dma_controller_free(np); |
321 | dma_async_device_unregister(&fsl_edma->dma_dev); | 344 | dma_async_device_unregister(&fsl_edma->dma_dev); |
322 | fsl_disable_clocks(fsl_edma, DMAMUX_NR); | 345 | fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs); |
323 | 346 | ||
324 | return 0; | 347 | return 0; |
325 | } | 348 | } |
@@ -378,12 +401,6 @@ static const struct dev_pm_ops fsl_edma_pm_ops = { | |||
378 | .resume_early = fsl_edma_resume_early, | 401 | .resume_early = fsl_edma_resume_early, |
379 | }; | 402 | }; |
380 | 403 | ||
381 | static const struct of_device_id fsl_edma_dt_ids[] = { | ||
382 | { .compatible = "fsl,vf610-edma", }, | ||
383 | { /* sentinel */ } | ||
384 | }; | ||
385 | MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids); | ||
386 | |||
387 | static struct platform_driver fsl_edma_driver = { | 404 | static struct platform_driver fsl_edma_driver = { |
388 | .driver = { | 405 | .driver = { |
389 | .name = "fsl-edma", | 406 | .name = "fsl-edma", |
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c index 60b062c3647b..8e341c0c13bc 100644 --- a/drivers/dma/fsl-qdma.c +++ b/drivers/dma/fsl-qdma.c | |||
@@ -113,6 +113,7 @@ | |||
113 | /* Field definition for Descriptor offset */ | 113 | /* Field definition for Descriptor offset */ |
114 | #define QDMA_CCDF_STATUS 20 | 114 | #define QDMA_CCDF_STATUS 20 |
115 | #define QDMA_CCDF_OFFSET 20 | 115 | #define QDMA_CCDF_OFFSET 20 |
116 | #define QDMA_SDDF_CMD(x) (((u64)(x)) << 32) | ||
116 | 117 | ||
117 | /* Field definition for safe loop count*/ | 118 | /* Field definition for safe loop count*/ |
118 | #define FSL_QDMA_HALT_COUNT 1500 | 119 | #define FSL_QDMA_HALT_COUNT 1500 |
@@ -341,6 +342,7 @@ static void fsl_qdma_free_chan_resources(struct dma_chan *chan) | |||
341 | static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp, | 342 | static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp, |
342 | dma_addr_t dst, dma_addr_t src, u32 len) | 343 | dma_addr_t dst, dma_addr_t src, u32 len) |
343 | { | 344 | { |
345 | u32 cmd; | ||
344 | struct fsl_qdma_format *sdf, *ddf; | 346 | struct fsl_qdma_format *sdf, *ddf; |
345 | struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest; | 347 | struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest; |
346 | 348 | ||
@@ -369,14 +371,14 @@ static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp, | |||
369 | /* This entry is the last entry. */ | 371 | /* This entry is the last entry. */ |
370 | qdma_csgf_set_f(csgf_dest, len); | 372 | qdma_csgf_set_f(csgf_dest, len); |
371 | /* Descriptor Buffer */ | 373 | /* Descriptor Buffer */ |
372 | sdf->data = | 374 | cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE << |
373 | cpu_to_le64(FSL_QDMA_CMD_RWTTYPE << | 375 | FSL_QDMA_CMD_RWTTYPE_OFFSET); |
374 | FSL_QDMA_CMD_RWTTYPE_OFFSET); | 376 | sdf->data = QDMA_SDDF_CMD(cmd); |
375 | ddf->data = | 377 | |
376 | cpu_to_le64(FSL_QDMA_CMD_RWTTYPE << | 378 | cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE << |
377 | FSL_QDMA_CMD_RWTTYPE_OFFSET); | 379 | FSL_QDMA_CMD_RWTTYPE_OFFSET); |
378 | ddf->data |= | 380 | cmd |= cpu_to_le32(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET); |
379 | cpu_to_le64(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET); | 381 | ddf->data = QDMA_SDDF_CMD(cmd); |
380 | } | 382 | } |
381 | 383 | ||
382 | /* | 384 | /* |
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c index 0c2610066ba9..025d8ad5a63c 100644 --- a/drivers/dma/hsu/hsu.c +++ b/drivers/dma/hsu/hsu.c | |||
@@ -61,10 +61,10 @@ static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc) | |||
61 | 61 | ||
62 | if (hsuc->direction == DMA_MEM_TO_DEV) { | 62 | if (hsuc->direction == DMA_MEM_TO_DEV) { |
63 | bsr = config->dst_maxburst; | 63 | bsr = config->dst_maxburst; |
64 | mtsr = config->src_addr_width; | 64 | mtsr = config->dst_addr_width; |
65 | } else if (hsuc->direction == DMA_DEV_TO_MEM) { | 65 | } else if (hsuc->direction == DMA_DEV_TO_MEM) { |
66 | bsr = config->src_maxburst; | 66 | bsr = config->src_maxburst; |
67 | mtsr = config->dst_addr_width; | 67 | mtsr = config->src_addr_width; |
68 | } | 68 | } |
69 | 69 | ||
70 | hsu_chan_disable(hsuc); | 70 | hsu_chan_disable(hsuc); |
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 4ec84a633bd3..a01f4b5d793c 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
@@ -1934,16 +1934,11 @@ disable_clk_ipg: | |||
1934 | static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param) | 1934 | static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param) |
1935 | { | 1935 | { |
1936 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 1936 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
1937 | struct sdma_engine *sdma = sdmac->sdma; | ||
1938 | struct imx_dma_data *data = fn_param; | 1937 | struct imx_dma_data *data = fn_param; |
1939 | 1938 | ||
1940 | if (!imx_dma_is_general_purpose(chan)) | 1939 | if (!imx_dma_is_general_purpose(chan)) |
1941 | return false; | 1940 | return false; |
1942 | 1941 | ||
1943 | /* return false if it's not the right device */ | ||
1944 | if (sdma->dev->of_node != data->of_node) | ||
1945 | return false; | ||
1946 | |||
1947 | sdmac->data = *data; | 1942 | sdmac->data = *data; |
1948 | chan->private = &sdmac->data; | 1943 | chan->private = &sdmac->data; |
1949 | 1944 | ||
@@ -1971,9 +1966,9 @@ static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec, | |||
1971 | * be set to sdmac->event_id1. | 1966 | * be set to sdmac->event_id1. |
1972 | */ | 1967 | */ |
1973 | data.dma_request2 = 0; | 1968 | data.dma_request2 = 0; |
1974 | data.of_node = ofdma->of_node; | ||
1975 | 1969 | ||
1976 | return dma_request_channel(mask, sdma_filter_fn, &data); | 1970 | return __dma_request_channel(&mask, sdma_filter_fn, &data, |
1971 | ofdma->of_node); | ||
1977 | } | 1972 | } |
1978 | 1973 | ||
1979 | static int sdma_probe(struct platform_device *pdev) | 1974 | static int sdma_probe(struct platform_device *pdev) |
diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c index 7de54b2fafdb..e15bd15a9ef6 100644 --- a/drivers/dma/mcf-edma.c +++ b/drivers/dma/mcf-edma.c | |||
@@ -164,6 +164,11 @@ static void mcf_edma_irq_free(struct platform_device *pdev, | |||
164 | free_irq(irq, mcf_edma); | 164 | free_irq(irq, mcf_edma); |
165 | } | 165 | } |
166 | 166 | ||
167 | static struct fsl_edma_drvdata mcf_data = { | ||
168 | .version = v2, | ||
169 | .setup_irq = mcf_edma_irq_init, | ||
170 | }; | ||
171 | |||
167 | static int mcf_edma_probe(struct platform_device *pdev) | 172 | static int mcf_edma_probe(struct platform_device *pdev) |
168 | { | 173 | { |
169 | struct mcf_edma_platform_data *pdata; | 174 | struct mcf_edma_platform_data *pdata; |
@@ -187,8 +192,8 @@ static int mcf_edma_probe(struct platform_device *pdev) | |||
187 | 192 | ||
188 | mcf_edma->n_chans = chans; | 193 | mcf_edma->n_chans = chans; |
189 | 194 | ||
190 | /* Set up version for ColdFire edma */ | 195 | /* Set up drvdata for ColdFire edma */ |
191 | mcf_edma->version = v2; | 196 | mcf_edma->drvdata = &mcf_data; |
192 | mcf_edma->big_endian = 1; | 197 | mcf_edma->big_endian = 1; |
193 | 198 | ||
194 | if (!mcf_edma->n_chans) { | 199 | if (!mcf_edma->n_chans) { |
@@ -223,7 +228,7 @@ static int mcf_edma_probe(struct platform_device *pdev) | |||
223 | iowrite32(~0, regs->inth); | 228 | iowrite32(~0, regs->inth); |
224 | iowrite32(~0, regs->intl); | 229 | iowrite32(~0, regs->intl); |
225 | 230 | ||
226 | ret = mcf_edma_irq_init(pdev, mcf_edma); | 231 | ret = mcf_edma->drvdata->setup_irq(pdev, mcf_edma); |
227 | if (ret) | 232 | if (ret) |
228 | return ret; | 233 | return ret; |
229 | 234 | ||
diff --git a/drivers/dma/mediatek/Kconfig b/drivers/dma/mediatek/Kconfig index 7411eb3d419e..1ad63ddc292d 100644 --- a/drivers/dma/mediatek/Kconfig +++ b/drivers/dma/mediatek/Kconfig | |||
@@ -25,3 +25,14 @@ config MTK_CQDMA | |||
25 | 25 | ||
26 | This controller provides the channels which is dedicated to | 26 | This controller provides the channels which is dedicated to |
27 | memory-to-memory transfer to offload from CPU. | 27 | memory-to-memory transfer to offload from CPU. |
28 | |||
29 | config MTK_UART_APDMA | ||
30 | tristate "MediaTek SoCs APDMA support for UART" | ||
31 | depends on OF && SERIAL_8250_MT6577 | ||
32 | select DMA_ENGINE | ||
33 | select DMA_VIRTUAL_CHANNELS | ||
34 | help | ||
35 | Support for the UART DMA engine found on MediaTek MTK SoCs. | ||
36 | When SERIAL_8250_MT6577 is enabled, and if you want to use DMA, | ||
37 | you can enable the config. The DMA engine can only be used | ||
38 | with MediaTek SoCs. | ||
diff --git a/drivers/dma/mediatek/Makefile b/drivers/dma/mediatek/Makefile index 13b144594510..5ba39a5edc13 100644 --- a/drivers/dma/mediatek/Makefile +++ b/drivers/dma/mediatek/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0-only | 1 | # SPDX-License-Identifier: GPL-2.0-only |
2 | obj-$(CONFIG_MTK_UART_APDMA) += mtk-uart-apdma.o | ||
2 | obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o | 3 | obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o |
3 | obj-$(CONFIG_MTK_CQDMA) += mtk-cqdma.o | 4 | obj-$(CONFIG_MTK_CQDMA) += mtk-cqdma.o |
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c new file mode 100644 index 000000000000..546995c20876 --- /dev/null +++ b/drivers/dma/mediatek/mtk-uart-apdma.c | |||
@@ -0,0 +1,666 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * MediaTek UART APDMA driver. | ||
4 | * | ||
5 | * Copyright (c) 2019 MediaTek Inc. | ||
6 | * Author: Long Cheng <long.cheng@mediatek.com> | ||
7 | */ | ||
8 | |||
9 | #include <linux/clk.h> | ||
10 | #include <linux/dmaengine.h> | ||
11 | #include <linux/dma-mapping.h> | ||
12 | #include <linux/err.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/iopoll.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/list.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/of_device.h> | ||
20 | #include <linux/of_dma.h> | ||
21 | #include <linux/platform_device.h> | ||
22 | #include <linux/pm_runtime.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/spinlock.h> | ||
25 | |||
26 | #include "../virt-dma.h" | ||
27 | |||
28 | /* The default number of virtual channel */ | ||
29 | #define MTK_UART_APDMA_NR_VCHANS 8 | ||
30 | |||
31 | #define VFF_EN_B BIT(0) | ||
32 | #define VFF_STOP_B BIT(0) | ||
33 | #define VFF_FLUSH_B BIT(0) | ||
34 | #define VFF_4G_EN_B BIT(0) | ||
35 | /* rx valid size >= vff thre */ | ||
36 | #define VFF_RX_INT_EN_B (BIT(0) | BIT(1)) | ||
37 | /* tx left size >= vff thre */ | ||
38 | #define VFF_TX_INT_EN_B BIT(0) | ||
39 | #define VFF_WARM_RST_B BIT(0) | ||
40 | #define VFF_RX_INT_CLR_B (BIT(0) | BIT(1)) | ||
41 | #define VFF_TX_INT_CLR_B 0 | ||
42 | #define VFF_STOP_CLR_B 0 | ||
43 | #define VFF_EN_CLR_B 0 | ||
44 | #define VFF_INT_EN_CLR_B 0 | ||
45 | #define VFF_4G_SUPPORT_CLR_B 0 | ||
46 | |||
47 | /* | ||
48 | * interrupt trigger level for tx | ||
49 | * if threshold is n, no polling is required to start tx. | ||
50 | * otherwise need polling VFF_FLUSH. | ||
51 | */ | ||
52 | #define VFF_TX_THRE(n) (n) | ||
53 | /* interrupt trigger level for rx */ | ||
54 | #define VFF_RX_THRE(n) ((n) * 3 / 4) | ||
55 | |||
56 | #define VFF_RING_SIZE 0xffff | ||
57 | /* invert this bit when wrap ring head again */ | ||
58 | #define VFF_RING_WRAP 0x10000 | ||
59 | |||
60 | #define VFF_INT_FLAG 0x00 | ||
61 | #define VFF_INT_EN 0x04 | ||
62 | #define VFF_EN 0x08 | ||
63 | #define VFF_RST 0x0c | ||
64 | #define VFF_STOP 0x10 | ||
65 | #define VFF_FLUSH 0x14 | ||
66 | #define VFF_ADDR 0x1c | ||
67 | #define VFF_LEN 0x24 | ||
68 | #define VFF_THRE 0x28 | ||
69 | #define VFF_WPT 0x2c | ||
70 | #define VFF_RPT 0x30 | ||
71 | /* TX: the buffer size HW can read. RX: the buffer size SW can read. */ | ||
72 | #define VFF_VALID_SIZE 0x3c | ||
73 | /* TX: the buffer size SW can write. RX: the buffer size HW can write. */ | ||
74 | #define VFF_LEFT_SIZE 0x40 | ||
75 | #define VFF_DEBUG_STATUS 0x50 | ||
76 | #define VFF_4G_SUPPORT 0x54 | ||
77 | |||
78 | struct mtk_uart_apdmadev { | ||
79 | struct dma_device ddev; | ||
80 | struct clk *clk; | ||
81 | bool support_33bits; | ||
82 | unsigned int dma_requests; | ||
83 | }; | ||
84 | |||
85 | struct mtk_uart_apdma_desc { | ||
86 | struct virt_dma_desc vd; | ||
87 | |||
88 | dma_addr_t addr; | ||
89 | unsigned int avail_len; | ||
90 | }; | ||
91 | |||
92 | struct mtk_chan { | ||
93 | struct virt_dma_chan vc; | ||
94 | struct dma_slave_config cfg; | ||
95 | struct mtk_uart_apdma_desc *desc; | ||
96 | enum dma_transfer_direction dir; | ||
97 | |||
98 | void __iomem *base; | ||
99 | unsigned int irq; | ||
100 | |||
101 | unsigned int rx_status; | ||
102 | }; | ||
103 | |||
104 | static inline struct mtk_uart_apdmadev * | ||
105 | to_mtk_uart_apdma_dev(struct dma_device *d) | ||
106 | { | ||
107 | return container_of(d, struct mtk_uart_apdmadev, ddev); | ||
108 | } | ||
109 | |||
110 | static inline struct mtk_chan *to_mtk_uart_apdma_chan(struct dma_chan *c) | ||
111 | { | ||
112 | return container_of(c, struct mtk_chan, vc.chan); | ||
113 | } | ||
114 | |||
115 | static inline struct mtk_uart_apdma_desc *to_mtk_uart_apdma_desc | ||
116 | (struct dma_async_tx_descriptor *t) | ||
117 | { | ||
118 | return container_of(t, struct mtk_uart_apdma_desc, vd.tx); | ||
119 | } | ||
120 | |||
121 | static void mtk_uart_apdma_write(struct mtk_chan *c, | ||
122 | unsigned int reg, unsigned int val) | ||
123 | { | ||
124 | writel(val, c->base + reg); | ||
125 | } | ||
126 | |||
127 | static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg) | ||
128 | { | ||
129 | return readl(c->base + reg); | ||
130 | } | ||
131 | |||
132 | static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd) | ||
133 | { | ||
134 | struct dma_chan *chan = vd->tx.chan; | ||
135 | struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); | ||
136 | |||
137 | kfree(c->desc); | ||
138 | } | ||
139 | |||
140 | static void mtk_uart_apdma_start_tx(struct mtk_chan *c) | ||
141 | { | ||
142 | struct mtk_uart_apdmadev *mtkd = | ||
143 | to_mtk_uart_apdma_dev(c->vc.chan.device); | ||
144 | struct mtk_uart_apdma_desc *d = c->desc; | ||
145 | unsigned int wpt, vff_sz; | ||
146 | |||
147 | vff_sz = c->cfg.dst_port_window_size; | ||
148 | if (!mtk_uart_apdma_read(c, VFF_LEN)) { | ||
149 | mtk_uart_apdma_write(c, VFF_ADDR, d->addr); | ||
150 | mtk_uart_apdma_write(c, VFF_LEN, vff_sz); | ||
151 | mtk_uart_apdma_write(c, VFF_THRE, VFF_TX_THRE(vff_sz)); | ||
152 | mtk_uart_apdma_write(c, VFF_WPT, 0); | ||
153 | mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B); | ||
154 | |||
155 | if (mtkd->support_33bits) | ||
156 | mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B); | ||
157 | } | ||
158 | |||
159 | mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B); | ||
160 | if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B) | ||
161 | dev_err(c->vc.chan.device->dev, "Enable TX fail\n"); | ||
162 | |||
163 | if (!mtk_uart_apdma_read(c, VFF_LEFT_SIZE)) { | ||
164 | mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B); | ||
165 | return; | ||
166 | } | ||
167 | |||
168 | wpt = mtk_uart_apdma_read(c, VFF_WPT); | ||
169 | |||
170 | wpt += c->desc->avail_len; | ||
171 | if ((wpt & VFF_RING_SIZE) == vff_sz) | ||
172 | wpt = (wpt & VFF_RING_WRAP) ^ VFF_RING_WRAP; | ||
173 | |||
174 | /* Let DMA start moving data */ | ||
175 | mtk_uart_apdma_write(c, VFF_WPT, wpt); | ||
176 | |||
177 | /* HW auto set to 0 when left size >= threshold */ | ||
178 | mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B); | ||
179 | if (!mtk_uart_apdma_read(c, VFF_FLUSH)) | ||
180 | mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B); | ||
181 | } | ||
182 | |||
183 | static void mtk_uart_apdma_start_rx(struct mtk_chan *c) | ||
184 | { | ||
185 | struct mtk_uart_apdmadev *mtkd = | ||
186 | to_mtk_uart_apdma_dev(c->vc.chan.device); | ||
187 | struct mtk_uart_apdma_desc *d = c->desc; | ||
188 | unsigned int vff_sz; | ||
189 | |||
190 | vff_sz = c->cfg.src_port_window_size; | ||
191 | if (!mtk_uart_apdma_read(c, VFF_LEN)) { | ||
192 | mtk_uart_apdma_write(c, VFF_ADDR, d->addr); | ||
193 | mtk_uart_apdma_write(c, VFF_LEN, vff_sz); | ||
194 | mtk_uart_apdma_write(c, VFF_THRE, VFF_RX_THRE(vff_sz)); | ||
195 | mtk_uart_apdma_write(c, VFF_RPT, 0); | ||
196 | mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B); | ||
197 | |||
198 | if (mtkd->support_33bits) | ||
199 | mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B); | ||
200 | } | ||
201 | |||
202 | mtk_uart_apdma_write(c, VFF_INT_EN, VFF_RX_INT_EN_B); | ||
203 | mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B); | ||
204 | if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B) | ||
205 | dev_err(c->vc.chan.device->dev, "Enable RX fail\n"); | ||
206 | } | ||
207 | |||
208 | static void mtk_uart_apdma_tx_handler(struct mtk_chan *c) | ||
209 | { | ||
210 | struct mtk_uart_apdma_desc *d = c->desc; | ||
211 | |||
212 | mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B); | ||
213 | mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B); | ||
214 | mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B); | ||
215 | |||
216 | list_del(&d->vd.node); | ||
217 | vchan_cookie_complete(&d->vd); | ||
218 | } | ||
219 | |||
220 | static void mtk_uart_apdma_rx_handler(struct mtk_chan *c) | ||
221 | { | ||
222 | struct mtk_uart_apdma_desc *d = c->desc; | ||
223 | unsigned int len, wg, rg; | ||
224 | int cnt; | ||
225 | |||
226 | mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B); | ||
227 | |||
228 | if (!mtk_uart_apdma_read(c, VFF_VALID_SIZE)) | ||
229 | return; | ||
230 | |||
231 | mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B); | ||
232 | mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B); | ||
233 | |||
234 | len = c->cfg.src_port_window_size; | ||
235 | rg = mtk_uart_apdma_read(c, VFF_RPT); | ||
236 | wg = mtk_uart_apdma_read(c, VFF_WPT); | ||
237 | cnt = (wg & VFF_RING_SIZE) - (rg & VFF_RING_SIZE); | ||
238 | |||
239 | /* | ||
240 | * The buffer is ring buffer. If wrap bit different, | ||
241 | * represents the start of the next cycle for WPT | ||
242 | */ | ||
243 | if ((rg ^ wg) & VFF_RING_WRAP) | ||
244 | cnt += len; | ||
245 | |||
246 | c->rx_status = d->avail_len - cnt; | ||
247 | mtk_uart_apdma_write(c, VFF_RPT, wg); | ||
248 | |||
249 | list_del(&d->vd.node); | ||
250 | vchan_cookie_complete(&d->vd); | ||
251 | } | ||
252 | |||
253 | static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id) | ||
254 | { | ||
255 | struct dma_chan *chan = (struct dma_chan *)dev_id; | ||
256 | struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); | ||
257 | unsigned long flags; | ||
258 | |||
259 | spin_lock_irqsave(&c->vc.lock, flags); | ||
260 | if (c->dir == DMA_DEV_TO_MEM) | ||
261 | mtk_uart_apdma_rx_handler(c); | ||
262 | else if (c->dir == DMA_MEM_TO_DEV) | ||
263 | mtk_uart_apdma_tx_handler(c); | ||
264 | spin_unlock_irqrestore(&c->vc.lock, flags); | ||
265 | |||
266 | return IRQ_HANDLED; | ||
267 | } | ||
268 | |||
269 | static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan) | ||
270 | { | ||
271 | struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device); | ||
272 | struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); | ||
273 | unsigned int status; | ||
274 | int ret; | ||
275 | |||
276 | ret = pm_runtime_get_sync(mtkd->ddev.dev); | ||
277 | if (ret < 0) { | ||
278 | pm_runtime_put_noidle(chan->device->dev); | ||
279 | return ret; | ||
280 | } | ||
281 | |||
282 | mtk_uart_apdma_write(c, VFF_ADDR, 0); | ||
283 | mtk_uart_apdma_write(c, VFF_THRE, 0); | ||
284 | mtk_uart_apdma_write(c, VFF_LEN, 0); | ||
285 | mtk_uart_apdma_write(c, VFF_RST, VFF_WARM_RST_B); | ||
286 | |||
287 | ret = readx_poll_timeout(readl, c->base + VFF_EN, | ||
288 | status, !status, 10, 100); | ||
289 | if (ret) | ||
290 | return ret; | ||
291 | |||
292 | ret = request_irq(c->irq, mtk_uart_apdma_irq_handler, | ||
293 | IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan); | ||
294 | if (ret < 0) { | ||
295 | dev_err(chan->device->dev, "Can't request dma IRQ\n"); | ||
296 | return -EINVAL; | ||
297 | } | ||
298 | |||
299 | if (mtkd->support_33bits) | ||
300 | mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B); | ||
301 | |||
302 | return ret; | ||
303 | } | ||
304 | |||
305 | static void mtk_uart_apdma_free_chan_resources(struct dma_chan *chan) | ||
306 | { | ||
307 | struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device); | ||
308 | struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); | ||
309 | |||
310 | free_irq(c->irq, chan); | ||
311 | |||
312 | tasklet_kill(&c->vc.task); | ||
313 | |||
314 | vchan_free_chan_resources(&c->vc); | ||
315 | |||
316 | pm_runtime_put_sync(mtkd->ddev.dev); | ||
317 | } | ||
318 | |||
319 | static enum dma_status mtk_uart_apdma_tx_status(struct dma_chan *chan, | ||
320 | dma_cookie_t cookie, | ||
321 | struct dma_tx_state *txstate) | ||
322 | { | ||
323 | struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); | ||
324 | enum dma_status ret; | ||
325 | |||
326 | ret = dma_cookie_status(chan, cookie, txstate); | ||
327 | if (!txstate) | ||
328 | return ret; | ||
329 | |||
330 | dma_set_residue(txstate, c->rx_status); | ||
331 | |||
332 | return ret; | ||
333 | } | ||
334 | |||
335 | /* | ||
336 | * dmaengine_prep_slave_single will call the function. and sglen is 1. | ||
337 | * 8250 uart using one ring buffer, and deal with one sg. | ||
338 | */ | ||
339 | static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg | ||
340 | (struct dma_chan *chan, struct scatterlist *sgl, | ||
341 | unsigned int sglen, enum dma_transfer_direction dir, | ||
342 | unsigned long tx_flags, void *context) | ||
343 | { | ||
344 | struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); | ||
345 | struct mtk_uart_apdma_desc *d; | ||
346 | |||
347 | if (!is_slave_direction(dir) || sglen != 1) | ||
348 | return NULL; | ||
349 | |||
350 | /* Now allocate and setup the descriptor */ | ||
351 | d = kzalloc(sizeof(*d), GFP_ATOMIC); | ||
352 | if (!d) | ||
353 | return NULL; | ||
354 | |||
355 | d->avail_len = sg_dma_len(sgl); | ||
356 | d->addr = sg_dma_address(sgl); | ||
357 | c->dir = dir; | ||
358 | |||
359 | return vchan_tx_prep(&c->vc, &d->vd, tx_flags); | ||
360 | } | ||
361 | |||
362 | static void mtk_uart_apdma_issue_pending(struct dma_chan *chan) | ||
363 | { | ||
364 | struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); | ||
365 | struct virt_dma_desc *vd; | ||
366 | unsigned long flags; | ||
367 | |||
368 | spin_lock_irqsave(&c->vc.lock, flags); | ||
369 | if (vchan_issue_pending(&c->vc)) { | ||
370 | vd = vchan_next_desc(&c->vc); | ||
371 | c->desc = to_mtk_uart_apdma_desc(&vd->tx); | ||
372 | |||
373 | if (c->dir == DMA_DEV_TO_MEM) | ||
374 | mtk_uart_apdma_start_rx(c); | ||
375 | else if (c->dir == DMA_MEM_TO_DEV) | ||
376 | mtk_uart_apdma_start_tx(c); | ||
377 | } | ||
378 | |||
379 | spin_unlock_irqrestore(&c->vc.lock, flags); | ||
380 | } | ||
381 | |||
382 | static int mtk_uart_apdma_slave_config(struct dma_chan *chan, | ||
383 | struct dma_slave_config *config) | ||
384 | { | ||
385 | struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); | ||
386 | |||
387 | memcpy(&c->cfg, config, sizeof(*config)); | ||
388 | |||
389 | return 0; | ||
390 | } | ||
391 | |||
392 | static int mtk_uart_apdma_terminate_all(struct dma_chan *chan) | ||
393 | { | ||
394 | struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); | ||
395 | unsigned long flags; | ||
396 | unsigned int status; | ||
397 | LIST_HEAD(head); | ||
398 | int ret; | ||
399 | |||
400 | mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B); | ||
401 | |||
402 | ret = readx_poll_timeout(readl, c->base + VFF_FLUSH, | ||
403 | status, status != VFF_FLUSH_B, 10, 100); | ||
404 | if (ret) | ||
405 | dev_err(c->vc.chan.device->dev, "flush: fail, status=0x%x\n", | ||
406 | mtk_uart_apdma_read(c, VFF_DEBUG_STATUS)); | ||
407 | |||
408 | /* | ||
409 | * Stop need 3 steps. | ||
410 | * 1. set stop to 1 | ||
411 | * 2. wait en to 0 | ||
412 | * 3. set stop as 0 | ||
413 | */ | ||
414 | mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_B); | ||
415 | ret = readx_poll_timeout(readl, c->base + VFF_EN, | ||
416 | status, !status, 10, 100); | ||
417 | if (ret) | ||
418 | dev_err(c->vc.chan.device->dev, "stop: fail, status=0x%x\n", | ||
419 | mtk_uart_apdma_read(c, VFF_DEBUG_STATUS)); | ||
420 | |||
421 | mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_CLR_B); | ||
422 | mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B); | ||
423 | |||
424 | if (c->dir == DMA_DEV_TO_MEM) | ||
425 | mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B); | ||
426 | else if (c->dir == DMA_MEM_TO_DEV) | ||
427 | mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B); | ||
428 | |||
429 | synchronize_irq(c->irq); | ||
430 | |||
431 | spin_lock_irqsave(&c->vc.lock, flags); | ||
432 | vchan_get_all_descriptors(&c->vc, &head); | ||
433 | vchan_dma_desc_free_list(&c->vc, &head); | ||
434 | spin_unlock_irqrestore(&c->vc.lock, flags); | ||
435 | |||
436 | return 0; | ||
437 | } | ||
438 | |||
439 | static int mtk_uart_apdma_device_pause(struct dma_chan *chan) | ||
440 | { | ||
441 | struct mtk_chan *c = to_mtk_uart_apdma_chan(chan); | ||
442 | unsigned long flags; | ||
443 | |||
444 | spin_lock_irqsave(&c->vc.lock, flags); | ||
445 | |||
446 | mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B); | ||
447 | mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B); | ||
448 | |||
449 | synchronize_irq(c->irq); | ||
450 | |||
451 | spin_unlock_irqrestore(&c->vc.lock, flags); | ||
452 | |||
453 | return 0; | ||
454 | } | ||
455 | |||
456 | static void mtk_uart_apdma_free(struct mtk_uart_apdmadev *mtkd) | ||
457 | { | ||
458 | while (!list_empty(&mtkd->ddev.channels)) { | ||
459 | struct mtk_chan *c = list_first_entry(&mtkd->ddev.channels, | ||
460 | struct mtk_chan, vc.chan.device_node); | ||
461 | |||
462 | list_del(&c->vc.chan.device_node); | ||
463 | tasklet_kill(&c->vc.task); | ||
464 | } | ||
465 | } | ||
466 | |||
467 | static const struct of_device_id mtk_uart_apdma_match[] = { | ||
468 | { .compatible = "mediatek,mt6577-uart-dma", }, | ||
469 | { /* sentinel */ }, | ||
470 | }; | ||
471 | MODULE_DEVICE_TABLE(of, mtk_uart_apdma_match); | ||
472 | |||
473 | static int mtk_uart_apdma_probe(struct platform_device *pdev) | ||
474 | { | ||
475 | struct device_node *np = pdev->dev.of_node; | ||
476 | struct mtk_uart_apdmadev *mtkd; | ||
477 | int bit_mask = 32, rc; | ||
478 | struct resource *res; | ||
479 | struct mtk_chan *c; | ||
480 | unsigned int i; | ||
481 | |||
482 | mtkd = devm_kzalloc(&pdev->dev, sizeof(*mtkd), GFP_KERNEL); | ||
483 | if (!mtkd) | ||
484 | return -ENOMEM; | ||
485 | |||
486 | mtkd->clk = devm_clk_get(&pdev->dev, NULL); | ||
487 | if (IS_ERR(mtkd->clk)) { | ||
488 | dev_err(&pdev->dev, "No clock specified\n"); | ||
489 | rc = PTR_ERR(mtkd->clk); | ||
490 | return rc; | ||
491 | } | ||
492 | |||
493 | if (of_property_read_bool(np, "mediatek,dma-33bits")) | ||
494 | mtkd->support_33bits = true; | ||
495 | |||
496 | if (mtkd->support_33bits) | ||
497 | bit_mask = 33; | ||
498 | |||
499 | rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(bit_mask)); | ||
500 | if (rc) | ||
501 | return rc; | ||
502 | |||
503 | dma_cap_set(DMA_SLAVE, mtkd->ddev.cap_mask); | ||
504 | mtkd->ddev.device_alloc_chan_resources = | ||
505 | mtk_uart_apdma_alloc_chan_resources; | ||
506 | mtkd->ddev.device_free_chan_resources = | ||
507 | mtk_uart_apdma_free_chan_resources; | ||
508 | mtkd->ddev.device_tx_status = mtk_uart_apdma_tx_status; | ||
509 | mtkd->ddev.device_issue_pending = mtk_uart_apdma_issue_pending; | ||
510 | mtkd->ddev.device_prep_slave_sg = mtk_uart_apdma_prep_slave_sg; | ||
511 | mtkd->ddev.device_config = mtk_uart_apdma_slave_config; | ||
512 | mtkd->ddev.device_pause = mtk_uart_apdma_device_pause; | ||
513 | mtkd->ddev.device_terminate_all = mtk_uart_apdma_terminate_all; | ||
514 | mtkd->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE); | ||
515 | mtkd->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE); | ||
516 | mtkd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
517 | mtkd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; | ||
518 | mtkd->ddev.dev = &pdev->dev; | ||
519 | INIT_LIST_HEAD(&mtkd->ddev.channels); | ||
520 | |||
521 | mtkd->dma_requests = MTK_UART_APDMA_NR_VCHANS; | ||
522 | if (of_property_read_u32(np, "dma-requests", &mtkd->dma_requests)) { | ||
523 | dev_info(&pdev->dev, | ||
524 | "Using %u as missing dma-requests property\n", | ||
525 | MTK_UART_APDMA_NR_VCHANS); | ||
526 | } | ||
527 | |||
528 | for (i = 0; i < mtkd->dma_requests; i++) { | ||
529 | c = devm_kzalloc(mtkd->ddev.dev, sizeof(*c), GFP_KERNEL); | ||
530 | if (!c) { | ||
531 | rc = -ENODEV; | ||
532 | goto err_no_dma; | ||
533 | } | ||
534 | |||
535 | res = platform_get_resource(pdev, IORESOURCE_MEM, i); | ||
536 | if (!res) { | ||
537 | rc = -ENODEV; | ||
538 | goto err_no_dma; | ||
539 | } | ||
540 | |||
541 | c->base = devm_ioremap_resource(&pdev->dev, res); | ||
542 | if (IS_ERR(c->base)) { | ||
543 | rc = PTR_ERR(c->base); | ||
544 | goto err_no_dma; | ||
545 | } | ||
546 | c->vc.desc_free = mtk_uart_apdma_desc_free; | ||
547 | vchan_init(&c->vc, &mtkd->ddev); | ||
548 | |||
549 | rc = platform_get_irq(pdev, i); | ||
550 | if (rc < 0) { | ||
551 | dev_err(&pdev->dev, "failed to get IRQ[%d]\n", i); | ||
552 | goto err_no_dma; | ||
553 | } | ||
554 | c->irq = rc; | ||
555 | } | ||
556 | |||
557 | pm_runtime_enable(&pdev->dev); | ||
558 | pm_runtime_set_active(&pdev->dev); | ||
559 | |||
560 | rc = dma_async_device_register(&mtkd->ddev); | ||
561 | if (rc) | ||
562 | goto rpm_disable; | ||
563 | |||
564 | platform_set_drvdata(pdev, mtkd); | ||
565 | |||
566 | /* Device-tree DMA controller registration */ | ||
567 | rc = of_dma_controller_register(np, of_dma_xlate_by_chan_id, mtkd); | ||
568 | if (rc) | ||
569 | goto dma_remove; | ||
570 | |||
571 | return rc; | ||
572 | |||
573 | dma_remove: | ||
574 | dma_async_device_unregister(&mtkd->ddev); | ||
575 | rpm_disable: | ||
576 | pm_runtime_disable(&pdev->dev); | ||
577 | err_no_dma: | ||
578 | mtk_uart_apdma_free(mtkd); | ||
579 | return rc; | ||
580 | } | ||
581 | |||
582 | static int mtk_uart_apdma_remove(struct platform_device *pdev) | ||
583 | { | ||
584 | struct mtk_uart_apdmadev *mtkd = platform_get_drvdata(pdev); | ||
585 | |||
586 | of_dma_controller_free(pdev->dev.of_node); | ||
587 | |||
588 | mtk_uart_apdma_free(mtkd); | ||
589 | |||
590 | dma_async_device_unregister(&mtkd->ddev); | ||
591 | |||
592 | pm_runtime_disable(&pdev->dev); | ||
593 | |||
594 | return 0; | ||
595 | } | ||
596 | |||
597 | #ifdef CONFIG_PM_SLEEP | ||
598 | static int mtk_uart_apdma_suspend(struct device *dev) | ||
599 | { | ||
600 | struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev); | ||
601 | |||
602 | if (!pm_runtime_suspended(dev)) | ||
603 | clk_disable_unprepare(mtkd->clk); | ||
604 | |||
605 | return 0; | ||
606 | } | ||
607 | |||
608 | static int mtk_uart_apdma_resume(struct device *dev) | ||
609 | { | ||
610 | int ret; | ||
611 | struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev); | ||
612 | |||
613 | if (!pm_runtime_suspended(dev)) { | ||
614 | ret = clk_prepare_enable(mtkd->clk); | ||
615 | if (ret) | ||
616 | return ret; | ||
617 | } | ||
618 | |||
619 | return 0; | ||
620 | } | ||
621 | #endif /* CONFIG_PM_SLEEP */ | ||
622 | |||
623 | #ifdef CONFIG_PM | ||
624 | static int mtk_uart_apdma_runtime_suspend(struct device *dev) | ||
625 | { | ||
626 | struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev); | ||
627 | |||
628 | clk_disable_unprepare(mtkd->clk); | ||
629 | |||
630 | return 0; | ||
631 | } | ||
632 | |||
633 | static int mtk_uart_apdma_runtime_resume(struct device *dev) | ||
634 | { | ||
635 | int ret; | ||
636 | struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev); | ||
637 | |||
638 | ret = clk_prepare_enable(mtkd->clk); | ||
639 | if (ret) | ||
640 | return ret; | ||
641 | |||
642 | return 0; | ||
643 | } | ||
644 | #endif /* CONFIG_PM */ | ||
645 | |||
646 | static const struct dev_pm_ops mtk_uart_apdma_pm_ops = { | ||
647 | SET_SYSTEM_SLEEP_PM_OPS(mtk_uart_apdma_suspend, mtk_uart_apdma_resume) | ||
648 | SET_RUNTIME_PM_OPS(mtk_uart_apdma_runtime_suspend, | ||
649 | mtk_uart_apdma_runtime_resume, NULL) | ||
650 | }; | ||
651 | |||
652 | static struct platform_driver mtk_uart_apdma_driver = { | ||
653 | .probe = mtk_uart_apdma_probe, | ||
654 | .remove = mtk_uart_apdma_remove, | ||
655 | .driver = { | ||
656 | .name = KBUILD_MODNAME, | ||
657 | .pm = &mtk_uart_apdma_pm_ops, | ||
658 | .of_match_table = of_match_ptr(mtk_uart_apdma_match), | ||
659 | }, | ||
660 | }; | ||
661 | |||
662 | module_platform_driver(mtk_uart_apdma_driver); | ||
663 | |||
664 | MODULE_DESCRIPTION("MediaTek UART APDMA Controller Driver"); | ||
665 | MODULE_AUTHOR("Long Cheng <long.cheng@mediatek.com>"); | ||
666 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c index 730a18d0c6d6..fea8608a7810 100644 --- a/drivers/dma/mic_x100_dma.c +++ b/drivers/dma/mic_x100_dma.c | |||
@@ -717,10 +717,8 @@ static int mic_dma_driver_probe(struct mbus_device *mbdev) | |||
717 | if (mic_dma_dbg) { | 717 | if (mic_dma_dbg) { |
718 | mic_dma_dev->dbg_dir = debugfs_create_dir(dev_name(&mbdev->dev), | 718 | mic_dma_dev->dbg_dir = debugfs_create_dir(dev_name(&mbdev->dev), |
719 | mic_dma_dbg); | 719 | mic_dma_dbg); |
720 | if (mic_dma_dev->dbg_dir) | 720 | debugfs_create_file("mic_dma_reg", 0444, mic_dma_dev->dbg_dir, |
721 | debugfs_create_file("mic_dma_reg", 0444, | 721 | mic_dma_dev, &mic_dma_reg_fops); |
722 | mic_dma_dev->dbg_dir, mic_dma_dev, | ||
723 | &mic_dma_reg_fops); | ||
724 | } | 722 | } |
725 | return 0; | 723 | return 0; |
726 | } | 724 | } |
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index bb3ccbf90a31..e7d1e12bf464 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c | |||
@@ -582,18 +582,12 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev, | |||
582 | } | 582 | } |
583 | 583 | ||
584 | struct mmp_tdma_filter_param { | 584 | struct mmp_tdma_filter_param { |
585 | struct device_node *of_node; | ||
586 | unsigned int chan_id; | 585 | unsigned int chan_id; |
587 | }; | 586 | }; |
588 | 587 | ||
589 | static bool mmp_tdma_filter_fn(struct dma_chan *chan, void *fn_param) | 588 | static bool mmp_tdma_filter_fn(struct dma_chan *chan, void *fn_param) |
590 | { | 589 | { |
591 | struct mmp_tdma_filter_param *param = fn_param; | 590 | struct mmp_tdma_filter_param *param = fn_param; |
592 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); | ||
593 | struct dma_device *pdma_device = tdmac->chan.device; | ||
594 | |||
595 | if (pdma_device->dev->of_node != param->of_node) | ||
596 | return false; | ||
597 | 591 | ||
598 | if (chan->chan_id != param->chan_id) | 592 | if (chan->chan_id != param->chan_id) |
599 | return false; | 593 | return false; |
@@ -611,13 +605,13 @@ static struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec, | |||
611 | if (dma_spec->args_count != 1) | 605 | if (dma_spec->args_count != 1) |
612 | return NULL; | 606 | return NULL; |
613 | 607 | ||
614 | param.of_node = ofdma->of_node; | ||
615 | param.chan_id = dma_spec->args[0]; | 608 | param.chan_id = dma_spec->args[0]; |
616 | 609 | ||
617 | if (param.chan_id >= TDMA_CHANNEL_NUM) | 610 | if (param.chan_id >= TDMA_CHANNEL_NUM) |
618 | return NULL; | 611 | return NULL; |
619 | 612 | ||
620 | return dma_request_channel(mask, mmp_tdma_filter_fn, ¶m); | 613 | return __dma_request_channel(&mask, mmp_tdma_filter_fn, ¶m, |
614 | ofdma->of_node); | ||
621 | } | 615 | } |
622 | 616 | ||
623 | static const struct of_device_id mmp_tdma_dt_ids[] = { | 617 | static const struct of_device_id mmp_tdma_dt_ids[] = { |
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index 20a9cb7cb6d3..3039bba0e4d5 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c | |||
@@ -719,7 +719,6 @@ err_out: | |||
719 | } | 719 | } |
720 | 720 | ||
721 | struct mxs_dma_filter_param { | 721 | struct mxs_dma_filter_param { |
722 | struct device_node *of_node; | ||
723 | unsigned int chan_id; | 722 | unsigned int chan_id; |
724 | }; | 723 | }; |
725 | 724 | ||
@@ -730,9 +729,6 @@ static bool mxs_dma_filter_fn(struct dma_chan *chan, void *fn_param) | |||
730 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | 729 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
731 | int chan_irq; | 730 | int chan_irq; |
732 | 731 | ||
733 | if (mxs_dma->dma_device.dev->of_node != param->of_node) | ||
734 | return false; | ||
735 | |||
736 | if (chan->chan_id != param->chan_id) | 732 | if (chan->chan_id != param->chan_id) |
737 | return false; | 733 | return false; |
738 | 734 | ||
@@ -755,13 +751,13 @@ static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec, | |||
755 | if (dma_spec->args_count != 1) | 751 | if (dma_spec->args_count != 1) |
756 | return NULL; | 752 | return NULL; |
757 | 753 | ||
758 | param.of_node = ofdma->of_node; | ||
759 | param.chan_id = dma_spec->args[0]; | 754 | param.chan_id = dma_spec->args[0]; |
760 | 755 | ||
761 | if (param.chan_id >= mxs_dma->nr_channels) | 756 | if (param.chan_id >= mxs_dma->nr_channels) |
762 | return NULL; | 757 | return NULL; |
763 | 758 | ||
764 | return dma_request_channel(mask, mxs_dma_filter_fn, ¶m); | 759 | return __dma_request_channel(&mask, mxs_dma_filter_fn, ¶m, |
760 | ofdma->of_node); | ||
765 | } | 761 | } |
766 | 762 | ||
767 | static int __init mxs_dma_probe(struct platform_device *pdev) | 763 | static int __init mxs_dma_probe(struct platform_device *pdev) |
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index 1e4d9ef2aea1..c2d779daa4b5 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c | |||
@@ -313,8 +313,8 @@ struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, | |||
313 | if (count != 1) | 313 | if (count != 1) |
314 | return NULL; | 314 | return NULL; |
315 | 315 | ||
316 | return dma_request_channel(info->dma_cap, info->filter_fn, | 316 | return __dma_request_channel(&info->dma_cap, info->filter_fn, |
317 | &dma_spec->args[0]); | 317 | &dma_spec->args[0], dma_spec->np); |
318 | } | 318 | } |
319 | EXPORT_SYMBOL_GPL(of_dma_simple_xlate); | 319 | EXPORT_SYMBOL_GPL(of_dma_simple_xlate); |
320 | 320 | ||
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 56f9fabc99c4..1163af2ba4a3 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/err.h> | 25 | #include <linux/err.h> |
26 | #include <linux/pm_runtime.h> | 26 | #include <linux/pm_runtime.h> |
27 | #include <linux/bug.h> | 27 | #include <linux/bug.h> |
28 | #include <linux/reset.h> | ||
28 | 29 | ||
29 | #include "dmaengine.h" | 30 | #include "dmaengine.h" |
30 | #define PL330_MAX_CHAN 8 | 31 | #define PL330_MAX_CHAN 8 |
@@ -496,6 +497,9 @@ struct pl330_dmac { | |||
496 | unsigned int num_peripherals; | 497 | unsigned int num_peripherals; |
497 | struct dma_pl330_chan *peripherals; /* keep at end */ | 498 | struct dma_pl330_chan *peripherals; /* keep at end */ |
498 | int quirks; | 499 | int quirks; |
500 | |||
501 | struct reset_control *rstc; | ||
502 | struct reset_control *rstc_ocp; | ||
499 | }; | 503 | }; |
500 | 504 | ||
501 | static struct pl330_of_quirks { | 505 | static struct pl330_of_quirks { |
@@ -3024,6 +3028,32 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
3024 | 3028 | ||
3025 | amba_set_drvdata(adev, pl330); | 3029 | amba_set_drvdata(adev, pl330); |
3026 | 3030 | ||
3031 | pl330->rstc = devm_reset_control_get_optional(&adev->dev, "dma"); | ||
3032 | if (IS_ERR(pl330->rstc)) { | ||
3033 | if (PTR_ERR(pl330->rstc) != -EPROBE_DEFER) | ||
3034 | dev_err(&adev->dev, "Failed to get reset!\n"); | ||
3035 | return PTR_ERR(pl330->rstc); | ||
3036 | } else { | ||
3037 | ret = reset_control_deassert(pl330->rstc); | ||
3038 | if (ret) { | ||
3039 | dev_err(&adev->dev, "Couldn't deassert the device from reset!\n"); | ||
3040 | return ret; | ||
3041 | } | ||
3042 | } | ||
3043 | |||
3044 | pl330->rstc_ocp = devm_reset_control_get_optional(&adev->dev, "dma-ocp"); | ||
3045 | if (IS_ERR(pl330->rstc_ocp)) { | ||
3046 | if (PTR_ERR(pl330->rstc_ocp) != -EPROBE_DEFER) | ||
3047 | dev_err(&adev->dev, "Failed to get OCP reset!\n"); | ||
3048 | return PTR_ERR(pl330->rstc_ocp); | ||
3049 | } else { | ||
3050 | ret = reset_control_deassert(pl330->rstc_ocp); | ||
3051 | if (ret) { | ||
3052 | dev_err(&adev->dev, "Couldn't deassert the device from OCP reset!\n"); | ||
3053 | return ret; | ||
3054 | } | ||
3055 | } | ||
3056 | |||
3027 | for (i = 0; i < AMBA_NR_IRQS; i++) { | 3057 | for (i = 0; i < AMBA_NR_IRQS; i++) { |
3028 | irq = adev->irq[i]; | 3058 | irq = adev->irq[i]; |
3029 | if (irq) { | 3059 | if (irq) { |
@@ -3164,6 +3194,11 @@ probe_err3: | |||
3164 | probe_err2: | 3194 | probe_err2: |
3165 | pl330_del(pl330); | 3195 | pl330_del(pl330); |
3166 | 3196 | ||
3197 | if (pl330->rstc_ocp) | ||
3198 | reset_control_assert(pl330->rstc_ocp); | ||
3199 | |||
3200 | if (pl330->rstc) | ||
3201 | reset_control_assert(pl330->rstc); | ||
3167 | return ret; | 3202 | return ret; |
3168 | } | 3203 | } |
3169 | 3204 | ||
@@ -3202,6 +3237,11 @@ static int pl330_remove(struct amba_device *adev) | |||
3202 | 3237 | ||
3203 | pl330_del(pl330); | 3238 | pl330_del(pl330); |
3204 | 3239 | ||
3240 | if (pl330->rstc_ocp) | ||
3241 | reset_control_assert(pl330->rstc_ocp); | ||
3242 | |||
3243 | if (pl330->rstc) | ||
3244 | reset_control_assert(pl330->rstc); | ||
3205 | return 0; | 3245 | return 0; |
3206 | } | 3246 | } |
3207 | 3247 | ||
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index 468c234cb3be..349fb312c872 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c | |||
@@ -129,7 +129,6 @@ struct pxad_device { | |||
129 | spinlock_t phy_lock; /* Phy association */ | 129 | spinlock_t phy_lock; /* Phy association */ |
130 | #ifdef CONFIG_DEBUG_FS | 130 | #ifdef CONFIG_DEBUG_FS |
131 | struct dentry *dbgfs_root; | 131 | struct dentry *dbgfs_root; |
132 | struct dentry *dbgfs_state; | ||
133 | struct dentry **dbgfs_chan; | 132 | struct dentry **dbgfs_chan; |
134 | #endif | 133 | #endif |
135 | }; | 134 | }; |
@@ -323,31 +322,18 @@ static struct dentry *pxad_dbg_alloc_chan(struct pxad_device *pdev, | |||
323 | int ch, struct dentry *chandir) | 322 | int ch, struct dentry *chandir) |
324 | { | 323 | { |
325 | char chan_name[11]; | 324 | char chan_name[11]; |
326 | struct dentry *chan, *chan_state = NULL, *chan_descr = NULL; | 325 | struct dentry *chan; |
327 | struct dentry *chan_reqs = NULL; | ||
328 | void *dt; | 326 | void *dt; |
329 | 327 | ||
330 | scnprintf(chan_name, sizeof(chan_name), "%d", ch); | 328 | scnprintf(chan_name, sizeof(chan_name), "%d", ch); |
331 | chan = debugfs_create_dir(chan_name, chandir); | 329 | chan = debugfs_create_dir(chan_name, chandir); |
332 | dt = (void *)&pdev->phys[ch]; | 330 | dt = (void *)&pdev->phys[ch]; |
333 | 331 | ||
334 | if (chan) | 332 | debugfs_create_file("state", 0400, chan, dt, &chan_state_fops); |
335 | chan_state = debugfs_create_file("state", 0400, chan, dt, | 333 | debugfs_create_file("descriptors", 0400, chan, dt, &descriptors_fops); |
336 | &chan_state_fops); | 334 | debugfs_create_file("requesters", 0400, chan, dt, &requester_chan_fops); |
337 | if (chan_state) | ||
338 | chan_descr = debugfs_create_file("descriptors", 0400, chan, dt, | ||
339 | &descriptors_fops); | ||
340 | if (chan_descr) | ||
341 | chan_reqs = debugfs_create_file("requesters", 0400, chan, dt, | ||
342 | &requester_chan_fops); | ||
343 | if (!chan_reqs) | ||
344 | goto err_state; | ||
345 | 335 | ||
346 | return chan; | 336 | return chan; |
347 | |||
348 | err_state: | ||
349 | debugfs_remove_recursive(chan); | ||
350 | return NULL; | ||
351 | } | 337 | } |
352 | 338 | ||
353 | static void pxad_init_debugfs(struct pxad_device *pdev) | 339 | static void pxad_init_debugfs(struct pxad_device *pdev) |
@@ -355,40 +341,20 @@ static void pxad_init_debugfs(struct pxad_device *pdev) | |||
355 | int i; | 341 | int i; |
356 | struct dentry *chandir; | 342 | struct dentry *chandir; |
357 | 343 | ||
358 | pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL); | ||
359 | if (IS_ERR(pdev->dbgfs_root) || !pdev->dbgfs_root) | ||
360 | goto err_root; | ||
361 | |||
362 | pdev->dbgfs_state = debugfs_create_file("state", 0400, pdev->dbgfs_root, | ||
363 | pdev, &state_fops); | ||
364 | if (!pdev->dbgfs_state) | ||
365 | goto err_state; | ||
366 | |||
367 | pdev->dbgfs_chan = | 344 | pdev->dbgfs_chan = |
368 | kmalloc_array(pdev->nr_chans, sizeof(*pdev->dbgfs_state), | 345 | kmalloc_array(pdev->nr_chans, sizeof(struct dentry *), |
369 | GFP_KERNEL); | 346 | GFP_KERNEL); |
370 | if (!pdev->dbgfs_chan) | 347 | if (!pdev->dbgfs_chan) |
371 | goto err_alloc; | 348 | return; |
349 | |||
350 | pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL); | ||
351 | |||
352 | debugfs_create_file("state", 0400, pdev->dbgfs_root, pdev, &state_fops); | ||
372 | 353 | ||
373 | chandir = debugfs_create_dir("channels", pdev->dbgfs_root); | 354 | chandir = debugfs_create_dir("channels", pdev->dbgfs_root); |
374 | if (!chandir) | ||
375 | goto err_chandir; | ||
376 | 355 | ||
377 | for (i = 0; i < pdev->nr_chans; i++) { | 356 | for (i = 0; i < pdev->nr_chans; i++) |
378 | pdev->dbgfs_chan[i] = pxad_dbg_alloc_chan(pdev, i, chandir); | 357 | pdev->dbgfs_chan[i] = pxad_dbg_alloc_chan(pdev, i, chandir); |
379 | if (!pdev->dbgfs_chan[i]) | ||
380 | goto err_chans; | ||
381 | } | ||
382 | |||
383 | return; | ||
384 | err_chans: | ||
385 | err_chandir: | ||
386 | kfree(pdev->dbgfs_chan); | ||
387 | err_alloc: | ||
388 | err_state: | ||
389 | debugfs_remove_recursive(pdev->dbgfs_root); | ||
390 | err_root: | ||
391 | pr_err("pxad: debugfs is not available\n"); | ||
392 | } | 358 | } |
393 | 359 | ||
394 | static void pxad_cleanup_debugfs(struct pxad_device *pdev) | 360 | static void pxad_cleanup_debugfs(struct pxad_device *pdev) |
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h index f337e2789ddc..f212466744f3 100644 --- a/drivers/dma/qcom/hidma.h +++ b/drivers/dma/qcom/hidma.h | |||
@@ -93,8 +93,6 @@ struct hidma_chan { | |||
93 | * It is used by the DMA complete notification to | 93 | * It is used by the DMA complete notification to |
94 | * locate the descriptor that initiated the transfer. | 94 | * locate the descriptor that initiated the transfer. |
95 | */ | 95 | */ |
96 | struct dentry *debugfs; | ||
97 | struct dentry *stats; | ||
98 | struct hidma_dev *dmadev; | 96 | struct hidma_dev *dmadev; |
99 | struct hidma_desc *running; | 97 | struct hidma_desc *running; |
100 | 98 | ||
@@ -126,7 +124,6 @@ struct hidma_dev { | |||
126 | struct dma_device ddev; | 124 | struct dma_device ddev; |
127 | 125 | ||
128 | struct dentry *debugfs; | 126 | struct dentry *debugfs; |
129 | struct dentry *stats; | ||
130 | 127 | ||
131 | /* sysfs entry for the channel id */ | 128 | /* sysfs entry for the channel id */ |
132 | struct device_attribute *chid_attrs; | 129 | struct device_attribute *chid_attrs; |
@@ -158,6 +155,6 @@ irqreturn_t hidma_ll_inthandler(int irq, void *arg); | |||
158 | irqreturn_t hidma_ll_inthandler_msi(int irq, void *arg, int cause); | 155 | irqreturn_t hidma_ll_inthandler_msi(int irq, void *arg, int cause); |
159 | void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info, | 156 | void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info, |
160 | u8 err_code); | 157 | u8 err_code); |
161 | int hidma_debug_init(struct hidma_dev *dmadev); | 158 | void hidma_debug_init(struct hidma_dev *dmadev); |
162 | void hidma_debug_uninit(struct hidma_dev *dmadev); | 159 | void hidma_debug_uninit(struct hidma_dev *dmadev); |
163 | #endif | 160 | #endif |
diff --git a/drivers/dma/qcom/hidma_dbg.c b/drivers/dma/qcom/hidma_dbg.c index 75b0691a670d..ce87c7937a0e 100644 --- a/drivers/dma/qcom/hidma_dbg.c +++ b/drivers/dma/qcom/hidma_dbg.c | |||
@@ -138,17 +138,13 @@ void hidma_debug_uninit(struct hidma_dev *dmadev) | |||
138 | debugfs_remove_recursive(dmadev->debugfs); | 138 | debugfs_remove_recursive(dmadev->debugfs); |
139 | } | 139 | } |
140 | 140 | ||
141 | int hidma_debug_init(struct hidma_dev *dmadev) | 141 | void hidma_debug_init(struct hidma_dev *dmadev) |
142 | { | 142 | { |
143 | int rc = 0; | ||
144 | int chidx = 0; | 143 | int chidx = 0; |
145 | struct list_head *position = NULL; | 144 | struct list_head *position = NULL; |
145 | struct dentry *dir; | ||
146 | 146 | ||
147 | dmadev->debugfs = debugfs_create_dir(dev_name(dmadev->ddev.dev), NULL); | 147 | dmadev->debugfs = debugfs_create_dir(dev_name(dmadev->ddev.dev), NULL); |
148 | if (!dmadev->debugfs) { | ||
149 | rc = -ENODEV; | ||
150 | return rc; | ||
151 | } | ||
152 | 148 | ||
153 | /* walk through the virtual channel list */ | 149 | /* walk through the virtual channel list */ |
154 | list_for_each(position, &dmadev->ddev.channels) { | 150 | list_for_each(position, &dmadev->ddev.channels) { |
@@ -157,32 +153,13 @@ int hidma_debug_init(struct hidma_dev *dmadev) | |||
157 | chan = list_entry(position, struct hidma_chan, | 153 | chan = list_entry(position, struct hidma_chan, |
158 | chan.device_node); | 154 | chan.device_node); |
159 | sprintf(chan->dbg_name, "chan%d", chidx); | 155 | sprintf(chan->dbg_name, "chan%d", chidx); |
160 | chan->debugfs = debugfs_create_dir(chan->dbg_name, | 156 | dir = debugfs_create_dir(chan->dbg_name, |
161 | dmadev->debugfs); | 157 | dmadev->debugfs); |
162 | if (!chan->debugfs) { | 158 | debugfs_create_file("stats", S_IRUGO, dir, chan, |
163 | rc = -ENOMEM; | 159 | &hidma_chan_fops); |
164 | goto cleanup; | ||
165 | } | ||
166 | chan->stats = debugfs_create_file("stats", S_IRUGO, | ||
167 | chan->debugfs, chan, | ||
168 | &hidma_chan_fops); | ||
169 | if (!chan->stats) { | ||
170 | rc = -ENOMEM; | ||
171 | goto cleanup; | ||
172 | } | ||
173 | chidx++; | 160 | chidx++; |
174 | } | 161 | } |
175 | 162 | ||
176 | dmadev->stats = debugfs_create_file("stats", S_IRUGO, | 163 | debugfs_create_file("stats", S_IRUGO, dmadev->debugfs, dmadev, |
177 | dmadev->debugfs, dmadev, | 164 | &hidma_dma_fops); |
178 | &hidma_dma_fops); | ||
179 | if (!dmadev->stats) { | ||
180 | rc = -ENOMEM; | ||
181 | goto cleanup; | ||
182 | } | ||
183 | |||
184 | return 0; | ||
185 | cleanup: | ||
186 | hidma_debug_uninit(dmadev); | ||
187 | return rc; | ||
188 | } | 165 | } |
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig index 4d6b02b3b1f1..54d5d0369d3c 100644 --- a/drivers/dma/sh/Kconfig +++ b/drivers/dma/sh/Kconfig | |||
@@ -47,9 +47,3 @@ config RENESAS_USB_DMAC | |||
47 | help | 47 | help |
48 | This driver supports the USB-DMA controller found in the Renesas | 48 | This driver supports the USB-DMA controller found in the Renesas |
49 | SoCs. | 49 | SoCs. |
50 | |||
51 | config SUDMAC | ||
52 | tristate "Renesas SUDMAC support" | ||
53 | depends on SH_DMAE_BASE | ||
54 | help | ||
55 | Enable support for the Renesas SUDMAC controllers. | ||
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile index 42110dd57a56..112fbd22bb3f 100644 --- a/drivers/dma/sh/Makefile +++ b/drivers/dma/sh/Makefile | |||
@@ -15,4 +15,3 @@ obj-$(CONFIG_SH_DMAE) += shdma.o | |||
15 | 15 | ||
16 | obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o | 16 | obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o |
17 | obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o | 17 | obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o |
18 | obj-$(CONFIG_SUDMAC) += sudmac.o | ||
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 33ab1b607e2b..9c41a4e42575 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c | |||
@@ -1165,7 +1165,7 @@ rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
1165 | struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); | 1165 | struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); |
1166 | 1166 | ||
1167 | /* Someone calling slave DMA on a generic channel? */ | 1167 | /* Someone calling slave DMA on a generic channel? */ |
1168 | if (rchan->mid_rid < 0 || !sg_len) { | 1168 | if (rchan->mid_rid < 0 || !sg_len || !sg_dma_len(sgl)) { |
1169 | dev_warn(chan->device->dev, | 1169 | dev_warn(chan->device->dev, |
1170 | "%s: bad parameter: len=%d, id=%d\n", | 1170 | "%s: bad parameter: len=%d, id=%d\n", |
1171 | __func__, sg_len, rchan->mid_rid); | 1171 | __func__, sg_len, rchan->mid_rid); |
@@ -1654,8 +1654,7 @@ static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg) | |||
1654 | * Forcing it to call dma_request_channel() and iterate through all | 1654 | * Forcing it to call dma_request_channel() and iterate through all |
1655 | * channels from all controllers is just pointless. | 1655 | * channels from all controllers is just pointless. |
1656 | */ | 1656 | */ |
1657 | if (chan->device->device_config != rcar_dmac_device_config || | 1657 | if (chan->device->device_config != rcar_dmac_device_config) |
1658 | dma_spec->np != chan->device->dev->of_node) | ||
1659 | return false; | 1658 | return false; |
1660 | 1659 | ||
1661 | return !test_and_set_bit(dma_spec->args[0], dmac->modules); | 1660 | return !test_and_set_bit(dma_spec->args[0], dmac->modules); |
@@ -1675,7 +1674,8 @@ static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec, | |||
1675 | dma_cap_zero(mask); | 1674 | dma_cap_zero(mask); |
1676 | dma_cap_set(DMA_SLAVE, mask); | 1675 | dma_cap_set(DMA_SLAVE, mask); |
1677 | 1676 | ||
1678 | chan = dma_request_channel(mask, rcar_dmac_chan_filter, dma_spec); | 1677 | chan = __dma_request_channel(&mask, rcar_dmac_chan_filter, dma_spec, |
1678 | ofdma->of_node); | ||
1679 | if (!chan) | 1679 | if (!chan) |
1680 | return NULL; | 1680 | return NULL; |
1681 | 1681 | ||
diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c deleted file mode 100644 index 30cc3553cb8b..000000000000 --- a/drivers/dma/sh/sudmac.c +++ /dev/null | |||
@@ -1,414 +0,0 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Renesas SUDMAC support | ||
4 | * | ||
5 | * Copyright (C) 2013 Renesas Solutions Corp. | ||
6 | * | ||
7 | * based on drivers/dma/sh/shdma.c: | ||
8 | * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de> | ||
9 | * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> | ||
10 | * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. | ||
11 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | ||
12 | */ | ||
13 | |||
14 | #include <linux/dmaengine.h> | ||
15 | #include <linux/err.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/interrupt.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/platform_device.h> | ||
20 | #include <linux/slab.h> | ||
21 | #include <linux/sudmac.h> | ||
22 | |||
23 | struct sudmac_chan { | ||
24 | struct shdma_chan shdma_chan; | ||
25 | void __iomem *base; | ||
26 | char dev_id[16]; /* unique name per DMAC of channel */ | ||
27 | |||
28 | u32 offset; /* for CFG, BA, BBC, CA, CBC, DEN */ | ||
29 | u32 cfg; | ||
30 | u32 dint_end_bit; | ||
31 | }; | ||
32 | |||
33 | struct sudmac_device { | ||
34 | struct shdma_dev shdma_dev; | ||
35 | struct sudmac_pdata *pdata; | ||
36 | void __iomem *chan_reg; | ||
37 | }; | ||
38 | |||
39 | struct sudmac_regs { | ||
40 | u32 base_addr; | ||
41 | u32 base_byte_count; | ||
42 | }; | ||
43 | |||
44 | struct sudmac_desc { | ||
45 | struct sudmac_regs hw; | ||
46 | struct shdma_desc shdma_desc; | ||
47 | }; | ||
48 | |||
49 | #define to_chan(schan) container_of(schan, struct sudmac_chan, shdma_chan) | ||
50 | #define to_desc(sdesc) container_of(sdesc, struct sudmac_desc, shdma_desc) | ||
51 | #define to_sdev(sc) container_of(sc->shdma_chan.dma_chan.device, \ | ||
52 | struct sudmac_device, shdma_dev.dma_dev) | ||
53 | |||
54 | /* SUDMAC register */ | ||
55 | #define SUDMAC_CH0CFG 0x00 | ||
56 | #define SUDMAC_CH0BA 0x10 | ||
57 | #define SUDMAC_CH0BBC 0x18 | ||
58 | #define SUDMAC_CH0CA 0x20 | ||
59 | #define SUDMAC_CH0CBC 0x28 | ||
60 | #define SUDMAC_CH0DEN 0x30 | ||
61 | #define SUDMAC_DSTSCLR 0x38 | ||
62 | #define SUDMAC_DBUFCTRL 0x3C | ||
63 | #define SUDMAC_DINTCTRL 0x40 | ||
64 | #define SUDMAC_DINTSTS 0x44 | ||
65 | #define SUDMAC_DINTSTSCLR 0x48 | ||
66 | #define SUDMAC_CH0SHCTRL 0x50 | ||
67 | |||
68 | /* Definitions for the sudmac_channel.config */ | ||
69 | #define SUDMAC_SENDBUFM 0x1000 /* b12: Transmit Buffer Mode */ | ||
70 | #define SUDMAC_RCVENDM 0x0100 /* b8: Receive Data Transfer End Mode */ | ||
71 | #define SUDMAC_LBA_WAIT 0x0030 /* b5-4: Local Bus Access Wait */ | ||
72 | |||
73 | /* Definitions for the sudmac_channel.dint_end_bit */ | ||
74 | #define SUDMAC_CH1ENDE 0x0002 /* b1: Ch1 DMA Transfer End Int Enable */ | ||
75 | #define SUDMAC_CH0ENDE 0x0001 /* b0: Ch0 DMA Transfer End Int Enable */ | ||
76 | |||
77 | #define SUDMAC_DRV_NAME "sudmac" | ||
78 | |||
79 | static void sudmac_writel(struct sudmac_chan *sc, u32 data, u32 reg) | ||
80 | { | ||
81 | iowrite32(data, sc->base + reg); | ||
82 | } | ||
83 | |||
84 | static u32 sudmac_readl(struct sudmac_chan *sc, u32 reg) | ||
85 | { | ||
86 | return ioread32(sc->base + reg); | ||
87 | } | ||
88 | |||
89 | static bool sudmac_is_busy(struct sudmac_chan *sc) | ||
90 | { | ||
91 | u32 den = sudmac_readl(sc, SUDMAC_CH0DEN + sc->offset); | ||
92 | |||
93 | if (den) | ||
94 | return true; /* working */ | ||
95 | |||
96 | return false; /* waiting */ | ||
97 | } | ||
98 | |||
99 | static void sudmac_set_reg(struct sudmac_chan *sc, struct sudmac_regs *hw, | ||
100 | struct shdma_desc *sdesc) | ||
101 | { | ||
102 | sudmac_writel(sc, sc->cfg, SUDMAC_CH0CFG + sc->offset); | ||
103 | sudmac_writel(sc, hw->base_addr, SUDMAC_CH0BA + sc->offset); | ||
104 | sudmac_writel(sc, hw->base_byte_count, SUDMAC_CH0BBC + sc->offset); | ||
105 | } | ||
106 | |||
107 | static void sudmac_start(struct sudmac_chan *sc) | ||
108 | { | ||
109 | u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL); | ||
110 | |||
111 | sudmac_writel(sc, dintctrl | sc->dint_end_bit, SUDMAC_DINTCTRL); | ||
112 | sudmac_writel(sc, 1, SUDMAC_CH0DEN + sc->offset); | ||
113 | } | ||
114 | |||
115 | static void sudmac_start_xfer(struct shdma_chan *schan, | ||
116 | struct shdma_desc *sdesc) | ||
117 | { | ||
118 | struct sudmac_chan *sc = to_chan(schan); | ||
119 | struct sudmac_desc *sd = to_desc(sdesc); | ||
120 | |||
121 | sudmac_set_reg(sc, &sd->hw, sdesc); | ||
122 | sudmac_start(sc); | ||
123 | } | ||
124 | |||
125 | static bool sudmac_channel_busy(struct shdma_chan *schan) | ||
126 | { | ||
127 | struct sudmac_chan *sc = to_chan(schan); | ||
128 | |||
129 | return sudmac_is_busy(sc); | ||
130 | } | ||
131 | |||
132 | static void sudmac_setup_xfer(struct shdma_chan *schan, int slave_id) | ||
133 | { | ||
134 | } | ||
135 | |||
136 | static const struct sudmac_slave_config *sudmac_find_slave( | ||
137 | struct sudmac_chan *sc, int slave_id) | ||
138 | { | ||
139 | struct sudmac_device *sdev = to_sdev(sc); | ||
140 | struct sudmac_pdata *pdata = sdev->pdata; | ||
141 | const struct sudmac_slave_config *cfg; | ||
142 | int i; | ||
143 | |||
144 | for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) | ||
145 | if (cfg->slave_id == slave_id) | ||
146 | return cfg; | ||
147 | |||
148 | return NULL; | ||
149 | } | ||
150 | |||
151 | static int sudmac_set_slave(struct shdma_chan *schan, int slave_id, | ||
152 | dma_addr_t slave_addr, bool try) | ||
153 | { | ||
154 | struct sudmac_chan *sc = to_chan(schan); | ||
155 | const struct sudmac_slave_config *cfg = sudmac_find_slave(sc, slave_id); | ||
156 | |||
157 | if (!cfg) | ||
158 | return -ENODEV; | ||
159 | |||
160 | return 0; | ||
161 | } | ||
162 | |||
163 | static inline void sudmac_dma_halt(struct sudmac_chan *sc) | ||
164 | { | ||
165 | u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL); | ||
166 | |||
167 | sudmac_writel(sc, 0, SUDMAC_CH0DEN + sc->offset); | ||
168 | sudmac_writel(sc, dintctrl & ~sc->dint_end_bit, SUDMAC_DINTCTRL); | ||
169 | sudmac_writel(sc, sc->dint_end_bit, SUDMAC_DINTSTSCLR); | ||
170 | } | ||
171 | |||
172 | static int sudmac_desc_setup(struct shdma_chan *schan, | ||
173 | struct shdma_desc *sdesc, | ||
174 | dma_addr_t src, dma_addr_t dst, size_t *len) | ||
175 | { | ||
176 | struct sudmac_chan *sc = to_chan(schan); | ||
177 | struct sudmac_desc *sd = to_desc(sdesc); | ||
178 | |||
179 | dev_dbg(sc->shdma_chan.dev, "%s: src=%pad, dst=%pad, len=%zu\n", | ||
180 | __func__, &src, &dst, *len); | ||
181 | |||
182 | if (*len > schan->max_xfer_len) | ||
183 | *len = schan->max_xfer_len; | ||
184 | |||
185 | if (dst) | ||
186 | sd->hw.base_addr = dst; | ||
187 | else if (src) | ||
188 | sd->hw.base_addr = src; | ||
189 | sd->hw.base_byte_count = *len; | ||
190 | |||
191 | return 0; | ||
192 | } | ||
193 | |||
194 | static void sudmac_halt(struct shdma_chan *schan) | ||
195 | { | ||
196 | struct sudmac_chan *sc = to_chan(schan); | ||
197 | |||
198 | sudmac_dma_halt(sc); | ||
199 | } | ||
200 | |||
201 | static bool sudmac_chan_irq(struct shdma_chan *schan, int irq) | ||
202 | { | ||
203 | struct sudmac_chan *sc = to_chan(schan); | ||
204 | u32 dintsts = sudmac_readl(sc, SUDMAC_DINTSTS); | ||
205 | |||
206 | if (!(dintsts & sc->dint_end_bit)) | ||
207 | return false; | ||
208 | |||
209 | /* DMA stop */ | ||
210 | sudmac_dma_halt(sc); | ||
211 | |||
212 | return true; | ||
213 | } | ||
214 | |||
215 | static size_t sudmac_get_partial(struct shdma_chan *schan, | ||
216 | struct shdma_desc *sdesc) | ||
217 | { | ||
218 | struct sudmac_chan *sc = to_chan(schan); | ||
219 | struct sudmac_desc *sd = to_desc(sdesc); | ||
220 | u32 current_byte_count = sudmac_readl(sc, SUDMAC_CH0CBC + sc->offset); | ||
221 | |||
222 | return sd->hw.base_byte_count - current_byte_count; | ||
223 | } | ||
224 | |||
225 | static bool sudmac_desc_completed(struct shdma_chan *schan, | ||
226 | struct shdma_desc *sdesc) | ||
227 | { | ||
228 | struct sudmac_chan *sc = to_chan(schan); | ||
229 | struct sudmac_desc *sd = to_desc(sdesc); | ||
230 | u32 current_addr = sudmac_readl(sc, SUDMAC_CH0CA + sc->offset); | ||
231 | |||
232 | return sd->hw.base_addr + sd->hw.base_byte_count == current_addr; | ||
233 | } | ||
234 | |||
235 | static int sudmac_chan_probe(struct sudmac_device *su_dev, int id, int irq, | ||
236 | unsigned long flags) | ||
237 | { | ||
238 | struct shdma_dev *sdev = &su_dev->shdma_dev; | ||
239 | struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev); | ||
240 | struct sudmac_chan *sc; | ||
241 | struct shdma_chan *schan; | ||
242 | int err; | ||
243 | |||
244 | sc = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_chan), GFP_KERNEL); | ||
245 | if (!sc) | ||
246 | return -ENOMEM; | ||
247 | |||
248 | schan = &sc->shdma_chan; | ||
249 | schan->max_xfer_len = 64 * 1024 * 1024 - 1; | ||
250 | |||
251 | shdma_chan_probe(sdev, schan, id); | ||
252 | |||
253 | sc->base = su_dev->chan_reg; | ||
254 | |||
255 | /* get platform_data */ | ||
256 | sc->offset = su_dev->pdata->channel->offset; | ||
257 | if (su_dev->pdata->channel->config & SUDMAC_TX_BUFFER_MODE) | ||
258 | sc->cfg |= SUDMAC_SENDBUFM; | ||
259 | if (su_dev->pdata->channel->config & SUDMAC_RX_END_MODE) | ||
260 | sc->cfg |= SUDMAC_RCVENDM; | ||
261 | sc->cfg |= (su_dev->pdata->channel->wait << 4) & SUDMAC_LBA_WAIT; | ||
262 | |||
263 | if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH0) | ||
264 | sc->dint_end_bit |= SUDMAC_CH0ENDE; | ||
265 | if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH1) | ||
266 | sc->dint_end_bit |= SUDMAC_CH1ENDE; | ||
267 | |||
268 | /* set up channel irq */ | ||
269 | if (pdev->id >= 0) | ||
270 | snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d.%d", | ||
271 | pdev->id, id); | ||
272 | else | ||
273 | snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d", id); | ||
274 | |||
275 | err = shdma_request_irq(schan, irq, flags, sc->dev_id); | ||
276 | if (err) { | ||
277 | dev_err(sdev->dma_dev.dev, | ||
278 | "DMA channel %d request_irq failed %d\n", id, err); | ||
279 | goto err_no_irq; | ||
280 | } | ||
281 | |||
282 | return 0; | ||
283 | |||
284 | err_no_irq: | ||
285 | /* remove from dmaengine device node */ | ||
286 | shdma_chan_remove(schan); | ||
287 | return err; | ||
288 | } | ||
289 | |||
290 | static void sudmac_chan_remove(struct sudmac_device *su_dev) | ||
291 | { | ||
292 | struct shdma_chan *schan; | ||
293 | int i; | ||
294 | |||
295 | shdma_for_each_chan(schan, &su_dev->shdma_dev, i) { | ||
296 | BUG_ON(!schan); | ||
297 | |||
298 | shdma_chan_remove(schan); | ||
299 | } | ||
300 | } | ||
301 | |||
302 | static dma_addr_t sudmac_slave_addr(struct shdma_chan *schan) | ||
303 | { | ||
304 | /* SUDMAC doesn't need the address */ | ||
305 | return 0; | ||
306 | } | ||
307 | |||
308 | static struct shdma_desc *sudmac_embedded_desc(void *buf, int i) | ||
309 | { | ||
310 | return &((struct sudmac_desc *)buf)[i].shdma_desc; | ||
311 | } | ||
312 | |||
313 | static const struct shdma_ops sudmac_shdma_ops = { | ||
314 | .desc_completed = sudmac_desc_completed, | ||
315 | .halt_channel = sudmac_halt, | ||
316 | .channel_busy = sudmac_channel_busy, | ||
317 | .slave_addr = sudmac_slave_addr, | ||
318 | .desc_setup = sudmac_desc_setup, | ||
319 | .set_slave = sudmac_set_slave, | ||
320 | .setup_xfer = sudmac_setup_xfer, | ||
321 | .start_xfer = sudmac_start_xfer, | ||
322 | .embedded_desc = sudmac_embedded_desc, | ||
323 | .chan_irq = sudmac_chan_irq, | ||
324 | .get_partial = sudmac_get_partial, | ||
325 | }; | ||
326 | |||
327 | static int sudmac_probe(struct platform_device *pdev) | ||
328 | { | ||
329 | struct sudmac_pdata *pdata = dev_get_platdata(&pdev->dev); | ||
330 | int err, i; | ||
331 | struct sudmac_device *su_dev; | ||
332 | struct dma_device *dma_dev; | ||
333 | struct resource *chan, *irq_res; | ||
334 | |||
335 | /* get platform data */ | ||
336 | if (!pdata) | ||
337 | return -ENODEV; | ||
338 | |||
339 | irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
340 | if (!irq_res) | ||
341 | return -ENODEV; | ||
342 | |||
343 | err = -ENOMEM; | ||
344 | su_dev = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_device), | ||
345 | GFP_KERNEL); | ||
346 | if (!su_dev) | ||
347 | return err; | ||
348 | |||
349 | dma_dev = &su_dev->shdma_dev.dma_dev; | ||
350 | |||
351 | chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
352 | su_dev->chan_reg = devm_ioremap_resource(&pdev->dev, chan); | ||
353 | if (IS_ERR(su_dev->chan_reg)) | ||
354 | return PTR_ERR(su_dev->chan_reg); | ||
355 | |||
356 | dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); | ||
357 | |||
358 | su_dev->shdma_dev.ops = &sudmac_shdma_ops; | ||
359 | su_dev->shdma_dev.desc_size = sizeof(struct sudmac_desc); | ||
360 | err = shdma_init(&pdev->dev, &su_dev->shdma_dev, pdata->channel_num); | ||
361 | if (err < 0) | ||
362 | return err; | ||
363 | |||
364 | /* platform data */ | ||
365 | su_dev->pdata = dev_get_platdata(&pdev->dev); | ||
366 | |||
367 | platform_set_drvdata(pdev, su_dev); | ||
368 | |||
369 | /* Create DMA Channel */ | ||
370 | for (i = 0; i < pdata->channel_num; i++) { | ||
371 | err = sudmac_chan_probe(su_dev, i, irq_res->start, IRQF_SHARED); | ||
372 | if (err) | ||
373 | goto chan_probe_err; | ||
374 | } | ||
375 | |||
376 | err = dma_async_device_register(&su_dev->shdma_dev.dma_dev); | ||
377 | if (err < 0) | ||
378 | goto chan_probe_err; | ||
379 | |||
380 | return err; | ||
381 | |||
382 | chan_probe_err: | ||
383 | sudmac_chan_remove(su_dev); | ||
384 | |||
385 | shdma_cleanup(&su_dev->shdma_dev); | ||
386 | |||
387 | return err; | ||
388 | } | ||
389 | |||
390 | static int sudmac_remove(struct platform_device *pdev) | ||
391 | { | ||
392 | struct sudmac_device *su_dev = platform_get_drvdata(pdev); | ||
393 | struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev; | ||
394 | |||
395 | dma_async_device_unregister(dma_dev); | ||
396 | sudmac_chan_remove(su_dev); | ||
397 | shdma_cleanup(&su_dev->shdma_dev); | ||
398 | |||
399 | return 0; | ||
400 | } | ||
401 | |||
402 | static struct platform_driver sudmac_driver = { | ||
403 | .driver = { | ||
404 | .name = SUDMAC_DRV_NAME, | ||
405 | }, | ||
406 | .probe = sudmac_probe, | ||
407 | .remove = sudmac_remove, | ||
408 | }; | ||
409 | module_platform_driver(sudmac_driver); | ||
410 | |||
411 | MODULE_AUTHOR("Yoshihiro Shimoda"); | ||
412 | MODULE_DESCRIPTION("Renesas SUDMAC driver"); | ||
413 | MODULE_LICENSE("GPL v2"); | ||
414 | MODULE_ALIAS("platform:" SUDMAC_DRV_NAME); | ||
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c index 59403f6d008a..17063aaf51bc 100644 --- a/drivers/dma/sh/usb-dmac.c +++ b/drivers/dma/sh/usb-dmac.c | |||
@@ -57,7 +57,7 @@ struct usb_dmac_desc { | |||
57 | u32 residue; | 57 | u32 residue; |
58 | struct list_head node; | 58 | struct list_head node; |
59 | dma_cookie_t done_cookie; | 59 | dma_cookie_t done_cookie; |
60 | struct usb_dmac_sg sg[0]; | 60 | struct usb_dmac_sg sg[]; |
61 | }; | 61 | }; |
62 | 62 | ||
63 | #define to_usb_dmac_desc(vd) container_of(vd, struct usb_dmac_desc, vd) | 63 | #define to_usb_dmac_desc(vd) container_of(vd, struct usb_dmac_desc, vd) |
@@ -636,9 +636,6 @@ static bool usb_dmac_chan_filter(struct dma_chan *chan, void *arg) | |||
636 | struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan); | 636 | struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan); |
637 | struct of_phandle_args *dma_spec = arg; | 637 | struct of_phandle_args *dma_spec = arg; |
638 | 638 | ||
639 | if (dma_spec->np != chan->device->dev->of_node) | ||
640 | return false; | ||
641 | |||
642 | /* USB-DMAC should be used with fixed usb controller's FIFO */ | 639 | /* USB-DMAC should be used with fixed usb controller's FIFO */ |
643 | if (uchan->index != dma_spec->args[0]) | 640 | if (uchan->index != dma_spec->args[0]) |
644 | return false; | 641 | return false; |
@@ -659,7 +656,8 @@ static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec, | |||
659 | dma_cap_zero(mask); | 656 | dma_cap_zero(mask); |
660 | dma_cap_set(DMA_SLAVE, mask); | 657 | dma_cap_set(DMA_SLAVE, mask); |
661 | 658 | ||
662 | chan = dma_request_channel(mask, usb_dmac_chan_filter, dma_spec); | 659 | chan = __dma_request_channel(&mask, usb_dmac_chan_filter, dma_spec, |
660 | ofdma->of_node); | ||
663 | if (!chan) | 661 | if (!chan) |
664 | return NULL; | 662 | return NULL; |
665 | 663 | ||
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index da41bab98f5b..ef4d109e7189 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c | |||
@@ -1365,7 +1365,6 @@ static int stm32_dma_probe(struct platform_device *pdev) | |||
1365 | 1365 | ||
1366 | for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) { | 1366 | for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) { |
1367 | chan = &dmadev->chan[i]; | 1367 | chan = &dmadev->chan[i]; |
1368 | chan->irq = platform_get_irq(pdev, i); | ||
1369 | ret = platform_get_irq(pdev, i); | 1368 | ret = platform_get_irq(pdev, i); |
1370 | if (ret < 0) { | 1369 | if (ret < 0) { |
1371 | if (ret != -EPROBE_DEFER) | 1370 | if (ret != -EPROBE_DEFER) |
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c index 715aad7a9192..b552949da14b 100644 --- a/drivers/dma/stm32-dmamux.c +++ b/drivers/dma/stm32-dmamux.c | |||
@@ -295,8 +295,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev) | |||
295 | #ifdef CONFIG_PM | 295 | #ifdef CONFIG_PM |
296 | static int stm32_dmamux_runtime_suspend(struct device *dev) | 296 | static int stm32_dmamux_runtime_suspend(struct device *dev) |
297 | { | 297 | { |
298 | struct platform_device *pdev = | 298 | struct platform_device *pdev = to_platform_device(dev); |
299 | container_of(dev, struct platform_device, dev); | ||
300 | struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev); | 299 | struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev); |
301 | 300 | ||
302 | clk_disable_unprepare(stm32_dmamux->clk); | 301 | clk_disable_unprepare(stm32_dmamux->clk); |
@@ -306,8 +305,7 @@ static int stm32_dmamux_runtime_suspend(struct device *dev) | |||
306 | 305 | ||
307 | static int stm32_dmamux_runtime_resume(struct device *dev) | 306 | static int stm32_dmamux_runtime_resume(struct device *dev) |
308 | { | 307 | { |
309 | struct platform_device *pdev = | 308 | struct platform_device *pdev = to_platform_device(dev); |
310 | container_of(dev, struct platform_device, dev); | ||
311 | struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev); | 309 | struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev); |
312 | int ret; | 310 | int ret; |
313 | 311 | ||
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index e8fcc69b1de9..ed5b68dcfe50 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c | |||
@@ -64,17 +64,20 @@ | |||
64 | #define DMA_CHAN_LLI_ADDR 0x08 | 64 | #define DMA_CHAN_LLI_ADDR 0x08 |
65 | 65 | ||
66 | #define DMA_CHAN_CUR_CFG 0x0c | 66 | #define DMA_CHAN_CUR_CFG 0x0c |
67 | #define DMA_CHAN_MAX_DRQ 0x1f | 67 | #define DMA_CHAN_MAX_DRQ_A31 0x1f |
68 | #define DMA_CHAN_CFG_SRC_DRQ(x) ((x) & DMA_CHAN_MAX_DRQ) | 68 | #define DMA_CHAN_MAX_DRQ_H6 0x3f |
69 | #define DMA_CHAN_CFG_SRC_IO_MODE BIT(5) | 69 | #define DMA_CHAN_CFG_SRC_DRQ_A31(x) ((x) & DMA_CHAN_MAX_DRQ_A31) |
70 | #define DMA_CHAN_CFG_SRC_LINEAR_MODE (0 << 5) | 70 | #define DMA_CHAN_CFG_SRC_DRQ_H6(x) ((x) & DMA_CHAN_MAX_DRQ_H6) |
71 | #define DMA_CHAN_CFG_SRC_MODE_A31(x) (((x) & 0x1) << 5) | ||
72 | #define DMA_CHAN_CFG_SRC_MODE_H6(x) (((x) & 0x1) << 8) | ||
71 | #define DMA_CHAN_CFG_SRC_BURST_A31(x) (((x) & 0x3) << 7) | 73 | #define DMA_CHAN_CFG_SRC_BURST_A31(x) (((x) & 0x3) << 7) |
72 | #define DMA_CHAN_CFG_SRC_BURST_H3(x) (((x) & 0x3) << 6) | 74 | #define DMA_CHAN_CFG_SRC_BURST_H3(x) (((x) & 0x3) << 6) |
73 | #define DMA_CHAN_CFG_SRC_WIDTH(x) (((x) & 0x3) << 9) | 75 | #define DMA_CHAN_CFG_SRC_WIDTH(x) (((x) & 0x3) << 9) |
74 | 76 | ||
75 | #define DMA_CHAN_CFG_DST_DRQ(x) (DMA_CHAN_CFG_SRC_DRQ(x) << 16) | 77 | #define DMA_CHAN_CFG_DST_DRQ_A31(x) (DMA_CHAN_CFG_SRC_DRQ_A31(x) << 16) |
76 | #define DMA_CHAN_CFG_DST_IO_MODE (DMA_CHAN_CFG_SRC_IO_MODE << 16) | 78 | #define DMA_CHAN_CFG_DST_DRQ_H6(x) (DMA_CHAN_CFG_SRC_DRQ_H6(x) << 16) |
77 | #define DMA_CHAN_CFG_DST_LINEAR_MODE (DMA_CHAN_CFG_SRC_LINEAR_MODE << 16) | 79 | #define DMA_CHAN_CFG_DST_MODE_A31(x) (DMA_CHAN_CFG_SRC_MODE_A31(x) << 16) |
80 | #define DMA_CHAN_CFG_DST_MODE_H6(x) (DMA_CHAN_CFG_SRC_MODE_H6(x) << 16) | ||
78 | #define DMA_CHAN_CFG_DST_BURST_A31(x) (DMA_CHAN_CFG_SRC_BURST_A31(x) << 16) | 81 | #define DMA_CHAN_CFG_DST_BURST_A31(x) (DMA_CHAN_CFG_SRC_BURST_A31(x) << 16) |
79 | #define DMA_CHAN_CFG_DST_BURST_H3(x) (DMA_CHAN_CFG_SRC_BURST_H3(x) << 16) | 82 | #define DMA_CHAN_CFG_DST_BURST_H3(x) (DMA_CHAN_CFG_SRC_BURST_H3(x) << 16) |
80 | #define DMA_CHAN_CFG_DST_WIDTH(x) (DMA_CHAN_CFG_SRC_WIDTH(x) << 16) | 83 | #define DMA_CHAN_CFG_DST_WIDTH(x) (DMA_CHAN_CFG_SRC_WIDTH(x) << 16) |
@@ -94,6 +97,8 @@ | |||
94 | #define LLI_LAST_ITEM 0xfffff800 | 97 | #define LLI_LAST_ITEM 0xfffff800 |
95 | #define NORMAL_WAIT 8 | 98 | #define NORMAL_WAIT 8 |
96 | #define DRQ_SDRAM 1 | 99 | #define DRQ_SDRAM 1 |
100 | #define LINEAR_MODE 0 | ||
101 | #define IO_MODE 1 | ||
97 | 102 | ||
98 | /* forward declaration */ | 103 | /* forward declaration */ |
99 | struct sun6i_dma_dev; | 104 | struct sun6i_dma_dev; |
@@ -121,10 +126,13 @@ struct sun6i_dma_config { | |||
121 | */ | 126 | */ |
122 | void (*clock_autogate_enable)(struct sun6i_dma_dev *); | 127 | void (*clock_autogate_enable)(struct sun6i_dma_dev *); |
123 | void (*set_burst_length)(u32 *p_cfg, s8 src_burst, s8 dst_burst); | 128 | void (*set_burst_length)(u32 *p_cfg, s8 src_burst, s8 dst_burst); |
129 | void (*set_drq)(u32 *p_cfg, s8 src_drq, s8 dst_drq); | ||
130 | void (*set_mode)(u32 *p_cfg, s8 src_mode, s8 dst_mode); | ||
124 | u32 src_burst_lengths; | 131 | u32 src_burst_lengths; |
125 | u32 dst_burst_lengths; | 132 | u32 dst_burst_lengths; |
126 | u32 src_addr_widths; | 133 | u32 src_addr_widths; |
127 | u32 dst_addr_widths; | 134 | u32 dst_addr_widths; |
135 | bool has_mbus_clk; | ||
128 | }; | 136 | }; |
129 | 137 | ||
130 | /* | 138 | /* |
@@ -178,6 +186,7 @@ struct sun6i_dma_dev { | |||
178 | struct dma_device slave; | 186 | struct dma_device slave; |
179 | void __iomem *base; | 187 | void __iomem *base; |
180 | struct clk *clk; | 188 | struct clk *clk; |
189 | struct clk *clk_mbus; | ||
181 | int irq; | 190 | int irq; |
182 | spinlock_t lock; | 191 | spinlock_t lock; |
183 | struct reset_control *rstc; | 192 | struct reset_control *rstc; |
@@ -305,6 +314,30 @@ static void sun6i_set_burst_length_h3(u32 *p_cfg, s8 src_burst, s8 dst_burst) | |||
305 | DMA_CHAN_CFG_DST_BURST_H3(dst_burst); | 314 | DMA_CHAN_CFG_DST_BURST_H3(dst_burst); |
306 | } | 315 | } |
307 | 316 | ||
317 | static void sun6i_set_drq_a31(u32 *p_cfg, s8 src_drq, s8 dst_drq) | ||
318 | { | ||
319 | *p_cfg |= DMA_CHAN_CFG_SRC_DRQ_A31(src_drq) | | ||
320 | DMA_CHAN_CFG_DST_DRQ_A31(dst_drq); | ||
321 | } | ||
322 | |||
323 | static void sun6i_set_drq_h6(u32 *p_cfg, s8 src_drq, s8 dst_drq) | ||
324 | { | ||
325 | *p_cfg |= DMA_CHAN_CFG_SRC_DRQ_H6(src_drq) | | ||
326 | DMA_CHAN_CFG_DST_DRQ_H6(dst_drq); | ||
327 | } | ||
328 | |||
329 | static void sun6i_set_mode_a31(u32 *p_cfg, s8 src_mode, s8 dst_mode) | ||
330 | { | ||
331 | *p_cfg |= DMA_CHAN_CFG_SRC_MODE_A31(src_mode) | | ||
332 | DMA_CHAN_CFG_DST_MODE_A31(dst_mode); | ||
333 | } | ||
334 | |||
335 | static void sun6i_set_mode_h6(u32 *p_cfg, s8 src_mode, s8 dst_mode) | ||
336 | { | ||
337 | *p_cfg |= DMA_CHAN_CFG_SRC_MODE_H6(src_mode) | | ||
338 | DMA_CHAN_CFG_DST_MODE_H6(dst_mode); | ||
339 | } | ||
340 | |||
308 | static size_t sun6i_get_chan_size(struct sun6i_pchan *pchan) | 341 | static size_t sun6i_get_chan_size(struct sun6i_pchan *pchan) |
309 | { | 342 | { |
310 | struct sun6i_desc *txd = pchan->desc; | 343 | struct sun6i_desc *txd = pchan->desc; |
@@ -628,14 +661,12 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( | |||
628 | 661 | ||
629 | burst = convert_burst(8); | 662 | burst = convert_burst(8); |
630 | width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES); | 663 | width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES); |
631 | v_lli->cfg = DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | | 664 | v_lli->cfg = DMA_CHAN_CFG_SRC_WIDTH(width) | |
632 | DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | | ||
633 | DMA_CHAN_CFG_DST_LINEAR_MODE | | ||
634 | DMA_CHAN_CFG_SRC_LINEAR_MODE | | ||
635 | DMA_CHAN_CFG_SRC_WIDTH(width) | | ||
636 | DMA_CHAN_CFG_DST_WIDTH(width); | 665 | DMA_CHAN_CFG_DST_WIDTH(width); |
637 | 666 | ||
638 | sdev->cfg->set_burst_length(&v_lli->cfg, burst, burst); | 667 | sdev->cfg->set_burst_length(&v_lli->cfg, burst, burst); |
668 | sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, DRQ_SDRAM); | ||
669 | sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, LINEAR_MODE); | ||
639 | 670 | ||
640 | sun6i_dma_lli_add(NULL, v_lli, p_lli, txd); | 671 | sun6i_dma_lli_add(NULL, v_lli, p_lli, txd); |
641 | 672 | ||
@@ -687,11 +718,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( | |||
687 | if (dir == DMA_MEM_TO_DEV) { | 718 | if (dir == DMA_MEM_TO_DEV) { |
688 | v_lli->src = sg_dma_address(sg); | 719 | v_lli->src = sg_dma_address(sg); |
689 | v_lli->dst = sconfig->dst_addr; | 720 | v_lli->dst = sconfig->dst_addr; |
690 | v_lli->cfg = lli_cfg | | 721 | v_lli->cfg = lli_cfg; |
691 | DMA_CHAN_CFG_DST_IO_MODE | | 722 | sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port); |
692 | DMA_CHAN_CFG_SRC_LINEAR_MODE | | 723 | sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE); |
693 | DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | | ||
694 | DMA_CHAN_CFG_DST_DRQ(vchan->port); | ||
695 | 724 | ||
696 | dev_dbg(chan2dev(chan), | 725 | dev_dbg(chan2dev(chan), |
697 | "%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n", | 726 | "%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n", |
@@ -702,11 +731,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( | |||
702 | } else { | 731 | } else { |
703 | v_lli->src = sconfig->src_addr; | 732 | v_lli->src = sconfig->src_addr; |
704 | v_lli->dst = sg_dma_address(sg); | 733 | v_lli->dst = sg_dma_address(sg); |
705 | v_lli->cfg = lli_cfg | | 734 | v_lli->cfg = lli_cfg; |
706 | DMA_CHAN_CFG_DST_LINEAR_MODE | | 735 | sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM); |
707 | DMA_CHAN_CFG_SRC_IO_MODE | | 736 | sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE); |
708 | DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | | ||
709 | DMA_CHAN_CFG_SRC_DRQ(vchan->port); | ||
710 | 737 | ||
711 | dev_dbg(chan2dev(chan), | 738 | dev_dbg(chan2dev(chan), |
712 | "%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n", | 739 | "%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n", |
@@ -772,19 +799,15 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic( | |||
772 | if (dir == DMA_MEM_TO_DEV) { | 799 | if (dir == DMA_MEM_TO_DEV) { |
773 | v_lli->src = buf_addr + period_len * i; | 800 | v_lli->src = buf_addr + period_len * i; |
774 | v_lli->dst = sconfig->dst_addr; | 801 | v_lli->dst = sconfig->dst_addr; |
775 | v_lli->cfg = lli_cfg | | 802 | v_lli->cfg = lli_cfg; |
776 | DMA_CHAN_CFG_DST_IO_MODE | | 803 | sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port); |
777 | DMA_CHAN_CFG_SRC_LINEAR_MODE | | 804 | sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE); |
778 | DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | | ||
779 | DMA_CHAN_CFG_DST_DRQ(vchan->port); | ||
780 | } else { | 805 | } else { |
781 | v_lli->src = sconfig->src_addr; | 806 | v_lli->src = sconfig->src_addr; |
782 | v_lli->dst = buf_addr + period_len * i; | 807 | v_lli->dst = buf_addr + period_len * i; |
783 | v_lli->cfg = lli_cfg | | 808 | v_lli->cfg = lli_cfg; |
784 | DMA_CHAN_CFG_DST_LINEAR_MODE | | 809 | sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM); |
785 | DMA_CHAN_CFG_SRC_IO_MODE | | 810 | sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE); |
786 | DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | | ||
787 | DMA_CHAN_CFG_SRC_DRQ(vchan->port); | ||
788 | } | 811 | } |
789 | 812 | ||
790 | prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd); | 813 | prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd); |
@@ -1049,6 +1072,8 @@ static struct sun6i_dma_config sun6i_a31_dma_cfg = { | |||
1049 | .nr_max_requests = 30, | 1072 | .nr_max_requests = 30, |
1050 | .nr_max_vchans = 53, | 1073 | .nr_max_vchans = 53, |
1051 | .set_burst_length = sun6i_set_burst_length_a31, | 1074 | .set_burst_length = sun6i_set_burst_length_a31, |
1075 | .set_drq = sun6i_set_drq_a31, | ||
1076 | .set_mode = sun6i_set_mode_a31, | ||
1052 | .src_burst_lengths = BIT(1) | BIT(8), | 1077 | .src_burst_lengths = BIT(1) | BIT(8), |
1053 | .dst_burst_lengths = BIT(1) | BIT(8), | 1078 | .dst_burst_lengths = BIT(1) | BIT(8), |
1054 | .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | 1079 | .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | |
@@ -1070,6 +1095,8 @@ static struct sun6i_dma_config sun8i_a23_dma_cfg = { | |||
1070 | .nr_max_vchans = 37, | 1095 | .nr_max_vchans = 37, |
1071 | .clock_autogate_enable = sun6i_enable_clock_autogate_a23, | 1096 | .clock_autogate_enable = sun6i_enable_clock_autogate_a23, |
1072 | .set_burst_length = sun6i_set_burst_length_a31, | 1097 | .set_burst_length = sun6i_set_burst_length_a31, |
1098 | .set_drq = sun6i_set_drq_a31, | ||
1099 | .set_mode = sun6i_set_mode_a31, | ||
1073 | .src_burst_lengths = BIT(1) | BIT(8), | 1100 | .src_burst_lengths = BIT(1) | BIT(8), |
1074 | .dst_burst_lengths = BIT(1) | BIT(8), | 1101 | .dst_burst_lengths = BIT(1) | BIT(8), |
1075 | .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | 1102 | .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | |
@@ -1086,6 +1113,8 @@ static struct sun6i_dma_config sun8i_a83t_dma_cfg = { | |||
1086 | .nr_max_vchans = 39, | 1113 | .nr_max_vchans = 39, |
1087 | .clock_autogate_enable = sun6i_enable_clock_autogate_a23, | 1114 | .clock_autogate_enable = sun6i_enable_clock_autogate_a23, |
1088 | .set_burst_length = sun6i_set_burst_length_a31, | 1115 | .set_burst_length = sun6i_set_burst_length_a31, |
1116 | .set_drq = sun6i_set_drq_a31, | ||
1117 | .set_mode = sun6i_set_mode_a31, | ||
1089 | .src_burst_lengths = BIT(1) | BIT(8), | 1118 | .src_burst_lengths = BIT(1) | BIT(8), |
1090 | .dst_burst_lengths = BIT(1) | BIT(8), | 1119 | .dst_burst_lengths = BIT(1) | BIT(8), |
1091 | .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | 1120 | .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | |
@@ -1109,6 +1138,8 @@ static struct sun6i_dma_config sun8i_h3_dma_cfg = { | |||
1109 | .nr_max_vchans = 34, | 1138 | .nr_max_vchans = 34, |
1110 | .clock_autogate_enable = sun6i_enable_clock_autogate_h3, | 1139 | .clock_autogate_enable = sun6i_enable_clock_autogate_h3, |
1111 | .set_burst_length = sun6i_set_burst_length_h3, | 1140 | .set_burst_length = sun6i_set_burst_length_h3, |
1141 | .set_drq = sun6i_set_drq_a31, | ||
1142 | .set_mode = sun6i_set_mode_a31, | ||
1112 | .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), | 1143 | .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), |
1113 | .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), | 1144 | .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), |
1114 | .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | 1145 | .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | |
@@ -1128,6 +1159,8 @@ static struct sun6i_dma_config sun8i_h3_dma_cfg = { | |||
1128 | static struct sun6i_dma_config sun50i_a64_dma_cfg = { | 1159 | static struct sun6i_dma_config sun50i_a64_dma_cfg = { |
1129 | .clock_autogate_enable = sun6i_enable_clock_autogate_h3, | 1160 | .clock_autogate_enable = sun6i_enable_clock_autogate_h3, |
1130 | .set_burst_length = sun6i_set_burst_length_h3, | 1161 | .set_burst_length = sun6i_set_burst_length_h3, |
1162 | .set_drq = sun6i_set_drq_a31, | ||
1163 | .set_mode = sun6i_set_mode_a31, | ||
1131 | .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), | 1164 | .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), |
1132 | .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), | 1165 | .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), |
1133 | .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | 1166 | .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | |
@@ -1141,6 +1174,28 @@ static struct sun6i_dma_config sun50i_a64_dma_cfg = { | |||
1141 | }; | 1174 | }; |
1142 | 1175 | ||
1143 | /* | 1176 | /* |
1177 | * The H6 binding uses the number of dma channels from the | ||
1178 | * device tree node. | ||
1179 | */ | ||
1180 | static struct sun6i_dma_config sun50i_h6_dma_cfg = { | ||
1181 | .clock_autogate_enable = sun6i_enable_clock_autogate_h3, | ||
1182 | .set_burst_length = sun6i_set_burst_length_h3, | ||
1183 | .set_drq = sun6i_set_drq_h6, | ||
1184 | .set_mode = sun6i_set_mode_h6, | ||
1185 | .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), | ||
1186 | .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), | ||
1187 | .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | ||
1188 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | | ||
1189 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | | ||
1190 | BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), | ||
1191 | .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | ||
1192 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | | ||
1193 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | | ||
1194 | BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), | ||
1195 | .has_mbus_clk = true, | ||
1196 | }; | ||
1197 | |||
1198 | /* | ||
1144 | * The V3s have only 8 physical channels, a maximum DRQ port id of 23, | 1199 | * The V3s have only 8 physical channels, a maximum DRQ port id of 23, |
1145 | * and a total of 24 usable source and destination endpoints. | 1200 | * and a total of 24 usable source and destination endpoints. |
1146 | */ | 1201 | */ |
@@ -1151,6 +1206,8 @@ static struct sun6i_dma_config sun8i_v3s_dma_cfg = { | |||
1151 | .nr_max_vchans = 24, | 1206 | .nr_max_vchans = 24, |
1152 | .clock_autogate_enable = sun6i_enable_clock_autogate_a23, | 1207 | .clock_autogate_enable = sun6i_enable_clock_autogate_a23, |
1153 | .set_burst_length = sun6i_set_burst_length_a31, | 1208 | .set_burst_length = sun6i_set_burst_length_a31, |
1209 | .set_drq = sun6i_set_drq_a31, | ||
1210 | .set_mode = sun6i_set_mode_a31, | ||
1154 | .src_burst_lengths = BIT(1) | BIT(8), | 1211 | .src_burst_lengths = BIT(1) | BIT(8), |
1155 | .dst_burst_lengths = BIT(1) | BIT(8), | 1212 | .dst_burst_lengths = BIT(1) | BIT(8), |
1156 | .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | 1213 | .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | |
@@ -1168,6 +1225,7 @@ static const struct of_device_id sun6i_dma_match[] = { | |||
1168 | { .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg }, | 1225 | { .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg }, |
1169 | { .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg }, | 1226 | { .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg }, |
1170 | { .compatible = "allwinner,sun50i-a64-dma", .data = &sun50i_a64_dma_cfg }, | 1227 | { .compatible = "allwinner,sun50i-a64-dma", .data = &sun50i_a64_dma_cfg }, |
1228 | { .compatible = "allwinner,sun50i-h6-dma", .data = &sun50i_h6_dma_cfg }, | ||
1171 | { /* sentinel */ } | 1229 | { /* sentinel */ } |
1172 | }; | 1230 | }; |
1173 | MODULE_DEVICE_TABLE(of, sun6i_dma_match); | 1231 | MODULE_DEVICE_TABLE(of, sun6i_dma_match); |
@@ -1204,6 +1262,14 @@ static int sun6i_dma_probe(struct platform_device *pdev) | |||
1204 | return PTR_ERR(sdc->clk); | 1262 | return PTR_ERR(sdc->clk); |
1205 | } | 1263 | } |
1206 | 1264 | ||
1265 | if (sdc->cfg->has_mbus_clk) { | ||
1266 | sdc->clk_mbus = devm_clk_get(&pdev->dev, "mbus"); | ||
1267 | if (IS_ERR(sdc->clk_mbus)) { | ||
1268 | dev_err(&pdev->dev, "No mbus clock specified\n"); | ||
1269 | return PTR_ERR(sdc->clk_mbus); | ||
1270 | } | ||
1271 | } | ||
1272 | |||
1207 | sdc->rstc = devm_reset_control_get(&pdev->dev, NULL); | 1273 | sdc->rstc = devm_reset_control_get(&pdev->dev, NULL); |
1208 | if (IS_ERR(sdc->rstc)) { | 1274 | if (IS_ERR(sdc->rstc)) { |
1209 | dev_err(&pdev->dev, "No reset controller specified\n"); | 1275 | dev_err(&pdev->dev, "No reset controller specified\n"); |
@@ -1258,8 +1324,8 @@ static int sun6i_dma_probe(struct platform_device *pdev) | |||
1258 | ret = of_property_read_u32(np, "dma-requests", &sdc->max_request); | 1324 | ret = of_property_read_u32(np, "dma-requests", &sdc->max_request); |
1259 | if (ret && !sdc->max_request) { | 1325 | if (ret && !sdc->max_request) { |
1260 | dev_info(&pdev->dev, "Missing dma-requests, using %u.\n", | 1326 | dev_info(&pdev->dev, "Missing dma-requests, using %u.\n", |
1261 | DMA_CHAN_MAX_DRQ); | 1327 | DMA_CHAN_MAX_DRQ_A31); |
1262 | sdc->max_request = DMA_CHAN_MAX_DRQ; | 1328 | sdc->max_request = DMA_CHAN_MAX_DRQ_A31; |
1263 | } | 1329 | } |
1264 | 1330 | ||
1265 | /* | 1331 | /* |
@@ -1308,11 +1374,19 @@ static int sun6i_dma_probe(struct platform_device *pdev) | |||
1308 | goto err_reset_assert; | 1374 | goto err_reset_assert; |
1309 | } | 1375 | } |
1310 | 1376 | ||
1377 | if (sdc->cfg->has_mbus_clk) { | ||
1378 | ret = clk_prepare_enable(sdc->clk_mbus); | ||
1379 | if (ret) { | ||
1380 | dev_err(&pdev->dev, "Couldn't enable mbus clock\n"); | ||
1381 | goto err_clk_disable; | ||
1382 | } | ||
1383 | } | ||
1384 | |||
1311 | ret = devm_request_irq(&pdev->dev, sdc->irq, sun6i_dma_interrupt, 0, | 1385 | ret = devm_request_irq(&pdev->dev, sdc->irq, sun6i_dma_interrupt, 0, |
1312 | dev_name(&pdev->dev), sdc); | 1386 | dev_name(&pdev->dev), sdc); |
1313 | if (ret) { | 1387 | if (ret) { |
1314 | dev_err(&pdev->dev, "Cannot request IRQ\n"); | 1388 | dev_err(&pdev->dev, "Cannot request IRQ\n"); |
1315 | goto err_clk_disable; | 1389 | goto err_mbus_clk_disable; |
1316 | } | 1390 | } |
1317 | 1391 | ||
1318 | ret = dma_async_device_register(&sdc->slave); | 1392 | ret = dma_async_device_register(&sdc->slave); |
@@ -1337,6 +1411,8 @@ err_dma_unregister: | |||
1337 | dma_async_device_unregister(&sdc->slave); | 1411 | dma_async_device_unregister(&sdc->slave); |
1338 | err_irq_disable: | 1412 | err_irq_disable: |
1339 | sun6i_kill_tasklet(sdc); | 1413 | sun6i_kill_tasklet(sdc); |
1414 | err_mbus_clk_disable: | ||
1415 | clk_disable_unprepare(sdc->clk_mbus); | ||
1340 | err_clk_disable: | 1416 | err_clk_disable: |
1341 | clk_disable_unprepare(sdc->clk); | 1417 | clk_disable_unprepare(sdc->clk); |
1342 | err_reset_assert: | 1418 | err_reset_assert: |
@@ -1355,6 +1431,7 @@ static int sun6i_dma_remove(struct platform_device *pdev) | |||
1355 | 1431 | ||
1356 | sun6i_kill_tasklet(sdc); | 1432 | sun6i_kill_tasklet(sdc); |
1357 | 1433 | ||
1434 | clk_disable_unprepare(sdc->clk_mbus); | ||
1358 | clk_disable_unprepare(sdc->clk); | 1435 | clk_disable_unprepare(sdc->clk); |
1359 | reset_control_assert(sdc->rstc); | 1436 | reset_control_assert(sdc->rstc); |
1360 | 1437 | ||
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index ef317c90fbe1..79e9593815f1 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c | |||
@@ -977,8 +977,12 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( | |||
977 | csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; | 977 | csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; |
978 | } | 978 | } |
979 | 979 | ||
980 | if (flags & DMA_PREP_INTERRUPT) | 980 | if (flags & DMA_PREP_INTERRUPT) { |
981 | csr |= TEGRA_APBDMA_CSR_IE_EOC; | 981 | csr |= TEGRA_APBDMA_CSR_IE_EOC; |
982 | } else { | ||
983 | WARN_ON_ONCE(1); | ||
984 | return NULL; | ||
985 | } | ||
982 | 986 | ||
983 | apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; | 987 | apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; |
984 | 988 | ||
@@ -1120,8 +1124,12 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( | |||
1120 | csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; | 1124 | csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; |
1121 | } | 1125 | } |
1122 | 1126 | ||
1123 | if (flags & DMA_PREP_INTERRUPT) | 1127 | if (flags & DMA_PREP_INTERRUPT) { |
1124 | csr |= TEGRA_APBDMA_CSR_IE_EOC; | 1128 | csr |= TEGRA_APBDMA_CSR_IE_EOC; |
1129 | } else { | ||
1130 | WARN_ON_ONCE(1); | ||
1131 | return NULL; | ||
1132 | } | ||
1125 | 1133 | ||
1126 | apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; | 1134 | apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; |
1127 | 1135 | ||
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c index bb5390847257..ec4adf4260a0 100644 --- a/drivers/dma/virt-dma.c +++ b/drivers/dma/virt-dma.c | |||
@@ -98,7 +98,7 @@ static void vchan_complete(unsigned long arg) | |||
98 | } | 98 | } |
99 | spin_unlock_irq(&vc->lock); | 99 | spin_unlock_irq(&vc->lock); |
100 | 100 | ||
101 | dmaengine_desc_callback_invoke(&cb, NULL); | 101 | dmaengine_desc_callback_invoke(&cb, &vd->tx_result); |
102 | 102 | ||
103 | list_for_each_entry_safe(vd, _vd, &head, node) { | 103 | list_for_each_entry_safe(vd, _vd, &head, node) { |
104 | dmaengine_desc_get_callback(&vd->tx, &cb); | 104 | dmaengine_desc_get_callback(&vd->tx, &cb); |
@@ -106,7 +106,7 @@ static void vchan_complete(unsigned long arg) | |||
106 | list_del(&vd->node); | 106 | list_del(&vd->node); |
107 | vchan_vdesc_fini(vd); | 107 | vchan_vdesc_fini(vd); |
108 | 108 | ||
109 | dmaengine_desc_callback_invoke(&cb, NULL); | 109 | dmaengine_desc_callback_invoke(&cb, &vd->tx_result); |
110 | } | 110 | } |
111 | } | 111 | } |
112 | 112 | ||
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h index 23342ca23d4a..ab158bac03a7 100644 --- a/drivers/dma/virt-dma.h +++ b/drivers/dma/virt-dma.h | |||
@@ -14,6 +14,7 @@ | |||
14 | 14 | ||
15 | struct virt_dma_desc { | 15 | struct virt_dma_desc { |
16 | struct dma_async_tx_descriptor tx; | 16 | struct dma_async_tx_descriptor tx; |
17 | struct dmaengine_result tx_result; | ||
17 | /* protected by vc.lock */ | 18 | /* protected by vc.lock */ |
18 | struct list_head node; | 19 | struct list_head node; |
19 | }; | 20 | }; |
@@ -62,6 +63,9 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan | |||
62 | vd->tx.tx_submit = vchan_tx_submit; | 63 | vd->tx.tx_submit = vchan_tx_submit; |
63 | vd->tx.desc_free = vchan_tx_desc_free; | 64 | vd->tx.desc_free = vchan_tx_desc_free; |
64 | 65 | ||
66 | vd->tx_result.result = DMA_TRANS_NOERROR; | ||
67 | vd->tx_result.residue = 0; | ||
68 | |||
65 | spin_lock_irqsave(&vc->lock, flags); | 69 | spin_lock_irqsave(&vc->lock, flags); |
66 | list_add_tail(&vd->node, &vc->desc_allocated); | 70 | list_add_tail(&vd->node, &vc->desc_allocated); |
67 | spin_unlock_irqrestore(&vc->lock, flags); | 71 | spin_unlock_irqrestore(&vc->lock, flags); |
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 36c092349b5b..e7dc3c4dc8e0 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c | |||
@@ -1095,7 +1095,7 @@ static void xilinx_dma_start(struct xilinx_dma_chan *chan) | |||
1095 | static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) | 1095 | static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) |
1096 | { | 1096 | { |
1097 | struct xilinx_vdma_config *config = &chan->config; | 1097 | struct xilinx_vdma_config *config = &chan->config; |
1098 | struct xilinx_dma_tx_descriptor *desc, *tail_desc; | 1098 | struct xilinx_dma_tx_descriptor *desc; |
1099 | u32 reg, j; | 1099 | u32 reg, j; |
1100 | struct xilinx_vdma_tx_segment *segment, *last = NULL; | 1100 | struct xilinx_vdma_tx_segment *segment, *last = NULL; |
1101 | int i = 0; | 1101 | int i = 0; |
@@ -1112,8 +1112,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) | |||
1112 | 1112 | ||
1113 | desc = list_first_entry(&chan->pending_list, | 1113 | desc = list_first_entry(&chan->pending_list, |
1114 | struct xilinx_dma_tx_descriptor, node); | 1114 | struct xilinx_dma_tx_descriptor, node); |
1115 | tail_desc = list_last_entry(&chan->pending_list, | ||
1116 | struct xilinx_dma_tx_descriptor, node); | ||
1117 | 1115 | ||
1118 | /* Configure the hardware using info in the config structure */ | 1116 | /* Configure the hardware using info in the config structure */ |
1119 | if (chan->has_vflip) { | 1117 | if (chan->has_vflip) { |
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index 6765f10837ac..6e208a060a58 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c | |||
@@ -793,7 +793,7 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = { | |||
793 | { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) }, | 793 | { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) }, |
794 | { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) }, | 794 | { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) }, |
795 | { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0) }, | 795 | { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0) }, |
796 | { PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0xedda) }, | 796 | { PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) }, |
797 | { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654), | 797 | { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654), |
798 | .driver_data = (kernel_ulong_t)&am654_data | 798 | .driver_data = (kernel_ulong_t)&am654_data |
799 | }, | 799 | }, |
diff --git a/drivers/soc/tegra/fuse/fuse-tegra20.c b/drivers/soc/tegra/fuse/fuse-tegra20.c index 4bb16e9bc297..d4aef9c4a94c 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra20.c +++ b/drivers/soc/tegra/fuse/fuse-tegra20.c | |||
@@ -99,7 +99,7 @@ static int tegra20_fuse_probe(struct tegra_fuse *fuse) | |||
99 | dma_cap_zero(mask); | 99 | dma_cap_zero(mask); |
100 | dma_cap_set(DMA_SLAVE, mask); | 100 | dma_cap_set(DMA_SLAVE, mask); |
101 | 101 | ||
102 | fuse->apbdma.chan = __dma_request_channel(&mask, dma_filter, NULL); | 102 | fuse->apbdma.chan = dma_request_channel(mask, dma_filter, NULL); |
103 | if (!fuse->apbdma.chan) | 103 | if (!fuse->apbdma.chan) |
104 | return -EPROBE_DEFER; | 104 | return -EPROBE_DEFER; |
105 | 105 | ||
diff --git a/include/linux/dma/edma.h b/include/linux/dma/edma.h new file mode 100644 index 000000000000..cab6e18773da --- /dev/null +++ b/include/linux/dma/edma.h | |||
@@ -0,0 +1,47 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | /* | ||
3 | * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates. | ||
4 | * Synopsys DesignWare eDMA core driver | ||
5 | * | ||
6 | * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com> | ||
7 | */ | ||
8 | |||
9 | #ifndef _DW_EDMA_H | ||
10 | #define _DW_EDMA_H | ||
11 | |||
12 | #include <linux/device.h> | ||
13 | #include <linux/dmaengine.h> | ||
14 | |||
15 | struct dw_edma; | ||
16 | |||
17 | /** | ||
18 | * struct dw_edma_chip - representation of DesignWare eDMA controller hardware | ||
19 | * @dev: struct device of the eDMA controller | ||
20 | * @id: instance ID | ||
21 | * @irq: irq line | ||
22 | * @dw: struct dw_edma that is filed by dw_edma_probe() | ||
23 | */ | ||
24 | struct dw_edma_chip { | ||
25 | struct device *dev; | ||
26 | int id; | ||
27 | int irq; | ||
28 | struct dw_edma *dw; | ||
29 | }; | ||
30 | |||
31 | /* Export to the platform drivers */ | ||
32 | #if IS_ENABLED(CONFIG_DW_EDMA) | ||
33 | int dw_edma_probe(struct dw_edma_chip *chip); | ||
34 | int dw_edma_remove(struct dw_edma_chip *chip); | ||
35 | #else | ||
36 | static inline int dw_edma_probe(struct dw_edma_chip *chip) | ||
37 | { | ||
38 | return -ENODEV; | ||
39 | } | ||
40 | |||
41 | static inline int dw_edma_remove(struct dw_edma_chip *chip) | ||
42 | { | ||
43 | return 0; | ||
44 | } | ||
45 | #endif /* CONFIG_DW_EDMA */ | ||
46 | |||
47 | #endif /* _DW_EDMA_H */ | ||
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index c952f987ee57..8fcdee1c0cf9 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -1302,7 +1302,8 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie); | |||
1302 | enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); | 1302 | enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); |
1303 | void dma_issue_pending_all(void); | 1303 | void dma_issue_pending_all(void); |
1304 | struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, | 1304 | struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, |
1305 | dma_filter_fn fn, void *fn_param); | 1305 | dma_filter_fn fn, void *fn_param, |
1306 | struct device_node *np); | ||
1306 | struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name); | 1307 | struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name); |
1307 | 1308 | ||
1308 | struct dma_chan *dma_request_chan(struct device *dev, const char *name); | 1309 | struct dma_chan *dma_request_chan(struct device *dev, const char *name); |
@@ -1327,7 +1328,9 @@ static inline void dma_issue_pending_all(void) | |||
1327 | { | 1328 | { |
1328 | } | 1329 | } |
1329 | static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, | 1330 | static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, |
1330 | dma_filter_fn fn, void *fn_param) | 1331 | dma_filter_fn fn, |
1332 | void *fn_param, | ||
1333 | struct device_node *np) | ||
1331 | { | 1334 | { |
1332 | return NULL; | 1335 | return NULL; |
1333 | } | 1336 | } |
@@ -1399,7 +1402,8 @@ void dma_async_device_unregister(struct dma_device *device); | |||
1399 | void dma_run_dependencies(struct dma_async_tx_descriptor *tx); | 1402 | void dma_run_dependencies(struct dma_async_tx_descriptor *tx); |
1400 | struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); | 1403 | struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); |
1401 | struct dma_chan *dma_get_any_slave_channel(struct dma_device *device); | 1404 | struct dma_chan *dma_get_any_slave_channel(struct dma_device *device); |
1402 | #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y) | 1405 | #define dma_request_channel(mask, x, y) \ |
1406 | __dma_request_channel(&(mask), x, y, NULL) | ||
1403 | #define dma_request_slave_channel_compat(mask, x, y, dev, name) \ | 1407 | #define dma_request_slave_channel_compat(mask, x, y, dev, name) \ |
1404 | __dma_request_slave_channel_compat(&(mask), x, y, dev, name) | 1408 | __dma_request_slave_channel_compat(&(mask), x, y, dev, name) |
1405 | 1409 | ||
@@ -1417,6 +1421,6 @@ static inline struct dma_chan | |||
1417 | if (!fn || !fn_param) | 1421 | if (!fn || !fn_param) |
1418 | return NULL; | 1422 | return NULL; |
1419 | 1423 | ||
1420 | return __dma_request_channel(mask, fn, fn_param); | 1424 | return __dma_request_channel(mask, fn, fn_param, NULL); |
1421 | } | 1425 | } |
1422 | #endif /* DMAENGINE_H */ | 1426 | #endif /* DMAENGINE_H */ |
diff --git a/include/linux/fpga/adi-axi-common.h b/include/linux/fpga/adi-axi-common.h new file mode 100644 index 000000000000..7fc95d5c95bb --- /dev/null +++ b/include/linux/fpga/adi-axi-common.h | |||
@@ -0,0 +1,19 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | /* | ||
3 | * Analog Devices AXI common registers & definitions | ||
4 | * | ||
5 | * Copyright 2019 Analog Devices Inc. | ||
6 | * | ||
7 | * https://wiki.analog.com/resources/fpga/docs/axi_ip | ||
8 | * https://wiki.analog.com/resources/fpga/docs/hdl/regmap | ||
9 | */ | ||
10 | |||
11 | #ifndef ADI_AXI_COMMON_H_ | ||
12 | #define ADI_AXI_COMMON_H_ | ||
13 | |||
14 | #define ADI_AXI_REG_VERSION 0x0000 | ||
15 | |||
16 | #define ADI_AXI_PCORE_VER(major, minor, patch) \ | ||
17 | (((major) << 16) | ((minor) << 8) | (patch)) | ||
18 | |||
19 | #endif /* ADI_AXI_COMMON_H_ */ | ||
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 40015609c4b5..c842735a4f45 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -2367,6 +2367,7 @@ | |||
2367 | #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd | 2367 | #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd |
2368 | #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI 0xabce | 2368 | #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI 0xabce |
2369 | #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31 0xabcf | 2369 | #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31 0xabcf |
2370 | #define PCI_DEVICE_ID_SYNOPSYS_EDDA 0xedda | ||
2370 | 2371 | ||
2371 | #define PCI_VENDOR_ID_USR 0x16ec | 2372 | #define PCI_VENDOR_ID_USR 0x16ec |
2372 | 2373 | ||
diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h index 80f9be858bd0..281adbb26e6b 100644 --- a/include/linux/platform_data/dma-imx.h +++ b/include/linux/platform_data/dma-imx.h | |||
@@ -52,7 +52,6 @@ struct imx_dma_data { | |||
52 | int dma_request2; /* secondary DMA request line */ | 52 | int dma_request2; /* secondary DMA request line */ |
53 | enum sdma_peripheral_type peripheral_type; | 53 | enum sdma_peripheral_type peripheral_type; |
54 | int priority; | 54 | int priority; |
55 | struct device_node *of_node; | ||
56 | }; | 55 | }; |
57 | 56 | ||
58 | static inline int imx_dma_is_ipu(struct dma_chan *chan) | 57 | static inline int imx_dma_is_ipu(struct dma_chan *chan) |
diff --git a/include/linux/sudmac.h b/include/linux/sudmac.h deleted file mode 100644 index cccc0a665d26..000000000000 --- a/include/linux/sudmac.h +++ /dev/null | |||
@@ -1,49 +0,0 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0-only */ | ||
2 | /* | ||
3 | * Header for the SUDMAC driver | ||
4 | * | ||
5 | * Copyright (C) 2013 Renesas Solutions Corp. | ||
6 | */ | ||
7 | #ifndef SUDMAC_H | ||
8 | #define SUDMAC_H | ||
9 | |||
10 | #include <linux/dmaengine.h> | ||
11 | #include <linux/shdma-base.h> | ||
12 | #include <linux/types.h> | ||
13 | |||
14 | /* Used by slave DMA clients to request DMA to/from a specific peripheral */ | ||
15 | struct sudmac_slave { | ||
16 | struct shdma_slave shdma_slave; /* Set by the platform */ | ||
17 | }; | ||
18 | |||
19 | /* | ||
20 | * Supplied by platforms to specify, how a DMA channel has to be configured for | ||
21 | * a certain peripheral | ||
22 | */ | ||
23 | struct sudmac_slave_config { | ||
24 | int slave_id; | ||
25 | }; | ||
26 | |||
27 | struct sudmac_channel { | ||
28 | unsigned long offset; | ||
29 | unsigned long config; | ||
30 | unsigned long wait; /* The configurable range is 0 to 3 */ | ||
31 | unsigned long dint_end_bit; | ||
32 | }; | ||
33 | |||
34 | struct sudmac_pdata { | ||
35 | const struct sudmac_slave_config *slave; | ||
36 | int slave_num; | ||
37 | const struct sudmac_channel *channel; | ||
38 | int channel_num; | ||
39 | }; | ||
40 | |||
41 | /* Definitions for the sudmac_channel.config */ | ||
42 | #define SUDMAC_TX_BUFFER_MODE BIT(0) | ||
43 | #define SUDMAC_RX_END_MODE BIT(1) | ||
44 | |||
45 | /* Definitions for the sudmac_channel.dint_end_bit */ | ||
46 | #define SUDMAC_DMA_BIT_CH0 BIT(0) | ||
47 | #define SUDMAC_DMA_BIT_CH1 BIT(1) | ||
48 | |||
49 | #endif | ||