author     Linus Torvalds <torvalds@linux-foundation.org>  2019-01-01 18:45:48 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-01-01 18:45:48 -0500
commit     78e8696c234ab637c4dd516cabeac344d84ec10b
tree       95b553e6d5640ad52cf86986e009e7eef86f916e
parent     fcf010449ebe1db0cb68b2c6410972a782f2bd14
parent     660611827c03afeb0eec178dc1fb9f842332d908
Merge tag 'dmaengine-4.21-rc1' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine updates from Vinod Koul:
"This includes a new driver, removes R-Mobile APE6 as it is no longer
used, sprd cyclic dma support, last batch of dma_slave_config
direction removal and random updates to bunch of drivers.
Summary:
- New driver for UniPhier MIO DMA controller
- Remove R-Mobile APE6 support
- Sprd driver updates and support for cyclic link-list
- Remove dma_slave_config direction usage from rest of drivers
- Minor updates to dmatest, dw-dmac, zynqmp and bcm dma drivers"
* tag 'dmaengine-4.21-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (48 commits)
dmaengine: qcom_hidma: convert to DEFINE_SHOW_ATTRIBUTE
dmaengine: pxa: remove DBGFS_FUNC_DECL()
dmaengine: mic_x100_dma: convert to DEFINE_SHOW_ATTRIBUTE
dmaengine: amba-pl08x: convert to DEFINE_SHOW_ATTRIBUTE
dmaengine: Documentation: Add documentation for multi chan testing
dmaengine: dmatest: Add transfer_size parameter
dmaengine: dmatest: Add alignment parameter
dmaengine: dmatest: Use fixed point div to calculate iops
dmaengine: dmatest: Add support for multi channel testing
dmaengine: rcar-dmac: Document R8A774C0 bindings
dt-bindings: dmaengine: usb-dmac: Add binding for r8a774c0
dmaengine: zynqmp_dma: replace spin_lock_bh with spin_lock_irqsave
dmaengine: sprd: Add me as one of the module authors
dmaengine: sprd: Support DMA 2-stage transfer mode
dmaengine: sprd: Support DMA link-list cyclic callback
dmaengine: sprd: Set cur_desc as NULL when free or terminate one dma channel
dmaengine: sprd: Fix the last link-list configuration
dmaengine: sprd: Get transfer residue depending on the transfer direction
dmaengine: sprd: Remove direction usage from struct dma_slave_config
dmaengine: dmatest: fix a small memory leak in dmatest_func()
...
41 files changed, 2296 insertions, 394 deletions
diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
index a5a7c3f5a1e3..5a512c5ea76a 100644
--- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
@@ -1,6 +1,6 @@
 * Renesas R-Car (RZ/G) DMA Controller Device Tree bindings
 
-Renesas R-Car Generation 2 SoCs have multiple multi-channel DMA
+Renesas R-Car (Gen 2/3) and RZ/G SoCs have multiple multi-channel DMA
 controller instances named DMAC capable of serving multiple clients. Channels
 can be dedicated to specific clients or shared between a large number of
 clients.
@@ -20,6 +20,8 @@ Required Properties:
 		      - "renesas,dmac-r8a7744" (RZ/G1N)
 		      - "renesas,dmac-r8a7745" (RZ/G1E)
 		      - "renesas,dmac-r8a77470" (RZ/G1C)
+		      - "renesas,dmac-r8a774a1" (RZ/G2M)
+		      - "renesas,dmac-r8a774c0" (RZ/G2E)
 		      - "renesas,dmac-r8a7790" (R-Car H2)
 		      - "renesas,dmac-r8a7791" (R-Car M2-W)
 		      - "renesas,dmac-r8a7792" (R-Car V2H)
diff --git a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
index 1743017bd948..372f0eeb5a2a 100644
--- a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
@@ -6,6 +6,9 @@ Required Properties:
 	      - "renesas,r8a7743-usb-dmac" (RZ/G1M)
 	      - "renesas,r8a7744-usb-dmac" (RZ/G1N)
 	      - "renesas,r8a7745-usb-dmac" (RZ/G1E)
+	      - "renesas,r8a77470-usb-dmac" (RZ/G1C)
+	      - "renesas,r8a774a1-usb-dmac" (RZ/G2M)
+	      - "renesas,r8a774c0-usb-dmac" (RZ/G2E)
 	      - "renesas,r8a7790-usb-dmac" (R-Car H2)
 	      - "renesas,r8a7791-usb-dmac" (R-Car M2-W)
 	      - "renesas,r8a7793-usb-dmac" (R-Car M2-N)
diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt
index 39e2b26be344..db757df7057d 100644
--- a/Documentation/devicetree/bindings/dma/snps-dma.txt
+++ b/Documentation/devicetree/bindings/dma/snps-dma.txt
@@ -27,6 +27,10 @@ Optional properties:
   general purpose DMA channel allocator. False if not passed.
 - multi-block: Multi block transfers supported by hardware. Array property with
   one cell per channel. 0: not supported, 1 (default): supported.
+- snps,dma-protection-control: AHB HPROT[3:1] protection setting.
+  The default value is 0 (for non-cacheable, non-buffered,
+  unprivileged data access).
+  Refer to include/dt-bindings/dma/dw-dmac.h for possible values.
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/dma/uniphier-mio-dmac.txt b/Documentation/devicetree/bindings/dma/uniphier-mio-dmac.txt
new file mode 100644
index 000000000000..b12388dc7eac
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/uniphier-mio-dmac.txt
@@ -0,0 +1,25 @@
+UniPhier Media IO DMA controller
+
+This works as an external DMA engine for SD/eMMC controllers etc.
+found in UniPhier LD4, Pro4, sLD8 SoCs.
+
+Required properties:
+- compatible: should be "socionext,uniphier-mio-dmac".
+- reg: offset and length of the register set for the device.
+- interrupts: a list of interrupt specifiers associated with the DMA channels.
+- clocks: a single clock specifier.
+- #dma-cells: should be <1>. The single cell represents the channel index.
+
+Example:
+	dmac: dma-controller@5a000000 {
+		compatible = "socionext,uniphier-mio-dmac";
+		reg = <0x5a000000 0x1000>;
+		interrupts = <0 68 4>, <0 68 4>, <0 69 4>, <0 70 4>,
+			     <0 71 4>, <0 72 4>, <0 73 4>, <0 74 4>;
+		clocks = <&mio_clk 7>;
+		#dma-cells = <1>;
+	};
+
+Note:
+In the example above, "interrupts = <0 68 4>, <0 68 4>, ..." is not a typo.
+The first two channels share a single interrupt line.
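With #dma-cells = <1>, a client obtains one of these channels through the
generic dmaengine request API. A minimal consumer sketch follows; the "tx"
name, the assumed consumer wiring (dmas = <&dmac 4>; dma-names = "tx";) and
the surrounding driver are illustrative assumptions, not part of the binding:

	#include <linux/dmaengine.h>
	#include <linux/err.h>

	static int client_attach_dma(struct device *dev)
	{
		struct dma_chan *chan;

		/* resolved through #dma-cells: the single cell (4 here)
		 * selects the channel index on the controller
		 */
		chan = dma_request_chan(dev, "tx");
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		/* ...dmaengine_slave_config(), prep and submit descriptors... */
		dma_release_channel(chan);
		return 0;
	}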
diff --git a/Documentation/driver-api/dmaengine/dmatest.rst b/Documentation/driver-api/dmaengine/dmatest.rst
index 49efebd2c043..8d81f1a7169b 100644
--- a/Documentation/driver-api/dmaengine/dmatest.rst
+++ b/Documentation/driver-api/dmaengine/dmatest.rst
@@ -30,28 +30,43 @@ Part 2 - When dmatest is built as a module
 
 Example of usage::
 
-    % modprobe dmatest channel=dma0chan0 timeout=2000 iterations=1 run=1
+    % modprobe dmatest timeout=2000 iterations=1 channel=dma0chan0 run=1
 
 ...or::
 
     % modprobe dmatest
-    % echo dma0chan0 > /sys/module/dmatest/parameters/channel
     % echo 2000 > /sys/module/dmatest/parameters/timeout
     % echo 1 > /sys/module/dmatest/parameters/iterations
+    % echo dma0chan0 > /sys/module/dmatest/parameters/channel
     % echo 1 > /sys/module/dmatest/parameters/run
 
 ...or on the kernel command line::
 
-    dmatest.channel=dma0chan0 dmatest.timeout=2000 dmatest.iterations=1 dmatest.run=1
+    dmatest.timeout=2000 dmatest.iterations=1 dmatest.channel=dma0chan0 dmatest.run=1
+
+Example of multi-channel test usage:
+    % modprobe dmatest
+    % echo 2000 > /sys/module/dmatest/parameters/timeout
+    % echo 1 > /sys/module/dmatest/parameters/iterations
+    % echo dma0chan0 > /sys/module/dmatest/parameters/channel
+    % echo dma0chan1 > /sys/module/dmatest/parameters/channel
+    % echo dma0chan2 > /sys/module/dmatest/parameters/channel
+    % echo 1 > /sys/module/dmatest/parameters/run
 
+Note: the channel parameter should always be the last parameter set prior to
+running the test (setting run=1); this is because upon setting the channel
+parameter, that specific channel is requested using the dmaengine and a thread
+is created with the existing parameters. This thread is set as pending
+and will be executed once run is set to 1. Any parameters set after the thread
+is created are not applied.
 .. hint::
   available channel list could be extracted by running the following command::
 
    % ls -1 /sys/class/dma/
 
-Once started a message like "dmatest: Started 1 threads using dma0chan0" is
-emitted. After that only test failure messages are reported until the test
-stops.
+Once started, a message like "dmatest: Added 1 threads using dma0chan0" is
+emitted. A thread for that specific channel is created and is now pending; the
+pending thread is started once run is set to 1.
 
 Note that running a new test will not stop any in progress test.
 
@@ -116,3 +131,85 @@ Example::
 
 The details of a data miscompare error are also emitted, but do not follow the
 above format.
+
+Part 5 - Handling channel allocation
+====================================
+
+Allocating Channels
+-------------------
+
+Channels are required to be configured prior to starting the test run.
+Attempting to run the test without configuring the channels will fail.
+
+Example::
+
+    % echo 1 > /sys/module/dmatest/parameters/run
+    dmatest: Could not start test, no channels configured
+
+Channels are registered using the "channel" parameter. Channels can be requested by
+name; once requested, the channel is registered and a pending thread is added to the test list.
+
+Example::
+
+    % echo dma0chan2 > /sys/module/dmatest/parameters/channel
+    dmatest: Added 1 threads using dma0chan2
+
+More channels can be added by repeating the example above.
+Reading back the channel parameter will return the name of the last channel that was added successfully.
+
+Example::
+
+    % echo dma0chan1 > /sys/module/dmatest/parameters/channel
+    dmatest: Added 1 threads using dma0chan1
+    % echo dma0chan2 > /sys/module/dmatest/parameters/channel
+    dmatest: Added 1 threads using dma0chan2
+    % cat /sys/module/dmatest/parameters/channel
+    dma0chan2
+
+Another method of requesting channels is to request a channel with an empty string. Doing so
+will request all channels available to be tested:
+
+Example::
+
+    % echo "" > /sys/module/dmatest/parameters/channel
+    dmatest: Added 1 threads using dma0chan0
+    dmatest: Added 1 threads using dma0chan3
+    dmatest: Added 1 threads using dma0chan4
+    dmatest: Added 1 threads using dma0chan5
+    dmatest: Added 1 threads using dma0chan6
+    dmatest: Added 1 threads using dma0chan7
+    dmatest: Added 1 threads using dma0chan8
+
+At any point during the test configuration, reading the "test_list" parameter will
+print the list of currently pending tests.
+
+Example::
+
+    % cat /sys/module/dmatest/parameters/test_list
+    dmatest: 1 threads using dma0chan0
+    dmatest: 1 threads using dma0chan3
+    dmatest: 1 threads using dma0chan4
+    dmatest: 1 threads using dma0chan5
+    dmatest: 1 threads using dma0chan6
+    dmatest: 1 threads using dma0chan7
+    dmatest: 1 threads using dma0chan8
+
+Note: Channels will have to be configured for each test run, as channel configurations do not
+carry across to the next test run.
+
+Releasing Channels
+------------------
+
+Channels can be freed by setting run to 0.
+
+Example::
+    % echo dma0chan1 > /sys/module/dmatest/parameters/channel
+    dmatest: Added 1 threads using dma0chan1
+    % cat /sys/class/dma/dma0chan1/in_use
+    1
+    % echo 0 > /sys/module/dmatest/parameters/run
+    % cat /sys/class/dma/dma0chan1/in_use
+    0
+
+Channels allocated by previous test runs are automatically freed when a new
+channel is requested after completing a successful test run.
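The ordering constraint documented above exists because "channel" is a
callback-backed module parameter: its set hook runs the instant the sysfs
file is written, snapshotting whatever other parameters are set at that
point. A stripped-down sketch of that pattern (not the actual dmatest code,
which appears later in this diff; the names here are illustrative):

	#include <linux/module.h>
	#include <linux/moduleparam.h>

	static char chan_name[20];
	static struct kparam_string chan_kps = {
		.string = chan_name,
		.maxlen = sizeof(chan_name),
	};

	static int chan_set(const char *val, const struct kernel_param *kp)
	{
		int ret = param_set_copystring(val, kp);

		if (ret)
			return ret;
		/* side effect happens at write time: request the channel and
		 * queue a pending test thread with the parameters as they are
		 * *now*; later parameter writes are not seen by this thread
		 */
		pr_info("would request channel %s now\n", chan_name);
		return 0;
	}

	static const struct kernel_param_ops chan_ops = {
		.set = chan_set,
		.get = param_get_string,
	};
	module_param_cb(channel, &chan_ops, &chan_kps, 0644);
	MODULE_PARM_DESC(channel, "example immediate-effect parameter");
	MODULE_LICENSE("GPL");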
diff --git a/MAINTAINERS b/MAINTAINERS
index 9acc41305a49..7ba42fbb2c4a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2279,6 +2279,7 @@ F: arch/arm/mm/cache-uniphier.c
 F:	arch/arm64/boot/dts/socionext/uniphier*
 F:	drivers/bus/uniphier-system-bus.c
 F:	drivers/clk/uniphier/
+F:	drivers/dmaengine/uniphier-mdmac.c
 F:	drivers/gpio/gpio-uniphier.c
 F:	drivers/i2c/busses/i2c-uniphier*
 F:	drivers/irqchip/irq-uniphier-aidet.c
@@ -14628,9 +14629,11 @@ SYNOPSYS DESIGNWARE DMAC DRIVER
 M:	Viresh Kumar <vireshk@kernel.org>
 R:	Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 S:	Maintained
+F:	Documentation/devicetree/bindings/dma/snps-dma.txt
+F:	drivers/dma/dw/
+F:	include/dt-bindings/dma/dw-dmac.h
 F:	include/linux/dma/dw.h
 F:	include/linux/platform_data/dma-dw.h
-F:	drivers/dma/dw/
 
 SYNOPSYS DESIGNWARE ENTERPRISE ETHERNET DRIVER
 M:	Jose Abreu <Jose.Abreu@synopsys.com>
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index de511db021cc..d2286c7f7222 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -587,6 +587,17 @@ config TIMB_DMA
 	help
 	  Enable support for the Timberdale FPGA DMA engine.
 
+config UNIPHIER_MDMAC
+	tristate "UniPhier MIO DMAC"
+	depends on ARCH_UNIPHIER || COMPILE_TEST
+	depends on OF
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the MIO DMAC (Media I/O DMA controller) on the
+	  UniPhier platform.  This DMA controller is used as the external
+	  DMA engine of the SD/eMMC controllers of the LD4, Pro4, sLD8 SoCs.
+
 config XGENE_DMA
 	tristate "APM X-Gene DMA support"
 	depends on ARCH_XGENE || COMPILE_TEST
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 7fcc4d8e336d..09571a81353d 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -70,6 +70,7 @@ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
 obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
 obj-$(CONFIG_TEGRA210_ADMA) += tegra210-adma.o
 obj-$(CONFIG_TIMB_DMA) += timb_dma.o
+obj-$(CONFIG_UNIPHIER_MDMAC) += uniphier-mdmac.o
 obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
 obj-$(CONFIG_ZX_DMA) += zx_dma.o
 obj-$(CONFIG_ST_FDMA) += st_fdma.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 97483df1f82e..fc8c2bab563c 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -2505,24 +2505,14 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
 	return 0;
 }
 
-static int pl08x_debugfs_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, pl08x_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations pl08x_debugfs_operations = {
-	.open = pl08x_debugfs_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(pl08x_debugfs);
 
 static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
 {
 	/* Expose a simple debugfs interface to view all clocks */
 	(void) debugfs_create_file(dev_name(&pl08x->adev->dev),
 			S_IFREG | S_IRUGO, NULL, pl08x,
-			&pl08x_debugfs_operations);
+			&pl08x_debugfs_fops);
 }
 
 #else
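The same DEFINE_SHOW_ATTRIBUTE() conversion is applied to qcom_hidma,
mic_x100_dma and pxa in this series. For reference, the macro from
include/linux/seq_file.h regenerates essentially the boilerplate deleted
above, under a "_fops" name; a simplified sketch of the expansion (see the
header for the authoritative definition):

	/* Roughly what DEFINE_SHOW_ATTRIBUTE(pl08x_debugfs) expands to,
	 * given a pl08x_debugfs_show(struct seq_file *, void *) function.
	 */
	static int pl08x_debugfs_open(struct inode *inode, struct file *file)
	{
		return single_open(file, pl08x_debugfs_show, inode->i_private);
	}

	static const struct file_operations pl08x_debugfs_fops = {
		.owner		= THIS_MODULE,
		.open		= pl08x_debugfs_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};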
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index cad55ab80d41..1a44c8086d77 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * BCM2835 DMA engine support
  *
@@ -18,16 +19,6 @@
  *
  * MARVELL MMP Peripheral DMA Driver
  * Copyright 2012 Marvell International Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
@@ -1056,4 +1047,4 @@ module_platform_driver(bcm2835_dma_driver);
 MODULE_ALIAS("platform:bcm2835-dma");
 MODULE_DESCRIPTION("BCM2835 DMA engine driver");
 MODULE_AUTHOR("Florian Meier <florian.meier@koalo.de>");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index eebaba3d9e78..b69d66e44052 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1802,13 +1802,10 @@ static struct dma_chan *coh901318_xlate(struct of_phandle_args *dma_spec,
 static int coh901318_config(struct coh901318_chan *cohc,
 			    struct coh901318_params *param)
 {
-	unsigned long flags;
 	const struct coh901318_params *p;
 	int channel = cohc->id;
 	void __iomem *virtbase = cohc->base->virtbase;
 
-	spin_lock_irqsave(&cohc->lock, flags);
-
 	if (param)
 		p = param;
 	else
@@ -1828,8 +1825,6 @@ static int coh901318_config(struct coh901318_chan *cohc,
 	coh901318_set_conf(cohc, p->config);
 	coh901318_set_ctrl(cohc, p->ctrl_lli_last);
 
-	spin_unlock_irqrestore(&cohc->lock, flags);
-
 	return 0;
 }
 
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index aa1712beb0cc..2eea4ef72915 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -27,11 +27,6 @@ static unsigned int test_buf_size = 16384;
 module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
 
-static char test_channel[20];
-module_param_string(channel, test_channel, sizeof(test_channel),
-		S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
-
 static char test_device[32];
 module_param_string(device, test_device, sizeof(test_device),
 		S_IRUGO | S_IWUSR);
@@ -84,6 +79,14 @@ static bool verbose;
 module_param(verbose, bool, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");
 
+static int alignment = -1;
+module_param(alignment, int, 0644);
+MODULE_PARM_DESC(alignment, "Custom data address alignment taken as 2^(alignment) (default: not used (-1))");
+
+static unsigned int transfer_size;
+module_param(transfer_size, uint, 0644);
+MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default: not used (0))");
+
 /**
  * struct dmatest_params - test parameters.
  * @buf_size:	size of the memcpy test buffer
@@ -108,6 +111,8 @@ struct dmatest_params {
 	int		timeout;
 	bool		noverify;
 	bool		norandom;
+	int		alignment;
+	unsigned int	transfer_size;
 };
 
 /**
@@ -139,6 +144,28 @@ static bool dmatest_run;
 module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(run, "Run the test (default: false)");
 
+static int dmatest_chan_set(const char *val, const struct kernel_param *kp);
+static int dmatest_chan_get(char *val, const struct kernel_param *kp);
+static const struct kernel_param_ops multi_chan_ops = {
+	.set = dmatest_chan_set,
+	.get = dmatest_chan_get,
+};
+
+static char test_channel[20];
+static struct kparam_string newchan_kps = {
+	.string = test_channel,
+	.maxlen = 20,
+};
+module_param_cb(channel, &multi_chan_ops, &newchan_kps, 0644);
+MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
+
+static int dmatest_test_list_get(char *val, const struct kernel_param *kp);
+static const struct kernel_param_ops test_list_ops = {
+	.get = dmatest_test_list_get,
+};
+module_param_cb(test_list, &test_list_ops, NULL, 0444);
+MODULE_PARM_DESC(test_list, "Print current test list");
+
 /* Maximum amount of mismatched bytes in buffer to print */
 #define MAX_ERROR_COUNT		32
 
@@ -160,6 +187,13 @@ MODULE_PARM_DESC(run, "Run the test (default: false)");
 #define PATTERN_COUNT_MASK	0x1f
 #define PATTERN_MEMSET_IDX	0x01
 
+/* Fixed point arithmetic ops */
+#define FIXPT_SHIFT		8
+#define FIXPNT_MASK		0xFF
+#define FIXPT_TO_INT(a)	((a) >> FIXPT_SHIFT)
+#define INT_TO_FIXPT(a)	((a) << FIXPT_SHIFT)
+#define FIXPT_GET_FRAC(a)	((((a) & FIXPNT_MASK) * 100) >> FIXPT_SHIFT)
+
 /* poor man's completion - we want to use wait_event_freezable() on it */
 struct dmatest_done {
 	bool			done;
@@ -179,6 +213,7 @@ struct dmatest_thread {
 	wait_queue_head_t	done_wait;
 	struct dmatest_done	test_done;
 	bool			done;
+	bool			pending;
 };
 
 struct dmatest_chan {
@@ -206,6 +241,22 @@ static bool is_threaded_test_run(struct dmatest_info *info)
 	return false;
 }
 
+static bool is_threaded_test_pending(struct dmatest_info *info)
+{
+	struct dmatest_chan *dtc;
+
+	list_for_each_entry(dtc, &info->channels, node) {
+		struct dmatest_thread *thread;
+
+		list_for_each_entry(thread, &dtc->threads, node) {
+			if (thread->pending)
+				return true;
+		}
+	}
+
+	return false;
+}
+
 static int dmatest_wait_get(char *val, const struct kernel_param *kp)
 {
 	struct dmatest_info *info = &test_info;
@@ -419,13 +470,15 @@ static unsigned long long dmatest_persec(s64 runtime, unsigned int val)
 	}
 
 	per_sec *= val;
+	per_sec = INT_TO_FIXPT(per_sec);
 	do_div(per_sec, runtime);
+
 	return per_sec;
 }
 
 static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
 {
-	return dmatest_persec(runtime, len >> 10);
+	return FIXPT_TO_INT(dmatest_persec(runtime, len >> 10));
 }
 
 /*
@@ -466,6 +519,7 @@ static int dmatest_func(void *data)
 	ktime_t	comparetime = 0;
 	s64 runtime = 0;
 	unsigned long long total_len = 0;
+	unsigned long long iops = 0;
 	u8 align = 0;
 	bool is_memset = false;
 	dma_addr_t *srcs;
@@ -476,27 +530,32 @@
 	ret = -ENOMEM;
 
 	smp_rmb();
+	thread->pending = false;
 	info = thread->info;
 	params = &info->params;
 	chan = thread->chan;
 	dev = chan->device;
 	if (thread->type == DMA_MEMCPY) {
-		align = dev->copy_align;
+		align = params->alignment < 0 ? dev->copy_align :
+						params->alignment;
 		src_cnt = dst_cnt = 1;
 	} else if (thread->type == DMA_MEMSET) {
-		align = dev->fill_align;
+		align = params->alignment < 0 ? dev->fill_align :
+						params->alignment;
 		src_cnt = dst_cnt = 1;
 		is_memset = true;
 	} else if (thread->type == DMA_XOR) {
 		/* force odd to ensure dst = src */
 		src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
 		dst_cnt = 1;
-		align = dev->xor_align;
+		align = params->alignment < 0 ? dev->xor_align :
+						params->alignment;
 	} else if (thread->type == DMA_PQ) {
 		/* force odd to ensure dst = src */
 		src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
 		dst_cnt = 2;
-		align = dev->pq_align;
+		align = params->alignment < 0 ? dev->pq_align :
+						params->alignment;
 
 		pq_coefs = kmalloc(params->pq_sources + 1, GFP_KERNEL);
 		if (!pq_coefs)
@@ -507,9 +566,22 @@
 	} else
 		goto err_thread_type;
 
+	/* Check if buffer count fits into map count variable (u8) */
+	if ((src_cnt + dst_cnt) >= 255) {
+		pr_err("too many buffers (%d of 255 supported)\n",
+		       src_cnt + dst_cnt);
+		goto err_free_coefs;
+	}
+
+	if (1 << align > params->buf_size) {
+		pr_err("%u-byte buffer too small for %d-byte alignment\n",
+		       params->buf_size, 1 << align);
+		goto err_free_coefs;
+	}
+
 	thread->srcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
 	if (!thread->srcs)
-		goto err_srcs;
+		goto err_free_coefs;
 
 	thread->usrcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
 	if (!thread->usrcs)
@@ -576,28 +648,25 @@
 
 		total_tests++;
 
-		/* Check if buffer count fits into map count variable (u8) */
-		if ((src_cnt + dst_cnt) >= 255) {
-			pr_err("too many buffers (%d of 255 supported)\n",
-			       src_cnt + dst_cnt);
-			break;
-		}
-
-		if (1 << align > params->buf_size) {
-			pr_err("%u-byte buffer too small for %d-byte alignment\n",
-			       params->buf_size, 1 << align);
-			break;
-		}
-
-		if (params->norandom)
+		if (params->transfer_size) {
+			if (params->transfer_size >= params->buf_size) {
+				pr_err("%u-byte transfer size must be lower than %u-buffer size\n",
+				       params->transfer_size, params->buf_size);
+				break;
+			}
+			len = params->transfer_size;
+		} else if (params->norandom) {
 			len = params->buf_size;
-		else
+		} else {
 			len = dmatest_random() % params->buf_size + 1;
+		}
 
-		len = (len >> align) << align;
-		if (!len)
-			len = 1 << align;
-
+		/* Do not alter transfer size explicitly defined by user */
+		if (!params->transfer_size) {
+			len = (len >> align) << align;
+			if (!len)
+				len = 1 << align;
+		}
 		total_len += len;
 
 		if (params->norandom) {
@@ -721,14 +790,14 @@
 
 		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 
+		dmaengine_unmap_put(um);
+
 		if (!done->done) {
-			dmaengine_unmap_put(um);
 			result("test timed out", total_tests, src_off, dst_off,
 			       len, 0);
 			failed_tests++;
 			continue;
 		} else if (status != DMA_COMPLETE) {
-			dmaengine_unmap_put(um);
 			result(status == DMA_ERROR ?
 			       "completion error status" :
 			       "completion busy status", total_tests, src_off,
@@ -737,8 +806,6 @@
 			continue;
 		}
 
-		dmaengine_unmap_put(um);
-
 		if (params->noverify) {
 			verbose_result("test passed", total_tests, src_off,
 				       dst_off, len, 0);
@@ -802,17 +869,18 @@ err_srcbuf:
 	kfree(thread->usrcs);
 err_usrcs:
 	kfree(thread->srcs);
-err_srcs:
+err_free_coefs:
 	kfree(pq_coefs);
 err_thread_type:
-	pr_info("%s: summary %u tests, %u failures %llu iops %llu KB/s (%d)\n",
+	iops = dmatest_persec(runtime, total_tests);
+	pr_info("%s: summary %u tests, %u failures %llu.%02llu iops %llu KB/s (%d)\n",
 		current->comm, total_tests, failed_tests,
-		dmatest_persec(runtime, total_tests),
+		FIXPT_TO_INT(iops), FIXPT_GET_FRAC(iops),
 		dmatest_KBs(runtime, total_len), ret);
 
 	/* terminate all transfers on specified channels */
 	if (ret || failed_tests)
-		dmaengine_terminate_all(chan);
+		dmaengine_terminate_sync(chan);
 
 	thread->done = true;
 	wake_up(&thread_wait);
@@ -836,7 +904,7 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
 	}
 
 	/* terminate all transfers on specified channels */
-	dmaengine_terminate_all(dtc->chan);
+	dmaengine_terminate_sync(dtc->chan);
 
 	kfree(dtc);
 }
@@ -886,7 +954,7 @@ static int dmatest_add_threads(struct dmatest_info *info,
 		/* srcbuf and dstbuf are allocated by the thread itself */
 		get_task_struct(thread->task);
 		list_add_tail(&thread->node, &dtc->threads);
-		wake_up_process(thread->task);
+		thread->pending = true;
 	}
 
 	return i;
@@ -932,7 +1000,7 @@ static int dmatest_add_channel(struct dmatest_info *info,
 		thread_count += cnt > 0 ? cnt : 0;
 	}
 
-	pr_info("Started %u threads using %s\n",
+	pr_info("Added %u threads using %s\n",
 		thread_count, dma_chan_name(chan));
 
 	list_add_tail(&dtc->node, &info->channels);
@@ -977,7 +1045,7 @@ static void request_channels(struct dmatest_info *info,
 	}
 }
 
-static void run_threaded_test(struct dmatest_info *info)
+static void add_threaded_test(struct dmatest_info *info)
 {
 	struct dmatest_params *params = &info->params;
 
@@ -993,6 +1061,8 @@ static void add_threaded_test(struct dmatest_info *info)
 	params->timeout = timeout;
 	params->noverify = noverify;
 	params->norandom = norandom;
+	params->alignment = alignment;
+	params->transfer_size = transfer_size;
 
 	request_channels(info, DMA_MEMCPY);
 	request_channels(info, DMA_MEMSET);
@@ -1000,6 +1070,24 @@ static void add_threaded_test(struct dmatest_info *info)
 	request_channels(info, DMA_PQ);
 }
 
+static void run_pending_tests(struct dmatest_info *info)
+{
+	struct dmatest_chan *dtc;
+	unsigned int thread_count = 0;
+
+	list_for_each_entry(dtc, &info->channels, node) {
+		struct dmatest_thread *thread;
+
+		thread_count = 0;
+		list_for_each_entry(thread, &dtc->threads, node) {
+			wake_up_process(thread->task);
+			thread_count++;
+		}
+		pr_info("Started %u threads using %s\n",
+			thread_count, dma_chan_name(dtc->chan));
+	}
+}
+
 static void stop_threaded_test(struct dmatest_info *info)
 {
 	struct dmatest_chan *dtc, *_dtc;
@@ -1016,7 +1104,7 @@ static void stop_threaded_test(struct dmatest_info *info)
 	info->nr_channels = 0;
 }
 
-static void restart_threaded_test(struct dmatest_info *info, bool run)
+static void start_threaded_tests(struct dmatest_info *info)
 {
 	/* we might be called early to set run=, defer running until all
 	 * parameters have been evaluated
@@ -1024,11 +1112,7 @@ static void start_threaded_tests(struct dmatest_info *info)
 	if (!info->did_init)
 		return;
 
-	/* Stop any running test first */
-	stop_threaded_test(info);
-
-	/* Run test with new parameters */
-	run_threaded_test(info);
+	run_pending_tests(info);
 }
 
 static int dmatest_run_get(char *val, const struct kernel_param *kp)
@@ -1039,7 +1123,8 @@ static int dmatest_run_get(char *val, const struct kernel_param *kp)
 	if (is_threaded_test_run(info)) {
 		dmatest_run = true;
 	} else {
-		stop_threaded_test(info);
+		if (!is_threaded_test_pending(info))
+			stop_threaded_test(info);
 		dmatest_run = false;
 	}
 	mutex_unlock(&info->lock);
@@ -1057,18 +1142,125 @@ static int dmatest_run_set(const char *val, const struct kernel_param *kp)
 	if (ret) {
 		mutex_unlock(&info->lock);
 		return ret;
+	} else if (dmatest_run) {
+		if (is_threaded_test_pending(info))
+			start_threaded_tests(info);
+		else
+			pr_info("Could not start test, no channels configured\n");
+	} else {
+		stop_threaded_test(info);
 	}
 
-	if (is_threaded_test_run(info))
+	mutex_unlock(&info->lock);
+
+	return ret;
+}
+
+static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
+{
+	struct dmatest_info *info = &test_info;
+	struct dmatest_chan *dtc;
+	char chan_reset_val[20];
+	int ret = 0;
+
+	mutex_lock(&info->lock);
+	ret = param_set_copystring(val, kp);
+	if (ret) {
+		mutex_unlock(&info->lock);
+		return ret;
+	}
+	/* Clear any previously run threads */
+	if (!is_threaded_test_run(info) && !is_threaded_test_pending(info))
+		stop_threaded_test(info);
+	/* Reject channels that are already registered */
+	if (is_threaded_test_pending(info)) {
+		list_for_each_entry(dtc, &info->channels, node) {
+			if (strcmp(dma_chan_name(dtc->chan),
+				   strim(test_channel)) == 0) {
+				dtc = list_last_entry(&info->channels,
+						      struct dmatest_chan,
+						      node);
+				strlcpy(chan_reset_val,
+					dma_chan_name(dtc->chan),
+					sizeof(chan_reset_val));
+				ret = -EBUSY;
+				goto add_chan_err;
+			}
+		}
+	}
+
+	add_threaded_test(info);
+
+	/* Check if channel was added successfully */
+	dtc = list_last_entry(&info->channels, struct dmatest_chan, node);
+
+	if (dtc->chan) {
+		/*
+		 * if new channel was not successfully added, revert the
+		 * "test_channel" string to the name of the last successfully
+		 * added channel. exception for when users issues empty string
+		 * to channel parameter.
+		 */
+		if ((strcmp(dma_chan_name(dtc->chan), strim(test_channel)) != 0)
+		    && (strcmp("", strim(test_channel)) != 0)) {
+			ret = -EINVAL;
+			strlcpy(chan_reset_val, dma_chan_name(dtc->chan),
+				sizeof(chan_reset_val));
+			goto add_chan_err;
+		}
+
+	} else {
+		/* Clear test_channel if no channels were added successfully */
+		strlcpy(chan_reset_val, "", sizeof(chan_reset_val));
 		ret = -EBUSY;
-	else if (dmatest_run)
-		restart_threaded_test(info, dmatest_run);
+		goto add_chan_err;
+	}
+
+	mutex_unlock(&info->lock);
+
+	return ret;
 
+add_chan_err:
+	param_set_copystring(chan_reset_val, kp);
 	mutex_unlock(&info->lock);
 
 	return ret;
 }
 
+static int dmatest_chan_get(char *val, const struct kernel_param *kp)
+{
+	struct dmatest_info *info = &test_info;
+
+	mutex_lock(&info->lock);
+	if (!is_threaded_test_run(info) && !is_threaded_test_pending(info)) {
+		stop_threaded_test(info);
+		strlcpy(test_channel, "", sizeof(test_channel));
+	}
+	mutex_unlock(&info->lock);
+
+	return param_get_string(val, kp);
+}
+
+static int dmatest_test_list_get(char *val, const struct kernel_param *kp)
+{
+	struct dmatest_info *info = &test_info;
+	struct dmatest_chan *dtc;
+	unsigned int thread_count = 0;
+
+	list_for_each_entry(dtc, &info->channels, node) {
+		struct dmatest_thread *thread;
+
+		thread_count = 0;
+		list_for_each_entry(thread, &dtc->threads, node) {
+			thread_count++;
+		}
+		pr_info("%u threads using %s\n",
+			thread_count, dma_chan_name(dtc->chan));
+	}
+
+	return 0;
+}
+
 static int __init dmatest_init(void)
 {
 	struct dmatest_info *info = &test_info;
@@ -1076,7 +1268,8 @@ static int __init dmatest_init(void)
 
 	if (dmatest_run) {
 		mutex_lock(&info->lock);
-		run_threaded_test(info);
+		add_threaded_test(info);
+		run_pending_tests(info);
 		mutex_unlock(&info->lock);
 	}
 
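The fixed-point change above is easy to mis-read, so here is a standalone
sketch of the arithmetic (plain userspace C; the macro names are copied from
the patch, the sample numbers and the simplified scaling are made up and only
loosely mirror dmatest_persec()):

	#include <stdio.h>

	/* Same 8-bit fixed-point helpers as in the patch above */
	#define FIXPT_SHIFT		8
	#define FIXPNT_MASK		0xFF
	#define FIXPT_TO_INT(a)		((a) >> FIXPT_SHIFT)
	#define INT_TO_FIXPT(a)		((a) << FIXPT_SHIFT)
	#define FIXPT_GET_FRAC(a)	((((a) & FIXPNT_MASK) * 100) >> FIXPT_SHIFT)

	int main(void)
	{
		/* e.g. 15 tests in 4000 us -> exactly 3750 iops */
		unsigned long long tests = 15, runtime_us = 4000;
		unsigned long long per_sec = 1000000 * tests;

		/* scale before dividing so 8 fractional bits survive */
		per_sec = INT_TO_FIXPT(per_sec) / runtime_us;
		printf("%llu.%02llu iops\n",
		       FIXPT_TO_INT(per_sec), FIXPT_GET_FRAC(per_sec));
		return 0;	/* prints "3750.00 iops" */
	}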
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 1fc488e90f36..dc053e62f894 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -160,12 +160,14 @@ static void dwc_initialize_chan_idma32(struct dw_dma_chan *dwc)
 
 static void dwc_initialize_chan_dw(struct dw_dma_chan *dwc)
 {
+	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
 	u32 cfghi = DWC_CFGH_FIFO_MODE;
 	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
 	bool hs_polarity = dwc->dws.hs_polarity;
 
 	cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id);
 	cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id);
+	cfghi |= DWC_CFGH_PROTCTL(dw->pdata->protctl);
 
 	/* Set polarity of handshake interface */
 	cfglo |= hs_polarity ? DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL : 0;
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index f01b2c173fa6..31ff8113c3de 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -162,6 +162,12 @@ dw_dma_parse_dt(struct platform_device *pdev)
 			pdata->multi_block[tmp] = 1;
 	}
 
+	if (!of_property_read_u32(np, "snps,dma-protection-control", &tmp)) {
+		if (tmp > CHAN_PROTCTL_MASK)
+			return NULL;
+		pdata->protctl = tmp;
+	}
+
 	return pdata;
 }
 #else
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index 09e7dfdbb790..646c9c960c07 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -200,6 +200,10 @@ enum dw_dma_msize {
 #define DWC_CFGH_FCMODE		(1 << 0)
 #define DWC_CFGH_FIFO_MODE	(1 << 1)
 #define DWC_CFGH_PROTCTL(x)	((x) << 2)
+#define DWC_CFGH_PROTCTL_DATA	(0 << 2) /* data access - always set */
+#define DWC_CFGH_PROTCTL_PRIV	(1 << 2) /* privileged -> AHB HPROT[1] */
+#define DWC_CFGH_PROTCTL_BUFFER	(2 << 2) /* bufferable -> AHB HPROT[2] */
+#define DWC_CFGH_PROTCTL_CACHE	(4 << 2) /* cacheable -> AHB HPROT[3] */
 #define DWC_CFGH_DS_UPD_EN	(1 << 5)
 #define DWC_CFGH_SS_UPD_EN	(1 << 6)
 #define DWC_CFGH_SRC_PER(x)	((x) << 7)
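Taken together, the three dw-dmac patches above route a 3-bit HPROT[3:1]
value from the device tree into the CFG_HI register. A standalone
illustration of the encoding (plain C; the macro values are copied from the
regs.h hunk above, everything else is a made-up example):

	#include <stdio.h>

	#define DWC_CFGH_PROTCTL(x)	((x) << 2)
	#define DWC_CFGH_PROTCTL_PRIV	(1 << 2)	/* AHB HPROT[1] */
	#define DWC_CFGH_PROTCTL_BUFFER	(2 << 2)	/* AHB HPROT[2] */
	#define DWC_CFGH_PROTCTL_CACHE	(4 << 2)	/* AHB HPROT[3] */

	int main(void)
	{
		/* DT cell: HPROT[3:1] as a 3-bit value; 0x3 would come from
		 * of_property_read_u32() of "snps,dma-protection-control"
		 */
		unsigned int protctl = 0x3;
		unsigned int cfghi = DWC_CFGH_PROTCTL(protctl);

		/* 0x3 << 2 equals PRIV | BUFFER, matching the helper macros */
		printf("cfghi = %#x (%s)\n", cfghi,
		       cfghi == (DWC_CFGH_PROTCTL_PRIV | DWC_CFGH_PROTCTL_BUFFER)
		       ? "privileged+bufferable" : "?");
		return 0;
	}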
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index f674eb5fbbef..594a88f4f99c 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -997,7 +997,7 @@ ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
 	for (offset = 0; offset < len; offset += bytes) {
 		desc = ep93xx_dma_desc_get(edmac);
 		if (!desc) {
-			dev_warn(chan2dev(edmac), "couln't get descriptor\n");
+			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
 			goto fail;
 		}
 
@@ -1069,7 +1069,7 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
 	desc = ep93xx_dma_desc_get(edmac);
 	if (!desc) {
-		dev_warn(chan2dev(edmac), "couln't get descriptor\n");
+		dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
 		goto fail;
 	}
 
@@ -1149,7 +1149,7 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
 	for (offset = 0; offset < buf_len; offset += period_len) {
 		desc = ep93xx_dma_desc_get(edmac);
 		if (!desc) {
-			dev_warn(chan2dev(edmac), "couln't get descriptor\n");
+			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
 			goto fail;
 		}
 
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index cb1b44d78a1f..a2b0a0e71168 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -335,6 +335,7 @@ struct sdma_desc {
 * @sdma:	pointer to the SDMA engine for this channel
 * @channel:	the channel number, matches dmaengine chan_id + 1
 * @direction:	transfer type. Needed for setting SDMA script
+ * @slave_config	Slave configuration
 * @peripheral_type: Peripheral type. Needed for setting SDMA script
 * @event_id0:	aka dma request line
 * @event_id1:	for channels that use 2 events
@@ -362,6 +363,7 @@ struct sdma_channel {
 	struct sdma_engine		*sdma;
 	unsigned int			channel;
 	enum dma_transfer_direction	direction;
+	struct dma_slave_config		slave_config;
 	enum sdma_peripheral_type	peripheral_type;
 	unsigned int			event_id0;
 	unsigned int			event_id1;
@@ -440,6 +442,10 @@ struct sdma_engine {
 	struct sdma_buffer_descriptor	*bd0;
 };
 
+static int sdma_config_write(struct dma_chan *chan,
+		       struct dma_slave_config *dmaengine_cfg,
+		       enum dma_transfer_direction direction);
+
 static struct sdma_driver_data sdma_imx31 = {
 	.chnenbl0 = SDMA_CHNENBL0_IMX31,
 	.num_events = 32,
@@ -671,9 +677,7 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
 	int ret;
 	unsigned long flags;
 
-	buf_virt = dma_alloc_coherent(NULL,
-			size,
-			&buf_phys, GFP_KERNEL);
+	buf_virt = dma_alloc_coherent(NULL, size, &buf_phys, GFP_KERNEL);
 	if (!buf_virt) {
 		return -ENOMEM;
 	}
@@ -1122,18 +1126,6 @@ static int sdma_config_channel(struct dma_chan *chan)
 	sdmac->shp_addr = 0;
 	sdmac->per_addr = 0;
 
-	if (sdmac->event_id0) {
-		if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
-			return -EINVAL;
-		sdma_event_enable(sdmac, sdmac->event_id0);
-	}
-
-	if (sdmac->event_id1) {
-		if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
-			return -EINVAL;
-		sdma_event_enable(sdmac, sdmac->event_id1);
-	}
-
 	switch (sdmac->peripheral_type) {
 	case IMX_DMATYPE_DSP:
 		sdma_config_ownership(sdmac, false, true, true);
@@ -1431,6 +1423,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 	struct scatterlist *sg;
 	struct sdma_desc *desc;
 
+	sdma_config_write(chan, &sdmac->slave_config, direction);
+
 	desc = sdma_transfer_init(sdmac, direction, sg_len);
 	if (!desc)
 		goto err_out;
@@ -1515,6 +1509,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 
 	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
 
+	sdma_config_write(chan, &sdmac->slave_config, direction);
+
 	desc = sdma_transfer_init(sdmac, direction, num_periods);
 	if (!desc)
 		goto err_out;
@@ -1570,17 +1566,18 @@ err_out:
 	return NULL;
 }
 
-static int sdma_config(struct dma_chan *chan,
-		       struct dma_slave_config *dmaengine_cfg)
+static int sdma_config_write(struct dma_chan *chan,
+		       struct dma_slave_config *dmaengine_cfg,
+		       enum dma_transfer_direction direction)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 
-	if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
+	if (direction == DMA_DEV_TO_MEM) {
 		sdmac->per_address = dmaengine_cfg->src_addr;
 		sdmac->watermark_level = dmaengine_cfg->src_maxburst *
 			dmaengine_cfg->src_addr_width;
 		sdmac->word_size = dmaengine_cfg->src_addr_width;
| 1583 | } else if (dmaengine_cfg->direction == DMA_DEV_TO_DEV) { | 1580 | } else if (direction == DMA_DEV_TO_DEV) { |
| 1584 | sdmac->per_address2 = dmaengine_cfg->src_addr; | 1581 | sdmac->per_address2 = dmaengine_cfg->src_addr; |
| 1585 | sdmac->per_address = dmaengine_cfg->dst_addr; | 1582 | sdmac->per_address = dmaengine_cfg->dst_addr; |
| 1586 | sdmac->watermark_level = dmaengine_cfg->src_maxburst & | 1583 | sdmac->watermark_level = dmaengine_cfg->src_maxburst & |
| @@ -1594,10 +1591,33 @@ static int sdma_config(struct dma_chan *chan, | |||
| 1594 | dmaengine_cfg->dst_addr_width; | 1591 | dmaengine_cfg->dst_addr_width; |
| 1595 | sdmac->word_size = dmaengine_cfg->dst_addr_width; | 1592 | sdmac->word_size = dmaengine_cfg->dst_addr_width; |
| 1596 | } | 1593 | } |
| 1597 | sdmac->direction = dmaengine_cfg->direction; | 1594 | sdmac->direction = direction; |
| 1598 | return sdma_config_channel(chan); | 1595 | return sdma_config_channel(chan); |
| 1599 | } | 1596 | } |
| 1600 | 1597 | ||
| 1598 | static int sdma_config(struct dma_chan *chan, | ||
| 1599 | struct dma_slave_config *dmaengine_cfg) | ||
| 1600 | { | ||
| 1601 | struct sdma_channel *sdmac = to_sdma_chan(chan); | ||
| 1602 | |||
| 1603 | memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg)); | ||
| 1604 | |||
| 1605 | /* Set ENBLn earlier to make sure the dma request is triggered after that */ | ||
| 1606 | if (sdmac->event_id0) { | ||
| 1607 | if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events) | ||
| 1608 | return -EINVAL; | ||
| 1609 | sdma_event_enable(sdmac, sdmac->event_id0); | ||
| 1610 | } | ||
| 1611 | |||
| 1612 | if (sdmac->event_id1) { | ||
| 1613 | if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events) | ||
| 1614 | return -EINVAL; | ||
| 1615 | sdma_event_enable(sdmac, sdmac->event_id1); | ||
| 1616 | } | ||
| 1617 | |||
| 1618 | return 0; | ||
| 1619 | } | ||
| 1620 | |||
| 1601 | static enum dma_status sdma_tx_status(struct dma_chan *chan, | 1621 | static enum dma_status sdma_tx_status(struct dma_chan *chan, |
| 1602 | dma_cookie_t cookie, | 1622 | dma_cookie_t cookie, |
| 1603 | struct dma_tx_state *txstate) | 1623 | struct dma_tx_state *txstate) |
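The imx-sdma rework above is one instance of the series-wide removal of the deprecated direction field from struct dma_slave_config: sdma_config() now only caches the configuration and enables the request events, while sdma_config_write() applies the cached settings with the direction taken from the prep call. A minimal client-side sketch of the resulting flow, assuming a channel already obtained with dma_request_chan() and treating fifo_phys, buf_dma and len as placeholders:

	#include <linux/dmaengine.h>

	struct dma_slave_config cfg = {
		.dst_addr = fifo_phys,		/* hypothetical device FIFO address */
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 4,
		/* no .direction here; it is supplied per transfer below */
	};
	struct dma_async_tx_descriptor *desc;

	dmaengine_slave_config(chan, &cfg);	/* cached by sdma_config() */
	desc = dmaengine_prep_slave_single(chan, buf_dma, len,
					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	/* sdma_prep_slave_sg() applies the cached config through
	 * sdma_config_write(chan, &sdmac->slave_config, DMA_MEM_TO_DEV) */
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

The same cache-then-apply split is repeated below for mmp_pdma and pl330.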
diff --git a/drivers/dma/mediatek/Kconfig b/drivers/dma/mediatek/Kconfig index 27bac0bba09e..680fc0572d87 100644 --- a/drivers/dma/mediatek/Kconfig +++ b/drivers/dma/mediatek/Kconfig | |||
| @@ -11,3 +11,16 @@ config MTK_HSDMA | |||
| 11 | This controller provides the channels which is dedicated to | 11 | This controller provides the channels which is dedicated to |
| 12 | memory-to-memory transfer to offload from CPU through ring- | 12 | memory-to-memory transfer to offload from CPU through ring- |
| 13 | based descriptor management. | 13 | based descriptor management. |
| 14 | |||
| 15 | config MTK_CQDMA | ||
| 16 | tristate "MediaTek Command-Queue DMA controller support" | ||
| 17 | depends on ARCH_MEDIATEK || COMPILE_TEST | ||
| 18 | select DMA_ENGINE | ||
| 19 | select DMA_VIRTUAL_CHANNELS | ||
| 20 | select ASYNC_TX_ENABLE_CHANNEL_SWITCH | ||
| 21 | help | ||
| 22 | Enable support for Command-Queue DMA controller on MediaTek | ||
| 23 | SoCs. | ||
| 24 | |||
| 25 | This controller provides channels dedicated to | ||
| 26 | memory-to-memory transfers to offload work from the CPU. | ||
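Like the existing MTK_HSDMA option, the new symbol is tristate; a hypothetical config fragment building the driver as a module:

	CONFIG_MTK_CQDMA=m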
diff --git a/drivers/dma/mediatek/Makefile b/drivers/dma/mediatek/Makefile index 6e778f842f01..41bb3815f636 100644 --- a/drivers/dma/mediatek/Makefile +++ b/drivers/dma/mediatek/Makefile | |||
| @@ -1 +1,2 @@ | |||
| 1 | obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o | 1 | obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o |
| 2 | obj-$(CONFIG_MTK_CQDMA) += mtk-cqdma.o | ||
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c new file mode 100644 index 000000000000..131f3974740d --- /dev/null +++ b/drivers/dma/mediatek/mtk-cqdma.c | |||
| @@ -0,0 +1,951 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (c) 2018-2019 MediaTek Inc. | ||
| 3 | |||
| 4 | /* | ||
| 5 | * Driver for MediaTek Command-Queue DMA Controller | ||
| 6 | * | ||
| 7 | * Author: Shun-Chih Yu <shun-chih.yu@mediatek.com> | ||
| 8 | * | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/bitops.h> | ||
| 12 | #include <linux/clk.h> | ||
| 13 | #include <linux/dmaengine.h> | ||
| 14 | #include <linux/dma-mapping.h> | ||
| 15 | #include <linux/err.h> | ||
| 16 | #include <linux/iopoll.h> | ||
| 17 | #include <linux/interrupt.h> | ||
| 18 | #include <linux/list.h> | ||
| 19 | #include <linux/module.h> | ||
| 20 | #include <linux/of.h> | ||
| 21 | #include <linux/of_device.h> | ||
| 22 | #include <linux/of_dma.h> | ||
| 23 | #include <linux/platform_device.h> | ||
| 24 | #include <linux/pm_runtime.h> | ||
| 25 | #include <linux/refcount.h> | ||
| 26 | #include <linux/slab.h> | ||
| 27 | |||
| 28 | #include "../virt-dma.h" | ||
| 29 | |||
| 30 | #define MTK_CQDMA_USEC_POLL 10 | ||
| 31 | #define MTK_CQDMA_TIMEOUT_POLL 1000 | ||
| 32 | #define MTK_CQDMA_DMA_BUSWIDTHS BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | ||
| 33 | #define MTK_CQDMA_ALIGN_SIZE 1 | ||
| 34 | |||
| 35 | /* The default number of virtual channels */ | ||
| 36 | #define MTK_CQDMA_NR_VCHANS 32 | ||
| 37 | |||
| 38 | /* The default number of physical channels */ | ||
| 39 | #define MTK_CQDMA_NR_PCHANS 3 | ||
| 40 | |||
| 41 | /* Registers for underlying dma manipulation */ | ||
| 42 | #define MTK_CQDMA_INT_FLAG 0x0 | ||
| 43 | #define MTK_CQDMA_INT_EN 0x4 | ||
| 44 | #define MTK_CQDMA_EN 0x8 | ||
| 45 | #define MTK_CQDMA_RESET 0xc | ||
| 46 | #define MTK_CQDMA_FLUSH 0x14 | ||
| 47 | #define MTK_CQDMA_SRC 0x1c | ||
| 48 | #define MTK_CQDMA_DST 0x20 | ||
| 49 | #define MTK_CQDMA_LEN1 0x24 | ||
| 50 | #define MTK_CQDMA_LEN2 0x28 | ||
| 51 | #define MTK_CQDMA_SRC2 0x60 | ||
| 52 | #define MTK_CQDMA_DST2 0x64 | ||
| 53 | |||
| 54 | /* Register settings */ | ||
| 55 | #define MTK_CQDMA_EN_BIT BIT(0) | ||
| 56 | #define MTK_CQDMA_INT_FLAG_BIT BIT(0) | ||
| 57 | #define MTK_CQDMA_INT_EN_BIT BIT(0) | ||
| 58 | #define MTK_CQDMA_FLUSH_BIT BIT(0) | ||
| 59 | |||
| 60 | #define MTK_CQDMA_WARM_RST_BIT BIT(0) | ||
| 61 | #define MTK_CQDMA_HARD_RST_BIT BIT(1) | ||
| 62 | |||
| 63 | #define MTK_CQDMA_MAX_LEN GENMASK(27, 0) | ||
| 64 | #define MTK_CQDMA_ADDR_LIMIT GENMASK(31, 0) | ||
| 65 | #define MTK_CQDMA_ADDR2_SHIFT (32) | ||
| 66 | |||
| 67 | /** | ||
| 68 | * struct mtk_cqdma_vdesc - The struct holding info describing virtual | ||
| 69 | * descriptor (CVD) | ||
| 70 | * @vd: An instance for struct virt_dma_desc | ||
| 71 | * @len: The total data size the device wants to move | ||
| 72 | * @residue: The remaining data size the device will move | ||
| 73 | * @dest: The destination address the device wants to move to | ||
| 74 | * @src: The source address the device wants to move from | ||
| 75 | * @ch: The pointer to the corresponding dma channel | ||
| 76 | * @node: The list_head struct used to build the link-list of VDs | ||
| 77 | * @parent: The pointer to the parent CVD | ||
| 78 | */ | ||
| 79 | struct mtk_cqdma_vdesc { | ||
| 80 | struct virt_dma_desc vd; | ||
| 81 | size_t len; | ||
| 82 | size_t residue; | ||
| 83 | dma_addr_t dest; | ||
| 84 | dma_addr_t src; | ||
| 85 | struct dma_chan *ch; | ||
| 86 | |||
| 87 | struct list_head node; | ||
| 88 | struct mtk_cqdma_vdesc *parent; | ||
| 89 | }; | ||
| 90 | |||
| 91 | /** | ||
| 92 | * struct mtk_cqdma_pchan - The struct holding info describing physical | ||
| 93 | * channel (PC) | ||
| 94 | * @queue: Queue for the PDs issued to this PC | ||
| 95 | * @base: The mapped register I/O base of this PC | ||
| 96 | * @irq: The IRQ that this PC is using | ||
| 97 | * @refcnt: Track how many VCs are using this PC | ||
| 98 | * @tasklet: Tasklet for this PC | ||
| 99 | * @lock: Lock to protect against multiple VCs accessing the PC | ||
| 100 | */ | ||
| 101 | struct mtk_cqdma_pchan { | ||
| 102 | struct list_head queue; | ||
| 103 | void __iomem *base; | ||
| 104 | u32 irq; | ||
| 105 | |||
| 106 | refcount_t refcnt; | ||
| 107 | |||
| 108 | struct tasklet_struct tasklet; | ||
| 109 | |||
| 110 | /* lock to protect PC */ | ||
| 111 | spinlock_t lock; | ||
| 112 | }; | ||
| 113 | |||
| 114 | /** | ||
| 115 | * struct mtk_cqdma_vchan - The struct holding info describing virtual | ||
| 116 | * channel (VC) | ||
| 117 | * @vc: An instance for struct virt_dma_chan | ||
| 118 | * @pc: The pointer to the underlying PC | ||
| 119 | * @issue_completion: Completion used to wait for all issued descriptors to complete | ||
| 120 | * @issue_synchronize: Bool indicating that channel synchronization has started | ||
| 121 | */ | ||
| 122 | struct mtk_cqdma_vchan { | ||
| 123 | struct virt_dma_chan vc; | ||
| 124 | struct mtk_cqdma_pchan *pc; | ||
| 125 | struct completion issue_completion; | ||
| 126 | bool issue_synchronize; | ||
| 127 | }; | ||
| 128 | |||
| 129 | /** | ||
| 130 | * struct mtk_cqdma_device - The struct holding info describing CQDMA | ||
| 131 | * device | ||
| 132 | * @ddev: An instance for struct dma_device | ||
| 133 | * @clk: The clock the device uses internally | ||
| 134 | * @dma_requests: The number of VCs the device supports | ||
| 135 | * @dma_channels: The number of PCs the device supports | ||
| 136 | * @vc: The pointer to all available VCs | ||
| 137 | * @pc: The pointer to all the underlying PCs | ||
| 138 | */ | ||
| 139 | struct mtk_cqdma_device { | ||
| 140 | struct dma_device ddev; | ||
| 141 | struct clk *clk; | ||
| 142 | |||
| 143 | u32 dma_requests; | ||
| 144 | u32 dma_channels; | ||
| 145 | struct mtk_cqdma_vchan *vc; | ||
| 146 | struct mtk_cqdma_pchan **pc; | ||
| 147 | }; | ||
| 148 | |||
| 149 | static struct mtk_cqdma_device *to_cqdma_dev(struct dma_chan *chan) | ||
| 150 | { | ||
| 151 | return container_of(chan->device, struct mtk_cqdma_device, ddev); | ||
| 152 | } | ||
| 153 | |||
| 154 | static struct mtk_cqdma_vchan *to_cqdma_vchan(struct dma_chan *chan) | ||
| 155 | { | ||
| 156 | return container_of(chan, struct mtk_cqdma_vchan, vc.chan); | ||
| 157 | } | ||
| 158 | |||
| 159 | static struct mtk_cqdma_vdesc *to_cqdma_vdesc(struct virt_dma_desc *vd) | ||
| 160 | { | ||
| 161 | return container_of(vd, struct mtk_cqdma_vdesc, vd); | ||
| 162 | } | ||
| 163 | |||
| 164 | static struct device *cqdma2dev(struct mtk_cqdma_device *cqdma) | ||
| 165 | { | ||
| 166 | return cqdma->ddev.dev; | ||
| 167 | } | ||
| 168 | |||
| 169 | static u32 mtk_dma_read(struct mtk_cqdma_pchan *pc, u32 reg) | ||
| 170 | { | ||
| 171 | return readl(pc->base + reg); | ||
| 172 | } | ||
| 173 | |||
| 174 | static void mtk_dma_write(struct mtk_cqdma_pchan *pc, u32 reg, u32 val) | ||
| 175 | { | ||
| 176 | writel_relaxed(val, pc->base + reg); | ||
| 177 | } | ||
| 178 | |||
| 179 | static void mtk_dma_rmw(struct mtk_cqdma_pchan *pc, u32 reg, | ||
| 180 | u32 mask, u32 set) | ||
| 181 | { | ||
| 182 | u32 val; | ||
| 183 | |||
| 184 | val = mtk_dma_read(pc, reg); | ||
| 185 | val &= ~mask; | ||
| 186 | val |= set; | ||
| 187 | mtk_dma_write(pc, reg, val); | ||
| 188 | } | ||
| 189 | |||
| 190 | static void mtk_dma_set(struct mtk_cqdma_pchan *pc, u32 reg, u32 val) | ||
| 191 | { | ||
| 192 | mtk_dma_rmw(pc, reg, 0, val); | ||
| 193 | } | ||
| 194 | |||
| 195 | static void mtk_dma_clr(struct mtk_cqdma_pchan *pc, u32 reg, u32 val) | ||
| 196 | { | ||
| 197 | mtk_dma_rmw(pc, reg, val, 0); | ||
| 198 | } | ||
| 199 | |||
| 200 | static void mtk_cqdma_vdesc_free(struct virt_dma_desc *vd) | ||
| 201 | { | ||
| 202 | kfree(to_cqdma_vdesc(vd)); | ||
| 203 | } | ||
| 204 | |||
| 205 | static int mtk_cqdma_poll_engine_done(struct mtk_cqdma_pchan *pc, bool atomic) | ||
| 206 | { | ||
| 207 | u32 status = 0; | ||
| 208 | |||
| 209 | if (!atomic) | ||
| 210 | return readl_poll_timeout(pc->base + MTK_CQDMA_EN, | ||
| 211 | status, | ||
| 212 | !(status & MTK_CQDMA_EN_BIT), | ||
| 213 | MTK_CQDMA_USEC_POLL, | ||
| 214 | MTK_CQDMA_TIMEOUT_POLL); | ||
| 215 | |||
| 216 | return readl_poll_timeout_atomic(pc->base + MTK_CQDMA_EN, | ||
| 217 | status, | ||
| 218 | !(status & MTK_CQDMA_EN_BIT), | ||
| 219 | MTK_CQDMA_USEC_POLL, | ||
| 220 | MTK_CQDMA_TIMEOUT_POLL); | ||
| 221 | } | ||
| 222 | |||
| 223 | static int mtk_cqdma_hard_reset(struct mtk_cqdma_pchan *pc) | ||
| 224 | { | ||
| 225 | mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT); | ||
| 226 | mtk_dma_clr(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT); | ||
| 227 | |||
| 228 | return mtk_cqdma_poll_engine_done(pc, false); | ||
| 229 | } | ||
| 230 | |||
| 231 | static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc, | ||
| 232 | struct mtk_cqdma_vdesc *cvd) | ||
| 233 | { | ||
| 234 | /* wait for the previous transaction to complete */ | ||
| 235 | if (mtk_cqdma_poll_engine_done(pc, true) < 0) | ||
| 236 | dev_err(cqdma2dev(to_cqdma_dev(cvd->ch)), "cqdma wait transaction timeout\n"); | ||
| 237 | |||
| 238 | /* warm reset the dma engine for the new transaction */ | ||
| 239 | mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_WARM_RST_BIT); | ||
| 240 | if (mtk_cqdma_poll_engine_done(pc, true) < 0) | ||
| 241 | dev_err(cqdma2dev(to_cqdma_dev(cvd->ch)), "cqdma warm reset timeout\n"); | ||
| 242 | |||
| 243 | /* setup the source */ | ||
| 244 | mtk_dma_set(pc, MTK_CQDMA_SRC, cvd->src & MTK_CQDMA_ADDR_LIMIT); | ||
| 245 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | ||
| 246 | mtk_dma_set(pc, MTK_CQDMA_SRC2, cvd->src >> MTK_CQDMA_ADDR2_SHIFT); | ||
| 247 | #else | ||
| 248 | mtk_dma_set(pc, MTK_CQDMA_SRC2, 0); | ||
| 249 | #endif | ||
| 250 | |||
| 251 | /* setup the destination */ | ||
| 252 | mtk_dma_set(pc, MTK_CQDMA_DST, cvd->dest & MTK_CQDMA_ADDR_LIMIT); | ||
| 253 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT | ||
| 254 | mtk_dma_set(pc, MTK_CQDMA_DST2, cvd->dest >> MTK_CQDMA_ADDR2_SHIFT); | ||
| 255 | #else | ||
| 256 | mtk_dma_set(pc, MTK_CQDMA_DST2, 0); | ||
| 257 | #endif | ||
| 258 | |||
| 259 | /* setup the length */ | ||
| 260 | mtk_dma_set(pc, MTK_CQDMA_LEN1, cvd->len); | ||
| 261 | |||
| 262 | /* start dma engine */ | ||
| 263 | mtk_dma_set(pc, MTK_CQDMA_EN, MTK_CQDMA_EN_BIT); | ||
| 264 | } | ||
| 265 | |||
| 266 | static void mtk_cqdma_issue_vchan_pending(struct mtk_cqdma_vchan *cvc) | ||
| 267 | { | ||
| 268 | struct virt_dma_desc *vd, *vd2; | ||
| 269 | struct mtk_cqdma_pchan *pc = cvc->pc; | ||
| 270 | struct mtk_cqdma_vdesc *cvd; | ||
| 271 | bool trigger_engine = false; | ||
| 272 | |||
| 273 | lockdep_assert_held(&cvc->vc.lock); | ||
| 274 | lockdep_assert_held(&pc->lock); | ||
| 275 | |||
| 276 | list_for_each_entry_safe(vd, vd2, &cvc->vc.desc_issued, node) { | ||
| 277 | /* need to trigger dma engine if PC's queue is empty */ | ||
| 278 | if (list_empty(&pc->queue)) | ||
| 279 | trigger_engine = true; | ||
| 280 | |||
| 281 | cvd = to_cqdma_vdesc(vd); | ||
| 282 | |||
| 283 | /* add VD into PC's queue */ | ||
| 284 | list_add_tail(&cvd->node, &pc->queue); | ||
| 285 | |||
| 286 | /* start the dma engine */ | ||
| 287 | if (trigger_engine) | ||
| 288 | mtk_cqdma_start(pc, cvd); | ||
| 289 | |||
| 290 | /* remove VD from list desc_issued */ | ||
| 291 | list_del(&vd->node); | ||
| 292 | } | ||
| 293 | } | ||
| 294 | |||
| 295 | /* | ||
| 296 | * return true if this VC is active, | ||
| 297 | * meaning that there are VDs being processed by the PC | ||
| 298 | */ | ||
| 299 | static bool mtk_cqdma_is_vchan_active(struct mtk_cqdma_vchan *cvc) | ||
| 300 | { | ||
| 301 | struct mtk_cqdma_vdesc *cvd; | ||
| 302 | |||
| 303 | list_for_each_entry(cvd, &cvc->pc->queue, node) | ||
| 304 | if (cvc == to_cqdma_vchan(cvd->ch)) | ||
| 305 | return true; | ||
| 306 | |||
| 307 | return false; | ||
| 308 | } | ||
| 309 | |||
| 310 | /* | ||
| 311 | * return a pointer to the CVD just consumed by the PC | ||
| 312 | */ | ||
| 313 | static struct mtk_cqdma_vdesc | ||
| 314 | *mtk_cqdma_consume_work_queue(struct mtk_cqdma_pchan *pc) | ||
| 315 | { | ||
| 316 | struct mtk_cqdma_vchan *cvc; | ||
| 317 | struct mtk_cqdma_vdesc *cvd, *ret = NULL; | ||
| 318 | |||
| 319 | /* consume a CVD from PC's queue */ | ||
| 320 | cvd = list_first_entry_or_null(&pc->queue, | ||
| 321 | struct mtk_cqdma_vdesc, node); | ||
| 322 | if (unlikely(!cvd || !cvd->parent)) | ||
| 323 | return NULL; | ||
| 324 | |||
| 325 | cvc = to_cqdma_vchan(cvd->ch); | ||
| 326 | ret = cvd; | ||
| 327 | |||
| 328 | /* update residue of the parent CVD */ | ||
| 329 | cvd->parent->residue -= cvd->len; | ||
| 330 | |||
| 331 | /* delete CVD from PC's queue */ | ||
| 332 | list_del(&cvd->node); | ||
| 333 | |||
| 334 | spin_lock(&cvc->vc.lock); | ||
| 335 | |||
| 336 | /* check whether all the child CVDs completed */ | ||
| 337 | if (!cvd->parent->residue) { | ||
| 338 | /* add the parent VD into list desc_completed */ | ||
| 339 | vchan_cookie_complete(&cvd->parent->vd); | ||
| 340 | |||
| 341 | /* setup completion if this VC is under synchronization */ | ||
| 342 | if (cvc->issue_synchronize && !mtk_cqdma_is_vchan_active(cvc)) { | ||
| 343 | complete(&cvc->issue_completion); | ||
| 344 | cvc->issue_synchronize = false; | ||
| 345 | } | ||
| 346 | } | ||
| 347 | |||
| 348 | spin_unlock(&cvc->vc.lock); | ||
| 349 | |||
| 350 | /* start transaction for next CVD in the queue */ | ||
| 351 | cvd = list_first_entry_or_null(&pc->queue, | ||
| 352 | struct mtk_cqdma_vdesc, node); | ||
| 353 | if (cvd) | ||
| 354 | mtk_cqdma_start(pc, cvd); | ||
| 355 | |||
| 356 | return ret; | ||
| 357 | } | ||
| 358 | |||
| 359 | static void mtk_cqdma_tasklet_cb(unsigned long data) | ||
| 360 | { | ||
| 361 | struct mtk_cqdma_pchan *pc = (struct mtk_cqdma_pchan *)data; | ||
| 362 | struct mtk_cqdma_vdesc *cvd = NULL; | ||
| 363 | unsigned long flags; | ||
| 364 | |||
| 365 | spin_lock_irqsave(&pc->lock, flags); | ||
| 366 | /* consume the queue */ | ||
| 367 | cvd = mtk_cqdma_consume_work_queue(pc); | ||
| 368 | spin_unlock_irqrestore(&pc->lock, flags); | ||
| 369 | |||
| 370 | /* submit the transactions that depend on the consumed CVD */ | ||
| 371 | if (cvd) { | ||
| 372 | dma_run_dependencies(&cvd->vd.tx); | ||
| 373 | |||
| 374 | /* | ||
| 375 | * free child CVD after completion. | ||
| 376 | * the parent CVD would be freed with desc_free by the user. | ||
| 377 | */ | ||
| 378 | if (cvd->parent != cvd) | ||
| 379 | kfree(cvd); | ||
| 380 | } | ||
| 381 | |||
| 382 | /* re-enable interrupt before leaving tasklet */ | ||
| 383 | enable_irq(pc->irq); | ||
| 384 | } | ||
| 385 | |||
| 386 | static irqreturn_t mtk_cqdma_irq(int irq, void *devid) | ||
| 387 | { | ||
| 388 | struct mtk_cqdma_device *cqdma = devid; | ||
| 389 | irqreturn_t ret = IRQ_NONE; | ||
| 390 | bool schedule_tasklet = false; | ||
| 391 | u32 i; | ||
| 392 | |||
| 393 | /* clear interrupt flags for each PC */ | ||
| 394 | for (i = 0; i < cqdma->dma_channels; ++i, schedule_tasklet = false) { | ||
| 395 | spin_lock(&cqdma->pc[i]->lock); | ||
| 396 | if (mtk_dma_read(cqdma->pc[i], | ||
| 397 | MTK_CQDMA_INT_FLAG) & MTK_CQDMA_INT_FLAG_BIT) { | ||
| 398 | /* clear interrupt */ | ||
| 399 | mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_FLAG, | ||
| 400 | MTK_CQDMA_INT_FLAG_BIT); | ||
| 401 | |||
| 402 | schedule_tasklet = true; | ||
| 403 | ret = IRQ_HANDLED; | ||
| 404 | } | ||
| 405 | spin_unlock(&cqdma->pc[i]->lock); | ||
| 406 | |||
| 407 | if (schedule_tasklet) { | ||
| 408 | /* disable interrupt */ | ||
| 409 | disable_irq_nosync(cqdma->pc[i]->irq); | ||
| 410 | |||
| 411 | /* schedule the tasklet to handle the transactions */ | ||
| 412 | tasklet_schedule(&cqdma->pc[i]->tasklet); | ||
| 413 | } | ||
| 414 | } | ||
| 415 | |||
| 416 | return ret; | ||
| 417 | } | ||
| 418 | |||
| 419 | static struct virt_dma_desc *mtk_cqdma_find_active_desc(struct dma_chan *c, | ||
| 420 | dma_cookie_t cookie) | ||
| 421 | { | ||
| 422 | struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c); | ||
| 423 | struct virt_dma_desc *vd; | ||
| 424 | unsigned long flags; | ||
| 425 | |||
| 426 | spin_lock_irqsave(&cvc->pc->lock, flags); | ||
| 427 | list_for_each_entry(vd, &cvc->pc->queue, node) | ||
| 428 | if (vd->tx.cookie == cookie) { | ||
| 429 | spin_unlock_irqrestore(&cvc->pc->lock, flags); | ||
| 430 | return vd; | ||
| 431 | } | ||
| 432 | spin_unlock_irqrestore(&cvc->pc->lock, flags); | ||
| 433 | |||
| 434 | list_for_each_entry(vd, &cvc->vc.desc_issued, node) | ||
| 435 | if (vd->tx.cookie == cookie) | ||
| 436 | return vd; | ||
| 437 | |||
| 438 | return NULL; | ||
| 439 | } | ||
| 440 | |||
| 441 | static enum dma_status mtk_cqdma_tx_status(struct dma_chan *c, | ||
| 442 | dma_cookie_t cookie, | ||
| 443 | struct dma_tx_state *txstate) | ||
| 444 | { | ||
| 445 | struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c); | ||
| 446 | struct mtk_cqdma_vdesc *cvd; | ||
| 447 | struct virt_dma_desc *vd; | ||
| 448 | enum dma_status ret; | ||
| 449 | unsigned long flags; | ||
| 450 | size_t bytes = 0; | ||
| 451 | |||
| 452 | ret = dma_cookie_status(c, cookie, txstate); | ||
| 453 | if (ret == DMA_COMPLETE || !txstate) | ||
| 454 | return ret; | ||
| 455 | |||
| 456 | spin_lock_irqsave(&cvc->vc.lock, flags); | ||
| 457 | vd = mtk_cqdma_find_active_desc(c, cookie); | ||
| 458 | spin_unlock_irqrestore(&cvc->vc.lock, flags); | ||
| 459 | |||
| 460 | if (vd) { | ||
| 461 | cvd = to_cqdma_vdesc(vd); | ||
| 462 | bytes = cvd->residue; | ||
| 463 | } | ||
| 464 | |||
| 465 | dma_set_residue(txstate, bytes); | ||
| 466 | |||
| 467 | return ret; | ||
| 468 | } | ||
| 469 | |||
| 470 | static void mtk_cqdma_issue_pending(struct dma_chan *c) | ||
| 471 | { | ||
| 472 | struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c); | ||
| 473 | unsigned long pc_flags; | ||
| 474 | unsigned long vc_flags; | ||
| 475 | |||
| 476 | /* acquire PC's lock before VC's lock for lock dependency in tasklet */ | ||
| 477 | spin_lock_irqsave(&cvc->pc->lock, pc_flags); | ||
| 478 | spin_lock_irqsave(&cvc->vc.lock, vc_flags); | ||
| 479 | |||
| 480 | if (vchan_issue_pending(&cvc->vc)) | ||
| 481 | mtk_cqdma_issue_vchan_pending(cvc); | ||
| 482 | |||
| 483 | spin_unlock_irqrestore(&cvc->vc.lock, vc_flags); | ||
| 484 | spin_unlock_irqrestore(&cvc->pc->lock, pc_flags); | ||
| 485 | } | ||
| 486 | |||
| 487 | static struct dma_async_tx_descriptor * | ||
| 488 | mtk_cqdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, | ||
| 489 | dma_addr_t src, size_t len, unsigned long flags) | ||
| 490 | { | ||
| 491 | struct mtk_cqdma_vdesc **cvd; | ||
| 492 | struct dma_async_tx_descriptor *tx = NULL, *prev_tx = NULL; | ||
| 493 | size_t i, tlen, nr_vd; | ||
| 494 | |||
| 495 | /* | ||
| 496 | * In the case that the transaction length is larger than what the | ||
| 497 | * DMA engine supports, a single memcpy transaction needs | ||
| 498 | * to be separated into several DMA transactions. | ||
| 499 | * Each DMA transaction would be described by a CVD, | ||
| 500 | * and the first one is referred to as the parent CVD, | ||
| 501 | * while the others are child CVDs. | ||
| 502 | * The parent CVD's tx descriptor is the only tx descriptor | ||
| 503 | * returned to the DMA user, and it should not be completed | ||
| 504 | * until all the child CVDs completed. | ||
| 505 | */ | ||
| 506 | nr_vd = DIV_ROUND_UP(len, MTK_CQDMA_MAX_LEN); | ||
| 507 | cvd = kcalloc(nr_vd, sizeof(*cvd), GFP_NOWAIT); | ||
| 508 | if (!cvd) | ||
| 509 | return NULL; | ||
| 510 | |||
| 511 | for (i = 0; i < nr_vd; ++i) { | ||
| 512 | cvd[i] = kzalloc(sizeof(*cvd[i]), GFP_NOWAIT); | ||
| 513 | if (!cvd[i]) { | ||
| 514 | for (; i > 0; --i) | ||
| 515 | kfree(cvd[i - 1]); | ||
| 516 | return NULL; | ||
| 517 | } | ||
| 518 | |||
| 519 | /* setup dma channel */ | ||
| 520 | cvd[i]->ch = c; | ||
| 521 | |||
| 522 | /* setup source, destination, and length */ | ||
| 523 | tlen = (len > MTK_CQDMA_MAX_LEN) ? MTK_CQDMA_MAX_LEN : len; | ||
| 524 | cvd[i]->len = tlen; | ||
| 525 | cvd[i]->src = src; | ||
| 526 | cvd[i]->dest = dest; | ||
| 527 | |||
| 528 | /* setup tx descriptor */ | ||
| 529 | tx = vchan_tx_prep(to_virt_chan(c), &cvd[i]->vd, flags); | ||
| 530 | tx->next = NULL; | ||
| 531 | |||
| 532 | if (!i) { | ||
| 533 | cvd[0]->residue = len; | ||
| 534 | } else { | ||
| 535 | prev_tx->next = tx; | ||
| 536 | cvd[i]->residue = tlen; | ||
| 537 | } | ||
| 538 | |||
| 539 | cvd[i]->parent = cvd[0]; | ||
| 540 | |||
| 541 | /* update the src, dest, len, prev_tx for the next CVD */ | ||
| 542 | src += tlen; | ||
| 543 | dest += tlen; | ||
| 544 | len -= tlen; | ||
| 545 | prev_tx = tx; | ||
| 546 | } | ||
| 547 | |||
| 548 | return &cvd[0]->vd.tx; | ||
| 549 | } | ||
| 550 | |||
| 551 | static void mtk_cqdma_free_inactive_desc(struct dma_chan *c) | ||
| 552 | { | ||
| 553 | struct virt_dma_chan *vc = to_virt_chan(c); | ||
| 554 | unsigned long flags; | ||
| 555 | LIST_HEAD(head); | ||
| 556 | |||
| 557 | /* | ||
| 558 | * set desc_allocated, desc_submitted, | ||
| 559 | * and desc_issued as the candidates to be freed | ||
| 560 | */ | ||
| 561 | spin_lock_irqsave(&vc->lock, flags); | ||
| 562 | list_splice_tail_init(&vc->desc_allocated, &head); | ||
| 563 | list_splice_tail_init(&vc->desc_submitted, &head); | ||
| 564 | list_splice_tail_init(&vc->desc_issued, &head); | ||
| 565 | spin_unlock_irqrestore(&vc->lock, flags); | ||
| 566 | |||
| 567 | /* free descriptor lists */ | ||
| 568 | vchan_dma_desc_free_list(vc, &head); | ||
| 569 | } | ||
| 570 | |||
| 571 | static void mtk_cqdma_free_active_desc(struct dma_chan *c) | ||
| 572 | { | ||
| 573 | struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c); | ||
| 574 | bool sync_needed = false; | ||
| 575 | unsigned long pc_flags; | ||
| 576 | unsigned long vc_flags; | ||
| 577 | |||
| 578 | /* acquire PC's lock first due to lock dependency in dma ISR */ | ||
| 579 | spin_lock_irqsave(&cvc->pc->lock, pc_flags); | ||
| 580 | spin_lock_irqsave(&cvc->vc.lock, vc_flags); | ||
| 581 | |||
| 582 | /* synchronization is required if this VC is active */ | ||
| 583 | if (mtk_cqdma_is_vchan_active(cvc)) { | ||
| 584 | cvc->issue_synchronize = true; | ||
| 585 | sync_needed = true; | ||
| 586 | } | ||
| 587 | |||
| 588 | spin_unlock_irqrestore(&cvc->vc.lock, vc_flags); | ||
| 589 | spin_unlock_irqrestore(&cvc->pc->lock, pc_flags); | ||
| 590 | |||
| 591 | /* waiting for the completion of this VC */ | ||
| 592 | if (sync_needed) | ||
| 593 | wait_for_completion(&cvc->issue_completion); | ||
| 594 | |||
| 595 | /* free all descriptors in list desc_completed */ | ||
| 596 | vchan_synchronize(&cvc->vc); | ||
| 597 | |||
| 598 | WARN_ONCE(!list_empty(&cvc->vc.desc_completed), | ||
| 599 | "Desc pending still in list desc_completed\n"); | ||
| 600 | } | ||
| 601 | |||
| 602 | static int mtk_cqdma_terminate_all(struct dma_chan *c) | ||
| 603 | { | ||
| 604 | /* free descriptors not processed yet by hardware */ | ||
| 605 | mtk_cqdma_free_inactive_desc(c); | ||
| 606 | |||
| 607 | /* free descriptors being processed by hardware */ | ||
| 608 | mtk_cqdma_free_active_desc(c); | ||
| 609 | |||
| 610 | return 0; | ||
| 611 | } | ||
| 612 | |||
| 613 | static int mtk_cqdma_alloc_chan_resources(struct dma_chan *c) | ||
| 614 | { | ||
| 615 | struct mtk_cqdma_device *cqdma = to_cqdma_dev(c); | ||
| 616 | struct mtk_cqdma_vchan *vc = to_cqdma_vchan(c); | ||
| 617 | struct mtk_cqdma_pchan *pc = NULL; | ||
| 618 | u32 i, min_refcnt = U32_MAX, refcnt; | ||
| 619 | unsigned long flags; | ||
| 620 | |||
| 621 | /* allocate PC with the minimum refcount */ | ||
| 622 | for (i = 0; i < cqdma->dma_channels; ++i) { | ||
| 623 | refcnt = refcount_read(&cqdma->pc[i]->refcnt); | ||
| 624 | if (refcnt < min_refcnt) { | ||
| 625 | pc = cqdma->pc[i]; | ||
| 626 | min_refcnt = refcnt; | ||
| 627 | } | ||
| 628 | } | ||
| 629 | |||
| 630 | if (!pc) | ||
| 631 | return -ENOSPC; | ||
| 632 | |||
| 633 | spin_lock_irqsave(&pc->lock, flags); | ||
| 634 | |||
| 635 | if (!refcount_read(&pc->refcnt)) { | ||
| 636 | /* allocate PC when the refcount is zero */ | ||
| 637 | mtk_cqdma_hard_reset(pc); | ||
| 638 | |||
| 639 | /* enable interrupt for this PC */ | ||
| 640 | mtk_dma_set(pc, MTK_CQDMA_INT_EN, MTK_CQDMA_INT_EN_BIT); | ||
| 641 | |||
| 642 | /* | ||
| 643 | * refcount_inc() would complain about incrementing from 0, which | ||
| 644 | * looks like a use-after-free; explicitly set it to 1 initially. | ||
| 645 | */ | ||
| 646 | refcount_set(&pc->refcnt, 1); | ||
| 647 | } else { | ||
| 648 | refcount_inc(&pc->refcnt); | ||
| 649 | } | ||
| 650 | |||
| 651 | spin_unlock_irqrestore(&pc->lock, flags); | ||
| 652 | |||
| 653 | vc->pc = pc; | ||
| 654 | |||
| 655 | return 0; | ||
| 656 | } | ||
| 657 | |||
| 658 | static void mtk_cqdma_free_chan_resources(struct dma_chan *c) | ||
| 659 | { | ||
| 660 | struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c); | ||
| 661 | unsigned long flags; | ||
| 662 | |||
| 663 | /* free all descriptors in all lists on the VC */ | ||
| 664 | mtk_cqdma_terminate_all(c); | ||
| 665 | |||
| 666 | spin_lock_irqsave(&cvc->pc->lock, flags); | ||
| 667 | |||
| 668 | /* PC is not freed until there is no VC mapped to it */ | ||
| 669 | if (refcount_dec_and_test(&cvc->pc->refcnt)) { | ||
| 670 | /* start the flush operation and stop the engine */ | ||
| 671 | mtk_dma_set(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT); | ||
| 672 | |||
| 673 | /* wait for the completion of flush operation */ | ||
| 674 | if (mtk_cqdma_poll_engine_done(cvc->pc, false) < 0) | ||
| 675 | dev_err(cqdma2dev(to_cqdma_dev(c)), "cqdma flush timeout\n"); | ||
| 676 | |||
| 677 | /* clear the flush bit and interrupt flag */ | ||
| 678 | mtk_dma_clr(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT); | ||
| 679 | mtk_dma_clr(cvc->pc, MTK_CQDMA_INT_FLAG, | ||
| 680 | MTK_CQDMA_INT_FLAG_BIT); | ||
| 681 | |||
| 682 | /* disable interrupt for this PC */ | ||
| 683 | mtk_dma_clr(cvc->pc, MTK_CQDMA_INT_EN, MTK_CQDMA_INT_EN_BIT); | ||
| 684 | } | ||
| 685 | |||
| 686 | spin_unlock_irqrestore(&cvc->pc->lock, flags); | ||
| 687 | } | ||
| 688 | |||
| 689 | static int mtk_cqdma_hw_init(struct mtk_cqdma_device *cqdma) | ||
| 690 | { | ||
| 691 | unsigned long flags; | ||
| 692 | int err; | ||
| 693 | u32 i; | ||
| 694 | |||
| 695 | pm_runtime_enable(cqdma2dev(cqdma)); | ||
| 696 | pm_runtime_get_sync(cqdma2dev(cqdma)); | ||
| 697 | |||
| 698 | err = clk_prepare_enable(cqdma->clk); | ||
| 699 | |||
| 700 | if (err) { | ||
| 701 | pm_runtime_put_sync(cqdma2dev(cqdma)); | ||
| 702 | pm_runtime_disable(cqdma2dev(cqdma)); | ||
| 703 | return err; | ||
| 704 | } | ||
| 705 | |||
| 706 | /* reset all PCs */ | ||
| 707 | for (i = 0; i < cqdma->dma_channels; ++i) { | ||
| 708 | spin_lock_irqsave(&cqdma->pc[i]->lock, flags); | ||
| 709 | if (mtk_cqdma_hard_reset(cqdma->pc[i]) < 0) { | ||
| 710 | dev_err(cqdma2dev(cqdma), "cqdma hard reset timeout\n"); | ||
| 711 | spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags); | ||
| 712 | |||
| 713 | clk_disable_unprepare(cqdma->clk); | ||
| 714 | pm_runtime_put_sync(cqdma2dev(cqdma)); | ||
| 715 | pm_runtime_disable(cqdma2dev(cqdma)); | ||
| 716 | return -EINVAL; | ||
| 717 | } | ||
| 718 | spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags); | ||
| 719 | } | ||
| 720 | |||
| 721 | return 0; | ||
| 722 | } | ||
| 723 | |||
| 724 | static void mtk_cqdma_hw_deinit(struct mtk_cqdma_device *cqdma) | ||
| 725 | { | ||
| 726 | unsigned long flags; | ||
| 727 | u32 i; | ||
| 728 | |||
| 729 | /* reset all PCs */ | ||
| 730 | for (i = 0; i < cqdma->dma_channels; ++i) { | ||
| 731 | spin_lock_irqsave(&cqdma->pc[i]->lock, flags); | ||
| 732 | if (mtk_cqdma_hard_reset(cqdma->pc[i]) < 0) | ||
| 733 | dev_err(cqdma2dev(cqdma), "cqdma hard reset timeout\n"); | ||
| 734 | spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags); | ||
| 735 | } | ||
| 736 | |||
| 737 | clk_disable_unprepare(cqdma->clk); | ||
| 738 | |||
| 739 | pm_runtime_put_sync(cqdma2dev(cqdma)); | ||
| 740 | pm_runtime_disable(cqdma2dev(cqdma)); | ||
| 741 | } | ||
| 742 | |||
| 743 | static const struct of_device_id mtk_cqdma_match[] = { | ||
| 744 | { .compatible = "mediatek,mt6765-cqdma" }, | ||
| 745 | { /* sentinel */ } | ||
| 746 | }; | ||
| 747 | MODULE_DEVICE_TABLE(of, mtk_cqdma_match); | ||
| 748 | |||
| 749 | static int mtk_cqdma_probe(struct platform_device *pdev) | ||
| 750 | { | ||
| 751 | struct mtk_cqdma_device *cqdma; | ||
| 752 | struct mtk_cqdma_vchan *vc; | ||
| 753 | struct dma_device *dd; | ||
| 754 | struct resource *res; | ||
| 755 | int err; | ||
| 756 | u32 i; | ||
| 757 | |||
| 758 | cqdma = devm_kzalloc(&pdev->dev, sizeof(*cqdma), GFP_KERNEL); | ||
| 759 | if (!cqdma) | ||
| 760 | return -ENOMEM; | ||
| 761 | |||
| 762 | dd = &cqdma->ddev; | ||
| 763 | |||
| 764 | cqdma->clk = devm_clk_get(&pdev->dev, "cqdma"); | ||
| 765 | if (IS_ERR(cqdma->clk)) { | ||
| 766 | dev_err(&pdev->dev, "No clock for %s\n", | ||
| 767 | dev_name(&pdev->dev)); | ||
| 768 | return PTR_ERR(cqdma->clk); | ||
| 769 | } | ||
| 770 | |||
| 771 | dma_cap_set(DMA_MEMCPY, dd->cap_mask); | ||
| 772 | |||
| 773 | dd->copy_align = MTK_CQDMA_ALIGN_SIZE; | ||
| 774 | dd->device_alloc_chan_resources = mtk_cqdma_alloc_chan_resources; | ||
| 775 | dd->device_free_chan_resources = mtk_cqdma_free_chan_resources; | ||
| 776 | dd->device_tx_status = mtk_cqdma_tx_status; | ||
| 777 | dd->device_issue_pending = mtk_cqdma_issue_pending; | ||
| 778 | dd->device_prep_dma_memcpy = mtk_cqdma_prep_dma_memcpy; | ||
| 779 | dd->device_terminate_all = mtk_cqdma_terminate_all; | ||
| 780 | dd->src_addr_widths = MTK_CQDMA_DMA_BUSWIDTHS; | ||
| 781 | dd->dst_addr_widths = MTK_CQDMA_DMA_BUSWIDTHS; | ||
| 782 | dd->directions = BIT(DMA_MEM_TO_MEM); | ||
| 783 | dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; | ||
| 784 | dd->dev = &pdev->dev; | ||
| 785 | INIT_LIST_HEAD(&dd->channels); | ||
| 786 | |||
| 787 | if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node, | ||
| 788 | "dma-requests", | ||
| 789 | &cqdma->dma_requests)) { | ||
| 790 | dev_info(&pdev->dev, | ||
| 791 | "Using %u as missing dma-requests property\n", | ||
| 792 | MTK_CQDMA_NR_VCHANS); | ||
| 793 | |||
| 794 | cqdma->dma_requests = MTK_CQDMA_NR_VCHANS; | ||
| 795 | } | ||
| 796 | |||
| 797 | if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node, | ||
| 798 | "dma-channels", | ||
| 799 | &cqdma->dma_channels)) { | ||
| 800 | dev_info(&pdev->dev, | ||
| 801 | "Using %u as missing dma-channels property\n", | ||
| 802 | MTK_CQDMA_NR_PCHANS); | ||
| 803 | |||
| 804 | cqdma->dma_channels = MTK_CQDMA_NR_PCHANS; | ||
| 805 | } | ||
| 806 | |||
| 807 | cqdma->pc = devm_kcalloc(&pdev->dev, cqdma->dma_channels, | ||
| 808 | sizeof(*cqdma->pc), GFP_KERNEL); | ||
| 809 | if (!cqdma->pc) | ||
| 810 | return -ENOMEM; | ||
| 811 | |||
| 812 | /* initialization for PCs */ | ||
| 813 | for (i = 0; i < cqdma->dma_channels; ++i) { | ||
| 814 | cqdma->pc[i] = devm_kcalloc(&pdev->dev, 1, | ||
| 815 | sizeof(**cqdma->pc), GFP_KERNEL); | ||
| 816 | if (!cqdma->pc[i]) | ||
| 817 | return -ENOMEM; | ||
| 818 | |||
| 819 | INIT_LIST_HEAD(&cqdma->pc[i]->queue); | ||
| 820 | spin_lock_init(&cqdma->pc[i]->lock); | ||
| 821 | refcount_set(&cqdma->pc[i]->refcnt, 0); | ||
| 822 | |||
| 823 | res = platform_get_resource(pdev, IORESOURCE_MEM, i); | ||
| 824 | if (!res) { | ||
| 825 | dev_err(&pdev->dev, "No mem resource for %s\n", | ||
| 826 | dev_name(&pdev->dev)); | ||
| 827 | return -EINVAL; | ||
| 828 | } | ||
| 829 | |||
| 830 | cqdma->pc[i]->base = devm_ioremap_resource(&pdev->dev, res); | ||
| 831 | if (IS_ERR(cqdma->pc[i]->base)) | ||
| 832 | return PTR_ERR(cqdma->pc[i]->base); | ||
| 833 | |||
| 834 | /* allocate IRQ resource */ | ||
| 835 | res = platform_get_resource(pdev, IORESOURCE_IRQ, i); | ||
| 836 | if (!res) { | ||
| 837 | dev_err(&pdev->dev, "No irq resource for %s\n", | ||
| 838 | dev_name(&pdev->dev)); | ||
| 839 | return -EINVAL; | ||
| 840 | } | ||
| 841 | cqdma->pc[i]->irq = res->start; | ||
| 842 | |||
| 843 | err = devm_request_irq(&pdev->dev, cqdma->pc[i]->irq, | ||
| 844 | mtk_cqdma_irq, 0, dev_name(&pdev->dev), | ||
| 845 | cqdma); | ||
| 846 | if (err) { | ||
| 847 | dev_err(&pdev->dev, | ||
| 848 | "request_irq failed with err %d\n", err); | ||
| 849 | return -EINVAL; | ||
| 850 | } | ||
| 851 | } | ||
| 852 | |||
| 853 | /* allocate resource for VCs */ | ||
| 854 | cqdma->vc = devm_kcalloc(&pdev->dev, cqdma->dma_requests, | ||
| 855 | sizeof(*cqdma->vc), GFP_KERNEL); | ||
| 856 | if (!cqdma->vc) | ||
| 857 | return -ENOMEM; | ||
| 858 | |||
| 859 | for (i = 0; i < cqdma->dma_requests; i++) { | ||
| 860 | vc = &cqdma->vc[i]; | ||
| 861 | vc->vc.desc_free = mtk_cqdma_vdesc_free; | ||
| 862 | vchan_init(&vc->vc, dd); | ||
| 863 | init_completion(&vc->issue_completion); | ||
| 864 | } | ||
| 865 | |||
| 866 | err = dma_async_device_register(dd); | ||
| 867 | if (err) | ||
| 868 | return err; | ||
| 869 | |||
| 870 | err = of_dma_controller_register(pdev->dev.of_node, | ||
| 871 | of_dma_xlate_by_chan_id, cqdma); | ||
| 872 | if (err) { | ||
| 873 | dev_err(&pdev->dev, | ||
| 874 | "MediaTek CQDMA OF registration failed %d\n", err); | ||
| 875 | goto err_unregister; | ||
| 876 | } | ||
| 877 | |||
| 878 | err = mtk_cqdma_hw_init(cqdma); | ||
| 879 | if (err) { | ||
| 880 | dev_err(&pdev->dev, | ||
| 881 | "MediaTek CQDMA HW initialization failed %d\n", err); | ||
| 882 | goto err_unregister; | ||
| 883 | } | ||
| 884 | |||
| 885 | platform_set_drvdata(pdev, cqdma); | ||
| 886 | |||
| 887 | /* initialize tasklet for each PC */ | ||
| 888 | for (i = 0; i < cqdma->dma_channels; ++i) | ||
| 889 | tasklet_init(&cqdma->pc[i]->tasklet, mtk_cqdma_tasklet_cb, | ||
| 890 | (unsigned long)cqdma->pc[i]); | ||
| 891 | |||
| 892 | dev_info(&pdev->dev, "MediaTek CQDMA driver registered\n"); | ||
| 893 | |||
| 894 | return 0; | ||
| 895 | |||
| 896 | err_unregister: | ||
| 897 | dma_async_device_unregister(dd); | ||
| 898 | |||
| 899 | return err; | ||
| 900 | } | ||
| 901 | |||
| 902 | static int mtk_cqdma_remove(struct platform_device *pdev) | ||
| 903 | { | ||
| 904 | struct mtk_cqdma_device *cqdma = platform_get_drvdata(pdev); | ||
| 905 | struct mtk_cqdma_vchan *vc; | ||
| 906 | unsigned long flags; | ||
| 907 | int i; | ||
| 908 | |||
| 909 | /* kill VC task */ | ||
| 910 | for (i = 0; i < cqdma->dma_requests; i++) { | ||
| 911 | vc = &cqdma->vc[i]; | ||
| 912 | |||
| 913 | list_del(&vc->vc.chan.device_node); | ||
| 914 | tasklet_kill(&vc->vc.task); | ||
| 915 | } | ||
| 916 | |||
| 917 | /* disable interrupt */ | ||
| 918 | for (i = 0; i < cqdma->dma_channels; i++) { | ||
| 919 | spin_lock_irqsave(&cqdma->pc[i]->lock, flags); | ||
| 920 | mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_EN, | ||
| 921 | MTK_CQDMA_INT_EN_BIT); | ||
| 922 | spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags); | ||
| 923 | |||
| 924 | /* Waits for any pending IRQ handlers to complete */ | ||
| 925 | synchronize_irq(cqdma->pc[i]->irq); | ||
| 926 | |||
| 927 | tasklet_kill(&cqdma->pc[i]->tasklet); | ||
| 928 | } | ||
| 929 | |||
| 930 | /* disable hardware */ | ||
| 931 | mtk_cqdma_hw_deinit(cqdma); | ||
| 932 | |||
| 933 | dma_async_device_unregister(&cqdma->ddev); | ||
| 934 | of_dma_controller_free(pdev->dev.of_node); | ||
| 935 | |||
| 936 | return 0; | ||
| 937 | } | ||
| 938 | |||
| 939 | static struct platform_driver mtk_cqdma_driver = { | ||
| 940 | .probe = mtk_cqdma_probe, | ||
| 941 | .remove = mtk_cqdma_remove, | ||
| 942 | .driver = { | ||
| 943 | .name = KBUILD_MODNAME, | ||
| 944 | .of_match_table = mtk_cqdma_match, | ||
| 945 | }, | ||
| 946 | }; | ||
| 947 | module_platform_driver(mtk_cqdma_driver); | ||
| 948 | |||
| 949 | MODULE_DESCRIPTION("MediaTek CQDMA Controller Driver"); | ||
| 950 | MODULE_AUTHOR("Shun-Chih Yu <shun-chih.yu@mediatek.com>"); | ||
| 951 | MODULE_LICENSE("GPL v2"); | ||
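The new driver only advertises DMA_MEMCPY capability, so consumers go through the generic dmaengine memcpy API; transfers longer than MTK_CQDMA_MAX_LEN are split into parent/child CVDs internally, and only the parent's cookie completes once the residue of all children has drained to zero. A minimal consumer sketch, assuming dst_dma/src_dma are already DMA-mapped and that the "cqdma" request name and my_done_cb callback are hypothetical:

	#include <linux/dmaengine.h>

	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;

	chan = dma_request_chan(dev, "cqdma");	/* hypothetical dma-names entry */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
				       DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	tx->callback = my_done_cb;	/* hypothetical completion callback */
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);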
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c index adfd316db1a8..6a91e28d537d 100644 --- a/drivers/dma/mic_x100_dma.c +++ b/drivers/dma/mic_x100_dma.c | |||
| @@ -676,7 +676,7 @@ static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev) | |||
| 676 | } | 676 | } |
| 677 | 677 | ||
| 678 | /* DEBUGFS CODE */ | 678 | /* DEBUGFS CODE */ |
| 679 | static int mic_dma_reg_seq_show(struct seq_file *s, void *pos) | 679 | static int mic_dma_reg_show(struct seq_file *s, void *pos) |
| 680 | { | 680 | { |
| 681 | struct mic_dma_device *mic_dma_dev = s->private; | 681 | struct mic_dma_device *mic_dma_dev = s->private; |
| 682 | int i, chan_num, first_chan = mic_dma_dev->start_ch; | 682 | int i, chan_num, first_chan = mic_dma_dev->start_ch; |
| @@ -707,23 +707,7 @@ static int mic_dma_reg_seq_show(struct seq_file *s, void *pos) | |||
| 707 | return 0; | 707 | return 0; |
| 708 | } | 708 | } |
| 709 | 709 | ||
| 710 | static int mic_dma_reg_debug_open(struct inode *inode, struct file *file) | 710 | DEFINE_SHOW_ATTRIBUTE(mic_dma_reg); |
| 711 | { | ||
| 712 | return single_open(file, mic_dma_reg_seq_show, inode->i_private); | ||
| 713 | } | ||
| 714 | |||
| 715 | static int mic_dma_reg_debug_release(struct inode *inode, struct file *file) | ||
| 716 | { | ||
| 717 | return single_release(inode, file); | ||
| 718 | } | ||
| 719 | |||
| 720 | static const struct file_operations mic_dma_reg_ops = { | ||
| 721 | .owner = THIS_MODULE, | ||
| 722 | .open = mic_dma_reg_debug_open, | ||
| 723 | .read = seq_read, | ||
| 724 | .llseek = seq_lseek, | ||
| 725 | .release = mic_dma_reg_debug_release | ||
| 726 | }; | ||
| 727 | 711 | ||
| 728 | /* Debugfs parent dir */ | 712 | /* Debugfs parent dir */ |
| 729 | static struct dentry *mic_dma_dbg; | 713 | static struct dentry *mic_dma_dbg; |
| @@ -747,7 +731,7 @@ static int mic_dma_driver_probe(struct mbus_device *mbdev) | |||
| 747 | if (mic_dma_dev->dbg_dir) | 731 | if (mic_dma_dev->dbg_dir) |
| 748 | debugfs_create_file("mic_dma_reg", 0444, | 732 | debugfs_create_file("mic_dma_reg", 0444, |
| 749 | mic_dma_dev->dbg_dir, mic_dma_dev, | 733 | mic_dma_dev->dbg_dir, mic_dma_dev, |
| 750 | &mic_dma_reg_ops); | 734 | &mic_dma_reg_fops); |
| 751 | } | 735 | } |
| 752 | return 0; | 736 | return 0; |
| 753 | } | 737 | } |
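This conversion, like the pxa_dma one later in this diff, drops hand-rolled single_open() boilerplate in favour of DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h>. For a show function named name_show(), the macro generates roughly the following (a trimmed sketch of the upstream helper):

	#define DEFINE_SHOW_ATTRIBUTE(__name)					\
	static int __name ## _open(struct inode *inode, struct file *file)	\
	{									\
		return single_open(file, __name ## _show, inode->i_private);	\
	}									\
										\
	static const struct file_operations __name ## _fops = {		\
		.owner		= THIS_MODULE,					\
		.open		= __name ## _open,				\
		.read		= seq_read,					\
		.llseek		= seq_lseek,					\
		.release	= single_release,				\
	}

which is why the show function is renamed to mic_dma_reg_show() and the file is registered with &mic_dma_reg_fops.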
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index eb3a1f42ab06..334bab92d26d 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c | |||
| @@ -96,6 +96,7 @@ struct mmp_pdma_chan { | |||
| 96 | struct dma_async_tx_descriptor desc; | 96 | struct dma_async_tx_descriptor desc; |
| 97 | struct mmp_pdma_phy *phy; | 97 | struct mmp_pdma_phy *phy; |
| 98 | enum dma_transfer_direction dir; | 98 | enum dma_transfer_direction dir; |
| 99 | struct dma_slave_config slave_config; | ||
| 99 | 100 | ||
| 100 | struct mmp_pdma_desc_sw *cyclic_first; /* first desc_sw if channel | 101 | struct mmp_pdma_desc_sw *cyclic_first; /* first desc_sw if channel |
| 101 | * is in cyclic mode */ | 102 | * is in cyclic mode */ |
| @@ -140,6 +141,10 @@ struct mmp_pdma_device { | |||
| 140 | #define to_mmp_pdma_dev(dmadev) \ | 141 | #define to_mmp_pdma_dev(dmadev) \ |
| 141 | container_of(dmadev, struct mmp_pdma_device, device) | 142 | container_of(dmadev, struct mmp_pdma_device, device) |
| 142 | 143 | ||
| 144 | static int mmp_pdma_config_write(struct dma_chan *dchan, | ||
| 145 | struct dma_slave_config *cfg, | ||
| 146 | enum dma_transfer_direction direction); | ||
| 147 | |||
| 143 | static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr) | 148 | static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr) |
| 144 | { | 149 | { |
| 145 | u32 reg = (phy->idx << 4) + DDADR; | 150 | u32 reg = (phy->idx << 4) + DDADR; |
| @@ -537,6 +542,8 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, | |||
| 537 | 542 | ||
| 538 | chan->byte_align = false; | 543 | chan->byte_align = false; |
| 539 | 544 | ||
| 545 | mmp_pdma_config_write(dchan, &chan->slave_config, dir); | ||
| 546 | |||
| 540 | for_each_sg(sgl, sg, sg_len, i) { | 547 | for_each_sg(sgl, sg, sg_len, i) { |
| 541 | addr = sg_dma_address(sg); | 548 | addr = sg_dma_address(sg); |
| 542 | avail = sg_dma_len(sgl); | 549 | avail = sg_dma_len(sgl); |
| @@ -619,6 +626,7 @@ mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan, | |||
| 619 | return NULL; | 626 | return NULL; |
| 620 | 627 | ||
| 621 | chan = to_mmp_pdma_chan(dchan); | 628 | chan = to_mmp_pdma_chan(dchan); |
| 629 | mmp_pdma_config_write(dchan, &chan->slave_config, direction); | ||
| 622 | 630 | ||
| 623 | switch (direction) { | 631 | switch (direction) { |
| 624 | case DMA_MEM_TO_DEV: | 632 | case DMA_MEM_TO_DEV: |
| @@ -684,8 +692,9 @@ fail: | |||
| 684 | return NULL; | 692 | return NULL; |
| 685 | } | 693 | } |
| 686 | 694 | ||
| 687 | static int mmp_pdma_config(struct dma_chan *dchan, | 695 | static int mmp_pdma_config_write(struct dma_chan *dchan, |
| 688 | struct dma_slave_config *cfg) | 696 | struct dma_slave_config *cfg, |
| 697 | enum dma_transfer_direction direction) | ||
| 689 | { | 698 | { |
| 690 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | 699 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); |
| 691 | u32 maxburst = 0, addr = 0; | 700 | u32 maxburst = 0, addr = 0; |
| @@ -694,12 +703,12 @@ static int mmp_pdma_config(struct dma_chan *dchan, | |||
| 694 | if (!dchan) | 703 | if (!dchan) |
| 695 | return -EINVAL; | 704 | return -EINVAL; |
| 696 | 705 | ||
| 697 | if (cfg->direction == DMA_DEV_TO_MEM) { | 706 | if (direction == DMA_DEV_TO_MEM) { |
| 698 | chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC; | 707 | chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC; |
| 699 | maxburst = cfg->src_maxburst; | 708 | maxburst = cfg->src_maxburst; |
| 700 | width = cfg->src_addr_width; | 709 | width = cfg->src_addr_width; |
| 701 | addr = cfg->src_addr; | 710 | addr = cfg->src_addr; |
| 702 | } else if (cfg->direction == DMA_MEM_TO_DEV) { | 711 | } else if (direction == DMA_MEM_TO_DEV) { |
| 703 | chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG; | 712 | chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG; |
| 704 | maxburst = cfg->dst_maxburst; | 713 | maxburst = cfg->dst_maxburst; |
| 705 | width = cfg->dst_addr_width; | 714 | width = cfg->dst_addr_width; |
| @@ -720,7 +729,7 @@ static int mmp_pdma_config(struct dma_chan *dchan, | |||
| 720 | else if (maxburst == 32) | 729 | else if (maxburst == 32) |
| 721 | chan->dcmd |= DCMD_BURST32; | 730 | chan->dcmd |= DCMD_BURST32; |
| 722 | 731 | ||
| 723 | chan->dir = cfg->direction; | 732 | chan->dir = direction; |
| 724 | chan->dev_addr = addr; | 733 | chan->dev_addr = addr; |
| 725 | /* FIXME: drivers should be ported over to use the filter | 734 | /* FIXME: drivers should be ported over to use the filter |
| 726 | * function. Once that's done, the following two lines can | 735 | * function. Once that's done, the following two lines can |
| @@ -732,6 +741,15 @@ static int mmp_pdma_config(struct dma_chan *dchan, | |||
| 732 | return 0; | 741 | return 0; |
| 733 | } | 742 | } |
| 734 | 743 | ||
| 744 | static int mmp_pdma_config(struct dma_chan *dchan, | ||
| 745 | struct dma_slave_config *cfg) | ||
| 746 | { | ||
| 747 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | ||
| 748 | |||
| 749 | memcpy(&chan->slave_config, cfg, sizeof(*cfg)); | ||
| 750 | return 0; | ||
| 751 | } | ||
| 752 | |||
| 735 | static int mmp_pdma_terminate_all(struct dma_chan *dchan) | 753 | static int mmp_pdma_terminate_all(struct dma_chan *dchan) |
| 736 | { | 754 | { |
| 737 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | 755 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); |
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 88750a34e859..cff1b143fff5 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
| @@ -448,6 +448,7 @@ struct dma_pl330_chan { | |||
| 448 | /* DMA-mapped view of the FIFO; may differ if an IOMMU is present */ | 448 | /* DMA-mapped view of the FIFO; may differ if an IOMMU is present */ |
| 449 | dma_addr_t fifo_dma; | 449 | dma_addr_t fifo_dma; |
| 450 | enum dma_data_direction dir; | 450 | enum dma_data_direction dir; |
| 451 | struct dma_slave_config slave_config; | ||
| 451 | 452 | ||
| 452 | /* for cyclic capability */ | 453 | /* for cyclic capability */ |
| 453 | bool cyclic; | 454 | bool cyclic; |
| @@ -542,6 +543,10 @@ struct _xfer_spec { | |||
| 542 | struct dma_pl330_desc *desc; | 543 | struct dma_pl330_desc *desc; |
| 543 | }; | 544 | }; |
| 544 | 545 | ||
| 546 | static int pl330_config_write(struct dma_chan *chan, | ||
| 547 | struct dma_slave_config *slave_config, | ||
| 548 | enum dma_transfer_direction direction); | ||
| 549 | |||
| 545 | static inline bool _queue_full(struct pl330_thread *thrd) | 550 | static inline bool _queue_full(struct pl330_thread *thrd) |
| 546 | { | 551 | { |
| 547 | return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL; | 552 | return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL; |
| @@ -2220,20 +2225,21 @@ static int fixup_burst_len(int max_burst_len, int quirks) | |||
| 2220 | return max_burst_len; | 2225 | return max_burst_len; |
| 2221 | } | 2226 | } |
| 2222 | 2227 | ||
| 2223 | static int pl330_config(struct dma_chan *chan, | 2228 | static int pl330_config_write(struct dma_chan *chan, |
| 2224 | struct dma_slave_config *slave_config) | 2229 | struct dma_slave_config *slave_config, |
| 2230 | enum dma_transfer_direction direction) | ||
| 2225 | { | 2231 | { |
| 2226 | struct dma_pl330_chan *pch = to_pchan(chan); | 2232 | struct dma_pl330_chan *pch = to_pchan(chan); |
| 2227 | 2233 | ||
| 2228 | pl330_unprep_slave_fifo(pch); | 2234 | pl330_unprep_slave_fifo(pch); |
| 2229 | if (slave_config->direction == DMA_MEM_TO_DEV) { | 2235 | if (direction == DMA_MEM_TO_DEV) { |
| 2230 | if (slave_config->dst_addr) | 2236 | if (slave_config->dst_addr) |
| 2231 | pch->fifo_addr = slave_config->dst_addr; | 2237 | pch->fifo_addr = slave_config->dst_addr; |
| 2232 | if (slave_config->dst_addr_width) | 2238 | if (slave_config->dst_addr_width) |
| 2233 | pch->burst_sz = __ffs(slave_config->dst_addr_width); | 2239 | pch->burst_sz = __ffs(slave_config->dst_addr_width); |
| 2234 | pch->burst_len = fixup_burst_len(slave_config->dst_maxburst, | 2240 | pch->burst_len = fixup_burst_len(slave_config->dst_maxburst, |
| 2235 | pch->dmac->quirks); | 2241 | pch->dmac->quirks); |
| 2236 | } else if (slave_config->direction == DMA_DEV_TO_MEM) { | 2242 | } else if (direction == DMA_DEV_TO_MEM) { |
| 2237 | if (slave_config->src_addr) | 2243 | if (slave_config->src_addr) |
| 2238 | pch->fifo_addr = slave_config->src_addr; | 2244 | pch->fifo_addr = slave_config->src_addr; |
| 2239 | if (slave_config->src_addr_width) | 2245 | if (slave_config->src_addr_width) |
| @@ -2245,6 +2251,16 @@ static int pl330_config(struct dma_chan *chan, | |||
| 2245 | return 0; | 2251 | return 0; |
| 2246 | } | 2252 | } |
| 2247 | 2253 | ||
| 2254 | static int pl330_config(struct dma_chan *chan, | ||
| 2255 | struct dma_slave_config *slave_config) | ||
| 2256 | { | ||
| 2257 | struct dma_pl330_chan *pch = to_pchan(chan); | ||
| 2258 | |||
| 2259 | memcpy(&pch->slave_config, slave_config, sizeof(*slave_config)); | ||
| 2260 | |||
| 2261 | return 0; | ||
| 2262 | } | ||
| 2263 | |||
| 2248 | static int pl330_terminate_all(struct dma_chan *chan) | 2264 | static int pl330_terminate_all(struct dma_chan *chan) |
| 2249 | { | 2265 | { |
| 2250 | struct dma_pl330_chan *pch = to_pchan(chan); | 2266 | struct dma_pl330_chan *pch = to_pchan(chan); |
| @@ -2661,6 +2677,8 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( | |||
| 2661 | return NULL; | 2677 | return NULL; |
| 2662 | } | 2678 | } |
| 2663 | 2679 | ||
| 2680 | pl330_config_write(chan, &pch->slave_config, direction); | ||
| 2681 | |||
| 2664 | if (!pl330_prep_slave_fifo(pch, direction)) | 2682 | if (!pl330_prep_slave_fifo(pch, direction)) |
| 2665 | return NULL; | 2683 | return NULL; |
| 2666 | 2684 | ||
| @@ -2815,6 +2833,8 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
| 2815 | if (unlikely(!pch || !sgl || !sg_len)) | 2833 | if (unlikely(!pch || !sgl || !sg_len)) |
| 2816 | return NULL; | 2834 | return NULL; |
| 2817 | 2835 | ||
| 2836 | pl330_config_write(chan, &pch->slave_config, direction); | ||
| 2837 | |||
| 2818 | if (!pl330_prep_slave_fifo(pch, direction)) | 2838 | if (!pl330_prep_slave_fifo(pch, direction)) |
| 2819 | return NULL; | 2839 | return NULL; |
| 2820 | 2840 | ||
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index c7a328f81485..b429642f3e7a 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c | |||
| @@ -189,7 +189,7 @@ static bool pxad_filter_fn(struct dma_chan *chan, void *param); | |||
| 189 | #include <linux/uaccess.h> | 189 | #include <linux/uaccess.h> |
| 190 | #include <linux/seq_file.h> | 190 | #include <linux/seq_file.h> |
| 191 | 191 | ||
| 192 | static int dbg_show_requester_chan(struct seq_file *s, void *p) | 192 | static int requester_chan_show(struct seq_file *s, void *p) |
| 193 | { | 193 | { |
| 194 | struct pxad_phy *phy = s->private; | 194 | struct pxad_phy *phy = s->private; |
| 195 | int i; | 195 | int i; |
| @@ -220,7 +220,7 @@ static int is_phys_valid(unsigned long addr) | |||
| 220 | #define PXA_DCSR_STR(flag) (dcsr & PXA_DCSR_##flag ? #flag" " : "") | 220 | #define PXA_DCSR_STR(flag) (dcsr & PXA_DCSR_##flag ? #flag" " : "") |
| 221 | #define PXA_DCMD_STR(flag) (dcmd & PXA_DCMD_##flag ? #flag" " : "") | 221 | #define PXA_DCMD_STR(flag) (dcmd & PXA_DCMD_##flag ? #flag" " : "") |
| 222 | 222 | ||
| 223 | static int dbg_show_descriptors(struct seq_file *s, void *p) | 223 | static int descriptors_show(struct seq_file *s, void *p) |
| 224 | { | 224 | { |
| 225 | struct pxad_phy *phy = s->private; | 225 | struct pxad_phy *phy = s->private; |
| 226 | int i, max_show = 20, burst, width; | 226 | int i, max_show = 20, burst, width; |
| @@ -263,7 +263,7 @@ static int dbg_show_descriptors(struct seq_file *s, void *p) | |||
| 263 | return 0; | 263 | return 0; |
| 264 | } | 264 | } |
| 265 | 265 | ||
| 266 | static int dbg_show_chan_state(struct seq_file *s, void *p) | 266 | static int chan_state_show(struct seq_file *s, void *p) |
| 267 | { | 267 | { |
| 268 | struct pxad_phy *phy = s->private; | 268 | struct pxad_phy *phy = s->private; |
| 269 | u32 dcsr, dcmd; | 269 | u32 dcsr, dcmd; |
| @@ -306,7 +306,7 @@ static int dbg_show_chan_state(struct seq_file *s, void *p) | |||
| 306 | return 0; | 306 | return 0; |
| 307 | } | 307 | } |
| 308 | 308 | ||
| 309 | static int dbg_show_state(struct seq_file *s, void *p) | 309 | static int state_show(struct seq_file *s, void *p) |
| 310 | { | 310 | { |
| 311 | struct pxad_device *pdev = s->private; | 311 | struct pxad_device *pdev = s->private; |
| 312 | 312 | ||
| @@ -317,22 +317,10 @@ static int dbg_show_state(struct seq_file *s, void *p) | |||
| 317 | return 0; | 317 | return 0; |
| 318 | } | 318 | } |
| 319 | 319 | ||
| 320 | #define DBGFS_FUNC_DECL(name) \ | 320 | DEFINE_SHOW_ATTRIBUTE(state); |
| 321 | static int dbg_open_##name(struct inode *inode, struct file *file) \ | 321 | DEFINE_SHOW_ATTRIBUTE(chan_state); |
| 322 | { \ | 322 | DEFINE_SHOW_ATTRIBUTE(descriptors); |
| 323 | return single_open(file, dbg_show_##name, inode->i_private); \ | 323 | DEFINE_SHOW_ATTRIBUTE(requester_chan); |
| 324 | } \ | ||
| 325 | static const struct file_operations dbg_fops_##name = { \ | ||
| 326 | .open = dbg_open_##name, \ | ||
| 327 | .llseek = seq_lseek, \ | ||
| 328 | .read = seq_read, \ | ||
| 329 | .release = single_release, \ | ||
| 330 | } | ||
| 331 | |||
| 332 | DBGFS_FUNC_DECL(state); | ||
| 333 | DBGFS_FUNC_DECL(chan_state); | ||
| 334 | DBGFS_FUNC_DECL(descriptors); | ||
| 335 | DBGFS_FUNC_DECL(requester_chan); | ||
| 336 | 324 | ||
| 337 | static struct dentry *pxad_dbg_alloc_chan(struct pxad_device *pdev, | 325 | static struct dentry *pxad_dbg_alloc_chan(struct pxad_device *pdev, |
| 338 | int ch, struct dentry *chandir) | 326 | int ch, struct dentry *chandir) |
| @@ -348,13 +336,13 @@ static struct dentry *pxad_dbg_alloc_chan(struct pxad_device *pdev, | |||
| 348 | 336 | ||
| 349 | if (chan) | 337 | if (chan) |
| 350 | chan_state = debugfs_create_file("state", 0400, chan, dt, | 338 | chan_state = debugfs_create_file("state", 0400, chan, dt, |
| 351 | &dbg_fops_chan_state); | 339 | &chan_state_fops); |
| 352 | if (chan_state) | 340 | if (chan_state) |
| 353 | chan_descr = debugfs_create_file("descriptors", 0400, chan, dt, | 341 | chan_descr = debugfs_create_file("descriptors", 0400, chan, dt, |
| 354 | &dbg_fops_descriptors); | 342 | &descriptors_fops); |
| 355 | if (chan_descr) | 343 | if (chan_descr) |
| 356 | chan_reqs = debugfs_create_file("requesters", 0400, chan, dt, | 344 | chan_reqs = debugfs_create_file("requesters", 0400, chan, dt, |
| 357 | &dbg_fops_requester_chan); | 345 | &requester_chan_fops); |
| 358 | if (!chan_reqs) | 346 | if (!chan_reqs) |
| 359 | goto err_state; | 347 | goto err_state; |
| 360 | 348 | ||
| @@ -375,7 +363,7 @@ static void pxad_init_debugfs(struct pxad_device *pdev) | |||
| 375 | goto err_root; | 363 | goto err_root; |
| 376 | 364 | ||
| 377 | pdev->dbgfs_state = debugfs_create_file("state", 0400, pdev->dbgfs_root, | 365 | pdev->dbgfs_state = debugfs_create_file("state", 0400, pdev->dbgfs_root, |
| 378 | pdev, &dbg_fops_state); | 366 | pdev, &state_fops); |
| 379 | if (!pdev->dbgfs_state) | 367 | if (!pdev->dbgfs_state) |
| 380 | goto err_state; | 368 | goto err_state; |
| 381 | 369 | ||
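The wholesale renames above (dbg_show_state() to state_show() and so on) are forced by DEFINE_SHOW_ATTRIBUTE()'s token pasting: for a given name it expects a name_show() seq_file handler and emits a name_fops file_operations. Its definition in <linux/seq_file.h> is essentially:

```c
#define DEFINE_SHOW_ATTRIBUTE(__name)					\
static int __name ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __name ## _show, inode->i_private);	\
}									\
									\
static const struct file_operations __name ## _fops = {		\
	.owner		= THIS_MODULE,					\
	.open		= __name ## _open,				\
	.read		= seq_read,					\
	.llseek		= seq_lseek,					\
	.release	= single_release,				\
}
```

Note that, unlike the open-coded DBGFS_FUNC_DECL() it replaces, the macro also sets .owner = THIS_MODULE, closing a small module-unload race for free.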
diff --git a/drivers/dma/qcom/hidma_dbg.c b/drivers/dma/qcom/hidma_dbg.c index 3bdcb8056a36..9523faf7acdc 100644 --- a/drivers/dma/qcom/hidma_dbg.c +++ b/drivers/dma/qcom/hidma_dbg.c | |||
| @@ -85,11 +85,11 @@ static void hidma_ll_devstats(struct seq_file *s, void *llhndl) | |||
| 85 | } | 85 | } |
| 86 | 86 | ||
| 87 | /* | 87 | /* |
| 88 | * hidma_chan_stats: display HIDMA channel statistics | 88 | * hidma_chan_show: display HIDMA channel statistics |
| 89 | * | 89 | * |
| 90 | * Display the statistics for the current HIDMA virtual channel device. | 90 | * Display the statistics for the current HIDMA virtual channel device. |
| 91 | */ | 91 | */ |
| 92 | static int hidma_chan_stats(struct seq_file *s, void *unused) | 92 | static int hidma_chan_show(struct seq_file *s, void *unused) |
| 93 | { | 93 | { |
| 94 | struct hidma_chan *mchan = s->private; | 94 | struct hidma_chan *mchan = s->private; |
| 95 | struct hidma_desc *mdesc; | 95 | struct hidma_desc *mdesc; |
| @@ -117,11 +117,11 @@ static int hidma_chan_stats(struct seq_file *s, void *unused) | |||
| 117 | } | 117 | } |
| 118 | 118 | ||
| 119 | /* | 119 | /* |
| 120 | * hidma_dma_info: display HIDMA device info | 120 | * hidma_dma_show: display HIDMA device info |
| 121 | * | 121 | * |
| 122 | * Display the info for the current HIDMA device. | 122 | * Display the info for the current HIDMA device. |
| 123 | */ | 123 | */ |
| 124 | static int hidma_dma_info(struct seq_file *s, void *unused) | 124 | static int hidma_dma_show(struct seq_file *s, void *unused) |
| 125 | { | 125 | { |
| 126 | struct hidma_dev *dmadev = s->private; | 126 | struct hidma_dev *dmadev = s->private; |
| 127 | resource_size_t sz; | 127 | resource_size_t sz; |
| @@ -138,29 +138,8 @@ static int hidma_dma_info(struct seq_file *s, void *unused) | |||
| 138 | return 0; | 138 | return 0; |
| 139 | } | 139 | } |
| 140 | 140 | ||
| 141 | static int hidma_chan_stats_open(struct inode *inode, struct file *file) | 141 | DEFINE_SHOW_ATTRIBUTE(hidma_chan); |
| 142 | { | 142 | DEFINE_SHOW_ATTRIBUTE(hidma_dma); |
| 143 | return single_open(file, hidma_chan_stats, inode->i_private); | ||
| 144 | } | ||
| 145 | |||
| 146 | static int hidma_dma_info_open(struct inode *inode, struct file *file) | ||
| 147 | { | ||
| 148 | return single_open(file, hidma_dma_info, inode->i_private); | ||
| 149 | } | ||
| 150 | |||
| 151 | static const struct file_operations hidma_chan_fops = { | ||
| 152 | .open = hidma_chan_stats_open, | ||
| 153 | .read = seq_read, | ||
| 154 | .llseek = seq_lseek, | ||
| 155 | .release = single_release, | ||
| 156 | }; | ||
| 157 | |||
| 158 | static const struct file_operations hidma_dma_fops = { | ||
| 159 | .open = hidma_dma_info_open, | ||
| 160 | .read = seq_read, | ||
| 161 | .llseek = seq_lseek, | ||
| 162 | .release = single_release, | ||
| 163 | }; | ||
| 164 | 143 | ||
| 165 | void hidma_debug_uninit(struct hidma_dev *dmadev) | 144 | void hidma_debug_uninit(struct hidma_dev *dmadev) |
| 166 | { | 145 | { |
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index b31d07c7d93c..784d5f1a473b 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c | |||
| @@ -17,7 +17,6 @@ | |||
| 17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
| 18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
| 19 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
| 20 | #include <linux/sa11x0-dma.h> | ||
| 21 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
| 22 | #include <linux/spinlock.h> | 21 | #include <linux/spinlock.h> |
| 23 | 22 | ||
| @@ -830,6 +829,14 @@ static const struct dma_slave_map sa11x0_dma_map[] = { | |||
| 830 | { "sa11x0-ssp", "rx", "Ser4SSPRc" }, | 829 | { "sa11x0-ssp", "rx", "Ser4SSPRc" }, |
| 831 | }; | 830 | }; |
| 832 | 831 | ||
| 832 | static bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param) | ||
| 833 | { | ||
| 834 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); | ||
| 835 | const char *p = param; | ||
| 836 | |||
| 837 | return !strcmp(c->name, p); | ||
| 838 | } | ||
| 839 | |||
| 833 | static int sa11x0_dma_init_dmadev(struct dma_device *dmadev, | 840 | static int sa11x0_dma_init_dmadev(struct dma_device *dmadev, |
| 834 | struct device *dev) | 841 | struct device *dev) |
| 835 | { | 842 | { |
| @@ -1087,18 +1094,6 @@ static struct platform_driver sa11x0_dma_driver = { | |||
| 1087 | .remove = sa11x0_dma_remove, | 1094 | .remove = sa11x0_dma_remove, |
| 1088 | }; | 1095 | }; |
| 1089 | 1096 | ||
| 1090 | bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param) | ||
| 1091 | { | ||
| 1092 | if (chan->device->dev->driver == &sa11x0_dma_driver.driver) { | ||
| 1093 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); | ||
| 1094 | const char *p = param; | ||
| 1095 | |||
| 1096 | return !strcmp(c->name, p); | ||
| 1097 | } | ||
| 1098 | return false; | ||
| 1099 | } | ||
| 1100 | EXPORT_SYMBOL(sa11x0_dma_filter_fn); | ||
| 1101 | |||
| 1102 | static int __init sa11x0_dma_init(void) | 1097 | static int __init sa11x0_dma_init(void) |
| 1103 | { | 1098 | { |
| 1104 | return platform_driver_register(&sa11x0_dma_driver); | 1099 | return platform_driver_register(&sa11x0_dma_driver); |
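With sa11x0_dma_filter_fn() made static, channel lookup goes exclusively through the sa11x0_dma_map[] table registered with the dmaengine core, and the exported-filter interface disappears along with <linux/sa11x0-dma.h>. A hypothetical client now only needs the generic request call; the sketch below assumes dev is the sa11x0-ssp device matched by the { "sa11x0-ssp", "rx", "Ser4SSPRc" } entry above:

```c
#include <linux/dmaengine.h>

static struct dma_chan *ssp_request_rx_chan(struct device *dev)
{
	/* Resolved via the slave map; no filter function is exposed. */
	return dma_request_chan(dev, "rx");
}
```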
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig index 6e0685f1a838..4d6b02b3b1f1 100644 --- a/drivers/dma/sh/Kconfig +++ b/drivers/dma/sh/Kconfig | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | # SPDX-License-Identifier: GPL-2.0 | ||
| 1 | # | 2 | # |
| 2 | # DMA engine configuration for sh | 3 | # DMA engine configuration for sh |
| 3 | # | 4 | # |
| @@ -12,7 +13,7 @@ config RENESAS_DMA | |||
| 12 | 13 | ||
| 13 | config SH_DMAE_BASE | 14 | config SH_DMAE_BASE |
| 14 | bool "Renesas SuperH DMA Engine support" | 15 | bool "Renesas SuperH DMA Engine support" |
| 15 | depends on SUPERH || ARCH_RENESAS || COMPILE_TEST | 16 | depends on SUPERH || COMPILE_TEST |
| 16 | depends on !SUPERH || SH_DMA | 17 | depends on !SUPERH || SH_DMA |
| 17 | depends on !SH_DMA_API | 18 | depends on !SH_DMA_API |
| 18 | default y | 19 | default y |
| @@ -30,15 +31,6 @@ config SH_DMAE | |||
| 30 | help | 31 | help |
| 31 | Enable support for the Renesas SuperH DMA controllers. | 32 | Enable support for the Renesas SuperH DMA controllers. |
| 32 | 33 | ||
| 33 | if SH_DMAE | ||
| 34 | |||
| 35 | config SH_DMAE_R8A73A4 | ||
| 36 | def_bool y | ||
| 37 | depends on ARCH_R8A73A4 | ||
| 38 | depends on OF | ||
| 39 | |||
| 40 | endif | ||
| 41 | |||
| 42 | config RCAR_DMAC | 34 | config RCAR_DMAC |
| 43 | tristate "Renesas R-Car Gen2 DMA Controller" | 35 | tristate "Renesas R-Car Gen2 DMA Controller" |
| 44 | depends on ARCH_RENESAS || COMPILE_TEST | 36 | depends on ARCH_RENESAS || COMPILE_TEST |
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile index 7d7c9491ade1..42110dd57a56 100644 --- a/drivers/dma/sh/Makefile +++ b/drivers/dma/sh/Makefile | |||
| @@ -10,7 +10,6 @@ obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o shdma-of.o | |||
| 10 | # | 10 | # |
| 11 | 11 | ||
| 12 | shdma-y := shdmac.o | 12 | shdma-y := shdmac.o |
| 13 | shdma-$(CONFIG_SH_DMAE_R8A73A4) += shdma-r8a73a4.o | ||
| 14 | shdma-objs := $(shdma-y) | 13 | shdma-objs := $(shdma-y) |
| 15 | obj-$(CONFIG_SH_DMAE) += shdma.o | 14 | obj-$(CONFIG_SH_DMAE) += shdma.o |
| 16 | 15 | ||
diff --git a/drivers/dma/sh/shdma-r8a73a4.c b/drivers/dma/sh/shdma-r8a73a4.c deleted file mode 100644 index ddc9a3578353..000000000000 --- a/drivers/dma/sh/shdma-r8a73a4.c +++ /dev/null | |||
| @@ -1,74 +0,0 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Renesas SuperH DMA Engine support for r8a73a4 (APE6) SoCs | ||
| 4 | * | ||
| 5 | * Copyright (C) 2013 Renesas Electronics, Inc. | ||
| 6 | */ | ||
| 7 | #include <linux/sh_dma.h> | ||
| 8 | |||
| 9 | #include "shdma-arm.h" | ||
| 10 | |||
| 11 | static const unsigned int dma_ts_shift[] = SH_DMAE_TS_SHIFT; | ||
| 12 | |||
| 13 | static const struct sh_dmae_slave_config dma_slaves[] = { | ||
| 14 | { | ||
| 15 | .chcr = CHCR_TX(XMIT_SZ_32BIT), | ||
| 16 | .mid_rid = 0xd1, /* MMC0 Tx */ | ||
| 17 | }, { | ||
| 18 | .chcr = CHCR_RX(XMIT_SZ_32BIT), | ||
| 19 | .mid_rid = 0xd2, /* MMC0 Rx */ | ||
| 20 | }, { | ||
| 21 | .chcr = CHCR_TX(XMIT_SZ_32BIT), | ||
| 22 | .mid_rid = 0xe1, /* MMC1 Tx */ | ||
| 23 | }, { | ||
| 24 | .chcr = CHCR_RX(XMIT_SZ_32BIT), | ||
| 25 | .mid_rid = 0xe2, /* MMC1 Rx */ | ||
| 26 | }, | ||
| 27 | }; | ||
| 28 | |||
| 29 | #define DMAE_CHANNEL(a, b) \ | ||
| 30 | { \ | ||
| 31 | .offset = (a) - 0x20, \ | ||
| 32 | .dmars = (a) - 0x20 + 0x40, \ | ||
| 33 | .chclr_bit = (b), \ | ||
| 34 | .chclr_offset = 0x80 - 0x20, \ | ||
| 35 | } | ||
| 36 | |||
| 37 | static const struct sh_dmae_channel dma_channels[] = { | ||
| 38 | DMAE_CHANNEL(0x8000, 0), | ||
| 39 | DMAE_CHANNEL(0x8080, 1), | ||
| 40 | DMAE_CHANNEL(0x8100, 2), | ||
| 41 | DMAE_CHANNEL(0x8180, 3), | ||
| 42 | DMAE_CHANNEL(0x8200, 4), | ||
| 43 | DMAE_CHANNEL(0x8280, 5), | ||
| 44 | DMAE_CHANNEL(0x8300, 6), | ||
| 45 | DMAE_CHANNEL(0x8380, 7), | ||
| 46 | DMAE_CHANNEL(0x8400, 8), | ||
| 47 | DMAE_CHANNEL(0x8480, 9), | ||
| 48 | DMAE_CHANNEL(0x8500, 10), | ||
| 49 | DMAE_CHANNEL(0x8580, 11), | ||
| 50 | DMAE_CHANNEL(0x8600, 12), | ||
| 51 | DMAE_CHANNEL(0x8680, 13), | ||
| 52 | DMAE_CHANNEL(0x8700, 14), | ||
| 53 | DMAE_CHANNEL(0x8780, 15), | ||
| 54 | DMAE_CHANNEL(0x8800, 16), | ||
| 55 | DMAE_CHANNEL(0x8880, 17), | ||
| 56 | DMAE_CHANNEL(0x8900, 18), | ||
| 57 | DMAE_CHANNEL(0x8980, 19), | ||
| 58 | }; | ||
| 59 | |||
| 60 | const struct sh_dmae_pdata r8a73a4_dma_pdata = { | ||
| 61 | .slave = dma_slaves, | ||
| 62 | .slave_num = ARRAY_SIZE(dma_slaves), | ||
| 63 | .channel = dma_channels, | ||
| 64 | .channel_num = ARRAY_SIZE(dma_channels), | ||
| 65 | .ts_low_shift = TS_LOW_SHIFT, | ||
| 66 | .ts_low_mask = TS_LOW_BIT << TS_LOW_SHIFT, | ||
| 67 | .ts_high_shift = TS_HI_SHIFT, | ||
| 68 | .ts_high_mask = TS_HI_BIT << TS_HI_SHIFT, | ||
| 69 | .ts_shift = dma_ts_shift, | ||
| 70 | .ts_shift_num = ARRAY_SIZE(dma_ts_shift), | ||
| 71 | .dmaor_init = DMAOR_DME, | ||
| 72 | .chclr_present = 1, | ||
| 73 | .chclr_bitwise = 1, | ||
| 74 | }; | ||
diff --git a/drivers/dma/sh/shdma.h b/drivers/dma/sh/shdma.h index bfb69909bd19..9c121a4b33ad 100644 --- a/drivers/dma/sh/shdma.h +++ b/drivers/dma/sh/shdma.h | |||
| @@ -58,11 +58,4 @@ struct sh_dmae_desc { | |||
| 58 | #define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\ | 58 | #define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\ |
| 59 | struct sh_dmae_device, shdma_dev.dma_dev) | 59 | struct sh_dmae_device, shdma_dev.dma_dev) |
| 60 | 60 | ||
| 61 | #ifdef CONFIG_SH_DMAE_R8A73A4 | ||
| 62 | extern const struct sh_dmae_pdata r8a73a4_dma_pdata; | ||
| 63 | #define r8a73a4_shdma_devid (&r8a73a4_dma_pdata) | ||
| 64 | #else | ||
| 65 | #define r8a73a4_shdma_devid NULL | ||
| 66 | #endif | ||
| 67 | |||
| 68 | #endif /* __DMA_SHDMA_H */ | 61 | #endif /* __DMA_SHDMA_H */ |
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c index 7971ea275387..5aafe548ca5f 100644 --- a/drivers/dma/sh/shdmac.c +++ b/drivers/dma/sh/shdmac.c | |||
| @@ -665,12 +665,6 @@ static const struct shdma_ops sh_dmae_shdma_ops = { | |||
| 665 | .get_partial = sh_dmae_get_partial, | 665 | .get_partial = sh_dmae_get_partial, |
| 666 | }; | 666 | }; |
| 667 | 667 | ||
| 668 | static const struct of_device_id sh_dmae_of_match[] = { | ||
| 669 | {.compatible = "renesas,shdma-r8a73a4", .data = r8a73a4_shdma_devid,}, | ||
| 670 | {} | ||
| 671 | }; | ||
| 672 | MODULE_DEVICE_TABLE(of, sh_dmae_of_match); | ||
| 673 | |||
| 674 | static int sh_dmae_probe(struct platform_device *pdev) | 668 | static int sh_dmae_probe(struct platform_device *pdev) |
| 675 | { | 669 | { |
| 676 | const enum dma_slave_buswidth widths = | 670 | const enum dma_slave_buswidth widths = |
| @@ -915,7 +909,6 @@ static struct platform_driver sh_dmae_driver = { | |||
| 915 | .driver = { | 909 | .driver = { |
| 916 | .pm = &sh_dmae_pm, | 910 | .pm = &sh_dmae_pm, |
| 917 | .name = SH_DMAE_DRV_NAME, | 911 | .name = SH_DMAE_DRV_NAME, |
| 918 | .of_match_table = sh_dmae_of_match, | ||
| 919 | }, | 912 | }, |
| 920 | .remove = sh_dmae_remove, | 913 | .remove = sh_dmae_remove, |
| 921 | }; | 914 | }; |
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index 38d4e4f07c66..e2f016700fcc 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c | |||
| @@ -36,6 +36,8 @@ | |||
| 36 | #define SPRD_DMA_GLB_CHN_EN_STS 0x1c | 36 | #define SPRD_DMA_GLB_CHN_EN_STS 0x1c |
| 37 | #define SPRD_DMA_GLB_DEBUG_STS 0x20 | 37 | #define SPRD_DMA_GLB_DEBUG_STS 0x20 |
| 38 | #define SPRD_DMA_GLB_ARB_SEL_STS 0x24 | 38 | #define SPRD_DMA_GLB_ARB_SEL_STS 0x24 |
| 39 | #define SPRD_DMA_GLB_2STAGE_GRP1 0x28 | ||
| 40 | #define SPRD_DMA_GLB_2STAGE_GRP2 0x2c | ||
| 39 | #define SPRD_DMA_GLB_REQ_UID(uid) (0x4 * ((uid) - 1)) | 41 | #define SPRD_DMA_GLB_REQ_UID(uid) (0x4 * ((uid) - 1)) |
| 40 | #define SPRD_DMA_GLB_REQ_UID_OFFSET 0x2000 | 42 | #define SPRD_DMA_GLB_REQ_UID_OFFSET 0x2000 |
| 41 | 43 | ||
| @@ -57,6 +59,18 @@ | |||
| 57 | #define SPRD_DMA_CHN_SRC_BLK_STEP 0x38 | 59 | #define SPRD_DMA_CHN_SRC_BLK_STEP 0x38 |
| 58 | #define SPRD_DMA_CHN_DES_BLK_STEP 0x3c | 60 | #define SPRD_DMA_CHN_DES_BLK_STEP 0x3c |
| 59 | 61 | ||
| 62 | /* SPRD_DMA_GLB_2STAGE_GRP register definition */ | ||
| 63 | #define SPRD_DMA_GLB_2STAGE_EN BIT(24) | ||
| 64 | #define SPRD_DMA_GLB_CHN_INT_MASK GENMASK(23, 20) | ||
| 65 | #define SPRD_DMA_GLB_LIST_DONE_TRG BIT(19) | ||
| 66 | #define SPRD_DMA_GLB_TRANS_DONE_TRG BIT(18) | ||
| 67 | #define SPRD_DMA_GLB_BLOCK_DONE_TRG BIT(17) | ||
| 68 | #define SPRD_DMA_GLB_FRAG_DONE_TRG BIT(16) | ||
| 69 | #define SPRD_DMA_GLB_TRG_OFFSET 16 | ||
| 70 | #define SPRD_DMA_GLB_DEST_CHN_MASK GENMASK(13, 8) | ||
| 71 | #define SPRD_DMA_GLB_DEST_CHN_OFFSET 8 | ||
| 72 | #define SPRD_DMA_GLB_SRC_CHN_MASK GENMASK(5, 0) | ||
| 73 | |||
| 60 | /* SPRD_DMA_CHN_INTC register definition */ | 74 | /* SPRD_DMA_CHN_INTC register definition */ |
| 61 | #define SPRD_DMA_INT_MASK GENMASK(4, 0) | 75 | #define SPRD_DMA_INT_MASK GENMASK(4, 0) |
| 62 | #define SPRD_DMA_INT_CLR_OFFSET 24 | 76 | #define SPRD_DMA_INT_CLR_OFFSET 24 |
| @@ -118,6 +132,10 @@ | |||
| 118 | #define SPRD_DMA_SRC_TRSF_STEP_OFFSET 0 | 132 | #define SPRD_DMA_SRC_TRSF_STEP_OFFSET 0 |
| 119 | #define SPRD_DMA_TRSF_STEP_MASK GENMASK(15, 0) | 133 | #define SPRD_DMA_TRSF_STEP_MASK GENMASK(15, 0) |
| 120 | 134 | ||
| 135 | /* define DMA channel mode & trigger mode mask */ | ||
| 136 | #define SPRD_DMA_CHN_MODE_MASK GENMASK(7, 0) | ||
| 137 | #define SPRD_DMA_TRG_MODE_MASK GENMASK(7, 0) | ||
| 138 | |||
| 121 | /* define the DMA transfer step type */ | 139 | /* define the DMA transfer step type */ |
| 122 | #define SPRD_DMA_NONE_STEP 0 | 140 | #define SPRD_DMA_NONE_STEP 0 |
| 123 | #define SPRD_DMA_BYTE_STEP 1 | 141 | #define SPRD_DMA_BYTE_STEP 1 |
| @@ -159,6 +177,7 @@ struct sprd_dma_chn_hw { | |||
| 159 | struct sprd_dma_desc { | 177 | struct sprd_dma_desc { |
| 160 | struct virt_dma_desc vd; | 178 | struct virt_dma_desc vd; |
| 161 | struct sprd_dma_chn_hw chn_hw; | 179 | struct sprd_dma_chn_hw chn_hw; |
| 180 | enum dma_transfer_direction dir; | ||
| 162 | }; | 181 | }; |
| 163 | 182 | ||
| 164 | /* dma channel description */ | 183 | /* dma channel description */ |
| @@ -169,6 +188,8 @@ struct sprd_dma_chn { | |||
| 169 | struct dma_slave_config slave_cfg; | 188 | struct dma_slave_config slave_cfg; |
| 170 | u32 chn_num; | 189 | u32 chn_num; |
| 171 | u32 dev_id; | 190 | u32 dev_id; |
| 191 | enum sprd_dma_chn_mode chn_mode; | ||
| 192 | enum sprd_dma_trg_mode trg_mode; | ||
| 172 | struct sprd_dma_desc *cur_desc; | 193 | struct sprd_dma_desc *cur_desc; |
| 173 | }; | 194 | }; |
| 174 | 195 | ||
| @@ -205,6 +226,16 @@ static inline struct sprd_dma_desc *to_sprd_dma_desc(struct virt_dma_desc *vd) | |||
| 205 | return container_of(vd, struct sprd_dma_desc, vd); | 226 | return container_of(vd, struct sprd_dma_desc, vd); |
| 206 | } | 227 | } |
| 207 | 228 | ||
| 229 | static void sprd_dma_glb_update(struct sprd_dma_dev *sdev, u32 reg, | ||
| 230 | u32 mask, u32 val) | ||
| 231 | { | ||
| 232 | u32 orig = readl(sdev->glb_base + reg); | ||
| 233 | u32 tmp; | ||
| 234 | |||
| 235 | tmp = (orig & ~mask) | val; | ||
| 236 | writel(tmp, sdev->glb_base + reg); | ||
| 237 | } | ||
| 238 | |||
| 208 | static void sprd_dma_chn_update(struct sprd_dma_chn *schan, u32 reg, | 239 | static void sprd_dma_chn_update(struct sprd_dma_chn *schan, u32 reg, |
| 209 | u32 mask, u32 val) | 240 | u32 mask, u32 val) |
| 210 | { | 241 | { |
| @@ -331,6 +362,17 @@ static void sprd_dma_stop_and_disable(struct sprd_dma_chn *schan) | |||
| 331 | sprd_dma_disable_chn(schan); | 362 | sprd_dma_disable_chn(schan); |
| 332 | } | 363 | } |
| 333 | 364 | ||
| 365 | static unsigned long sprd_dma_get_src_addr(struct sprd_dma_chn *schan) | ||
| 366 | { | ||
| 367 | unsigned long addr, addr_high; | ||
| 368 | |||
| 369 | addr = readl(schan->chn_base + SPRD_DMA_CHN_SRC_ADDR); | ||
| 370 | addr_high = readl(schan->chn_base + SPRD_DMA_CHN_WARP_PTR) & | ||
| 371 | SPRD_DMA_HIGH_ADDR_MASK; | ||
| 372 | |||
| 373 | return addr | (addr_high << SPRD_DMA_HIGH_ADDR_OFFSET); | ||
| 374 | } | ||
| 375 | |||
| 334 | static unsigned long sprd_dma_get_dst_addr(struct sprd_dma_chn *schan) | 376 | static unsigned long sprd_dma_get_dst_addr(struct sprd_dma_chn *schan) |
| 335 | { | 377 | { |
| 336 | unsigned long addr, addr_high; | 378 | unsigned long addr, addr_high; |
| @@ -377,6 +419,49 @@ static enum sprd_dma_req_mode sprd_dma_get_req_type(struct sprd_dma_chn *schan) | |||
| 377 | return (frag_reg >> SPRD_DMA_REQ_MODE_OFFSET) & SPRD_DMA_REQ_MODE_MASK; | 419 | return (frag_reg >> SPRD_DMA_REQ_MODE_OFFSET) & SPRD_DMA_REQ_MODE_MASK; |
| 378 | } | 420 | } |
| 379 | 421 | ||
| 422 | static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan) | ||
| 423 | { | ||
| 424 | struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan); | ||
| 425 | u32 val, chn = schan->chn_num + 1; | ||
| 426 | |||
| 427 | switch (schan->chn_mode) { | ||
| 428 | case SPRD_DMA_SRC_CHN0: | ||
| 429 | val = chn & SPRD_DMA_GLB_SRC_CHN_MASK; | ||
| 430 | val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET; | ||
| 431 | val |= SPRD_DMA_GLB_2STAGE_EN; | ||
| 432 | sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val); | ||
| 433 | break; | ||
| 434 | |||
| 435 | case SPRD_DMA_SRC_CHN1: | ||
| 436 | val = chn & SPRD_DMA_GLB_SRC_CHN_MASK; | ||
| 437 | val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET; | ||
| 438 | val |= SPRD_DMA_GLB_2STAGE_EN; | ||
| 439 | sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val); | ||
| 440 | break; | ||
| 441 | |||
| 442 | case SPRD_DMA_DST_CHN0: | ||
| 443 | val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) & | ||
| 444 | SPRD_DMA_GLB_DEST_CHN_MASK; | ||
| 445 | val |= SPRD_DMA_GLB_2STAGE_EN; | ||
| 446 | sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val); | ||
| 447 | break; | ||
| 448 | |||
| 449 | case SPRD_DMA_DST_CHN1: | ||
| 450 | val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) & | ||
| 451 | SPRD_DMA_GLB_DEST_CHN_MASK; | ||
| 452 | val |= SPRD_DMA_GLB_2STAGE_EN; | ||
| 453 | sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val); | ||
| 454 | break; | ||
| 455 | |||
| 456 | default: | ||
| 457 | dev_err(sdev->dma_dev.dev, "invalid channel mode setting %d\n", | ||
| 458 | schan->chn_mode); | ||
| 459 | return -EINVAL; | ||
| 460 | } | ||
| 461 | |||
| 462 | return 0; | ||
| 463 | } | ||
| 464 | |||
| 380 | static void sprd_dma_set_chn_config(struct sprd_dma_chn *schan, | 465 | static void sprd_dma_set_chn_config(struct sprd_dma_chn *schan, |
| 381 | struct sprd_dma_desc *sdesc) | 466 | struct sprd_dma_desc *sdesc) |
| 382 | { | 467 | { |
| @@ -411,6 +496,13 @@ static void sprd_dma_start(struct sprd_dma_chn *schan) | |||
| 411 | schan->cur_desc = to_sprd_dma_desc(vd); | 496 | schan->cur_desc = to_sprd_dma_desc(vd); |
| 412 | 497 | ||
| 413 | /* | 498 | /* |
| 499 | * Set the 2-stage configuration if the channel starts a 2-stage | ||
| 500 | * transfer. | ||

| 501 | */ | ||
| 502 | if (schan->chn_mode && sprd_dma_set_2stage_config(schan)) | ||
| 503 | return; | ||
| 504 | |||
| 505 | /* | ||
| 414 | * Copy the DMA configuration from DMA descriptor to this hardware | 506 | * Copy the DMA configuration from DMA descriptor to this hardware |
| 415 | * channel. | 507 | * channel. |
| 416 | */ | 508 | */ |
| @@ -427,6 +519,7 @@ static void sprd_dma_stop(struct sprd_dma_chn *schan) | |||
| 427 | sprd_dma_stop_and_disable(schan); | 519 | sprd_dma_stop_and_disable(schan); |
| 428 | sprd_dma_unset_uid(schan); | 520 | sprd_dma_unset_uid(schan); |
| 429 | sprd_dma_clear_int(schan); | 521 | sprd_dma_clear_int(schan); |
| 522 | schan->cur_desc = NULL; | ||
| 430 | } | 523 | } |
| 431 | 524 | ||
| 432 | static bool sprd_dma_check_trans_done(struct sprd_dma_desc *sdesc, | 525 | static bool sprd_dma_check_trans_done(struct sprd_dma_desc *sdesc, |
| @@ -450,7 +543,7 @@ static irqreturn_t dma_irq_handle(int irq, void *dev_id) | |||
| 450 | struct sprd_dma_desc *sdesc; | 543 | struct sprd_dma_desc *sdesc; |
| 451 | enum sprd_dma_req_mode req_type; | 544 | enum sprd_dma_req_mode req_type; |
| 452 | enum sprd_dma_int_type int_type; | 545 | enum sprd_dma_int_type int_type; |
| 453 | bool trans_done = false; | 546 | bool trans_done = false, cyclic = false; |
| 454 | u32 i; | 547 | u32 i; |
| 455 | 548 | ||
| 456 | while (irq_status) { | 549 | while (irq_status) { |
| @@ -465,13 +558,19 @@ static irqreturn_t dma_irq_handle(int irq, void *dev_id) | |||
| 465 | 558 | ||
| 466 | sdesc = schan->cur_desc; | 559 | sdesc = schan->cur_desc; |
| 467 | 560 | ||
| 468 | /* Check if the dma request descriptor is done. */ | 561 | /* cyclic mode schedule callback */ |
| 469 | trans_done = sprd_dma_check_trans_done(sdesc, int_type, | 562 | cyclic = schan->linklist.phy_addr ? true : false; |
| 470 | req_type); | 563 | if (cyclic == true) { |
| 471 | if (trans_done == true) { | 564 | vchan_cyclic_callback(&sdesc->vd); |
| 472 | vchan_cookie_complete(&sdesc->vd); | 565 | } else { |
| 473 | schan->cur_desc = NULL; | 566 | /* Check if the dma request descriptor is done. */ |
| 474 | sprd_dma_start(schan); | 567 | trans_done = sprd_dma_check_trans_done(sdesc, int_type, |
| 568 | req_type); | ||
| 569 | if (trans_done == true) { | ||
| 570 | vchan_cookie_complete(&sdesc->vd); | ||
| 571 | schan->cur_desc = NULL; | ||
| 572 | sprd_dma_start(schan); | ||
| 573 | } | ||
| 475 | } | 574 | } |
| 476 | spin_unlock(&schan->vc.lock); | 575 | spin_unlock(&schan->vc.lock); |
| 477 | } | 576 | } |
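The reworked handler above treats any channel with a link list programmed (schan->linklist.phy_addr non-zero) as cyclic: every interrupt schedules the descriptor's callback via vchan_cyclic_callback() and the descriptor is never completed, which is the standard dmaengine cyclic contract. A sketch of an audio-style consumer of this support, where chan, buf_phys, buf_len, period_len, period_done_cb and priv are all placeholders:

```c
struct dma_async_tx_descriptor *desc;

/* One callback per period_len bytes, wrapping around buf_len indefinitely. */
desc = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
				 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (!desc)
	return -ENOMEM;

desc->callback = period_done_cb;
desc->callback_param = priv;

dmaengine_submit(desc);
dma_async_issue_pending(chan);
```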
| @@ -534,7 +633,12 @@ static enum dma_status sprd_dma_tx_status(struct dma_chan *chan, | |||
| 534 | else | 633 | else |
| 535 | pos = 0; | 634 | pos = 0; |
| 536 | } else if (schan->cur_desc && schan->cur_desc->vd.tx.cookie == cookie) { | 635 | } else if (schan->cur_desc && schan->cur_desc->vd.tx.cookie == cookie) { |
| 537 | pos = sprd_dma_get_dst_addr(schan); | 636 | struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd); |
| 637 | |||
| 638 | if (sdesc->dir == DMA_DEV_TO_MEM) | ||
| 639 | pos = sprd_dma_get_dst_addr(schan); | ||
| 640 | else | ||
| 641 | pos = sprd_dma_get_src_addr(schan); | ||
| 538 | } else { | 642 | } else { |
| 539 | pos = 0; | 643 | pos = 0; |
| 540 | } | 644 | } |
| @@ -593,6 +697,7 @@ static int sprd_dma_fill_desc(struct dma_chan *chan, | |||
| 593 | { | 697 | { |
| 594 | struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan); | 698 | struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan); |
| 595 | struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); | 699 | struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); |
| 700 | enum sprd_dma_chn_mode chn_mode = schan->chn_mode; | ||
| 596 | u32 req_mode = (flags >> SPRD_DMA_REQ_SHIFT) & SPRD_DMA_REQ_MODE_MASK; | 701 | u32 req_mode = (flags >> SPRD_DMA_REQ_SHIFT) & SPRD_DMA_REQ_MODE_MASK; |
| 597 | u32 int_mode = flags & SPRD_DMA_INT_MASK; | 702 | u32 int_mode = flags & SPRD_DMA_INT_MASK; |
| 598 | int src_datawidth, dst_datawidth, src_step, dst_step; | 703 | int src_datawidth, dst_datawidth, src_step, dst_step; |
| @@ -604,7 +709,16 @@ static int sprd_dma_fill_desc(struct dma_chan *chan, | |||
| 604 | dev_err(sdev->dma_dev.dev, "invalid source step\n"); | 709 | dev_err(sdev->dma_dev.dev, "invalid source step\n"); |
| 605 | return src_step; | 710 | return src_step; |
| 606 | } | 711 | } |
| 607 | dst_step = SPRD_DMA_NONE_STEP; | 712 | |
| 713 | /* | ||
| 714 | * For a 2-stage transfer, the destination channel step cannot be 0, | ||
| 715 | * since the destination device is AON IRAM. | ||
| 716 | */ | ||
| 717 | if (chn_mode == SPRD_DMA_DST_CHN0 || | ||
| 718 | chn_mode == SPRD_DMA_DST_CHN1) | ||
| 719 | dst_step = src_step; | ||
| 720 | else | ||
| 721 | dst_step = SPRD_DMA_NONE_STEP; | ||
| 608 | } else { | 722 | } else { |
| 609 | dst_step = sprd_dma_get_step(slave_cfg->dst_addr_width); | 723 | dst_step = sprd_dma_get_step(slave_cfg->dst_addr_width); |
| 610 | if (dst_step < 0) { | 724 | if (dst_step < 0) { |
| @@ -674,13 +788,11 @@ static int sprd_dma_fill_desc(struct dma_chan *chan, | |||
| 674 | 788 | ||
| 675 | /* link-list configuration */ | 789 | /* link-list configuration */ |
| 676 | if (schan->linklist.phy_addr) { | 790 | if (schan->linklist.phy_addr) { |
| 677 | if (sg_index == sglen - 1) | ||
| 678 | hw->frg_len |= SPRD_DMA_LLIST_END; | ||
| 679 | |||
| 680 | hw->cfg |= SPRD_DMA_LINKLIST_EN; | 791 | hw->cfg |= SPRD_DMA_LINKLIST_EN; |
| 681 | 792 | ||
| 682 | /* link-list index */ | 793 | /* link-list index */ |
| 683 | temp = (sg_index + 1) % sglen; | 794 | temp = sglen ? (sg_index + 1) % sglen : 0; |
| 795 | |||
| 684 | /* Next link-list configuration's physical address offset */ | 796 | /* Next link-list configuration's physical address offset */ |
| 685 | temp = temp * sizeof(*hw) + SPRD_DMA_CHN_SRC_ADDR; | 797 | temp = temp * sizeof(*hw) + SPRD_DMA_CHN_SRC_ADDR; |
| 686 | /* | 798 | /* |
| @@ -804,6 +916,8 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
| 804 | if (!sdesc) | 916 | if (!sdesc) |
| 805 | return NULL; | 917 | return NULL; |
| 806 | 918 | ||
| 919 | sdesc->dir = dir; | ||
| 920 | |||
| 807 | for_each_sg(sgl, sg, sglen, i) { | 921 | for_each_sg(sgl, sg, sglen, i) { |
| 808 | len = sg_dma_len(sg); | 922 | len = sg_dma_len(sg); |
| 809 | 923 | ||
| @@ -831,6 +945,12 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
| 831 | } | 945 | } |
| 832 | } | 946 | } |
| 833 | 947 | ||
| 948 | /* Set channel mode and trigger mode for 2-stage transfer */ | ||
| 949 | schan->chn_mode = | ||
| 950 | (flags >> SPRD_DMA_CHN_MODE_SHIFT) & SPRD_DMA_CHN_MODE_MASK; | ||
| 951 | schan->trg_mode = | ||
| 952 | (flags >> SPRD_DMA_TRG_MODE_SHIFT) & SPRD_DMA_TRG_MODE_MASK; | ||
| 953 | |||
| 834 | ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len, | 954 | ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len, |
| 835 | dir, flags, slave_cfg); | 955 | dir, flags, slave_cfg); |
| 836 | if (ret) { | 956 | if (ret) { |
| @@ -847,9 +967,6 @@ static int sprd_dma_slave_config(struct dma_chan *chan, | |||
| 847 | struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); | 967 | struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); |
| 848 | struct dma_slave_config *slave_cfg = &schan->slave_cfg; | 968 | struct dma_slave_config *slave_cfg = &schan->slave_cfg; |
| 849 | 969 | ||
| 850 | if (!is_slave_direction(config->direction)) | ||
| 851 | return -EINVAL; | ||
| 852 | |||
| 853 | memcpy(slave_cfg, config, sizeof(*config)); | 970 | memcpy(slave_cfg, config, sizeof(*config)); |
| 854 | return 0; | 971 | return 0; |
| 855 | } | 972 | } |
| @@ -1109,4 +1226,5 @@ module_platform_driver(sprd_dma_driver); | |||
| 1109 | MODULE_LICENSE("GPL v2"); | 1226 | MODULE_LICENSE("GPL v2"); |
| 1110 | MODULE_DESCRIPTION("DMA driver for Spreadtrum"); | 1227 | MODULE_DESCRIPTION("DMA driver for Spreadtrum"); |
| 1111 | MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>"); | 1228 | MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>"); |
| 1229 | MODULE_AUTHOR("Eric Long <eric.long@spreadtrum.com>"); | ||
| 1112 | MODULE_ALIAS("platform:sprd-dma"); | 1230 | MODULE_ALIAS("platform:sprd-dma"); |
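The 2-stage machinery is driven entirely through the prep flags: sprd_dma_prep_slave_sg() extracts the channel and trigger modes with SPRD_DMA_CHN_MODE_SHIFT/SPRD_DMA_TRG_MODE_SHIFT, and sprd_dma_start() then programs the GRP1/GRP2 glue registers. A hedged sketch of the client side; the shift macros and enum spellings live in include/linux/dma/sprd-dma.h, which is not part of this diff, so treat the exact names as assumptions:

```c
unsigned long flags;
struct dma_async_tx_descriptor *desc;

/* This channel acts as source channel 0 of a 2-stage transfer... */
flags  = SPRD_DMA_SRC_CHN0 << SPRD_DMA_CHN_MODE_SHIFT;
/* ...and the paired destination channel fires when its transaction is done. */
flags |= SPRD_DMA_TRANS_DONE_TRG << SPRD_DMA_TRG_MODE_SHIFT;
/* Request and interrupt selection, unchanged by this series. */
flags |= SPRD_DMA_TRANS_REQ << SPRD_DMA_REQ_SHIFT;
flags |= SPRD_DMA_TRANS_INT;

desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV, flags);
```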
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 5e328bd10c27..907ae97a3ef4 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
| @@ -442,6 +442,7 @@ struct d40_base; | |||
| 442 | * @queue: Queued jobs. | 442 | * @queue: Queued jobs. |
| 443 | * @prepare_queue: Prepared jobs. | 443 | * @prepare_queue: Prepared jobs. |
| 444 | * @dma_cfg: The client configuration of this dma channel. | 444 | * @dma_cfg: The client configuration of this dma channel. |
| 445 | * @slave_config: DMA slave configuration. | ||
| 445 | * @configured: whether the dma_cfg configuration is valid | 446 | * @configured: whether the dma_cfg configuration is valid |
| 446 | * @base: Pointer to the device instance struct. | 447 | * @base: Pointer to the device instance struct. |
| 447 | * @src_def_cfg: Default cfg register setting for src. | 448 | * @src_def_cfg: Default cfg register setting for src. |
| @@ -468,6 +469,7 @@ struct d40_chan { | |||
| 468 | struct list_head queue; | 469 | struct list_head queue; |
| 469 | struct list_head prepare_queue; | 470 | struct list_head prepare_queue; |
| 470 | struct stedma40_chan_cfg dma_cfg; | 471 | struct stedma40_chan_cfg dma_cfg; |
| 472 | struct dma_slave_config slave_config; | ||
| 471 | bool configured; | 473 | bool configured; |
| 472 | struct d40_base *base; | 474 | struct d40_base *base; |
| 473 | /* Default register configurations */ | 475 | /* Default register configurations */ |
| @@ -625,6 +627,10 @@ static void __iomem *chan_base(struct d40_chan *chan) | |||
| 625 | #define chan_err(d40c, format, arg...) \ | 627 | #define chan_err(d40c, format, arg...) \ |
| 626 | d40_err(chan2dev(d40c), format, ## arg) | 628 | d40_err(chan2dev(d40c), format, ## arg) |
| 627 | 629 | ||
| 630 | static int d40_set_runtime_config_write(struct dma_chan *chan, | ||
| 631 | struct dma_slave_config *config, | ||
| 632 | enum dma_transfer_direction direction); | ||
| 633 | |||
| 628 | static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d, | 634 | static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d, |
| 629 | int lli_len) | 635 | int lli_len) |
| 630 | { | 636 | { |
| @@ -2216,6 +2222,8 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, | |||
| 2216 | return NULL; | 2222 | return NULL; |
| 2217 | } | 2223 | } |
| 2218 | 2224 | ||
| 2225 | d40_set_runtime_config_write(dchan, &chan->slave_config, direction); | ||
| 2226 | |||
| 2219 | spin_lock_irqsave(&chan->lock, flags); | 2227 | spin_lock_irqsave(&chan->lock, flags); |
| 2220 | 2228 | ||
| 2221 | desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags); | 2229 | desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags); |
| @@ -2634,11 +2642,22 @@ dma40_config_to_halfchannel(struct d40_chan *d40c, | |||
| 2634 | return 0; | 2642 | return 0; |
| 2635 | } | 2643 | } |
| 2636 | 2644 | ||
| 2637 | /* Runtime reconfiguration extension */ | ||
| 2638 | static int d40_set_runtime_config(struct dma_chan *chan, | 2645 | static int d40_set_runtime_config(struct dma_chan *chan, |
| 2639 | struct dma_slave_config *config) | 2646 | struct dma_slave_config *config) |
| 2640 | { | 2647 | { |
| 2641 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | 2648 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
| 2649 | |||
| 2650 | memcpy(&d40c->slave_config, config, sizeof(*config)); | ||
| 2651 | |||
| 2652 | return 0; | ||
| 2653 | } | ||
| 2654 | |||
| 2655 | /* Runtime reconfiguration extension */ | ||
| 2656 | static int d40_set_runtime_config_write(struct dma_chan *chan, | ||
| 2657 | struct dma_slave_config *config, | ||
| 2658 | enum dma_transfer_direction direction) | ||
| 2659 | { | ||
| 2660 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | ||
| 2642 | struct stedma40_chan_cfg *cfg = &d40c->dma_cfg; | 2661 | struct stedma40_chan_cfg *cfg = &d40c->dma_cfg; |
| 2643 | enum dma_slave_buswidth src_addr_width, dst_addr_width; | 2662 | enum dma_slave_buswidth src_addr_width, dst_addr_width; |
| 2644 | dma_addr_t config_addr; | 2663 | dma_addr_t config_addr; |
| @@ -2655,7 +2674,7 @@ static int d40_set_runtime_config(struct dma_chan *chan, | |||
| 2655 | dst_addr_width = config->dst_addr_width; | 2674 | dst_addr_width = config->dst_addr_width; |
| 2656 | dst_maxburst = config->dst_maxburst; | 2675 | dst_maxburst = config->dst_maxburst; |
| 2657 | 2676 | ||
| 2658 | if (config->direction == DMA_DEV_TO_MEM) { | 2677 | if (direction == DMA_DEV_TO_MEM) { |
| 2659 | config_addr = config->src_addr; | 2678 | config_addr = config->src_addr; |
| 2660 | 2679 | ||
| 2661 | if (cfg->dir != DMA_DEV_TO_MEM) | 2680 | if (cfg->dir != DMA_DEV_TO_MEM) |
| @@ -2671,7 +2690,7 @@ static int d40_set_runtime_config(struct dma_chan *chan, | |||
| 2671 | if (dst_maxburst == 0) | 2690 | if (dst_maxburst == 0) |
| 2672 | dst_maxburst = src_maxburst; | 2691 | dst_maxburst = src_maxburst; |
| 2673 | 2692 | ||
| 2674 | } else if (config->direction == DMA_MEM_TO_DEV) { | 2693 | } else if (direction == DMA_MEM_TO_DEV) { |
| 2675 | config_addr = config->dst_addr; | 2694 | config_addr = config->dst_addr; |
| 2676 | 2695 | ||
| 2677 | if (cfg->dir != DMA_MEM_TO_DEV) | 2696 | if (cfg->dir != DMA_MEM_TO_DEV) |
| @@ -2689,7 +2708,7 @@ static int d40_set_runtime_config(struct dma_chan *chan, | |||
| 2689 | } else { | 2708 | } else { |
| 2690 | dev_err(d40c->base->dev, | 2709 | dev_err(d40c->base->dev, |
| 2691 | "unrecognized channel direction %d\n", | 2710 | "unrecognized channel direction %d\n", |
| 2692 | config->direction); | 2711 | direction); |
| 2693 | return -EINVAL; | 2712 | return -EINVAL; |
| 2694 | } | 2713 | } |
| 2695 | 2714 | ||
| @@ -2746,12 +2765,12 @@ static int d40_set_runtime_config(struct dma_chan *chan, | |||
| 2746 | 2765 | ||
| 2747 | /* These settings will take precedence later */ | 2766 | /* These settings will take precedence later */ |
| 2748 | d40c->runtime_addr = config_addr; | 2767 | d40c->runtime_addr = config_addr; |
| 2749 | d40c->runtime_direction = config->direction; | 2768 | d40c->runtime_direction = direction; |
| 2750 | dev_dbg(d40c->base->dev, | 2769 | dev_dbg(d40c->base->dev, |
| 2751 | "configured channel %s for %s, data width %d/%d, " | 2770 | "configured channel %s for %s, data width %d/%d, " |
| 2752 | "maxburst %d/%d elements, LE, no flow control\n", | 2771 | "maxburst %d/%d elements, LE, no flow control\n", |
| 2753 | dma_chan_name(chan), | 2772 | dma_chan_name(chan), |
| 2754 | (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX", | 2773 | (direction == DMA_DEV_TO_MEM) ? "RX" : "TX", |
| 2755 | src_addr_width, dst_addr_width, | 2774 | src_addr_width, dst_addr_width, |
| 2756 | src_maxburst, dst_maxburst); | 2775 | src_maxburst, dst_maxburst); |
| 2757 | 2776 | ||
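As in pl330, d40_set_runtime_config() now only caches the configuration, and d40_set_runtime_config_write() consumes the direction handed to d40_prep_sg(). From the client side the deprecated dma_slave_config.direction field can simply be left unset; a sketch with fifo_phys standing in for the device FIFO address:

```c
struct dma_slave_config cfg = {
	.src_addr	= fifo_phys,	/* device-to-memory: source side */
	.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	.src_maxburst	= 8,
	/* .direction deliberately left unset; the prep call decides. */
};
struct dma_async_tx_descriptor *desc;

dmaengine_slave_config(chan, &cfg);
desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
			       DMA_PREP_INTERRUPT);
```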
diff --git a/drivers/dma/uniphier-mdmac.c b/drivers/dma/uniphier-mdmac.c new file mode 100644 index 000000000000..ec65a7430dc4 --- /dev/null +++ b/drivers/dma/uniphier-mdmac.c | |||
| @@ -0,0 +1,506 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // | ||
| 3 | // Copyright (C) 2018 Socionext Inc. | ||
| 4 | // Author: Masahiro Yamada <yamada.masahiro@socionext.com> | ||
| 5 | |||
| 6 | #include <linux/bits.h> | ||
| 7 | #include <linux/clk.h> | ||
| 8 | #include <linux/dma-mapping.h> | ||
| 9 | #include <linux/dmaengine.h> | ||
| 10 | #include <linux/interrupt.h> | ||
| 11 | #include <linux/iopoll.h> | ||
| 12 | #include <linux/list.h> | ||
| 13 | #include <linux/module.h> | ||
| 14 | #include <linux/of.h> | ||
| 15 | #include <linux/of_dma.h> | ||
| 16 | #include <linux/platform_device.h> | ||
| 17 | #include <linux/slab.h> | ||
| 18 | #include <linux/types.h> | ||
| 19 | |||
| 20 | #include "virt-dma.h" | ||
| 21 | |||
| 22 | /* registers common for all channels */ | ||
| 23 | #define UNIPHIER_MDMAC_CMD 0x000 /* issue DMA start/abort */ | ||
| 24 | #define UNIPHIER_MDMAC_CMD_ABORT BIT(31) /* 1: abort, 0: start */ | ||
| 25 | |||
| 26 | /* per-channel registers */ | ||
| 27 | #define UNIPHIER_MDMAC_CH_OFFSET 0x100 | ||
| 28 | #define UNIPHIER_MDMAC_CH_STRIDE 0x040 | ||
| 29 | |||
| 30 | #define UNIPHIER_MDMAC_CH_IRQ_STAT 0x010 /* current hw status (RO) */ | ||
| 31 | #define UNIPHIER_MDMAC_CH_IRQ_REQ 0x014 /* latched STAT (WOC) */ | ||
| 32 | #define UNIPHIER_MDMAC_CH_IRQ_EN 0x018 /* IRQ enable mask */ | ||
| 33 | #define UNIPHIER_MDMAC_CH_IRQ_DET 0x01c /* REQ & EN (RO) */ | ||
| 34 | #define UNIPHIER_MDMAC_CH_IRQ__ABORT BIT(13) | ||
| 35 | #define UNIPHIER_MDMAC_CH_IRQ__DONE BIT(1) | ||
| 36 | #define UNIPHIER_MDMAC_CH_SRC_MODE 0x020 /* mode of source */ | ||
| 37 | #define UNIPHIER_MDMAC_CH_DEST_MODE 0x024 /* mode of destination */ | ||
| 38 | #define UNIPHIER_MDMAC_CH_MODE__ADDR_INC (0 << 4) | ||
| 39 | #define UNIPHIER_MDMAC_CH_MODE__ADDR_DEC (1 << 4) | ||
| 40 | #define UNIPHIER_MDMAC_CH_MODE__ADDR_FIXED (2 << 4) | ||
| 41 | #define UNIPHIER_MDMAC_CH_SRC_ADDR 0x028 /* source address */ | ||
| 42 | #define UNIPHIER_MDMAC_CH_DEST_ADDR 0x02c /* destination address */ | ||
| 43 | #define UNIPHIER_MDMAC_CH_SIZE 0x030 /* transfer bytes */ | ||
| 44 | |||
| 45 | #define UNIPHIER_MDMAC_SLAVE_BUSWIDTHS \ | ||
| 46 | (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ | ||
| 47 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ | ||
| 48 | BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ | ||
| 49 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) | ||
| 50 | |||
| 51 | struct uniphier_mdmac_desc { | ||
| 52 | struct virt_dma_desc vd; | ||
| 53 | struct scatterlist *sgl; | ||
| 54 | unsigned int sg_len; | ||
| 55 | unsigned int sg_cur; | ||
| 56 | enum dma_transfer_direction dir; | ||
| 57 | }; | ||
| 58 | |||
| 59 | struct uniphier_mdmac_chan { | ||
| 60 | struct virt_dma_chan vc; | ||
| 61 | struct uniphier_mdmac_device *mdev; | ||
| 62 | struct uniphier_mdmac_desc *md; | ||
| 63 | void __iomem *reg_ch_base; | ||
| 64 | unsigned int chan_id; | ||
| 65 | }; | ||
| 66 | |||
| 67 | struct uniphier_mdmac_device { | ||
| 68 | struct dma_device ddev; | ||
| 69 | struct clk *clk; | ||
| 70 | void __iomem *reg_base; | ||
| 71 | struct uniphier_mdmac_chan channels[0]; | ||
| 72 | }; | ||
| 73 | |||
| 74 | static struct uniphier_mdmac_chan * | ||
| 75 | to_uniphier_mdmac_chan(struct virt_dma_chan *vc) | ||
| 76 | { | ||
| 77 | return container_of(vc, struct uniphier_mdmac_chan, vc); | ||
| 78 | } | ||
| 79 | |||
| 80 | static struct uniphier_mdmac_desc * | ||
| 81 | to_uniphier_mdmac_desc(struct virt_dma_desc *vd) | ||
| 82 | { | ||
| 83 | return container_of(vd, struct uniphier_mdmac_desc, vd); | ||
| 84 | } | ||
| 85 | |||
| 86 | /* mc->vc.lock must be held by caller */ | ||
| 87 | static struct uniphier_mdmac_desc * | ||
| 88 | uniphier_mdmac_next_desc(struct uniphier_mdmac_chan *mc) | ||
| 89 | { | ||
| 90 | struct virt_dma_desc *vd; | ||
| 91 | |||
| 92 | vd = vchan_next_desc(&mc->vc); | ||
| 93 | if (!vd) { | ||
| 94 | mc->md = NULL; | ||
| 95 | return NULL; | ||
| 96 | } | ||
| 97 | |||
| 98 | list_del(&vd->node); | ||
| 99 | |||
| 100 | mc->md = to_uniphier_mdmac_desc(vd); | ||
| 101 | |||
| 102 | return mc->md; | ||
| 103 | } | ||
| 104 | |||
| 105 | /* mc->vc.lock must be held by caller */ | ||
| 106 | static void uniphier_mdmac_handle(struct uniphier_mdmac_chan *mc, | ||
| 107 | struct uniphier_mdmac_desc *md) | ||
| 108 | { | ||
| 109 | struct uniphier_mdmac_device *mdev = mc->mdev; | ||
| 110 | struct scatterlist *sg; | ||
| 111 | u32 irq_flag = UNIPHIER_MDMAC_CH_IRQ__DONE; | ||
| 112 | u32 src_mode, src_addr, dest_mode, dest_addr, chunk_size; | ||
| 113 | |||
| 114 | sg = &md->sgl[md->sg_cur]; | ||
| 115 | |||
| 116 | if (md->dir == DMA_MEM_TO_DEV) { | ||
| 117 | src_mode = UNIPHIER_MDMAC_CH_MODE__ADDR_INC; | ||
| 118 | src_addr = sg_dma_address(sg); | ||
| 119 | dest_mode = UNIPHIER_MDMAC_CH_MODE__ADDR_FIXED; | ||
| 120 | dest_addr = 0; | ||
| 121 | } else { | ||
| 122 | src_mode = UNIPHIER_MDMAC_CH_MODE__ADDR_FIXED; | ||
| 123 | src_addr = 0; | ||
| 124 | dest_mode = UNIPHIER_MDMAC_CH_MODE__ADDR_INC; | ||
| 125 | dest_addr = sg_dma_address(sg); | ||
| 126 | } | ||
| 127 | |||
| 128 | chunk_size = sg_dma_len(sg); | ||
| 129 | |||
| 130 | writel(src_mode, mc->reg_ch_base + UNIPHIER_MDMAC_CH_SRC_MODE); | ||
| 131 | writel(dest_mode, mc->reg_ch_base + UNIPHIER_MDMAC_CH_DEST_MODE); | ||
| 132 | writel(src_addr, mc->reg_ch_base + UNIPHIER_MDMAC_CH_SRC_ADDR); | ||
| 133 | writel(dest_addr, mc->reg_ch_base + UNIPHIER_MDMAC_CH_DEST_ADDR); | ||
| 134 | writel(chunk_size, mc->reg_ch_base + UNIPHIER_MDMAC_CH_SIZE); | ||
| 135 | |||
| 136 | /* write 1 to clear */ | ||
| 137 | writel(irq_flag, mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_REQ); | ||
| 138 | |||
| 139 | writel(irq_flag, mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_EN); | ||
| 140 | |||
| 141 | writel(BIT(mc->chan_id), mdev->reg_base + UNIPHIER_MDMAC_CMD); | ||
| 142 | } | ||
| 143 | |||
| 144 | /* mc->vc.lock must be held by caller */ | ||
| 145 | static void uniphier_mdmac_start(struct uniphier_mdmac_chan *mc) | ||
| 146 | { | ||
| 147 | struct uniphier_mdmac_desc *md; | ||
| 148 | |||
| 149 | md = uniphier_mdmac_next_desc(mc); | ||
| 150 | if (md) | ||
| 151 | uniphier_mdmac_handle(mc, md); | ||
| 152 | } | ||
| 153 | |||
| 154 | /* mc->vc.lock must be held by caller */ | ||
| 155 | static int uniphier_mdmac_abort(struct uniphier_mdmac_chan *mc) | ||
| 156 | { | ||
| 157 | struct uniphier_mdmac_device *mdev = mc->mdev; | ||
| 158 | u32 irq_flag = UNIPHIER_MDMAC_CH_IRQ__ABORT; | ||
| 159 | u32 val; | ||
| 160 | |||
| 161 | /* write 1 to clear */ | ||
| 162 | writel(irq_flag, mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_REQ); | ||
| 163 | |||
| 164 | writel(UNIPHIER_MDMAC_CMD_ABORT | BIT(mc->chan_id), | ||
| 165 | mdev->reg_base + UNIPHIER_MDMAC_CMD); | ||
| 166 | |||
| 167 | /* | ||
| 168 | * Abort should be accepted soon. We poll the bit here instead of | ||
| 169 | * waiting for the interrupt. | ||
| 170 | */ | ||
| 171 | return readl_poll_timeout(mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_REQ, | ||
| 172 | val, val & irq_flag, 0, 20); | ||
| 173 | } | ||
| 174 | |||
| 175 | static irqreturn_t uniphier_mdmac_interrupt(int irq, void *dev_id) | ||
| 176 | { | ||
| 177 | struct uniphier_mdmac_chan *mc = dev_id; | ||
| 178 | struct uniphier_mdmac_desc *md; | ||
| 179 | irqreturn_t ret = IRQ_HANDLED; | ||
| 180 | u32 irq_stat; | ||
| 181 | |||
| 182 | spin_lock(&mc->vc.lock); | ||
| 183 | |||
| 184 | irq_stat = readl(mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_DET); | ||
| 185 | |||
| 186 | /* | ||
| 187 | * Some channels share a single interrupt line. If the IRQ status is 0, | ||
| 188 | * this is probably triggered by a different channel. | ||
| 189 | */ | ||
| 190 | if (!irq_stat) { | ||
| 191 | ret = IRQ_NONE; | ||
| 192 | goto out; | ||
| 193 | } | ||
| 194 | |||
| 195 | /* write 1 to clear */ | ||
| 196 | writel(irq_stat, mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_REQ); | ||
| 197 | |||
| 198 | /* | ||
| 199 | * UNIPHIER_MDMAC_CH_IRQ__DONE interrupt is asserted even when the DMA | ||
| 200 | * is aborted. To distinguish normal completion from an abort, | ||
| 201 | * check mc->md. If it is NULL, we are aborting. | ||
| 202 | */ | ||
| 203 | md = mc->md; | ||
| 204 | if (!md) | ||
| 205 | goto out; | ||
| 206 | |||
| 207 | md->sg_cur++; | ||
| 208 | |||
| 209 | if (md->sg_cur >= md->sg_len) { | ||
| 210 | vchan_cookie_complete(&md->vd); | ||
| 211 | md = uniphier_mdmac_next_desc(mc); | ||
| 212 | if (!md) | ||
| 213 | goto out; | ||
| 214 | } | ||
| 215 | |||
| 216 | uniphier_mdmac_handle(mc, md); | ||
| 217 | |||
| 218 | out: | ||
| 219 | spin_unlock(&mc->vc.lock); | ||
| 220 | |||
| 221 | return ret; | ||
| 222 | } | ||
| 223 | |||
| 224 | static void uniphier_mdmac_free_chan_resources(struct dma_chan *chan) | ||
| 225 | { | ||
| 226 | vchan_free_chan_resources(to_virt_chan(chan)); | ||
| 227 | } | ||
| 228 | |||
| 229 | static struct dma_async_tx_descriptor * | ||
| 230 | uniphier_mdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | ||
| 231 | unsigned int sg_len, | ||
| 232 | enum dma_transfer_direction direction, | ||
| 233 | unsigned long flags, void *context) | ||
| 234 | { | ||
| 235 | struct virt_dma_chan *vc = to_virt_chan(chan); | ||
| 236 | struct uniphier_mdmac_desc *md; | ||
| 237 | |||
| 238 | if (!is_slave_direction(direction)) | ||
| 239 | return NULL; | ||
| 240 | |||
| 241 | md = kzalloc(sizeof(*md), GFP_NOWAIT); | ||
| 242 | if (!md) | ||
| 243 | return NULL; | ||
| 244 | |||
| 245 | md->sgl = sgl; | ||
| 246 | md->sg_len = sg_len; | ||
| 247 | md->dir = direction; | ||
| 248 | |||
| 249 | return vchan_tx_prep(vc, &md->vd, flags); | ||
| 250 | } | ||
| 251 | |||
| 252 | static int uniphier_mdmac_terminate_all(struct dma_chan *chan) | ||
| 253 | { | ||
| 254 | struct virt_dma_chan *vc = to_virt_chan(chan); | ||
| 255 | struct uniphier_mdmac_chan *mc = to_uniphier_mdmac_chan(vc); | ||
| 256 | unsigned long flags; | ||
| 257 | int ret = 0; | ||
| 258 | LIST_HEAD(head); | ||
| 259 | |||
| 260 | spin_lock_irqsave(&vc->lock, flags); | ||
| 261 | |||
| 262 | if (mc->md) { | ||
| 263 | vchan_terminate_vdesc(&mc->md->vd); | ||
| 264 | mc->md = NULL; | ||
| 265 | ret = uniphier_mdmac_abort(mc); | ||
| 266 | } | ||
| 267 | vchan_get_all_descriptors(vc, &head); | ||
| 268 | |||
| 269 | spin_unlock_irqrestore(&vc->lock, flags); | ||
| 270 | |||
| 271 | vchan_dma_desc_free_list(vc, &head); | ||
| 272 | |||
| 273 | return ret; | ||
| 274 | } | ||
| 275 | |||
| 276 | static void uniphier_mdmac_synchronize(struct dma_chan *chan) | ||
| 277 | { | ||
| 278 | vchan_synchronize(to_virt_chan(chan)); | ||
| 279 | } | ||
| 280 | |||
| 281 | static enum dma_status uniphier_mdmac_tx_status(struct dma_chan *chan, | ||
| 282 | dma_cookie_t cookie, | ||
| 283 | struct dma_tx_state *txstate) | ||
| 284 | { | ||
| 285 | struct virt_dma_chan *vc; | ||
| 286 | struct virt_dma_desc *vd; | ||
| 287 | struct uniphier_mdmac_chan *mc; | ||
| 288 | struct uniphier_mdmac_desc *md = NULL; | ||
| 289 | enum dma_status stat; | ||
| 290 | unsigned long flags; | ||
| 291 | int i; | ||
| 292 | |||
| 293 | stat = dma_cookie_status(chan, cookie, txstate); | ||
| 294 | /* Return immediately if we do not need to compute the residue. */ | ||
| 295 | if (stat == DMA_COMPLETE || !txstate) | ||
| 296 | return stat; | ||
| 297 | |||
| 298 | vc = to_virt_chan(chan); | ||
| 299 | |||
| 300 | spin_lock_irqsave(&vc->lock, flags); | ||
| 301 | |||
| 302 | mc = to_uniphier_mdmac_chan(vc); | ||
| 303 | |||
| 304 | if (mc->md && mc->md->vd.tx.cookie == cookie) { | ||
| 305 | /* residue from the in-flight chunk */ | ||
| 306 | txstate->residue = readl(mc->reg_ch_base + | ||
| 307 | UNIPHIER_MDMAC_CH_SIZE); | ||
| 308 | md = mc->md; | ||
| 309 | } | ||
| 310 | |||
| 311 | if (!md) { | ||
| 312 | vd = vchan_find_desc(vc, cookie); | ||
| 313 | if (vd) | ||
| 314 | md = to_uniphier_mdmac_desc(vd); | ||
| 315 | } | ||
| 316 | |||
| 317 | if (md) { | ||
| 318 | /* residue from the queued chunks */ | ||
| 319 | for (i = md->sg_cur; i < md->sg_len; i++) | ||
| 320 | txstate->residue += sg_dma_len(&md->sgl[i]); | ||
| 321 | } | ||
| 322 | |||
| 323 | spin_unlock_irqrestore(&vc->lock, flags); | ||
| 324 | |||
| 325 | return stat; | ||
| 326 | } | ||
| 327 | |||
| 328 | static void uniphier_mdmac_issue_pending(struct dma_chan *chan) | ||
| 329 | { | ||
| 330 | struct virt_dma_chan *vc = to_virt_chan(chan); | ||
| 331 | struct uniphier_mdmac_chan *mc = to_uniphier_mdmac_chan(vc); | ||
| 332 | unsigned long flags; | ||
| 333 | |||
| 334 | spin_lock_irqsave(&vc->lock, flags); | ||
| 335 | |||
| 336 | if (vchan_issue_pending(vc) && !mc->md) | ||
| 337 | uniphier_mdmac_start(mc); | ||
| 338 | |||
| 339 | spin_unlock_irqrestore(&vc->lock, flags); | ||
| 340 | } | ||
| 341 | |||
| 342 | static void uniphier_mdmac_desc_free(struct virt_dma_desc *vd) | ||
| 343 | { | ||
| 344 | kfree(to_uniphier_mdmac_desc(vd)); | ||
| 345 | } | ||
| 346 | |||
| 347 | static int uniphier_mdmac_chan_init(struct platform_device *pdev, | ||
| 348 | struct uniphier_mdmac_device *mdev, | ||
| 349 | int chan_id) | ||
| 350 | { | ||
| 351 | struct device *dev = &pdev->dev; | ||
| 352 | struct uniphier_mdmac_chan *mc = &mdev->channels[chan_id]; | ||
| 353 | char *irq_name; | ||
| 354 | int irq, ret; | ||
| 355 | |||
| 356 | irq = platform_get_irq(pdev, chan_id); | ||
| 357 | if (irq < 0) { | ||
| 358 | dev_err(&pdev->dev, "failed to get IRQ number for ch%d\n", | ||
| 359 | chan_id); | ||
| 360 | return irq; | ||
| 361 | } | ||
| 362 | |||
| 363 | irq_name = devm_kasprintf(dev, GFP_KERNEL, "uniphier-mio-dmac-ch%d", | ||
| 364 | chan_id); | ||
| 365 | if (!irq_name) | ||
| 366 | return -ENOMEM; | ||
| 367 | |||
| 368 | ret = devm_request_irq(dev, irq, uniphier_mdmac_interrupt, | ||
| 369 | IRQF_SHARED, irq_name, mc); | ||
| 370 | if (ret) | ||
| 371 | return ret; | ||
| 372 | |||
| 373 | mc->mdev = mdev; | ||
| 374 | mc->reg_ch_base = mdev->reg_base + UNIPHIER_MDMAC_CH_OFFSET + | ||
| 375 | UNIPHIER_MDMAC_CH_STRIDE * chan_id; | ||
| 376 | mc->chan_id = chan_id; | ||
| 377 | mc->vc.desc_free = uniphier_mdmac_desc_free; | ||
| 378 | vchan_init(&mc->vc, &mdev->ddev); | ||
| 379 | |||
| 380 | return 0; | ||
| 381 | } | ||
| 382 | |||
| 383 | static int uniphier_mdmac_probe(struct platform_device *pdev) | ||
| 384 | { | ||
| 385 | struct device *dev = &pdev->dev; | ||
| 386 | struct uniphier_mdmac_device *mdev; | ||
| 387 | struct dma_device *ddev; | ||
| 388 | struct resource *res; | ||
| 389 | int nr_chans, ret, i; | ||
| 390 | |||
| 391 | nr_chans = platform_irq_count(pdev); | ||
| 392 | if (nr_chans < 0) | ||
| 393 | return nr_chans; | ||
| 394 | |||
| 395 | ret = dma_set_mask(dev, DMA_BIT_MASK(32)); | ||
| 396 | if (ret) | ||
| 397 | return ret; | ||
| 398 | |||
| 399 | mdev = devm_kzalloc(dev, struct_size(mdev, channels, nr_chans), | ||
| 400 | GFP_KERNEL); | ||
| 401 | if (!mdev) | ||
| 402 | return -ENOMEM; | ||
| 403 | |||
| 404 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 405 | mdev->reg_base = devm_ioremap_resource(dev, res); | ||
| 406 | if (IS_ERR(mdev->reg_base)) | ||
| 407 | return PTR_ERR(mdev->reg_base); | ||
| 408 | |||
| 409 | mdev->clk = devm_clk_get(dev, NULL); | ||
| 410 | if (IS_ERR(mdev->clk)) { | ||
| 411 | dev_err(dev, "failed to get clock\n"); | ||
| 412 | return PTR_ERR(mdev->clk); | ||
| 413 | } | ||
| 414 | |||
| 415 | ret = clk_prepare_enable(mdev->clk); | ||
| 416 | if (ret) | ||
| 417 | return ret; | ||
| 418 | |||
| 419 | ddev = &mdev->ddev; | ||
| 420 | ddev->dev = dev; | ||
| 421 | dma_cap_set(DMA_PRIVATE, ddev->cap_mask); | ||
| 422 | ddev->src_addr_widths = UNIPHIER_MDMAC_SLAVE_BUSWIDTHS; | ||
| 423 | ddev->dst_addr_widths = UNIPHIER_MDMAC_SLAVE_BUSWIDTHS; | ||
| 424 | ddev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); | ||
| 425 | ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; | ||
| 426 | ddev->device_free_chan_resources = uniphier_mdmac_free_chan_resources; | ||
| 427 | ddev->device_prep_slave_sg = uniphier_mdmac_prep_slave_sg; | ||
| 428 | ddev->device_terminate_all = uniphier_mdmac_terminate_all; | ||
| 429 | ddev->device_synchronize = uniphier_mdmac_synchronize; | ||
| 430 | ddev->device_tx_status = uniphier_mdmac_tx_status; | ||
| 431 | ddev->device_issue_pending = uniphier_mdmac_issue_pending; | ||
| 432 | INIT_LIST_HEAD(&ddev->channels); | ||
| 433 | |||
| 434 | for (i = 0; i < nr_chans; i++) { | ||
| 435 | ret = uniphier_mdmac_chan_init(pdev, mdev, i); | ||
| 436 | if (ret) | ||
| 437 | goto disable_clk; | ||
| 438 | } | ||
| 439 | |||
| 440 | ret = dma_async_device_register(ddev); | ||
| 441 | if (ret) | ||
| 442 | goto disable_clk; | ||
| 443 | |||
| 444 | ret = of_dma_controller_register(dev->of_node, of_dma_xlate_by_chan_id, | ||
| 445 | ddev); | ||
| 446 | if (ret) | ||
| 447 | goto unregister_dmac; | ||
| 448 | |||
| 449 | platform_set_drvdata(pdev, mdev); | ||
| 450 | |||
| 451 | return 0; | ||
| 452 | |||
| 453 | unregister_dmac: | ||
| 454 | dma_async_device_unregister(ddev); | ||
| 455 | disable_clk: | ||
| 456 | clk_disable_unprepare(mdev->clk); | ||
| 457 | |||
| 458 | return ret; | ||
| 459 | } | ||
| 460 | |||
| 461 | static int uniphier_mdmac_remove(struct platform_device *pdev) | ||
| 462 | { | ||
| 463 | struct uniphier_mdmac_device *mdev = platform_get_drvdata(pdev); | ||
| 464 | struct dma_chan *chan; | ||
| 465 | int ret; | ||
| 466 | |||
| 467 | /* | ||
| 468 | * Before reaching here, almost all descriptors have been freed by the | ||
| 469 | * ->device_free_chan_resources() hook. However, each channel might | ||
| 470 | * still be holding one descriptor that was in flight at that moment. | ||
| 471 | * Terminate it to make sure this hardware is no longer running. Then, | ||
| 472 | * free the channel resources once again to avoid a memory leak. | ||
| 473 | */ | ||
| 474 | list_for_each_entry(chan, &mdev->ddev.channels, device_node) { | ||
| 475 | ret = dmaengine_terminate_sync(chan); | ||
| 476 | if (ret) | ||
| 477 | return ret; | ||
| 478 | uniphier_mdmac_free_chan_resources(chan); | ||
| 479 | } | ||
| 480 | |||
| 481 | of_dma_controller_free(pdev->dev.of_node); | ||
| 482 | dma_async_device_unregister(&mdev->ddev); | ||
| 483 | clk_disable_unprepare(mdev->clk); | ||
| 484 | |||
| 485 | return 0; | ||
| 486 | } | ||
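For context, the dmaengine_terminate_sync() used in the loop above is, per include/linux/dmaengine.h, an asynchronous terminate followed by a wait for any in-flight completion callbacks, which is what makes the subsequent free safe. Roughly:

    /* Equivalent two-step form of dmaengine_terminate_sync() (sketch). */
    ret = dmaengine_terminate_async(chan);  /* stop issuing transfers */
    if (ret)
            return ret;
    dmaengine_synchronize(chan);            /* wait out running callbacks */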
| 487 | |||
| 488 | static const struct of_device_id uniphier_mdmac_match[] = { | ||
| 489 | { .compatible = "socionext,uniphier-mio-dmac" }, | ||
| 490 | { /* sentinel */ } | ||
| 491 | }; | ||
| 492 | MODULE_DEVICE_TABLE(of, uniphier_mdmac_match); | ||
| 493 | |||
| 494 | static struct platform_driver uniphier_mdmac_driver = { | ||
| 495 | .probe = uniphier_mdmac_probe, | ||
| 496 | .remove = uniphier_mdmac_remove, | ||
| 497 | .driver = { | ||
| 498 | .name = "uniphier-mio-dmac", | ||
| 499 | .of_match_table = uniphier_mdmac_match, | ||
| 500 | }, | ||
| 501 | }; | ||
| 502 | module_platform_driver(uniphier_mdmac_driver); | ||
| 503 | |||
| 504 | MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>"); | ||
| 505 | MODULE_DESCRIPTION("UniPhier MIO DMAC driver"); | ||
| 506 | MODULE_LICENSE("GPL v2"); | ||
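Because the probe registers the controller with of_dma_xlate_by_chan_id, consumers name a channel by index in their device-tree node and drive it through the generic dmaengine client API. A minimal, hypothetical client sketch (the "rx" channel name, fifo_phys, and the scatterlist are illustrative assumptions, not taken from this driver):

    struct dma_chan *chan;
    struct dma_async_tx_descriptor *desc;
    struct dma_slave_config cfg = {
            .src_addr = fifo_phys,  /* assumed device FIFO bus address */
            .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
    };

    chan = dma_request_chan(dev, "rx");     /* "rx" is an assumed name */
    if (IS_ERR(chan))
            return PTR_ERR(chan);

    dmaengine_slave_config(chan, &cfg);
    /* Direction travels with the prep call, not dma_slave_config. */
    desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
                                   DMA_PREP_INTERRUPT);
    if (desc) {
            dmaengine_submit(desc);
            dma_async_issue_pending(chan);
    }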
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index c12442312595..02880963092f 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c | |||
| @@ -190,6 +190,8 @@ | |||
| 190 | /* AXI CDMA Specific Masks */ | 190 | /* AXI CDMA Specific Masks */ |
| 191 | #define XILINX_CDMA_CR_SGMODE BIT(3) | 191 | #define XILINX_CDMA_CR_SGMODE BIT(3) |
| 192 | 192 | ||
| 193 | #define xilinx_prep_dma_addr_t(addr) \ | ||
| 194 | ((dma_addr_t)((u64)addr##_##msb << 32 | (addr))) | ||
| 193 | /** | 195 | /** |
| 194 | * struct xilinx_vdma_desc_hw - Hardware Descriptor | 196 | * struct xilinx_vdma_desc_hw - Hardware Descriptor |
| 195 | * @next_desc: Next Descriptor Pointer @0x00 | 197 | * @next_desc: Next Descriptor Pointer @0x00 |
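The new xilinx_prep_dma_addr_t() relies on preprocessor token pasting: given an argument ending in the token src_addr, the addr##_##msb fragment names the sibling field src_addr_msb, so the macro folds the paired MSB/LSB register fields into one dma_addr_t. A standalone illustration (values invented):

    /* Not driver code; shows the macro's expansion on paired fields. */
    struct example_hw {
            u32 src_addr;           /* low 32 bits */
            u32 src_addr_msb;       /* high 32 bits */
    } hw = { .src_addr = 0x9000, .src_addr_msb = 0x1 };

    /*
     * xilinx_prep_dma_addr_t(hw.src_addr) expands to
     *   (dma_addr_t)((u64)hw.src_addr_msb << 32 | (hw.src_addr))
     * i.e. 0x0000000100009000 on a 64-bit dma_addr_t.
     */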
| @@ -887,6 +889,24 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) | |||
| 887 | chan->id); | 889 | chan->id); |
| 888 | return -ENOMEM; | 890 | return -ENOMEM; |
| 889 | } | 891 | } |
| 892 | /* | ||
| 893 | * For cyclic DMA mode we need to program the tail Descriptor | ||
| 894 | * register with a value which is not a part of the BD chain | ||
| 895 | * so allocate a desc segment during channel allocation for | ||
| 896 | * programming the tail descriptor. | ||
| 897 | */ | ||
| 898 | chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev, | ||
| 899 | sizeof(*chan->cyclic_seg_v), | ||
| 900 | &chan->cyclic_seg_p, GFP_KERNEL); | ||
| 901 | if (!chan->cyclic_seg_v) { | ||
| 902 | dev_err(chan->dev, | ||
| 903 | "unable to allocate desc segment for cyclic DMA\n"); | ||
| 904 | dma_free_coherent(chan->dev, sizeof(*chan->seg_v) * | ||
| 905 | XILINX_DMA_NUM_DESCS, chan->seg_v, | ||
| 906 | chan->seg_p); | ||
| 907 | return -ENOMEM; | ||
| 908 | } | ||
| 909 | chan->cyclic_seg_v->phys = chan->cyclic_seg_p; | ||
| 890 | 910 | ||
| 891 | for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) { | 911 | for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) { |
| 892 | chan->seg_v[i].hw.next_desc = | 912 | chan->seg_v[i].hw.next_desc = |
| @@ -922,24 +942,6 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) | |||
| 922 | return -ENOMEM; | 942 | return -ENOMEM; |
| 923 | } | 943 | } |
| 924 | 944 | ||
| 925 | if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { | ||
| 926 | /* | ||
| 927 | * For cyclic DMA mode we need to program the tail Descriptor | ||
| 928 | * register with a value which is not a part of the BD chain | ||
| 929 | * so allocating a desc segment during channel allocation for | ||
| 930 | * programming tail descriptor. | ||
| 931 | */ | ||
| 932 | chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev, | ||
| 933 | sizeof(*chan->cyclic_seg_v), | ||
| 934 | &chan->cyclic_seg_p, GFP_KERNEL); | ||
| 935 | if (!chan->cyclic_seg_v) { | ||
| 936 | dev_err(chan->dev, | ||
| 937 | "unable to allocate desc segment for cyclic DMA\n"); | ||
| 938 | return -ENOMEM; | ||
| 939 | } | ||
| 940 | chan->cyclic_seg_v->phys = chan->cyclic_seg_p; | ||
| 941 | } | ||
| 942 | |||
| 943 | dma_cookie_init(dchan); | 945 | dma_cookie_init(dchan); |
| 944 | 946 | ||
| 945 | if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { | 947 | if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { |
| @@ -1245,8 +1247,10 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) | |||
| 1245 | 1247 | ||
| 1246 | hw = &segment->hw; | 1248 | hw = &segment->hw; |
| 1247 | 1249 | ||
| 1248 | xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr); | 1250 | xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, |
| 1249 | xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr); | 1251 | xilinx_prep_dma_addr_t(hw->src_addr)); |
| 1252 | xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, | ||
| 1253 | xilinx_prep_dma_addr_t(hw->dest_addr)); | ||
| 1250 | 1254 | ||
| 1251 | /* Start the transfer */ | 1255 | /* Start the transfer */ |
| 1252 | dma_ctrl_write(chan, XILINX_DMA_REG_BTT, | 1256 | dma_ctrl_write(chan, XILINX_DMA_REG_BTT, |
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index c74a88b65039..8db51750ce93 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c | |||
| @@ -163,7 +163,7 @@ struct zynqmp_dma_desc_ll { | |||
| 163 | u32 ctrl; | 163 | u32 ctrl; |
| 164 | u64 nxtdscraddr; | 164 | u64 nxtdscraddr; |
| 165 | u64 rsvd; | 165 | u64 rsvd; |
| 166 | }; __aligned(64) | 166 | }; |
| 167 | 167 | ||
| 168 | /** | 168 | /** |
| 169 | * struct zynqmp_dma_desc_sw - Per Transaction structure | 169 | * struct zynqmp_dma_desc_sw - Per Transaction structure |
| @@ -375,9 +375,10 @@ static dma_cookie_t zynqmp_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
| 375 | struct zynqmp_dma_chan *chan = to_chan(tx->chan); | 375 | struct zynqmp_dma_chan *chan = to_chan(tx->chan); |
| 376 | struct zynqmp_dma_desc_sw *desc, *new; | 376 | struct zynqmp_dma_desc_sw *desc, *new; |
| 377 | dma_cookie_t cookie; | 377 | dma_cookie_t cookie; |
| 378 | unsigned long irqflags; | ||
| 378 | 379 | ||
| 379 | new = tx_to_desc(tx); | 380 | new = tx_to_desc(tx); |
| 380 | spin_lock_bh(&chan->lock); | 381 | spin_lock_irqsave(&chan->lock, irqflags); |
| 381 | cookie = dma_cookie_assign(tx); | 382 | cookie = dma_cookie_assign(tx); |
| 382 | 383 | ||
| 383 | if (!list_empty(&chan->pending_list)) { | 384 | if (!list_empty(&chan->pending_list)) { |
| @@ -393,7 +394,7 @@ static dma_cookie_t zynqmp_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
| 393 | } | 394 | } |
| 394 | 395 | ||
| 395 | list_add_tail(&new->node, &chan->pending_list); | 396 | list_add_tail(&new->node, &chan->pending_list); |
| 396 | spin_unlock_bh(&chan->lock); | 397 | spin_unlock_irqrestore(&chan->lock, irqflags); |
| 397 | 398 | ||
| 398 | return cookie; | 399 | return cookie; |
| 399 | } | 400 | } |
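The zynqmp conversion in these hunks swaps every chan->lock site from the _bh variants to spin_lock_irqsave(): _bh only disables softirqs, so if the lock can also be taken from hard-interrupt context the _bh form is insufficient, while irqsave disables local interrupts and restores the saved state on unlock. Side by side, as a sketch:

    /* Before: protects only against softirq (tasklet) re-entry. */
    spin_lock_bh(&chan->lock);
    /* ... critical section ... */
    spin_unlock_bh(&chan->lock);

    /* After: also safe if the lock is taken in hardirq context. */
    unsigned long irqflags;

    spin_lock_irqsave(&chan->lock, irqflags);
    /* ... critical section ... */
    spin_unlock_irqrestore(&chan->lock, irqflags);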
| @@ -408,12 +409,13 @@ static struct zynqmp_dma_desc_sw * | |||
| 408 | zynqmp_dma_get_descriptor(struct zynqmp_dma_chan *chan) | 409 | zynqmp_dma_get_descriptor(struct zynqmp_dma_chan *chan) |
| 409 | { | 410 | { |
| 410 | struct zynqmp_dma_desc_sw *desc; | 411 | struct zynqmp_dma_desc_sw *desc; |
| 412 | unsigned long irqflags; | ||
| 411 | 413 | ||
| 412 | spin_lock_bh(&chan->lock); | 414 | spin_lock_irqsave(&chan->lock, irqflags); |
| 413 | desc = list_first_entry(&chan->free_list, | 415 | desc = list_first_entry(&chan->free_list, |
| 414 | struct zynqmp_dma_desc_sw, node); | 416 | struct zynqmp_dma_desc_sw, node); |
| 415 | list_del(&desc->node); | 417 | list_del(&desc->node); |
| 416 | spin_unlock_bh(&chan->lock); | 418 | spin_unlock_irqrestore(&chan->lock, irqflags); |
| 417 | 419 | ||
| 418 | INIT_LIST_HEAD(&desc->tx_list); | 420 | INIT_LIST_HEAD(&desc->tx_list); |
| 419 | /* Clear the src and dst descriptor memory */ | 421 | /* Clear the src and dst descriptor memory */ |
| @@ -643,10 +645,11 @@ static void zynqmp_dma_complete_descriptor(struct zynqmp_dma_chan *chan) | |||
| 643 | static void zynqmp_dma_issue_pending(struct dma_chan *dchan) | 645 | static void zynqmp_dma_issue_pending(struct dma_chan *dchan) |
| 644 | { | 646 | { |
| 645 | struct zynqmp_dma_chan *chan = to_chan(dchan); | 647 | struct zynqmp_dma_chan *chan = to_chan(dchan); |
| 648 | unsigned long irqflags; | ||
| 646 | 649 | ||
| 647 | spin_lock_bh(&chan->lock); | 650 | spin_lock_irqsave(&chan->lock, irqflags); |
| 648 | zynqmp_dma_start_transfer(chan); | 651 | zynqmp_dma_start_transfer(chan); |
| 649 | spin_unlock_bh(&chan->lock); | 652 | spin_unlock_irqrestore(&chan->lock, irqflags); |
| 650 | } | 653 | } |
| 651 | 654 | ||
| 652 | /** | 655 | /** |
| @@ -667,10 +670,11 @@ static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan) | |||
| 667 | static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan) | 670 | static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan) |
| 668 | { | 671 | { |
| 669 | struct zynqmp_dma_chan *chan = to_chan(dchan); | 672 | struct zynqmp_dma_chan *chan = to_chan(dchan); |
| 673 | unsigned long irqflags; | ||
| 670 | 674 | ||
| 671 | spin_lock_bh(&chan->lock); | 675 | spin_lock_irqsave(&chan->lock, irqflags); |
| 672 | zynqmp_dma_free_descriptors(chan); | 676 | zynqmp_dma_free_descriptors(chan); |
| 673 | spin_unlock_bh(&chan->lock); | 677 | spin_unlock_irqrestore(&chan->lock, irqflags); |
| 674 | dma_free_coherent(chan->dev, | 678 | dma_free_coherent(chan->dev, |
| 675 | (2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS), | 679 | (2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS), |
| 676 | chan->desc_pool_v, chan->desc_pool_p); | 680 | chan->desc_pool_v, chan->desc_pool_p); |
| @@ -743,8 +747,9 @@ static void zynqmp_dma_do_tasklet(unsigned long data) | |||
| 743 | { | 747 | { |
| 744 | struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data; | 748 | struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data; |
| 745 | u32 count; | 749 | u32 count; |
| 750 | unsigned long irqflags; | ||
| 746 | 751 | ||
| 747 | spin_lock(&chan->lock); | 752 | spin_lock_irqsave(&chan->lock, irqflags); |
| 748 | 753 | ||
| 749 | if (chan->err) { | 754 | if (chan->err) { |
| 750 | zynqmp_dma_reset(chan); | 755 | zynqmp_dma_reset(chan); |
| @@ -764,7 +769,7 @@ static void zynqmp_dma_do_tasklet(unsigned long data) | |||
| 764 | zynqmp_dma_start_transfer(chan); | 769 | zynqmp_dma_start_transfer(chan); |
| 765 | 770 | ||
| 766 | unlock: | 771 | unlock: |
| 767 | spin_unlock(&chan->lock); | 772 | spin_unlock_irqrestore(&chan->lock, irqflags); |
| 768 | } | 773 | } |
| 769 | 774 | ||
| 770 | /** | 775 | /** |
| @@ -776,11 +781,12 @@ unlock: | |||
| 776 | static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan) | 781 | static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan) |
| 777 | { | 782 | { |
| 778 | struct zynqmp_dma_chan *chan = to_chan(dchan); | 783 | struct zynqmp_dma_chan *chan = to_chan(dchan); |
| 784 | unsigned long irqflags; | ||
| 779 | 785 | ||
| 780 | spin_lock_bh(&chan->lock); | 786 | spin_lock_irqsave(&chan->lock, irqflags); |
| 781 | writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS); | 787 | writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS); |
| 782 | zynqmp_dma_free_descriptors(chan); | 788 | zynqmp_dma_free_descriptors(chan); |
| 783 | spin_unlock_bh(&chan->lock); | 789 | spin_unlock_irqrestore(&chan->lock, irqflags); |
| 784 | 790 | ||
| 785 | return 0; | 791 | return 0; |
| 786 | } | 792 | } |
| @@ -804,19 +810,20 @@ static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy( | |||
| 804 | void *desc = NULL, *prev = NULL; | 810 | void *desc = NULL, *prev = NULL; |
| 805 | size_t copy; | 811 | size_t copy; |
| 806 | u32 desc_cnt; | 812 | u32 desc_cnt; |
| 813 | unsigned long irqflags; | ||
| 807 | 814 | ||
| 808 | chan = to_chan(dchan); | 815 | chan = to_chan(dchan); |
| 809 | 816 | ||
| 810 | desc_cnt = DIV_ROUND_UP(len, ZYNQMP_DMA_MAX_TRANS_LEN); | 817 | desc_cnt = DIV_ROUND_UP(len, ZYNQMP_DMA_MAX_TRANS_LEN); |
| 811 | 818 | ||
| 812 | spin_lock_bh(&chan->lock); | 819 | spin_lock_irqsave(&chan->lock, irqflags); |
| 813 | if (desc_cnt > chan->desc_free_cnt) { | 820 | if (desc_cnt > chan->desc_free_cnt) { |
| 814 | spin_unlock_bh(&chan->lock); | 821 | spin_unlock_irqrestore(&chan->lock, irqflags); |
| 815 | dev_dbg(chan->dev, "chan %p descs are not available\n", chan); | 822 | dev_dbg(chan->dev, "chan %p descs are not available\n", chan); |
| 816 | return NULL; | 823 | return NULL; |
| 817 | } | 824 | } |
| 818 | chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt; | 825 | chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt; |
| 819 | spin_unlock_bh(&chan->lock); | 826 | spin_unlock_irqrestore(&chan->lock, irqflags); |
| 820 | 827 | ||
| 821 | do { | 828 | do { |
| 822 | /* Allocate and populate the descriptor */ | 829 | /* Allocate and populate the descriptor */ |
diff --git a/include/dt-bindings/dma/dw-dmac.h b/include/dt-bindings/dma/dw-dmac.h new file mode 100644 index 000000000000..d1ca705c95b3 --- /dev/null +++ b/include/dt-bindings/dma/dw-dmac.h | |||
| @@ -0,0 +1,14 @@ | |||
| 1 | /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ | ||
| 2 | |||
| 3 | #ifndef __DT_BINDINGS_DMA_DW_DMAC_H__ | ||
| 4 | #define __DT_BINDINGS_DMA_DW_DMAC_H__ | ||
| 5 | |||
| 6 | /* | ||
| 7 | * Protection Control bits provide protection against illegal transactions. | ||
| 8 | * The protection bits[0:2] are one-to-one mapped to AHB HPROT[3:1] signals. | ||
| 9 | */ | ||
| 10 | #define DW_DMAC_HPROT1_PRIVILEGED_MODE (1 << 0) /* Privileged Mode */ | ||
| 11 | #define DW_DMAC_HPROT2_BUFFERABLE (1 << 1) /* DMA is bufferable */ | ||
| 12 | #define DW_DMAC_HPROT3_CACHEABLE (1 << 2) /* DMA is cacheable */ | ||
| 13 | |||
| 14 | #endif /* __DT_BINDINGS_DMA_DW_DMAC_H__ */ | ||
diff --git a/include/linux/dma/sprd-dma.h b/include/linux/dma/sprd-dma.h index b42b80e52cc2..ab82df64682a 100644 --- a/include/linux/dma/sprd-dma.h +++ b/include/linux/dma/sprd-dma.h | |||
| @@ -3,9 +3,65 @@ | |||
| 3 | #ifndef _SPRD_DMA_H_ | 3 | #ifndef _SPRD_DMA_H_ |
| 4 | #define _SPRD_DMA_H_ | 4 | #define _SPRD_DMA_H_ |
| 5 | 5 | ||
| 6 | #define SPRD_DMA_REQ_SHIFT 16 | 6 | #define SPRD_DMA_REQ_SHIFT 8 |
| 7 | #define SPRD_DMA_FLAGS(req_mode, int_type) \ | 7 | #define SPRD_DMA_TRG_MODE_SHIFT 16 |
| 8 | ((req_mode) << SPRD_DMA_REQ_SHIFT | (int_type)) | 8 | #define SPRD_DMA_CHN_MODE_SHIFT 24 |
| 9 | #define SPRD_DMA_FLAGS(chn_mode, trg_mode, req_mode, int_type) \ | ||
| 10 | ((chn_mode) << SPRD_DMA_CHN_MODE_SHIFT | \ | ||
| 11 | (trg_mode) << SPRD_DMA_TRG_MODE_SHIFT | \ | ||
| 12 | (req_mode) << SPRD_DMA_REQ_SHIFT | (int_type)) | ||
| 13 | |||
| 14 | /* | ||
| 15 | * The Spreadtrum DMA controller supports channel 2-stage transfer, which | ||
| 16 | * means we can request two DMA channels: one source channel and one | ||
| 17 | * destination channel. Each channel is independent and has its own | ||
| 18 | * configuration. Once the source channel's transaction is done, it will | ||
| 19 | * trigger the destination channel's transaction automatically via a | ||
| 20 | * hardware signal. | ||
| 21 | * | ||
| 22 | * To support 2-stage transfer, we must configure the channel mode and | ||
| 23 | * trigger mode as defined below. | ||
| 24 | */ | ||
| 25 | |||
| 26 | /* | ||
| 27 | * enum sprd_dma_chn_mode: define the DMA channel mode for 2-stage transfer | ||
| 28 | * @SPRD_DMA_CHN_MODE_NONE: No channel mode setting, which means the channel | ||
| 29 | * doesn't support the 2-stage transfer. | ||
| 30 | * @SPRD_DMA_SRC_CHN0: Channel used as source channel 0. | ||
| 31 | * @SPRD_DMA_SRC_CHN1: Channel used as source channel 1. | ||
| 32 | * @SPRD_DMA_DST_CHN0: Channel used as destination channel 0. | ||
| 33 | * @SPRD_DMA_DST_CHN1: Channel used as destination channel 1. | ||
| 34 | * | ||
| 35 | * The DMA controller currently supports 2 groups of 2-stage transfer. | ||
| 36 | */ | ||
| 37 | enum sprd_dma_chn_mode { | ||
| 38 | SPRD_DMA_CHN_MODE_NONE, | ||
| 39 | SPRD_DMA_SRC_CHN0, | ||
| 40 | SPRD_DMA_SRC_CHN1, | ||
| 41 | SPRD_DMA_DST_CHN0, | ||
| 42 | SPRD_DMA_DST_CHN1, | ||
| 43 | }; | ||
| 44 | |||
| 45 | /* | ||
| 46 | * enum sprd_dma_trg_mode: define the DMA channel trigger mode for 2-stage | ||
| 47 | * transfer | ||
| 48 | * @SPRD_DMA_NO_TRG: No trigger setting. | ||
| 49 | * @SPRD_DMA_FRAG_DONE_TRG: Trigger the transaction of destination channel | ||
| 50 | * automatically once the source channel's fragment request is done. | ||
| 51 | * @SPRD_DMA_BLOCK_DONE_TRG: Trigger the transaction of destination channel | ||
| 52 | * automatically once the source channel's block request is done. | ||
| 53 | * @SPRD_DMA_TRANS_DONE_TRG: Trigger the transaction of destination channel | ||
| 54 | * automatically once the source channel's transfer request is done. | ||
| 55 | * @SPRD_DMA_LIST_DONE_TRG: Trigger the transaction of destination channel | ||
| 56 | * automatically once the source channel's link-list request is done. | ||
| 57 | */ | ||
| 58 | enum sprd_dma_trg_mode { | ||
| 59 | SPRD_DMA_NO_TRG, | ||
| 60 | SPRD_DMA_FRAG_DONE_TRG, | ||
| 61 | SPRD_DMA_BLOCK_DONE_TRG, | ||
| 62 | SPRD_DMA_TRANS_DONE_TRG, | ||
| 63 | SPRD_DMA_LIST_DONE_TRG, | ||
| 64 | }; | ||
| 9 | 65 | ||
| 10 | /* | 66 | /* |
| 11 | * enum sprd_dma_req_mode: define the DMA request mode | 67 | * enum sprd_dma_req_mode: define the DMA request mode |
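With the widened layout, SPRD_DMA_FLAGS() packs four selections into one word: channel mode in bits 31:24, trigger mode in 23:16, request mode in 15:8, and interrupt type in 7:0. A hypothetical source-channel setup for a fragment-triggered 2-stage transfer (the SPRD_DMA_FRAG_REQ and SPRD_DMA_FRAG_INT values come from the request-mode and interrupt-type enums later in this header):

    /* Illustrative packing only; check the enums in this header. */
    u32 flags = SPRD_DMA_FLAGS(SPRD_DMA_SRC_CHN0,       /* bits 31:24 */
                               SPRD_DMA_FRAG_DONE_TRG,  /* bits 23:16 */
                               SPRD_DMA_FRAG_REQ,       /* bits 15:8  */
                               SPRD_DMA_FRAG_INT);      /* bits 7:0   */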
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index 896cb71a382c..1a1d58ebffbf 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h | |||
| @@ -49,6 +49,7 @@ struct dw_dma_slave { | |||
| 49 | * @data_width: Maximum data width supported by hardware per AHB master | 49 | * @data_width: Maximum data width supported by hardware per AHB master |
| 50 | * (in bytes, power of 2) | 50 | * (in bytes, power of 2) |
| 51 | * @multi_block: Multi block transfers supported by hardware per channel. | 51 | * @multi_block: Multi block transfers supported by hardware per channel. |
| 52 | * @protctl: Protection control signals setting per channel. | ||
| 52 | */ | 53 | */ |
| 53 | struct dw_dma_platform_data { | 54 | struct dw_dma_platform_data { |
| 54 | unsigned int nr_channels; | 55 | unsigned int nr_channels; |
| @@ -65,6 +66,11 @@ struct dw_dma_platform_data { | |||
| 65 | unsigned char nr_masters; | 66 | unsigned char nr_masters; |
| 66 | unsigned char data_width[DW_DMA_MAX_NR_MASTERS]; | 67 | unsigned char data_width[DW_DMA_MAX_NR_MASTERS]; |
| 67 | unsigned char multi_block[DW_DMA_MAX_NR_CHANNELS]; | 68 | unsigned char multi_block[DW_DMA_MAX_NR_CHANNELS]; |
| 69 | #define CHAN_PROTCTL_PRIVILEGED BIT(0) | ||
| 70 | #define CHAN_PROTCTL_BUFFERABLE BIT(1) | ||
| 71 | #define CHAN_PROTCTL_CACHEABLE BIT(2) | ||
| 72 | #define CHAN_PROTCTL_MASK GENMASK(2, 0) | ||
| 73 | unsigned char protctl; | ||
| 68 | }; | 74 | }; |
| 69 | 75 | ||
| 70 | #endif /* _PLATFORM_DATA_DMA_DW_H */ | 76 | #endif /* _PLATFORM_DATA_DMA_DW_H */ |
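The new protctl byte feeds the per-channel protection-control field; the CHAN_PROTCTL_* bits here mirror the DW_DMAC_HPROT* DT macros added above. A hypothetical board file might fill it as follows (all values illustrative):

    static struct dw_dma_platform_data example_pdata = {
            .nr_channels = 8,       /* illustrative */
            .protctl = (CHAN_PROTCTL_PRIVILEGED | CHAN_PROTCTL_BUFFERABLE) &
                       CHAN_PROTCTL_MASK,
    };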
diff --git a/include/linux/sa11x0-dma.h b/include/linux/sa11x0-dma.h deleted file mode 100644 index 65839a58b8e5..000000000000 --- a/include/linux/sa11x0-dma.h +++ /dev/null | |||
| @@ -1,24 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * SA11x0 DMA Engine support | ||
| 3 | * | ||
| 4 | * Copyright (C) 2012 Russell King | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | */ | ||
| 10 | #ifndef __LINUX_SA11X0_DMA_H | ||
| 11 | #define __LINUX_SA11X0_DMA_H | ||
| 12 | |||
| 13 | struct dma_chan; | ||
| 14 | |||
| 15 | #if defined(CONFIG_DMA_SA11X0) || defined(CONFIG_DMA_SA11X0_MODULE) | ||
| 16 | bool sa11x0_dma_filter_fn(struct dma_chan *, void *); | ||
| 17 | #else | ||
| 18 | static inline bool sa11x0_dma_filter_fn(struct dma_chan *c, void *d) | ||
| 19 | { | ||
| 20 | return false; | ||
| 21 | } | ||
| 22 | #endif | ||
| 23 | |||
| 24 | #endif | ||
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h index d927647e6350..6dfd05ef5c2d 100644 --- a/include/linux/shdma-base.h +++ b/include/linux/shdma-base.h | |||
| @@ -1,4 +1,5 @@ | |||
| 1 | /* | 1 | /* SPDX-License-Identifier: GPL-2.0 |
| 2 | * | ||
| 2 | * Dmaengine driver base library for DMA controllers, found on SH-based SoCs | 3 | * Dmaengine driver base library for DMA controllers, found on SH-based SoCs |
| 3 | * | 4 | * |
| 4 | * extracted from shdma.c and headers | 5 | * extracted from shdma.c and headers |
| @@ -7,10 +8,6 @@ | |||
| 7 | * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> | 8 | * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> |
| 8 | * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. | 9 | * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. |
| 9 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | 10 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. |
| 10 | * | ||
| 11 | * This is free software; you can redistribute it and/or modify | ||
| 12 | * it under the terms of version 2 of the GNU General Public License as | ||
| 13 | * published by the Free Software Foundation. | ||
| 14 | */ | 11 | */ |
| 15 | 12 | ||
| 16 | #ifndef SHDMA_BASE_H | 13 | #ifndef SHDMA_BASE_H |
