diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-04-10 15:14:37 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-04-10 15:14:37 -0400 |
| commit | 1b02dcb9fa530614151d5713684a626a3c93e054 (patch) | |
| tree | ac1d6e059431b1647ec72ee08b881b1860e27af5 | |
| parent | 92589cbdda677a84ca5e485e1083c7d3bdcfc7b9 (diff) | |
| parent | 2ffb850e23a943acfbeda62599397c863cdd854c (diff) | |
Merge tag 'dmaengine-4.17-rc1' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine updates from Vinod Koul:
"This time we have couple of new drivers along with updates to drivers:
- new drivers for the DesignWare AXI DMAC and MediaTek High-Speed DMA
controllers
- stm32 dma and qcom bam dma driver updates
- norandom test option for dmatest"
* tag 'dmaengine-4.17-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (30 commits)
dmaengine: stm32-dma: properly mask irq bits
dmaengine: stm32-dma: fix max items per transfer
dmaengine: stm32-dma: fix DMA IRQ status handling
dmaengine: stm32-dma: Improve memory burst management
dmaengine: stm32-dma: fix typo and reported checkpatch warnings
dmaengine: stm32-dma: fix incomplete configuration in cyclic mode
dmaengine: stm32-dma: threshold manages with bitfield feature
dt-bindings: stm32-dma: introduce DMA features bitfield
dt-bindings: rcar-dmac: Document r8a77470 support
dmaengine: rcar-dmac: Fix too early/late system suspend/resume callbacks
dmaengine: dw-axi-dmac: fix spelling mistake: "catched" -> "caught"
dmaengine: edma: Check the memory allocation for the memcpy dma device
dmaengine: at_xdmac: fix rare residue corruption
dmaengine: mediatek: update MAINTAINERS entry with MediaTek DMA driver
dmaengine: mediatek: Add MediaTek High-Speed DMA controller for MT7622 and MT7623 SoC
dt-bindings: dmaengine: Add MediaTek High-Speed DMA controller bindings
dt-bindings: Document the Synopsys DW AXI DMA bindings
dmaengine: Introduce DW AXI DMAC driver
dmaengine: pl330: fix a race condition in case of threaded irqs
dmaengine: imx-sdma: fix pagefault when channel is disabled during interrupt
...
24 files changed, 2871 insertions, 74 deletions
diff --git a/Documentation/devicetree/bindings/dma/mtk-hsdma.txt b/Documentation/devicetree/bindings/dma/mtk-hsdma.txt new file mode 100644 index 000000000000..4bb317359dc6 --- /dev/null +++ b/Documentation/devicetree/bindings/dma/mtk-hsdma.txt | |||
| @@ -0,0 +1,33 @@ | |||
| 1 | MediaTek High-Speed DMA Controller | ||
| 2 | ================================== | ||
| 3 | |||
| 4 | This device follows the generic DMA bindings defined in dma/dma.txt. | ||
| 5 | |||
| 6 | Required properties: | ||
| 7 | |||
| 8 | - compatible: Must be one of | ||
| 9 | "mediatek,mt7622-hsdma": for MT7622 SoC | ||
| 10 | "mediatek,mt7623-hsdma": for MT7623 SoC | ||
| 11 | - reg: Should contain the register's base address and length. | ||
| 12 | - interrupts: Should contain a reference to the interrupt used by this | ||
| 13 | device. | ||
| 14 | - clocks: Should be the clock specifiers corresponding to the entry in | ||
| 15 | clock-names property. | ||
| 16 | - clock-names: Should contain "hsdma" entries. | ||
| 17 | - power-domains: Phandle to the power domain that the device is part of | ||
| 18 | - #dma-cells: The length of the DMA specifier, must be <1>. This one cell | ||
| 19 | in dmas property of a client device represents the channel | ||
| 20 | number. | ||
| 21 | Example: | ||
| 22 | |||
| 23 | hsdma: dma-controller@1b007000 { | ||
| 24 | compatible = "mediatek,mt7623-hsdma"; | ||
| 25 | reg = <0 0x1b007000 0 0x1000>; | ||
| 26 | interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_LOW>; | ||
| 27 | clocks = <ðsys CLK_ETHSYS_HSDMA>; | ||
| 28 | clock-names = "hsdma"; | ||
| 29 | power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>; | ||
| 30 | #dma-cells = <1>; | ||
| 31 | }; | ||
| 32 | |||
| 33 | DMA clients must use the format described in dma/dma.txt file. | ||
diff --git a/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt b/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt index 9cbf5d9df8fd..cf5b9e44432c 100644 --- a/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt +++ b/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt | |||
| @@ -15,6 +15,10 @@ Required properties: | |||
| 15 | the secure world. | 15 | the secure world. |
| 16 | - qcom,controlled-remotely : optional, indicates that the bam is controlled by | 16 | - qcom,controlled-remotely : optional, indicates that the bam is controlled by |
| 17 | remote processor i.e. execution environment. | 17 | remote processor i.e. execution environment. |
| 18 | - num-channels : optional, indicates supported number of DMA channels in a | ||
| 19 | remotely controlled bam. | ||
| 20 | - qcom,num-ees : optional, indicates supported number of Execution Environments | ||
| 21 | in a remotely controlled bam. | ||
| 18 | 22 | ||
| 19 | Example: | 23 | Example: |
| 20 | 24 | ||
diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt index 891db41e9420..aadfb236d53a 100644 --- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt +++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt | |||
| @@ -18,6 +18,7 @@ Required Properties: | |||
| 18 | Examples with soctypes are: | 18 | Examples with soctypes are: |
| 19 | - "renesas,dmac-r8a7743" (RZ/G1M) | 19 | - "renesas,dmac-r8a7743" (RZ/G1M) |
| 20 | - "renesas,dmac-r8a7745" (RZ/G1E) | 20 | - "renesas,dmac-r8a7745" (RZ/G1E) |
| 21 | - "renesas,dmac-r8a77470" (RZ/G1C) | ||
| 21 | - "renesas,dmac-r8a7790" (R-Car H2) | 22 | - "renesas,dmac-r8a7790" (R-Car H2) |
| 22 | - "renesas,dmac-r8a7791" (R-Car M2-W) | 23 | - "renesas,dmac-r8a7791" (R-Car M2-W) |
| 23 | - "renesas,dmac-r8a7792" (R-Car V2H) | 24 | - "renesas,dmac-r8a7792" (R-Car V2H) |
| @@ -26,6 +27,7 @@ Required Properties: | |||
| 26 | - "renesas,dmac-r8a7795" (R-Car H3) | 27 | - "renesas,dmac-r8a7795" (R-Car H3) |
| 27 | - "renesas,dmac-r8a7796" (R-Car M3-W) | 28 | - "renesas,dmac-r8a7796" (R-Car M3-W) |
| 28 | - "renesas,dmac-r8a77970" (R-Car V3M) | 29 | - "renesas,dmac-r8a77970" (R-Car V3M) |
| 30 | - "renesas,dmac-r8a77980" (R-Car V3H) | ||
| 29 | 31 | ||
| 30 | - reg: base address and length of the registers block for the DMAC | 32 | - reg: base address and length of the registers block for the DMAC |
| 31 | 33 | ||
diff --git a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt index f3d1f151ba80..9dc935e24e55 100644 --- a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt +++ b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt | |||
| @@ -11,6 +11,7 @@ Required Properties: | |||
| 11 | - "renesas,r8a7794-usb-dmac" (R-Car E2) | 11 | - "renesas,r8a7794-usb-dmac" (R-Car E2) |
| 12 | - "renesas,r8a7795-usb-dmac" (R-Car H3) | 12 | - "renesas,r8a7795-usb-dmac" (R-Car H3) |
| 13 | - "renesas,r8a7796-usb-dmac" (R-Car M3-W) | 13 | - "renesas,r8a7796-usb-dmac" (R-Car M3-W) |
| 14 | - "renesas,r8a77965-usb-dmac" (R-Car M3-N) | ||
| 14 | - reg: base address and length of the registers block for the DMAC | 15 | - reg: base address and length of the registers block for the DMAC |
| 15 | - interrupts: interrupt specifiers for the DMAC, one for each entry in | 16 | - interrupts: interrupt specifiers for the DMAC, one for each entry in |
| 16 | interrupt-names. | 17 | interrupt-names. |
diff --git a/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt b/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt new file mode 100644 index 000000000000..f237b7928283 --- /dev/null +++ b/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt | |||
| @@ -0,0 +1,41 @@ | |||
| 1 | Synopsys DesignWare AXI DMA Controller | ||
| 2 | |||
| 3 | Required properties: | ||
| 4 | - compatible: "snps,axi-dma-1.01a" | ||
| 5 | - reg: Address range of the DMAC registers. This should include | ||
| 6 | all of the per-channel registers. | ||
| 7 | - interrupts: Should contain the DMAC interrupt number. | ||
| 8 | - interrupt-parent: Should be the phandle for the interrupt controller | ||
| 9 | that services interrupts for this device. | ||
| 10 | - dma-channels: Number of channels supported by hardware. | ||
| 11 | - snps,dma-masters: Number of AXI masters supported by the hardware. | ||
| 12 | - snps,data-width: Maximum AXI data width supported by hardware. | ||
| 13 | (0 - 8bits, 1 - 16bits, 2 - 32bits, ..., 6 - 512bits) | ||
| 14 | - snps,priority: Priority of channel. Array size is equal to the number of | ||
| 15 | dma-channels. Priority value must be programmed within [0:dma-channels-1] | ||
| 16 | range. (0 - minimum priority) | ||
| 17 | - snps,block-size: Maximum block size supported by the controller channel. | ||
| 18 | Array size is equal to the number of dma-channels. | ||
| 19 | |||
| 20 | Optional properties: | ||
| 21 | - snps,axi-max-burst-len: Restrict master AXI burst length by value specified | ||
| 22 | in this property. If this property is missing the maximum AXI burst length | ||
| 23 | supported by DMAC is used. [1:256] | ||
| 24 | |||
| 25 | Example: | ||
| 26 | |||
| 27 | dmac: dma-controller@80000 { | ||
| 28 | compatible = "snps,axi-dma-1.01a"; | ||
| 29 | reg = <0x80000 0x400>; | ||
| 30 | clocks = <&core_clk>, <&cfgr_clk>; | ||
| 31 | clock-names = "core-clk", "cfgr-clk"; | ||
| 32 | interrupt-parent = <&intc>; | ||
| 33 | interrupts = <27>; | ||
| 34 | |||
| 35 | dma-channels = <4>; | ||
| 36 | snps,dma-masters = <2>; | ||
| 37 | snps,data-width = <3>; | ||
| 38 | snps,block-size = <4096 4096 4096 4096>; | ||
| 39 | snps,priority = <0 1 2 3>; | ||
| 40 | snps,axi-max-burst-len = <16>; | ||
| 41 | }; | ||
diff --git a/Documentation/devicetree/bindings/dma/stm32-dma.txt b/Documentation/devicetree/bindings/dma/stm32-dma.txt index 0b55718bf889..c5f519097204 100644 --- a/Documentation/devicetree/bindings/dma/stm32-dma.txt +++ b/Documentation/devicetree/bindings/dma/stm32-dma.txt | |||
| @@ -62,14 +62,14 @@ channel: a phandle to the DMA controller plus the following four integer cells: | |||
| 62 | 0x1: medium | 62 | 0x1: medium |
| 63 | 0x2: high | 63 | 0x2: high |
| 64 | 0x3: very high | 64 | 0x3: very high |
| 65 | 4. A 32bit mask specifying the DMA FIFO threshold configuration which are device | 65 | 4. A 32bit bitfield value specifying DMA features which are device dependent: |
| 66 | dependent: | 66 | -bit 0-1: DMA FIFO threshold selection |
| 67 | -bit 0-1: Fifo threshold | ||
| 68 | 0x0: 1/4 full FIFO | 67 | 0x0: 1/4 full FIFO |
| 69 | 0x1: 1/2 full FIFO | 68 | 0x1: 1/2 full FIFO |
| 70 | 0x2: 3/4 full FIFO | 69 | 0x2: 3/4 full FIFO |
| 71 | 0x3: full FIFO | 70 | 0x3: full FIFO |
| 72 | 71 | ||
| 72 | |||
| 73 | Example: | 73 | Example: |
| 74 | 74 | ||
| 75 | usart1: serial@40011000 { | 75 | usart1: serial@40011000 { |
diff --git a/MAINTAINERS b/MAINTAINERS index b7bd40b6b80d..7bb2e9595f14 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -8859,6 +8859,15 @@ M: Sean Wang <sean.wang@mediatek.com> | |||
| 8859 | S: Maintained | 8859 | S: Maintained |
| 8860 | F: drivers/media/rc/mtk-cir.c | 8860 | F: drivers/media/rc/mtk-cir.c |
| 8861 | 8861 | ||
| 8862 | MEDIATEK DMA DRIVER | ||
| 8863 | M: Sean Wang <sean.wang@mediatek.com> | ||
| 8864 | L: dmaengine@vger.kernel.org | ||
| 8865 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | ||
| 8866 | L: linux-mediatek@lists.infradead.org (moderated for non-subscribers) | ||
| 8867 | S: Maintained | ||
| 8868 | F: Documentation/devicetree/bindings/dma/mtk-* | ||
| 8869 | F: drivers/dma/mediatek/ | ||
| 8870 | |||
| 8862 | MEDIATEK PMIC LED DRIVER | 8871 | MEDIATEK PMIC LED DRIVER |
| 8863 | M: Sean Wang <sean.wang@mediatek.com> | 8872 | M: Sean Wang <sean.wang@mediatek.com> |
| 8864 | S: Maintained | 8873 | S: Maintained |
| @@ -13482,6 +13491,12 @@ S: Maintained | |||
| 13482 | F: drivers/gpio/gpio-dwapb.c | 13491 | F: drivers/gpio/gpio-dwapb.c |
| 13483 | F: Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt | 13492 | F: Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt |
| 13484 | 13493 | ||
| 13494 | SYNOPSYS DESIGNWARE AXI DMAC DRIVER | ||
| 13495 | M: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com> | ||
| 13496 | S: Maintained | ||
| 13497 | F: drivers/dma/dw-axi-dmac/ | ||
| 13498 | F: Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt | ||
| 13499 | |||
| 13485 | SYNOPSYS DESIGNWARE DMAC DRIVER | 13500 | SYNOPSYS DESIGNWARE DMAC DRIVER |
| 13486 | M: Viresh Kumar <vireshk@kernel.org> | 13501 | M: Viresh Kumar <vireshk@kernel.org> |
| 13487 | R: Andy Shevchenko <andriy.shevchenko@linux.intel.com> | 13502 | R: Andy Shevchenko <andriy.shevchenko@linux.intel.com> |
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 27df3e2837fd..6d61cd023633 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
| @@ -187,6 +187,16 @@ config DMA_SUN6I | |||
| 187 | help | 187 | help |
| 188 | Support for the DMA engine first found in Allwinner A31 SoCs. | 188 | Support for the DMA engine first found in Allwinner A31 SoCs. |
| 189 | 189 | ||
| 190 | config DW_AXI_DMAC | ||
| 191 | tristate "Synopsys DesignWare AXI DMA support" | ||
| 192 | depends on OF || COMPILE_TEST | ||
| 193 | select DMA_ENGINE | ||
| 194 | select DMA_VIRTUAL_CHANNELS | ||
| 195 | help | ||
| 196 | Enable support for Synopsys DesignWare AXI DMA controller. | ||
| 197 | NOTE: This driver wasn't tested on 64 bit platform because | ||
| 198 | of the lack of a 64 bit platform with Synopsys DW AXI DMAC. | ||
| 199 | |||
| 190 | config EP93XX_DMA | 200 | config EP93XX_DMA |
| 191 | bool "Cirrus Logic EP93xx DMA support" | 201 | bool "Cirrus Logic EP93xx DMA support" |
| 192 | depends on ARCH_EP93XX || COMPILE_TEST | 202 | depends on ARCH_EP93XX || COMPILE_TEST |
| @@ -633,6 +643,8 @@ config ZX_DMA | |||
| 633 | # driver files | 643 | # driver files |
| 634 | source "drivers/dma/bestcomm/Kconfig" | 644 | source "drivers/dma/bestcomm/Kconfig" |
| 635 | 645 | ||
| 646 | source "drivers/dma/mediatek/Kconfig" | ||
| 647 | |||
| 636 | source "drivers/dma/qcom/Kconfig" | 648 | source "drivers/dma/qcom/Kconfig" |
| 637 | 649 | ||
| 638 | source "drivers/dma/dw/Kconfig" | 650 | source "drivers/dma/dw/Kconfig" |
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index b9dca8a0e142..0f62a4d49aab 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile | |||
| @@ -28,6 +28,7 @@ obj-$(CONFIG_DMA_OMAP) += omap-dma.o | |||
| 28 | obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o | 28 | obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o |
| 29 | obj-$(CONFIG_DMA_SUN4I) += sun4i-dma.o | 29 | obj-$(CONFIG_DMA_SUN4I) += sun4i-dma.o |
| 30 | obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o | 30 | obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o |
| 31 | obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac/ | ||
| 31 | obj-$(CONFIG_DW_DMAC_CORE) += dw/ | 32 | obj-$(CONFIG_DW_DMAC_CORE) += dw/ |
| 32 | obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o | 33 | obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o |
| 33 | obj-$(CONFIG_FSL_DMA) += fsldma.o | 34 | obj-$(CONFIG_FSL_DMA) += fsldma.o |
| @@ -75,5 +76,6 @@ obj-$(CONFIG_XGENE_DMA) += xgene-dma.o | |||
| 75 | obj-$(CONFIG_ZX_DMA) += zx_dma.o | 76 | obj-$(CONFIG_ZX_DMA) += zx_dma.o |
| 76 | obj-$(CONFIG_ST_FDMA) += st_fdma.o | 77 | obj-$(CONFIG_ST_FDMA) += st_fdma.o |
| 77 | 78 | ||
| 79 | obj-y += mediatek/ | ||
| 78 | obj-y += qcom/ | 80 | obj-y += qcom/ |
| 79 | obj-y += xilinx/ | 81 | obj-y += xilinx/ |
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index c00e3923d7d8..94236ec9d410 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c | |||
| @@ -1471,10 +1471,10 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | |||
| 1471 | for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) { | 1471 | for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) { |
| 1472 | check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; | 1472 | check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; |
| 1473 | rmb(); | 1473 | rmb(); |
| 1474 | initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD); | ||
| 1475 | rmb(); | ||
| 1476 | cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC); | 1474 | cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC); |
| 1477 | rmb(); | 1475 | rmb(); |
| 1476 | initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD); | ||
| 1477 | rmb(); | ||
| 1478 | cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; | 1478 | cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; |
| 1479 | rmb(); | 1479 | rmb(); |
| 1480 | 1480 | ||
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 80cc2be6483c..b9339524d5bd 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c | |||
| @@ -74,7 +74,11 @@ MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), " | |||
| 74 | 74 | ||
| 75 | static bool noverify; | 75 | static bool noverify; |
| 76 | module_param(noverify, bool, S_IRUGO | S_IWUSR); | 76 | module_param(noverify, bool, S_IRUGO | S_IWUSR); |
| 77 | MODULE_PARM_DESC(noverify, "Disable random data setup and verification"); | 77 | MODULE_PARM_DESC(noverify, "Disable data verification (default: verify)"); |
| 78 | |||
| 79 | static bool norandom; | ||
| 80 | module_param(norandom, bool, 0644); | ||
| 81 | MODULE_PARM_DESC(norandom, "Disable random offset setup (default: random)"); | ||
| 78 | 82 | ||
| 79 | static bool verbose; | 83 | static bool verbose; |
| 80 | module_param(verbose, bool, S_IRUGO | S_IWUSR); | 84 | module_param(verbose, bool, S_IRUGO | S_IWUSR); |
| @@ -103,6 +107,7 @@ struct dmatest_params { | |||
| 103 | unsigned int pq_sources; | 107 | unsigned int pq_sources; |
| 104 | int timeout; | 108 | int timeout; |
| 105 | bool noverify; | 109 | bool noverify; |
| 110 | bool norandom; | ||
| 106 | }; | 111 | }; |
| 107 | 112 | ||
| 108 | /** | 113 | /** |
| @@ -575,7 +580,7 @@ static int dmatest_func(void *data) | |||
| 575 | break; | 580 | break; |
| 576 | } | 581 | } |
| 577 | 582 | ||
| 578 | if (params->noverify) | 583 | if (params->norandom) |
| 579 | len = params->buf_size; | 584 | len = params->buf_size; |
| 580 | else | 585 | else |
| 581 | len = dmatest_random() % params->buf_size + 1; | 586 | len = dmatest_random() % params->buf_size + 1; |
| @@ -586,17 +591,19 @@ static int dmatest_func(void *data) | |||
| 586 | 591 | ||
| 587 | total_len += len; | 592 | total_len += len; |
| 588 | 593 | ||
| 589 | if (params->noverify) { | 594 | if (params->norandom) { |
| 590 | src_off = 0; | 595 | src_off = 0; |
| 591 | dst_off = 0; | 596 | dst_off = 0; |
| 592 | } else { | 597 | } else { |
| 593 | start = ktime_get(); | ||
| 594 | src_off = dmatest_random() % (params->buf_size - len + 1); | 598 | src_off = dmatest_random() % (params->buf_size - len + 1); |
| 595 | dst_off = dmatest_random() % (params->buf_size - len + 1); | 599 | dst_off = dmatest_random() % (params->buf_size - len + 1); |
| 596 | 600 | ||
| 597 | src_off = (src_off >> align) << align; | 601 | src_off = (src_off >> align) << align; |
| 598 | dst_off = (dst_off >> align) << align; | 602 | dst_off = (dst_off >> align) << align; |
| 603 | } | ||
| 599 | 604 | ||
| 605 | if (!params->noverify) { | ||
| 606 | start = ktime_get(); | ||
| 600 | dmatest_init_srcs(thread->srcs, src_off, len, | 607 | dmatest_init_srcs(thread->srcs, src_off, len, |
| 601 | params->buf_size, is_memset); | 608 | params->buf_size, is_memset); |
| 602 | dmatest_init_dsts(thread->dsts, dst_off, len, | 609 | dmatest_init_dsts(thread->dsts, dst_off, len, |
| @@ -975,6 +982,7 @@ static void run_threaded_test(struct dmatest_info *info) | |||
| 975 | params->pq_sources = pq_sources; | 982 | params->pq_sources = pq_sources; |
| 976 | params->timeout = timeout; | 983 | params->timeout = timeout; |
| 977 | params->noverify = noverify; | 984 | params->noverify = noverify; |
| 985 | params->norandom = norandom; | ||
| 978 | 986 | ||
| 979 | request_channels(info, DMA_MEMCPY); | 987 | request_channels(info, DMA_MEMCPY); |
| 980 | request_channels(info, DMA_MEMSET); | 988 | request_channels(info, DMA_MEMSET); |
diff --git a/drivers/dma/dw-axi-dmac/Makefile b/drivers/dma/dw-axi-dmac/Makefile new file mode 100644 index 000000000000..4bfa462005be --- /dev/null +++ b/drivers/dma/dw-axi-dmac/Makefile | |||
| @@ -0,0 +1 @@ | |||
| obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac-platform.o | |||
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c new file mode 100644 index 000000000000..c4eb55e3011c --- /dev/null +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c | |||
| @@ -0,0 +1,1008 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // (C) 2017-2018 Synopsys, Inc. (www.synopsys.com) | ||
| 3 | |||
| 4 | /* | ||
| 5 | * Synopsys DesignWare AXI DMA Controller driver. | ||
| 6 | * | ||
| 7 | * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com> | ||
| 8 | */ | ||
| 9 | |||
| 10 | #include <linux/bitops.h> | ||
| 11 | #include <linux/delay.h> | ||
| 12 | #include <linux/device.h> | ||
| 13 | #include <linux/dmaengine.h> | ||
| 14 | #include <linux/dmapool.h> | ||
| 15 | #include <linux/err.h> | ||
| 16 | #include <linux/interrupt.h> | ||
| 17 | #include <linux/io.h> | ||
| 18 | #include <linux/kernel.h> | ||
| 19 | #include <linux/module.h> | ||
| 20 | #include <linux/of.h> | ||
| 21 | #include <linux/platform_device.h> | ||
| 22 | #include <linux/pm_runtime.h> | ||
| 23 | #include <linux/property.h> | ||
| 24 | #include <linux/types.h> | ||
| 25 | |||
| 26 | #include "dw-axi-dmac.h" | ||
| 27 | #include "../dmaengine.h" | ||
| 28 | #include "../virt-dma.h" | ||
| 29 | |||
| 30 | /* | ||
| 31 | * The set of bus widths supported by the DMA controller. DW AXI DMAC supports | ||
| 32 | * master data bus width up to 512 bits (for both AXI master interfaces), but | ||
| 33 | it depends on IP block configuration. | ||
| 34 | */ | ||
| 35 | #define AXI_DMA_BUSWIDTHS \ | ||
| 36 | (DMA_SLAVE_BUSWIDTH_1_BYTE | \ | ||
| 37 | DMA_SLAVE_BUSWIDTH_2_BYTES | \ | ||
| 38 | DMA_SLAVE_BUSWIDTH_4_BYTES | \ | ||
| 39 | DMA_SLAVE_BUSWIDTH_8_BYTES | \ | ||
| 40 | DMA_SLAVE_BUSWIDTH_16_BYTES | \ | ||
| 41 | DMA_SLAVE_BUSWIDTH_32_BYTES | \ | ||
| 42 | DMA_SLAVE_BUSWIDTH_64_BYTES) | ||
| 43 | |||
| 44 | static inline void | ||
| 45 | axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val) | ||
| 46 | { | ||
| 47 | iowrite32(val, chip->regs + reg); | ||
| 48 | } | ||
| 49 | |||
| 50 | static inline u32 axi_dma_ioread32(struct axi_dma_chip *chip, u32 reg) | ||
| 51 | { | ||
| 52 | return ioread32(chip->regs + reg); | ||
| 53 | } | ||
| 54 | |||
| 55 | static inline void | ||
| 56 | axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val) | ||
| 57 | { | ||
| 58 | iowrite32(val, chan->chan_regs + reg); | ||
| 59 | } | ||
| 60 | |||
| 61 | static inline u32 axi_chan_ioread32(struct axi_dma_chan *chan, u32 reg) | ||
| 62 | { | ||
| 63 | return ioread32(chan->chan_regs + reg); | ||
| 64 | } | ||
| 65 | |||
| 66 | static inline void | ||
| 67 | axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val) | ||
| 68 | { | ||
| 69 | /* | ||
| 70 | * We split one 64 bit write for two 32 bit write as some HW doesn't | ||
| 71 | * support 64 bit access. | ||
| 72 | */ | ||
| 73 | iowrite32(lower_32_bits(val), chan->chan_regs + reg); | ||
| 74 | iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4); | ||
| 75 | } | ||
| 76 | |||
| 77 | static inline void axi_dma_disable(struct axi_dma_chip *chip) | ||
| 78 | { | ||
| 79 | u32 val; | ||
| 80 | |||
| 81 | val = axi_dma_ioread32(chip, DMAC_CFG); | ||
| 82 | val &= ~DMAC_EN_MASK; | ||
| 83 | axi_dma_iowrite32(chip, DMAC_CFG, val); | ||
| 84 | } | ||
| 85 | |||
| 86 | static inline void axi_dma_enable(struct axi_dma_chip *chip) | ||
| 87 | { | ||
| 88 | u32 val; | ||
| 89 | |||
| 90 | val = axi_dma_ioread32(chip, DMAC_CFG); | ||
| 91 | val |= DMAC_EN_MASK; | ||
| 92 | axi_dma_iowrite32(chip, DMAC_CFG, val); | ||
| 93 | } | ||
| 94 | |||
| 95 | static inline void axi_dma_irq_disable(struct axi_dma_chip *chip) | ||
| 96 | { | ||
| 97 | u32 val; | ||
| 98 | |||
| 99 | val = axi_dma_ioread32(chip, DMAC_CFG); | ||
| 100 | val &= ~INT_EN_MASK; | ||
| 101 | axi_dma_iowrite32(chip, DMAC_CFG, val); | ||
| 102 | } | ||
| 103 | |||
| 104 | static inline void axi_dma_irq_enable(struct axi_dma_chip *chip) | ||
| 105 | { | ||
| 106 | u32 val; | ||
| 107 | |||
| 108 | val = axi_dma_ioread32(chip, DMAC_CFG); | ||
| 109 | val |= INT_EN_MASK; | ||
| 110 | axi_dma_iowrite32(chip, DMAC_CFG, val); | ||
| 111 | } | ||
| 112 | |||
| 113 | static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask) | ||
| 114 | { | ||
| 115 | u32 val; | ||
| 116 | |||
| 117 | if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) { | ||
| 118 | axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, DWAXIDMAC_IRQ_NONE); | ||
| 119 | } else { | ||
| 120 | val = axi_chan_ioread32(chan, CH_INTSTATUS_ENA); | ||
| 121 | val &= ~irq_mask; | ||
| 122 | axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, val); | ||
| 123 | } | ||
| 124 | } | ||
| 125 | |||
| 126 | static inline void axi_chan_irq_set(struct axi_dma_chan *chan, u32 irq_mask) | ||
| 127 | { | ||
| 128 | axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, irq_mask); | ||
| 129 | } | ||
| 130 | |||
| 131 | static inline void axi_chan_irq_sig_set(struct axi_dma_chan *chan, u32 irq_mask) | ||
| 132 | { | ||
| 133 | axi_chan_iowrite32(chan, CH_INTSIGNAL_ENA, irq_mask); | ||
| 134 | } | ||
| 135 | |||
| 136 | static inline void axi_chan_irq_clear(struct axi_dma_chan *chan, u32 irq_mask) | ||
| 137 | { | ||
| 138 | axi_chan_iowrite32(chan, CH_INTCLEAR, irq_mask); | ||
| 139 | } | ||
| 140 | |||
| 141 | static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan) | ||
| 142 | { | ||
| 143 | return axi_chan_ioread32(chan, CH_INTSTATUS); | ||
| 144 | } | ||
| 145 | |||
| 146 | static inline void axi_chan_disable(struct axi_dma_chan *chan) | ||
| 147 | { | ||
| 148 | u32 val; | ||
| 149 | |||
| 150 | val = axi_dma_ioread32(chan->chip, DMAC_CHEN); | ||
| 151 | val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT); | ||
| 152 | val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT; | ||
| 153 | axi_dma_iowrite32(chan->chip, DMAC_CHEN, val); | ||
| 154 | } | ||
| 155 | |||
| 156 | static inline void axi_chan_enable(struct axi_dma_chan *chan) | ||
| 157 | { | ||
| 158 | u32 val; | ||
| 159 | |||
| 160 | val = axi_dma_ioread32(chan->chip, DMAC_CHEN); | ||
| 161 | val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT | | ||
| 162 | BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT; | ||
| 163 | axi_dma_iowrite32(chan->chip, DMAC_CHEN, val); | ||
| 164 | } | ||
| 165 | |||
| 166 | static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan) | ||
| 167 | { | ||
| 168 | u32 val; | ||
| 169 | |||
| 170 | val = axi_dma_ioread32(chan->chip, DMAC_CHEN); | ||
| 171 | |||
| 172 | return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT)); | ||
| 173 | } | ||
| 174 | |||
| 175 | static void axi_dma_hw_init(struct axi_dma_chip *chip) | ||
| 176 | { | ||
| 177 | u32 i; | ||
| 178 | |||
| 179 | for (i = 0; i < chip->dw->hdata->nr_channels; i++) { | ||
| 180 | axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL); | ||
| 181 | axi_chan_disable(&chip->dw->chan[i]); | ||
| 182 | } | ||
| 183 | } | ||
| 184 | |||
| 185 | static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src, | ||
| 186 | dma_addr_t dst, size_t len) | ||
| 187 | { | ||
| 188 | u32 max_width = chan->chip->dw->hdata->m_data_width; | ||
| 189 | |||
| 190 | return __ffs(src | dst | len | BIT(max_width)); | ||
| 191 | } | ||
| 192 | |||
| 193 | static inline const char *axi_chan_name(struct axi_dma_chan *chan) | ||
| 194 | { | ||
| 195 | return dma_chan_name(&chan->vc.chan); | ||
| 196 | } | ||
| 197 | |||
| 198 | static struct axi_dma_desc *axi_desc_get(struct axi_dma_chan *chan) | ||
| 199 | { | ||
| 200 | struct dw_axi_dma *dw = chan->chip->dw; | ||
| 201 | struct axi_dma_desc *desc; | ||
| 202 | dma_addr_t phys; | ||
| 203 | |||
| 204 | desc = dma_pool_zalloc(dw->desc_pool, GFP_NOWAIT, &phys); | ||
| 205 | if (unlikely(!desc)) { | ||
| 206 | dev_err(chan2dev(chan), "%s: not enough descriptors available\n", | ||
| 207 | axi_chan_name(chan)); | ||
| 208 | return NULL; | ||
| 209 | } | ||
| 210 | |||
| 211 | atomic_inc(&chan->descs_allocated); | ||
| 212 | INIT_LIST_HEAD(&desc->xfer_list); | ||
| 213 | desc->vd.tx.phys = phys; | ||
| 214 | desc->chan = chan; | ||
| 215 | |||
| 216 | return desc; | ||
| 217 | } | ||
| 218 | |||
| 219 | static void axi_desc_put(struct axi_dma_desc *desc) | ||
| 220 | { | ||
| 221 | struct axi_dma_chan *chan = desc->chan; | ||
| 222 | struct dw_axi_dma *dw = chan->chip->dw; | ||
| 223 | struct axi_dma_desc *child, *_next; | ||
| 224 | unsigned int descs_put = 0; | ||
| 225 | |||
| 226 | list_for_each_entry_safe(child, _next, &desc->xfer_list, xfer_list) { | ||
| 227 | list_del(&child->xfer_list); | ||
| 228 | dma_pool_free(dw->desc_pool, child, child->vd.tx.phys); | ||
| 229 | descs_put++; | ||
| 230 | } | ||
| 231 | |||
| 232 | dma_pool_free(dw->desc_pool, desc, desc->vd.tx.phys); | ||
| 233 | descs_put++; | ||
| 234 | |||
| 235 | atomic_sub(descs_put, &chan->descs_allocated); | ||
| 236 | dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n", | ||
| 237 | axi_chan_name(chan), descs_put, | ||
| 238 | atomic_read(&chan->descs_allocated)); | ||
| 239 | } | ||
| 240 | |||
| 241 | static void vchan_desc_put(struct virt_dma_desc *vdesc) | ||
| 242 | { | ||
| 243 | axi_desc_put(vd_to_axi_desc(vdesc)); | ||
| 244 | } | ||
| 245 | |||
| 246 | static enum dma_status | ||
| 247 | dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, | ||
| 248 | struct dma_tx_state *txstate) | ||
| 249 | { | ||
| 250 | struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); | ||
| 251 | enum dma_status ret; | ||
| 252 | |||
| 253 | ret = dma_cookie_status(dchan, cookie, txstate); | ||
| 254 | |||
| 255 | if (chan->is_paused && ret == DMA_IN_PROGRESS) | ||
| 256 | ret = DMA_PAUSED; | ||
| 257 | |||
| 258 | return ret; | ||
| 259 | } | ||
| 260 | |||
| 261 | static void write_desc_llp(struct axi_dma_desc *desc, dma_addr_t adr) | ||
| 262 | { | ||
| 263 | desc->lli.llp = cpu_to_le64(adr); | ||
| 264 | } | ||
| 265 | |||
| 266 | static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr) | ||
| 267 | { | ||
| 268 | axi_chan_iowrite64(chan, CH_LLP, adr); | ||
| 269 | } | ||
| 270 | |||
/*
 * Program the channel registers and start hardware execution of @first.
 * Called in chan locked context (vc.lock held by the caller).
 */
static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
				      struct axi_dma_desc *first)
{
	u32 priority = chan->chip->dw->hdata->priority[chan->id];
	u32 reg, irq_mask;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	/* Starting a non-idle channel would corrupt the transfer in flight */
	if (unlikely(axi_chan_is_hw_enable(chan))) {
		dev_err(chan2dev(chan), "%s is non-idle!\n",
			axi_chan_name(chan));

		return;
	}

	/* Make sure the controller itself is enabled before touching the channel */
	axi_dma_enable(chan->chip);

	/* CFG_L: linked-list multi-block transfers on both src and dst sides */
	reg = (DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_DST_MULTBLK_TYPE_POS |
	       DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
	axi_chan_iowrite32(chan, CH_CFG_L, reg);

	/* CFG_H: mem-to-mem flow control, channel priority, HW handshaking */
	reg = (DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC << CH_CFG_H_TT_FC_POS |
	       priority << CH_CFG_H_PRIORITY_POS |
	       DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_DST_POS |
	       DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_SRC_POS);
	axi_chan_iowrite32(chan, CH_CFG_H, reg);

	/* Point the hardware at the first LLI (low bits select the LLI master) */
	write_chan_llp(chan, first->vd.tx.phys | lms);

	/* Signal an interrupt on transfer completion and on any error */
	irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
	axi_chan_irq_sig_set(chan, irq_mask);

	/* Generate 'suspend' status but don't generate interrupt */
	irq_mask |= DWAXIDMAC_IRQ_SUSPENDED;
	axi_chan_irq_set(chan, irq_mask);

	axi_chan_enable(chan);
}
| 309 | |||
| 310 | static void axi_chan_start_first_queued(struct axi_dma_chan *chan) | ||
| 311 | { | ||
| 312 | struct axi_dma_desc *desc; | ||
| 313 | struct virt_dma_desc *vd; | ||
| 314 | |||
| 315 | vd = vchan_next_desc(&chan->vc); | ||
| 316 | if (!vd) | ||
| 317 | return; | ||
| 318 | |||
| 319 | desc = vd_to_axi_desc(vd); | ||
| 320 | dev_vdbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan), | ||
| 321 | vd->tx.cookie); | ||
| 322 | axi_chan_block_xfer_start(chan, desc); | ||
| 323 | } | ||
| 324 | |||
| 325 | static void dma_chan_issue_pending(struct dma_chan *dchan) | ||
| 326 | { | ||
| 327 | struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); | ||
| 328 | unsigned long flags; | ||
| 329 | |||
| 330 | spin_lock_irqsave(&chan->vc.lock, flags); | ||
| 331 | if (vchan_issue_pending(&chan->vc)) | ||
| 332 | axi_chan_start_first_queued(chan); | ||
| 333 | spin_unlock_irqrestore(&chan->vc.lock, flags); | ||
| 334 | } | ||
| 335 | |||
| 336 | static int dma_chan_alloc_chan_resources(struct dma_chan *dchan) | ||
| 337 | { | ||
| 338 | struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); | ||
| 339 | |||
| 340 | /* ASSERT: channel is idle */ | ||
| 341 | if (axi_chan_is_hw_enable(chan)) { | ||
| 342 | dev_err(chan2dev(chan), "%s is non-idle!\n", | ||
| 343 | axi_chan_name(chan)); | ||
| 344 | return -EBUSY; | ||
| 345 | } | ||
| 346 | |||
| 347 | dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan)); | ||
| 348 | |||
| 349 | pm_runtime_get(chan->chip->dev); | ||
| 350 | |||
| 351 | return 0; | ||
| 352 | } | ||
| 353 | |||
| 354 | static void dma_chan_free_chan_resources(struct dma_chan *dchan) | ||
| 355 | { | ||
| 356 | struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); | ||
| 357 | |||
| 358 | /* ASSERT: channel is idle */ | ||
| 359 | if (axi_chan_is_hw_enable(chan)) | ||
| 360 | dev_err(dchan2dev(dchan), "%s is non-idle!\n", | ||
| 361 | axi_chan_name(chan)); | ||
| 362 | |||
| 363 | axi_chan_disable(chan); | ||
| 364 | axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL); | ||
| 365 | |||
| 366 | vchan_free_chan_resources(&chan->vc); | ||
| 367 | |||
| 368 | dev_vdbg(dchan2dev(dchan), | ||
| 369 | "%s: free resources, descriptor still allocated: %u\n", | ||
| 370 | axi_chan_name(chan), atomic_read(&chan->descs_allocated)); | ||
| 371 | |||
| 372 | pm_runtime_put(chan->chip->dev); | ||
| 373 | } | ||
| 374 | |||
| 375 | /* | ||
| 376 | * If DW_axi_dmac sees CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched LLI | ||
| 377 | * as 1, it understands that the current block is the final block in the | ||
| 378 | * transfer and completes the DMA transfer operation at the end of current | ||
| 379 | * block transfer. | ||
| 380 | */ | ||
| 381 | static void set_desc_last(struct axi_dma_desc *desc) | ||
| 382 | { | ||
| 383 | u32 val; | ||
| 384 | |||
| 385 | val = le32_to_cpu(desc->lli.ctl_hi); | ||
| 386 | val |= CH_CTL_H_LLI_LAST; | ||
| 387 | desc->lli.ctl_hi = cpu_to_le32(val); | ||
| 388 | } | ||
| 389 | |||
| 390 | static void write_desc_sar(struct axi_dma_desc *desc, dma_addr_t adr) | ||
| 391 | { | ||
| 392 | desc->lli.sar = cpu_to_le64(adr); | ||
| 393 | } | ||
| 394 | |||
| 395 | static void write_desc_dar(struct axi_dma_desc *desc, dma_addr_t adr) | ||
| 396 | { | ||
| 397 | desc->lli.dar = cpu_to_le64(adr); | ||
| 398 | } | ||
| 399 | |||
| 400 | static void set_desc_src_master(struct axi_dma_desc *desc) | ||
| 401 | { | ||
| 402 | u32 val; | ||
| 403 | |||
| 404 | /* Select AXI0 for source master */ | ||
| 405 | val = le32_to_cpu(desc->lli.ctl_lo); | ||
| 406 | val &= ~CH_CTL_L_SRC_MAST; | ||
| 407 | desc->lli.ctl_lo = cpu_to_le32(val); | ||
| 408 | } | ||
| 409 | |||
/* Route destination accesses through AXI1 when a second master exists. */
static void set_desc_dest_master(struct axi_dma_desc *desc)
{
	u32 val;

	/* Select AXI1 for destination master if available, else fall back to AXI0 */
	val = le32_to_cpu(desc->lli.ctl_lo);
	if (desc->chan->chip->dw->hdata->nr_masters > 1)
		val |= CH_CTL_L_DST_MAST;
	else
		val &= ~CH_CTL_L_DST_MAST;

	desc->lli.ctl_lo = cpu_to_le32(val);
}
| 423 | |||
| 424 | static struct dma_async_tx_descriptor * | ||
| 425 | dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr, | ||
| 426 | dma_addr_t src_adr, size_t len, unsigned long flags) | ||
| 427 | { | ||
| 428 | struct axi_dma_desc *first = NULL, *desc = NULL, *prev = NULL; | ||
| 429 | struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); | ||
| 430 | size_t block_ts, max_block_ts, xfer_len; | ||
| 431 | u32 xfer_width, reg; | ||
| 432 | u8 lms = 0; /* Select AXI0 master for LLI fetching */ | ||
| 433 | |||
| 434 | dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx", | ||
| 435 | axi_chan_name(chan), &src_adr, &dst_adr, len, flags); | ||
| 436 | |||
| 437 | max_block_ts = chan->chip->dw->hdata->block_size[chan->id]; | ||
| 438 | |||
| 439 | while (len) { | ||
| 440 | xfer_len = len; | ||
| 441 | |||
| 442 | /* | ||
| 443 | * Take care for the alignment. | ||
| 444 | * Actually source and destination widths can be different, but | ||
| 445 | * make them same to be simpler. | ||
| 446 | */ | ||
| 447 | xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, xfer_len); | ||
| 448 | |||
| 449 | /* | ||
| 450 | * block_ts indicates the total number of data of width | ||
| 451 | * to be transferred in a DMA block transfer. | ||
| 452 | * BLOCK_TS register should be set to block_ts - 1 | ||
| 453 | */ | ||
| 454 | block_ts = xfer_len >> xfer_width; | ||
| 455 | if (block_ts > max_block_ts) { | ||
| 456 | block_ts = max_block_ts; | ||
| 457 | xfer_len = max_block_ts << xfer_width; | ||
| 458 | } | ||
| 459 | |||
| 460 | desc = axi_desc_get(chan); | ||
| 461 | if (unlikely(!desc)) | ||
| 462 | goto err_desc_get; | ||
| 463 | |||
| 464 | write_desc_sar(desc, src_adr); | ||
| 465 | write_desc_dar(desc, dst_adr); | ||
| 466 | desc->lli.block_ts_lo = cpu_to_le32(block_ts - 1); | ||
| 467 | |||
| 468 | reg = CH_CTL_H_LLI_VALID; | ||
| 469 | if (chan->chip->dw->hdata->restrict_axi_burst_len) { | ||
| 470 | u32 burst_len = chan->chip->dw->hdata->axi_rw_burst_len; | ||
| 471 | |||
| 472 | reg |= (CH_CTL_H_ARLEN_EN | | ||
| 473 | burst_len << CH_CTL_H_ARLEN_POS | | ||
| 474 | CH_CTL_H_AWLEN_EN | | ||
| 475 | burst_len << CH_CTL_H_AWLEN_POS); | ||
| 476 | } | ||
| 477 | desc->lli.ctl_hi = cpu_to_le32(reg); | ||
| 478 | |||
| 479 | reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS | | ||
| 480 | DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS | | ||
| 481 | xfer_width << CH_CTL_L_DST_WIDTH_POS | | ||
| 482 | xfer_width << CH_CTL_L_SRC_WIDTH_POS | | ||
| 483 | DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS | | ||
| 484 | DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS); | ||
| 485 | desc->lli.ctl_lo = cpu_to_le32(reg); | ||
| 486 | |||
| 487 | set_desc_src_master(desc); | ||
| 488 | set_desc_dest_master(desc); | ||
| 489 | |||
| 490 | /* Manage transfer list (xfer_list) */ | ||
| 491 | if (!first) { | ||
| 492 | first = desc; | ||
| 493 | } else { | ||
| 494 | list_add_tail(&desc->xfer_list, &first->xfer_list); | ||
| 495 | write_desc_llp(prev, desc->vd.tx.phys | lms); | ||
| 496 | } | ||
| 497 | prev = desc; | ||
| 498 | |||
| 499 | /* update the length and addresses for the next loop cycle */ | ||
| 500 | len -= xfer_len; | ||
| 501 | dst_adr += xfer_len; | ||
| 502 | src_adr += xfer_len; | ||
| 503 | } | ||
| 504 | |||
| 505 | /* Total len of src/dest sg == 0, so no descriptor were allocated */ | ||
| 506 | if (unlikely(!first)) | ||
| 507 | return NULL; | ||
| 508 | |||
| 509 | /* Set end-of-link to the last link descriptor of list */ | ||
| 510 | set_desc_last(desc); | ||
| 511 | |||
| 512 | return vchan_tx_prep(&chan->vc, &first->vd, flags); | ||
| 513 | |||
| 514 | err_desc_get: | ||
| 515 | axi_desc_put(first); | ||
| 516 | return NULL; | ||
| 517 | } | ||
| 518 | |||
| 519 | static void axi_chan_dump_lli(struct axi_dma_chan *chan, | ||
| 520 | struct axi_dma_desc *desc) | ||
| 521 | { | ||
| 522 | dev_err(dchan2dev(&chan->vc.chan), | ||
| 523 | "SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x", | ||
| 524 | le64_to_cpu(desc->lli.sar), | ||
| 525 | le64_to_cpu(desc->lli.dar), | ||
| 526 | le64_to_cpu(desc->lli.llp), | ||
| 527 | le32_to_cpu(desc->lli.block_ts_lo), | ||
| 528 | le32_to_cpu(desc->lli.ctl_hi), | ||
| 529 | le32_to_cpu(desc->lli.ctl_lo)); | ||
| 530 | } | ||
| 531 | |||
| 532 | static void axi_chan_list_dump_lli(struct axi_dma_chan *chan, | ||
| 533 | struct axi_dma_desc *desc_head) | ||
| 534 | { | ||
| 535 | struct axi_dma_desc *desc; | ||
| 536 | |||
| 537 | axi_chan_dump_lli(chan, desc_head); | ||
| 538 | list_for_each_entry(desc, &desc_head->xfer_list, xfer_list) | ||
| 539 | axi_chan_dump_lli(chan, desc); | ||
| 540 | } | ||
| 541 | |||
| 542 | static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status) | ||
| 543 | { | ||
| 544 | struct virt_dma_desc *vd; | ||
| 545 | unsigned long flags; | ||
| 546 | |||
| 547 | spin_lock_irqsave(&chan->vc.lock, flags); | ||
| 548 | |||
| 549 | axi_chan_disable(chan); | ||
| 550 | |||
| 551 | /* The bad descriptor currently is in the head of vc list */ | ||
| 552 | vd = vchan_next_desc(&chan->vc); | ||
| 553 | /* Remove the completed descriptor from issued list */ | ||
| 554 | list_del(&vd->node); | ||
| 555 | |||
| 556 | /* WARN about bad descriptor */ | ||
| 557 | dev_err(chan2dev(chan), | ||
| 558 | "Bad descriptor submitted for %s, cookie: %d, irq: 0x%08x\n", | ||
| 559 | axi_chan_name(chan), vd->tx.cookie, status); | ||
| 560 | axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd)); | ||
| 561 | |||
| 562 | vchan_cookie_complete(vd); | ||
| 563 | |||
| 564 | /* Try to restart the controller */ | ||
| 565 | axi_chan_start_first_queued(chan); | ||
| 566 | |||
| 567 | spin_unlock_irqrestore(&chan->vc.lock, flags); | ||
| 568 | } | ||
| 569 | |||
| 570 | static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan) | ||
| 571 | { | ||
| 572 | struct virt_dma_desc *vd; | ||
| 573 | unsigned long flags; | ||
| 574 | |||
| 575 | spin_lock_irqsave(&chan->vc.lock, flags); | ||
| 576 | if (unlikely(axi_chan_is_hw_enable(chan))) { | ||
| 577 | dev_err(chan2dev(chan), "BUG: %s caught DWAXIDMAC_IRQ_DMA_TRF, but channel not idle!\n", | ||
| 578 | axi_chan_name(chan)); | ||
| 579 | axi_chan_disable(chan); | ||
| 580 | } | ||
| 581 | |||
| 582 | /* The completed descriptor currently is in the head of vc list */ | ||
| 583 | vd = vchan_next_desc(&chan->vc); | ||
| 584 | /* Remove the completed descriptor from issued list before completing */ | ||
| 585 | list_del(&vd->node); | ||
| 586 | vchan_cookie_complete(vd); | ||
| 587 | |||
| 588 | /* Submit queued descriptors after processing the completed ones */ | ||
| 589 | axi_chan_start_first_queued(chan); | ||
| 590 | |||
| 591 | spin_unlock_irqrestore(&chan->vc.lock, flags); | ||
| 592 | } | ||
| 593 | |||
| 594 | static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id) | ||
| 595 | { | ||
| 596 | struct axi_dma_chip *chip = dev_id; | ||
| 597 | struct dw_axi_dma *dw = chip->dw; | ||
| 598 | struct axi_dma_chan *chan; | ||
| 599 | |||
| 600 | u32 status, i; | ||
| 601 | |||
| 602 | /* Disable DMAC inerrupts. We'll enable them after processing chanels */ | ||
| 603 | axi_dma_irq_disable(chip); | ||
| 604 | |||
| 605 | /* Poll, clear and process every chanel interrupt status */ | ||
| 606 | for (i = 0; i < dw->hdata->nr_channels; i++) { | ||
| 607 | chan = &dw->chan[i]; | ||
| 608 | status = axi_chan_irq_read(chan); | ||
| 609 | axi_chan_irq_clear(chan, status); | ||
| 610 | |||
| 611 | dev_vdbg(chip->dev, "%s %u IRQ status: 0x%08x\n", | ||
| 612 | axi_chan_name(chan), i, status); | ||
| 613 | |||
| 614 | if (status & DWAXIDMAC_IRQ_ALL_ERR) | ||
| 615 | axi_chan_handle_err(chan, status); | ||
| 616 | else if (status & DWAXIDMAC_IRQ_DMA_TRF) | ||
| 617 | axi_chan_block_xfer_complete(chan); | ||
| 618 | } | ||
| 619 | |||
| 620 | /* Re-enable interrupts */ | ||
| 621 | axi_dma_irq_enable(chip); | ||
| 622 | |||
| 623 | return IRQ_HANDLED; | ||
| 624 | } | ||
| 625 | |||
| 626 | static int dma_chan_terminate_all(struct dma_chan *dchan) | ||
| 627 | { | ||
| 628 | struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); | ||
| 629 | unsigned long flags; | ||
| 630 | LIST_HEAD(head); | ||
| 631 | |||
| 632 | spin_lock_irqsave(&chan->vc.lock, flags); | ||
| 633 | |||
| 634 | axi_chan_disable(chan); | ||
| 635 | |||
| 636 | vchan_get_all_descriptors(&chan->vc, &head); | ||
| 637 | |||
| 638 | /* | ||
| 639 | * As vchan_dma_desc_free_list can access to desc_allocated list | ||
| 640 | * we need to call it in vc.lock context. | ||
| 641 | */ | ||
| 642 | vchan_dma_desc_free_list(&chan->vc, &head); | ||
| 643 | |||
| 644 | spin_unlock_irqrestore(&chan->vc.lock, flags); | ||
| 645 | |||
| 646 | dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan)); | ||
| 647 | |||
| 648 | return 0; | ||
| 649 | } | ||
| 650 | |||
/*
 * dmaengine ->device_pause: request suspend of the channel and poll the
 * per-channel interrupt status until the hardware acknowledges with
 * SUSPENDED.
 *
 * Returns 0 on success, -EAGAIN if no acknowledge was seen within
 * ~40us (20 iterations x 2us).
 */
static int dma_chan_pause(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;
	unsigned int timeout = 20; /* timeout iterations */
	u32 val;

	spin_lock_irqsave(&chan->vc.lock, flags);

	/* Set SUSP together with its write-enable bit for this channel */
	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
	val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
	       BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);

	do {
		if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED)
			break;

		udelay(2);
	} while (--timeout);

	axi_chan_irq_clear(chan, DWAXIDMAC_IRQ_SUSPENDED);

	/*
	 * NOTE(review): is_paused is set even when the poll timed out - the
	 * suspend request was issued either way, but confirm this is intended.
	 */
	chan->is_paused = true;

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return timeout ? 0 : -EAGAIN;
}
| 680 | |||
| 681 | /* Called in chan locked context */ | ||
| 682 | static inline void axi_chan_resume(struct axi_dma_chan *chan) | ||
| 683 | { | ||
| 684 | u32 val; | ||
| 685 | |||
| 686 | val = axi_dma_ioread32(chan->chip, DMAC_CHEN); | ||
| 687 | val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT); | ||
| 688 | val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT); | ||
| 689 | axi_dma_iowrite32(chan->chip, DMAC_CHEN, val); | ||
| 690 | |||
| 691 | chan->is_paused = false; | ||
| 692 | } | ||
| 693 | |||
| 694 | static int dma_chan_resume(struct dma_chan *dchan) | ||
| 695 | { | ||
| 696 | struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan); | ||
| 697 | unsigned long flags; | ||
| 698 | |||
| 699 | spin_lock_irqsave(&chan->vc.lock, flags); | ||
| 700 | |||
| 701 | if (chan->is_paused) | ||
| 702 | axi_chan_resume(chan); | ||
| 703 | |||
| 704 | spin_unlock_irqrestore(&chan->vc.lock, flags); | ||
| 705 | |||
| 706 | return 0; | ||
| 707 | } | ||
| 708 | |||
/*
 * Power down the controller: mask interrupts and disable the DMAC before
 * gating both clocks (order matters - registers must not be touched after
 * the clocks are off).
 */
static int axi_dma_suspend(struct axi_dma_chip *chip)
{
	axi_dma_irq_disable(chip);
	axi_dma_disable(chip);

	clk_disable_unprepare(chip->core_clk);
	clk_disable_unprepare(chip->cfgr_clk);

	return 0;
}
| 719 | |||
| 720 | static int axi_dma_resume(struct axi_dma_chip *chip) | ||
| 721 | { | ||
| 722 | int ret; | ||
| 723 | |||
| 724 | ret = clk_prepare_enable(chip->cfgr_clk); | ||
| 725 | if (ret < 0) | ||
| 726 | return ret; | ||
| 727 | |||
| 728 | ret = clk_prepare_enable(chip->core_clk); | ||
| 729 | if (ret < 0) | ||
| 730 | return ret; | ||
| 731 | |||
| 732 | axi_dma_enable(chip); | ||
| 733 | axi_dma_irq_enable(chip); | ||
| 734 | |||
| 735 | return 0; | ||
| 736 | } | ||
| 737 | |||
| 738 | static int __maybe_unused axi_dma_runtime_suspend(struct device *dev) | ||
| 739 | { | ||
| 740 | struct axi_dma_chip *chip = dev_get_drvdata(dev); | ||
| 741 | |||
| 742 | return axi_dma_suspend(chip); | ||
| 743 | } | ||
| 744 | |||
| 745 | static int __maybe_unused axi_dma_runtime_resume(struct device *dev) | ||
| 746 | { | ||
| 747 | struct axi_dma_chip *chip = dev_get_drvdata(dev); | ||
| 748 | |||
| 749 | return axi_dma_resume(chip); | ||
| 750 | } | ||
| 751 | |||
| 752 | static int parse_device_properties(struct axi_dma_chip *chip) | ||
| 753 | { | ||
| 754 | struct device *dev = chip->dev; | ||
| 755 | u32 tmp, carr[DMAC_MAX_CHANNELS]; | ||
| 756 | int ret; | ||
| 757 | |||
| 758 | ret = device_property_read_u32(dev, "dma-channels", &tmp); | ||
| 759 | if (ret) | ||
| 760 | return ret; | ||
| 761 | if (tmp == 0 || tmp > DMAC_MAX_CHANNELS) | ||
| 762 | return -EINVAL; | ||
| 763 | |||
| 764 | chip->dw->hdata->nr_channels = tmp; | ||
| 765 | |||
| 766 | ret = device_property_read_u32(dev, "snps,dma-masters", &tmp); | ||
| 767 | if (ret) | ||
| 768 | return ret; | ||
| 769 | if (tmp == 0 || tmp > DMAC_MAX_MASTERS) | ||
| 770 | return -EINVAL; | ||
| 771 | |||
| 772 | chip->dw->hdata->nr_masters = tmp; | ||
| 773 | |||
| 774 | ret = device_property_read_u32(dev, "snps,data-width", &tmp); | ||
| 775 | if (ret) | ||
| 776 | return ret; | ||
| 777 | if (tmp > DWAXIDMAC_TRANS_WIDTH_MAX) | ||
| 778 | return -EINVAL; | ||
| 779 | |||
| 780 | chip->dw->hdata->m_data_width = tmp; | ||
| 781 | |||
| 782 | ret = device_property_read_u32_array(dev, "snps,block-size", carr, | ||
| 783 | chip->dw->hdata->nr_channels); | ||
| 784 | if (ret) | ||
| 785 | return ret; | ||
| 786 | for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) { | ||
| 787 | if (carr[tmp] == 0 || carr[tmp] > DMAC_MAX_BLK_SIZE) | ||
| 788 | return -EINVAL; | ||
| 789 | |||
| 790 | chip->dw->hdata->block_size[tmp] = carr[tmp]; | ||
| 791 | } | ||
| 792 | |||
| 793 | ret = device_property_read_u32_array(dev, "snps,priority", carr, | ||
| 794 | chip->dw->hdata->nr_channels); | ||
| 795 | if (ret) | ||
| 796 | return ret; | ||
| 797 | /* Priority value must be programmed within [0:nr_channels-1] range */ | ||
| 798 | for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) { | ||
| 799 | if (carr[tmp] >= chip->dw->hdata->nr_channels) | ||
| 800 | return -EINVAL; | ||
| 801 | |||
| 802 | chip->dw->hdata->priority[tmp] = carr[tmp]; | ||
| 803 | } | ||
| 804 | |||
| 805 | /* axi-max-burst-len is optional property */ | ||
| 806 | ret = device_property_read_u32(dev, "snps,axi-max-burst-len", &tmp); | ||
| 807 | if (!ret) { | ||
| 808 | if (tmp > DWAXIDMAC_ARWLEN_MAX + 1) | ||
| 809 | return -EINVAL; | ||
| 810 | if (tmp < DWAXIDMAC_ARWLEN_MIN + 1) | ||
| 811 | return -EINVAL; | ||
| 812 | |||
| 813 | chip->dw->hdata->restrict_axi_burst_len = true; | ||
| 814 | chip->dw->hdata->axi_rw_burst_len = tmp - 1; | ||
| 815 | } | ||
| 816 | |||
| 817 | return 0; | ||
| 818 | } | ||
| 819 | |||
/*
 * Platform probe: allocate driver state, map resources, read the hardware
 * description, set up channels, power the controller up and register with
 * the dmaengine core. All allocations and the IRQ are devm-managed.
 */
static int dw_probe(struct platform_device *pdev)
{
	struct axi_dma_chip *chip;
	struct resource *mem;
	struct dw_axi_dma *dw;
	struct dw_axi_dma_hcfg *hdata;
	u32 i;
	int ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	hdata = devm_kzalloc(&pdev->dev, sizeof(*hdata), GFP_KERNEL);
	if (!hdata)
		return -ENOMEM;

	chip->dw = dw;
	chip->dev = &pdev->dev;
	chip->dw->hdata = hdata;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(chip->dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	chip->core_clk = devm_clk_get(chip->dev, "core-clk");
	if (IS_ERR(chip->core_clk))
		return PTR_ERR(chip->core_clk);

	chip->cfgr_clk = devm_clk_get(chip->dev, "cfgr-clk");
	if (IS_ERR(chip->cfgr_clk))
		return PTR_ERR(chip->cfgr_clk);

	/* Channel count etc. must be known before sizing the chan array */
	ret = parse_device_properties(chip);
	if (ret)
		return ret;

	dw->chan = devm_kcalloc(chip->dev, hdata->nr_channels,
				sizeof(*dw->chan), GFP_KERNEL);
	if (!dw->chan)
		return -ENOMEM;

	ret = devm_request_irq(chip->dev, chip->irq, dw_axi_dma_interrupt,
			       IRQF_SHARED, KBUILD_MODNAME, chip);
	if (ret)
		return ret;

	/* Lli address must be aligned to a 64-byte boundary */
	dw->desc_pool = dmam_pool_create(KBUILD_MODNAME, chip->dev,
					 sizeof(struct axi_dma_desc), 64, 0);
	if (!dw->desc_pool) {
		dev_err(chip->dev, "No memory for descriptors dma pool\n");
		return -ENOMEM;
	}

	/* Initialize the virt-dma channel for each hardware channel */
	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < hdata->nr_channels; i++) {
		struct axi_dma_chan *chan = &dw->chan[i];

		chan->chip = chip;
		chan->id = i;
		/* Per-channel register window follows the common register block */
		chan->chan_regs = chip->regs + COMMON_REG_LEN + i * CHAN_REG_LEN;
		atomic_set(&chan->descs_allocated, 0);

		chan->vc.desc_free = vchan_desc_put;
		vchan_init(&chan->vc, &dw->dma);
	}

	/* Set capabilities */
	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);

	/* DMA capabilities */
	dw->dma.chancnt = hdata->nr_channels;
	dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
	dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
	dw->dma.directions = BIT(DMA_MEM_TO_MEM);
	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	dw->dma.dev = chip->dev;
	dw->dma.device_tx_status = dma_chan_tx_status;
	dw->dma.device_issue_pending = dma_chan_issue_pending;
	dw->dma.device_terminate_all = dma_chan_terminate_all;
	dw->dma.device_pause = dma_chan_pause;
	dw->dma.device_resume = dma_chan_resume;

	dw->dma.device_alloc_chan_resources = dma_chan_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dma_chan_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;

	platform_set_drvdata(pdev, chip);

	pm_runtime_enable(chip->dev);

	/*
	 * We can't just call pm_runtime_get here instead of
	 * pm_runtime_get_noresume + axi_dma_resume because we need
	 * driver to work also without Runtime PM.
	 */
	pm_runtime_get_noresume(chip->dev);
	ret = axi_dma_resume(chip);
	if (ret < 0)
		goto err_pm_disable;

	axi_dma_hw_init(chip);

	pm_runtime_put(chip->dev);

	ret = dma_async_device_register(&dw->dma);
	if (ret)
		goto err_pm_disable;

	dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n",
		 dw->hdata->nr_channels);

	return 0;

err_pm_disable:
	/*
	 * NOTE(review): if axi_dma_resume() failed, the pm_runtime usage count
	 * taken by pm_runtime_get_noresume() is never dropped before disable -
	 * confirm whether a pm_runtime_put_noidle() is needed on that path.
	 */
	pm_runtime_disable(chip->dev);

	return ret;
}
| 951 | |||
/*
 * Platform remove: quiesce the hardware, undo the probe-time PM setup,
 * release the IRQ, tear down the channels and unregister from dmaengine.
 */
static int dw_remove(struct platform_device *pdev)
{
	struct axi_dma_chip *chip = platform_get_drvdata(pdev);
	struct dw_axi_dma *dw = chip->dw;
	struct axi_dma_chan *chan, *_chan;
	u32 i;

	/* Enable clk before accessing to registers */
	/* NOTE(review): clk_prepare_enable() return values are ignored here -
	 * if enabling fails the register writes below are silently lost. */
	clk_prepare_enable(chip->cfgr_clk);
	clk_prepare_enable(chip->core_clk);
	axi_dma_irq_disable(chip);
	for (i = 0; i < dw->hdata->nr_channels; i++) {
		axi_chan_disable(&chip->dw->chan[i]);
		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
	}
	axi_dma_disable(chip);

	/* axi_dma_suspend() gates the clocks enabled above again */
	pm_runtime_disable(chip->dev);
	axi_dma_suspend(chip);

	/* Free the IRQ explicitly so no handler can run during teardown below */
	devm_free_irq(chip->dev, chip->irq, chip);

	list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
			vc.chan.device_node) {
		list_del(&chan->vc.chan.device_node);
		tasklet_kill(&chan->vc.task);
	}

	dma_async_device_unregister(&dw->dma);

	return 0;
}
| 984 | |||
/* Runtime PM only; no system sleep callbacks are provided. */
static const struct dev_pm_ops dw_axi_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(axi_dma_runtime_suspend, axi_dma_runtime_resume, NULL)
};
| 988 | |||
/* Devicetree match table (see Documentation bindings for snps,axi-dma). */
static const struct of_device_id dw_dma_of_id_table[] = {
	{ .compatible = "snps,axi-dma-1.01a" },
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
| 994 | |||
/* Platform driver glue: probe/remove plus runtime-PM ops defined above. */
static struct platform_driver dw_driver = {
	.probe		= dw_probe,
	.remove		= dw_remove,
	.driver = {
		.name	= KBUILD_MODNAME,
		.of_match_table = of_match_ptr(dw_dma_of_id_table),
		.pm = &dw_axi_dma_pm_ops,
	},
};
module_platform_driver(dw_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare AXI DMA Controller platform driver");
MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>");
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h new file mode 100644 index 000000000000..f8888dc0b8dc --- /dev/null +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h | |||
| @@ -0,0 +1,334 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // (C) 2017-2018 Synopsys, Inc. (www.synopsys.com) | ||
| 3 | |||
| 4 | /* | ||
| 5 | * Synopsys DesignWare AXI DMA Controller driver. | ||
| 6 | * | ||
| 7 | * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com> | ||
| 8 | */ | ||
| 9 | |||
| 10 | #ifndef _AXI_DMA_PLATFORM_H | ||
| 11 | #define _AXI_DMA_PLATFORM_H | ||
| 12 | |||
| 13 | #include <linux/bitops.h> | ||
| 14 | #include <linux/clk.h> | ||
| 15 | #include <linux/device.h> | ||
| 16 | #include <linux/dmaengine.h> | ||
| 17 | #include <linux/types.h> | ||
| 18 | |||
| 19 | #include "../virt-dma.h" | ||
| 20 | |||
/* Driver-side limits used to size the static per-channel tables below */
#define DMAC_MAX_CHANNELS	8
#define DMAC_MAX_MASTERS	2
#define DMAC_MAX_BLK_SIZE	0x200000
| 24 | |||
/* Hardware configuration read from firmware properties at probe time */
struct dw_axi_dma_hcfg {
	u32	nr_channels;
	u32	nr_masters;
	u32	m_data_width;
	/* per-channel maximum block transfer size and priority */
	u32	block_size[DMAC_MAX_CHANNELS];
	u32	priority[DMAC_MAX_CHANNELS];
	/* maximum supported axi burst length */
	u32	axi_rw_burst_len;
	/* set when the optional snps,axi-max-burst-len property is present */
	bool	restrict_axi_burst_len;
};
| 35 | |||
/* Per-channel state wrapping a virt-dma channel */
struct axi_dma_chan {
	struct axi_dma_chip		*chip;
	/* mapped base of this channel's register window */
	void __iomem			*chan_regs;
	u8				id;
	atomic_t			descs_allocated;

	struct virt_dma_chan		vc;

	/* these other elements are all protected by vc.lock */
	bool				is_paused;
};
| 47 | |||
/* Controller-wide dmaengine state */
struct dw_axi_dma {
	struct dma_device	dma;
	struct dw_axi_dma_hcfg	*hdata;
	/* pool of 64-byte-aligned LLI descriptors */
	struct dma_pool		*desc_pool;

	/* channels */
	struct axi_dma_chan	*chan;
};
| 56 | |||
/* Top-level device context: resources shared by all channels */
struct axi_dma_chip {
	struct device		*dev;
	int			irq;
	void __iomem		*regs;
	struct clk		*core_clk;
	struct clk		*cfgr_clk;
	struct dw_axi_dma	*dw;
};
| 65 | |||
/*
 * LLI == Linked List Item
 * Hardware-defined descriptor layout fetched by the controller; all fields
 * are little endian regardless of CPU byte order.
 */
struct __packed axi_dma_lli {
	__le64		sar;
	__le64		dar;
	__le32		block_ts_lo;
	__le32		block_ts_hi;
	/* bus address of the next LLI in the chain */
	__le64		llp;
	__le32		ctl_lo;
	__le32		ctl_hi;
	__le32		sstat;
	__le32		dstat;
	__le32		status_lo;
	/* sic: typo for "status_hi"; kept as-is to avoid touching users */
	__le32		ststus_hi;
	__le32		reserved_lo;
	__le32		reserved_hi;
};
| 82 | |||
/* Software descriptor: hardware LLI first (DMA-visible), bookkeeping after */
struct axi_dma_desc {
	struct axi_dma_lli		lli;

	struct virt_dma_desc		vd;
	struct axi_dma_chan		*chan;
	/* links the chained LLIs of one transfer to the first descriptor */
	struct list_head		xfer_list;
};
| 90 | |||
| 91 | static inline struct device *dchan2dev(struct dma_chan *dchan) | ||
| 92 | { | ||
| 93 | return &dchan->dev->device; | ||
| 94 | } | ||
| 95 | |||
| 96 | static inline struct device *chan2dev(struct axi_dma_chan *chan) | ||
| 97 | { | ||
| 98 | return &chan->vc.chan.dev->device; | ||
| 99 | } | ||
| 100 | |||
| 101 | static inline struct axi_dma_desc *vd_to_axi_desc(struct virt_dma_desc *vd) | ||
| 102 | { | ||
| 103 | return container_of(vd, struct axi_dma_desc, vd); | ||
| 104 | } | ||
| 105 | |||
| 106 | static inline struct axi_dma_chan *vc_to_axi_dma_chan(struct virt_dma_chan *vc) | ||
| 107 | { | ||
| 108 | return container_of(vc, struct axi_dma_chan, vc); | ||
| 109 | } | ||
| 110 | |||
/* Upcast a dmaengine channel to its enclosing driver channel. */
static inline struct axi_dma_chan *dchan_to_axi_dma_chan(struct dma_chan *chan)
{
	return vc_to_axi_dma_chan(to_virt_chan(chan));
}
| 115 | |||
| 116 | |||
/* Register map geometry: common block first, then one window per channel */
#define COMMON_REG_LEN		0x100
#define CHAN_REG_LEN		0x100

/* Common registers offset */
#define DMAC_ID			0x000 /* R DMAC ID */
#define DMAC_COMPVER		0x008 /* R DMAC Component Version */
#define DMAC_CFG		0x010 /* R/W DMAC Configuration */
#define DMAC_CHEN		0x018 /* R/W DMAC Channel Enable */
#define DMAC_CHEN_L		0x018 /* R/W DMAC Channel Enable 00-31 */
#define DMAC_CHEN_H		0x01C /* R/W DMAC Channel Enable 32-63 */
#define DMAC_INTSTATUS		0x030 /* R DMAC Interrupt Status */
#define DMAC_COMMON_INTCLEAR	0x038 /* W DMAC Interrupt Clear */
#define DMAC_COMMON_INTSTATUS_ENA 0x040 /* R DMAC Interrupt Status Enable */
#define DMAC_COMMON_INTSIGNAL_ENA 0x048 /* R/W DMAC Interrupt Signal Enable */
#define DMAC_COMMON_INTSTATUS	0x050 /* R DMAC Interrupt Status */
#define DMAC_RESET		0x058 /* R DMAC Reset Register1 */

/* DMA channel registers offset (relative to the channel's window) */
#define CH_SAR			0x000 /* R/W Chan Source Address */
#define CH_DAR			0x008 /* R/W Chan Destination Address */
#define CH_BLOCK_TS		0x010 /* R/W Chan Block Transfer Size */
#define CH_CTL			0x018 /* R/W Chan Control */
#define CH_CTL_L		0x018 /* R/W Chan Control 00-31 */
#define CH_CTL_H		0x01C /* R/W Chan Control 32-63 */
#define CH_CFG			0x020 /* R/W Chan Configuration */
#define CH_CFG_L		0x020 /* R/W Chan Configuration 00-31 */
#define CH_CFG_H		0x024 /* R/W Chan Configuration 32-63 */
#define CH_LLP			0x028 /* R/W Chan Linked List Pointer */
#define CH_STATUS		0x030 /* R Chan Status */
#define CH_SWHSSRC		0x038 /* R/W Chan SW Handshake Source */
#define CH_SWHSDST		0x040 /* R/W Chan SW Handshake Destination */
#define CH_BLK_TFR_RESUMEREQ	0x048 /* W Chan Block Transfer Resume Req */
#define CH_AXI_ID		0x050 /* R/W Chan AXI ID */
#define CH_AXI_QOS		0x058 /* R/W Chan AXI QOS */
#define CH_SSTAT		0x060 /* R Chan Source Status */
#define CH_DSTAT		0x068 /* R Chan Destination Status */
#define CH_SSTATAR		0x070 /* R/W Chan Source Status Fetch Addr */
#define CH_DSTATAR		0x078 /* R/W Chan Destination Status Fetch Addr */
#define CH_INTSTATUS_ENA	0x080 /* R/W Chan Interrupt Status Enable */
#define CH_INTSTATUS		0x088 /* R/W Chan Interrupt Status */
#define CH_INTSIGNAL_ENA	0x090 /* R/W Chan Interrupt Signal Enable */
#define CH_INTCLEAR		0x098 /* W Chan Interrupt Clear */


/* DMAC_CFG */
#define DMAC_EN_POS			0
#define DMAC_EN_MASK			BIT(DMAC_EN_POS)

#define INT_EN_POS			1
#define INT_EN_MASK			BIT(INT_EN_POS)

/*
 * DMAC_CHEN layout: value bits paired with write-enable bits - the WE bit
 * must be set in the same write for the value bit to take effect.
 */
#define DMAC_CHAN_EN_SHIFT		0
#define DMAC_CHAN_EN_WE_SHIFT		8

#define DMAC_CHAN_SUSP_SHIFT		16
#define DMAC_CHAN_SUSP_WE_SHIFT		24

/* CH_CTL_H */
#define CH_CTL_H_ARLEN_EN		BIT(6)
#define CH_CTL_H_ARLEN_POS		7
#define CH_CTL_H_AWLEN_EN		BIT(15)
#define CH_CTL_H_AWLEN_POS		16

/* AXI burst length encodings: register value == beats - 1 */
enum {
	DWAXIDMAC_ARWLEN_1		= 0,
	DWAXIDMAC_ARWLEN_2		= 1,
	DWAXIDMAC_ARWLEN_4		= 3,
	DWAXIDMAC_ARWLEN_8		= 7,
	DWAXIDMAC_ARWLEN_16		= 15,
	DWAXIDMAC_ARWLEN_32		= 31,
	DWAXIDMAC_ARWLEN_64		= 63,
	DWAXIDMAC_ARWLEN_128		= 127,
	DWAXIDMAC_ARWLEN_256		= 255,
	DWAXIDMAC_ARWLEN_MIN		= DWAXIDMAC_ARWLEN_1,
	DWAXIDMAC_ARWLEN_MAX		= DWAXIDMAC_ARWLEN_256
};

#define CH_CTL_H_LLI_LAST		BIT(30)
#define CH_CTL_H_LLI_VALID		BIT(31)
| 196 | |||
| 197 | /* CH_CTL_L */ | ||
| 198 | #define CH_CTL_L_LAST_WRITE_EN BIT(30) | ||
| 199 | |||
| 200 | #define CH_CTL_L_DST_MSIZE_POS 18 | ||
| 201 | #define CH_CTL_L_SRC_MSIZE_POS 14 | ||
| 202 | |||
| 203 | enum { | ||
| 204 | DWAXIDMAC_BURST_TRANS_LEN_1 = 0, | ||
| 205 | DWAXIDMAC_BURST_TRANS_LEN_4, | ||
| 206 | DWAXIDMAC_BURST_TRANS_LEN_8, | ||
| 207 | DWAXIDMAC_BURST_TRANS_LEN_16, | ||
| 208 | DWAXIDMAC_BURST_TRANS_LEN_32, | ||
| 209 | DWAXIDMAC_BURST_TRANS_LEN_64, | ||
| 210 | DWAXIDMAC_BURST_TRANS_LEN_128, | ||
| 211 | DWAXIDMAC_BURST_TRANS_LEN_256, | ||
| 212 | DWAXIDMAC_BURST_TRANS_LEN_512, | ||
| 213 | DWAXIDMAC_BURST_TRANS_LEN_1024 | ||
| 214 | }; | ||
| 215 | |||
| 216 | #define CH_CTL_L_DST_WIDTH_POS 11 | ||
| 217 | #define CH_CTL_L_SRC_WIDTH_POS 8 | ||
| 218 | |||
| 219 | #define CH_CTL_L_DST_INC_POS 6 | ||
| 220 | #define CH_CTL_L_SRC_INC_POS 4 | ||
| 221 | enum { | ||
| 222 | DWAXIDMAC_CH_CTL_L_INC = 0, | ||
| 223 | DWAXIDMAC_CH_CTL_L_NOINC | ||
| 224 | }; | ||
| 225 | |||
| 226 | #define CH_CTL_L_DST_MAST BIT(2) | ||
| 227 | #define CH_CTL_L_SRC_MAST BIT(0) | ||
| 228 | |||
| 229 | /* CH_CFG_H */ | ||
| 230 | #define CH_CFG_H_PRIORITY_POS 17 | ||
| 231 | #define CH_CFG_H_HS_SEL_DST_POS 4 | ||
| 232 | #define CH_CFG_H_HS_SEL_SRC_POS 3 | ||
| 233 | enum { | ||
| 234 | DWAXIDMAC_HS_SEL_HW = 0, | ||
| 235 | DWAXIDMAC_HS_SEL_SW | ||
| 236 | }; | ||
| 237 | |||
| 238 | #define CH_CFG_H_TT_FC_POS 0 | ||
| 239 | enum { | ||
| 240 | DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC = 0, | ||
| 241 | DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC, | ||
| 242 | DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC, | ||
| 243 | DWAXIDMAC_TT_FC_PER_TO_PER_DMAC, | ||
| 244 | DWAXIDMAC_TT_FC_PER_TO_MEM_SRC, | ||
| 245 | DWAXIDMAC_TT_FC_PER_TO_PER_SRC, | ||
| 246 | DWAXIDMAC_TT_FC_MEM_TO_PER_DST, | ||
| 247 | DWAXIDMAC_TT_FC_PER_TO_PER_DST | ||
| 248 | }; | ||
| 249 | |||
| 250 | /* CH_CFG_L */ | ||
| 251 | #define CH_CFG_L_DST_MULTBLK_TYPE_POS 2 | ||
| 252 | #define CH_CFG_L_SRC_MULTBLK_TYPE_POS 0 | ||
| 253 | enum { | ||
| 254 | DWAXIDMAC_MBLK_TYPE_CONTIGUOUS = 0, | ||
| 255 | DWAXIDMAC_MBLK_TYPE_RELOAD, | ||
| 256 | DWAXIDMAC_MBLK_TYPE_SHADOW_REG, | ||
| 257 | DWAXIDMAC_MBLK_TYPE_LL | ||
| 258 | }; | ||
| 259 | |||
| 260 | /** | ||
| 261 | * DW AXI DMA channel interrupts | ||
| 262 | * | ||
| 263 | * @DWAXIDMAC_IRQ_NONE: Bitmask of no one interrupt | ||
| 264 | * @DWAXIDMAC_IRQ_BLOCK_TRF: Block transfer complete | ||
| 265 | * @DWAXIDMAC_IRQ_DMA_TRF: Dma transfer complete | ||
| 266 | * @DWAXIDMAC_IRQ_SRC_TRAN: Source transaction complete | ||
| 267 | * @DWAXIDMAC_IRQ_DST_TRAN: Destination transaction complete | ||
| 268 | * @DWAXIDMAC_IRQ_SRC_DEC_ERR: Source decode error | ||
| 269 | * @DWAXIDMAC_IRQ_DST_DEC_ERR: Destination decode error | ||
| 270 | * @DWAXIDMAC_IRQ_SRC_SLV_ERR: Source slave error | ||
| 271 | * @DWAXIDMAC_IRQ_DST_SLV_ERR: Destination slave error | ||
| 272 | * @DWAXIDMAC_IRQ_LLI_RD_DEC_ERR: LLI read decode error | ||
| 273 | * @DWAXIDMAC_IRQ_LLI_WR_DEC_ERR: LLI write decode error | ||
| 274 | * @DWAXIDMAC_IRQ_LLI_RD_SLV_ERR: LLI read slave error | ||
| 275 | * @DWAXIDMAC_IRQ_LLI_WR_SLV_ERR: LLI write slave error | ||
| 276 | * @DWAXIDMAC_IRQ_INVALID_ERR: LLI invalid error or Shadow register error | ||
| 277 | * @DWAXIDMAC_IRQ_MULTIBLKTYPE_ERR: Slave Interface Multiblock type error | ||
| 278 | * @DWAXIDMAC_IRQ_DEC_ERR: Slave Interface decode error | ||
| 279 | * @DWAXIDMAC_IRQ_WR2RO_ERR: Slave Interface write to read only error | ||
| 280 | * @DWAXIDMAC_IRQ_RD2RWO_ERR: Slave Interface read to write only error | ||
| 281 | * @DWAXIDMAC_IRQ_WRONCHEN_ERR: Slave Interface write to channel error | ||
| 282 | * @DWAXIDMAC_IRQ_SHADOWREG_ERR: Slave Interface shadow reg error | ||
| 283 | * @DWAXIDMAC_IRQ_WRONHOLD_ERR: Slave Interface hold error | ||
| 284 | * @DWAXIDMAC_IRQ_LOCK_CLEARED: Lock Cleared Status | ||
| 285 | * @DWAXIDMAC_IRQ_SRC_SUSPENDED: Source Suspended Status | ||
| 286 | * @DWAXIDMAC_IRQ_SUSPENDED: Channel Suspended Status | ||
| 287 | * @DWAXIDMAC_IRQ_DISABLED: Channel Disabled Status | ||
| 288 | * @DWAXIDMAC_IRQ_ABORTED: Channel Aborted Status | ||
| 289 | * @DWAXIDMAC_IRQ_ALL_ERR: Bitmask of all error interrupts | ||
| 290 | * @DWAXIDMAC_IRQ_ALL: Bitmask of all interrupts | ||
| 291 | */ | ||
| 292 | enum { | ||
| 293 | DWAXIDMAC_IRQ_NONE = 0, | ||
| 294 | DWAXIDMAC_IRQ_BLOCK_TRF = BIT(0), | ||
| 295 | DWAXIDMAC_IRQ_DMA_TRF = BIT(1), | ||
| 296 | DWAXIDMAC_IRQ_SRC_TRAN = BIT(3), | ||
| 297 | DWAXIDMAC_IRQ_DST_TRAN = BIT(4), | ||
| 298 | DWAXIDMAC_IRQ_SRC_DEC_ERR = BIT(5), | ||
| 299 | DWAXIDMAC_IRQ_DST_DEC_ERR = BIT(6), | ||
| 300 | DWAXIDMAC_IRQ_SRC_SLV_ERR = BIT(7), | ||
| 301 | DWAXIDMAC_IRQ_DST_SLV_ERR = BIT(8), | ||
| 302 | DWAXIDMAC_IRQ_LLI_RD_DEC_ERR = BIT(9), | ||
| 303 | DWAXIDMAC_IRQ_LLI_WR_DEC_ERR = BIT(10), | ||
| 304 | DWAXIDMAC_IRQ_LLI_RD_SLV_ERR = BIT(11), | ||
| 305 | DWAXIDMAC_IRQ_LLI_WR_SLV_ERR = BIT(12), | ||
| 306 | DWAXIDMAC_IRQ_INVALID_ERR = BIT(13), | ||
| 307 | DWAXIDMAC_IRQ_MULTIBLKTYPE_ERR = BIT(14), | ||
| 308 | DWAXIDMAC_IRQ_DEC_ERR = BIT(16), | ||
| 309 | DWAXIDMAC_IRQ_WR2RO_ERR = BIT(17), | ||
| 310 | DWAXIDMAC_IRQ_RD2RWO_ERR = BIT(18), | ||
| 311 | DWAXIDMAC_IRQ_WRONCHEN_ERR = BIT(19), | ||
| 312 | DWAXIDMAC_IRQ_SHADOWREG_ERR = BIT(20), | ||
| 313 | DWAXIDMAC_IRQ_WRONHOLD_ERR = BIT(21), | ||
| 314 | DWAXIDMAC_IRQ_LOCK_CLEARED = BIT(27), | ||
| 315 | DWAXIDMAC_IRQ_SRC_SUSPENDED = BIT(28), | ||
| 316 | DWAXIDMAC_IRQ_SUSPENDED = BIT(29), | ||
| 317 | DWAXIDMAC_IRQ_DISABLED = BIT(30), | ||
| 318 | DWAXIDMAC_IRQ_ABORTED = BIT(31), | ||
| 319 | DWAXIDMAC_IRQ_ALL_ERR = (GENMASK(21, 16) | GENMASK(14, 5)), | ||
| 320 | DWAXIDMAC_IRQ_ALL = GENMASK(31, 0) | ||
| 321 | }; | ||
| 322 | |||
| 323 | enum { | ||
| 324 | DWAXIDMAC_TRANS_WIDTH_8 = 0, | ||
| 325 | DWAXIDMAC_TRANS_WIDTH_16, | ||
| 326 | DWAXIDMAC_TRANS_WIDTH_32, | ||
| 327 | DWAXIDMAC_TRANS_WIDTH_64, | ||
| 328 | DWAXIDMAC_TRANS_WIDTH_128, | ||
| 329 | DWAXIDMAC_TRANS_WIDTH_256, | ||
| 330 | DWAXIDMAC_TRANS_WIDTH_512, | ||
| 331 | DWAXIDMAC_TRANS_WIDTH_MAX = DWAXIDMAC_TRANS_WIDTH_512 | ||
| 332 | }; | ||
| 333 | |||
| 334 | #endif /* _AXI_DMA_PLATFORM_H */ | ||
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 948df1ab5f1a..85ea92fcea54 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c | |||
| @@ -1876,6 +1876,11 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode) | |||
| 1876 | 1876 | ||
| 1877 | if (memcpy_channels) { | 1877 | if (memcpy_channels) { |
| 1878 | m_ddev = devm_kzalloc(ecc->dev, sizeof(*m_ddev), GFP_KERNEL); | 1878 | m_ddev = devm_kzalloc(ecc->dev, sizeof(*m_ddev), GFP_KERNEL); |
| 1879 | if (!m_ddev) { | ||
| 1880 | dev_warn(ecc->dev, "memcpy is disabled due to OoM\n"); | ||
| 1881 | memcpy_channels = NULL; | ||
| 1882 | goto ch_setup; | ||
| 1883 | } | ||
| 1879 | ecc->dma_memcpy = m_ddev; | 1884 | ecc->dma_memcpy = m_ddev; |
| 1880 | 1885 | ||
| 1881 | dma_cap_zero(m_ddev->cap_mask); | 1886 | dma_cap_zero(m_ddev->cap_mask); |
| @@ -1903,6 +1908,7 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode) | |||
| 1903 | dev_info(ecc->dev, "memcpy is disabled\n"); | 1908 | dev_info(ecc->dev, "memcpy is disabled\n"); |
| 1904 | } | 1909 | } |
| 1905 | 1910 | ||
| 1911 | ch_setup: | ||
| 1906 | for (i = 0; i < ecc->num_channels; i++) { | 1912 | for (i = 0; i < ecc->num_channels; i++) { |
| 1907 | struct edma_chan *echan = &ecc->slave_chans[i]; | 1913 | struct edma_chan *echan = &ecc->slave_chans[i]; |
| 1908 | echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i); | 1914 | echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i); |
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index e7db24c67030..ccd03c3cedfe 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
| @@ -338,6 +338,7 @@ struct sdma_channel { | |||
| 338 | unsigned int chn_real_count; | 338 | unsigned int chn_real_count; |
| 339 | struct tasklet_struct tasklet; | 339 | struct tasklet_struct tasklet; |
| 340 | struct imx_dma_data data; | 340 | struct imx_dma_data data; |
| 341 | bool enabled; | ||
| 341 | }; | 342 | }; |
| 342 | 343 | ||
| 343 | #define IMX_DMA_SG_LOOP BIT(0) | 344 | #define IMX_DMA_SG_LOOP BIT(0) |
| @@ -596,7 +597,14 @@ static int sdma_config_ownership(struct sdma_channel *sdmac, | |||
| 596 | 597 | ||
| 597 | static void sdma_enable_channel(struct sdma_engine *sdma, int channel) | 598 | static void sdma_enable_channel(struct sdma_engine *sdma, int channel) |
| 598 | { | 599 | { |
| 600 | unsigned long flags; | ||
| 601 | struct sdma_channel *sdmac = &sdma->channel[channel]; | ||
| 602 | |||
| 599 | writel(BIT(channel), sdma->regs + SDMA_H_START); | 603 | writel(BIT(channel), sdma->regs + SDMA_H_START); |
| 604 | |||
| 605 | spin_lock_irqsave(&sdmac->lock, flags); | ||
| 606 | sdmac->enabled = true; | ||
| 607 | spin_unlock_irqrestore(&sdmac->lock, flags); | ||
| 600 | } | 608 | } |
| 601 | 609 | ||
| 602 | /* | 610 | /* |
| @@ -685,6 +693,14 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac) | |||
| 685 | struct sdma_buffer_descriptor *bd; | 693 | struct sdma_buffer_descriptor *bd; |
| 686 | int error = 0; | 694 | int error = 0; |
| 687 | enum dma_status old_status = sdmac->status; | 695 | enum dma_status old_status = sdmac->status; |
| 696 | unsigned long flags; | ||
| 697 | |||
| 698 | spin_lock_irqsave(&sdmac->lock, flags); | ||
| 699 | if (!sdmac->enabled) { | ||
| 700 | spin_unlock_irqrestore(&sdmac->lock, flags); | ||
| 701 | return; | ||
| 702 | } | ||
| 703 | spin_unlock_irqrestore(&sdmac->lock, flags); | ||
| 688 | 704 | ||
| 689 | /* | 705 | /* |
| 690 | * loop mode. Iterate over descriptors, re-setup them and | 706 | * loop mode. Iterate over descriptors, re-setup them and |
| @@ -938,10 +954,15 @@ static int sdma_disable_channel(struct dma_chan *chan) | |||
| 938 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 954 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
| 939 | struct sdma_engine *sdma = sdmac->sdma; | 955 | struct sdma_engine *sdma = sdmac->sdma; |
| 940 | int channel = sdmac->channel; | 956 | int channel = sdmac->channel; |
| 957 | unsigned long flags; | ||
| 941 | 958 | ||
| 942 | writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP); | 959 | writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP); |
| 943 | sdmac->status = DMA_ERROR; | 960 | sdmac->status = DMA_ERROR; |
| 944 | 961 | ||
| 962 | spin_lock_irqsave(&sdmac->lock, flags); | ||
| 963 | sdmac->enabled = false; | ||
| 964 | spin_unlock_irqrestore(&sdmac->lock, flags); | ||
| 965 | |||
| 945 | return 0; | 966 | return 0; |
| 946 | } | 967 | } |
| 947 | 968 | ||
diff --git a/drivers/dma/mediatek/Kconfig b/drivers/dma/mediatek/Kconfig new file mode 100644 index 000000000000..27bac0bba09e --- /dev/null +++ b/drivers/dma/mediatek/Kconfig | |||
| @@ -0,0 +1,13 @@ | |||
| 1 | |||
| 2 | config MTK_HSDMA | ||
| 3 | tristate "MediaTek High-Speed DMA controller support" | ||
| 4 | depends on ARCH_MEDIATEK || COMPILE_TEST | ||
| 5 | select DMA_ENGINE | ||
| 6 | select DMA_VIRTUAL_CHANNELS | ||
| 7 | ---help--- | ||
| 8 | Enable support for High-Speed DMA controller on MediaTek | ||
| 9 | SoCs. | ||
| 10 | |||
| 11 | This controller provides the channels which is dedicated to | ||
| 12 | memory-to-memory transfer to offload from CPU through ring- | ||
| 13 | based descriptor management. | ||
diff --git a/drivers/dma/mediatek/Makefile b/drivers/dma/mediatek/Makefile new file mode 100644 index 000000000000..6e778f842f01 --- /dev/null +++ b/drivers/dma/mediatek/Makefile | |||
| @@ -0,0 +1 @@ | |||
| obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o | |||
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c new file mode 100644 index 000000000000..b7ec56ae02a6 --- /dev/null +++ b/drivers/dma/mediatek/mtk-hsdma.c | |||
| @@ -0,0 +1,1056 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (c) 2017-2018 MediaTek Inc. | ||
| 3 | |||
| 4 | /* | ||
| 5 | * Driver for MediaTek High-Speed DMA Controller | ||
| 6 | * | ||
| 7 | * Author: Sean Wang <sean.wang@mediatek.com> | ||
| 8 | * | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/bitops.h> | ||
| 12 | #include <linux/clk.h> | ||
| 13 | #include <linux/dmaengine.h> | ||
| 14 | #include <linux/dma-mapping.h> | ||
| 15 | #include <linux/err.h> | ||
| 16 | #include <linux/iopoll.h> | ||
| 17 | #include <linux/list.h> | ||
| 18 | #include <linux/module.h> | ||
| 19 | #include <linux/of.h> | ||
| 20 | #include <linux/of_device.h> | ||
| 21 | #include <linux/of_dma.h> | ||
| 22 | #include <linux/platform_device.h> | ||
| 23 | #include <linux/pm_runtime.h> | ||
| 24 | #include <linux/refcount.h> | ||
| 25 | #include <linux/slab.h> | ||
| 26 | |||
| 27 | #include "../virt-dma.h" | ||
| 28 | |||
| 29 | #define MTK_HSDMA_USEC_POLL 20 | ||
| 30 | #define MTK_HSDMA_TIMEOUT_POLL 200000 | ||
| 31 | #define MTK_HSDMA_DMA_BUSWIDTHS BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | ||
| 32 | |||
| 33 | /* The default number of virtual channel */ | ||
| 34 | #define MTK_HSDMA_NR_VCHANS 3 | ||
| 35 | |||
| 36 | /* Only one physical channel supported */ | ||
| 37 | #define MTK_HSDMA_NR_MAX_PCHANS 1 | ||
| 38 | |||
| 39 | /* Macro for physical descriptor (PD) manipulation */ | ||
| 40 | /* The number of PD which must be 2 of power */ | ||
| 41 | #define MTK_DMA_SIZE 64 | ||
| 42 | #define MTK_HSDMA_NEXT_DESP_IDX(x, y) (((x) + 1) & ((y) - 1)) | ||
| 43 | #define MTK_HSDMA_LAST_DESP_IDX(x, y) (((x) - 1) & ((y) - 1)) | ||
| 44 | #define MTK_HSDMA_MAX_LEN 0x3f80 | ||
| 45 | #define MTK_HSDMA_ALIGN_SIZE 4 | ||
| 46 | #define MTK_HSDMA_PLEN_MASK 0x3fff | ||
| 47 | #define MTK_HSDMA_DESC_PLEN(x) (((x) & MTK_HSDMA_PLEN_MASK) << 16) | ||
| 48 | #define MTK_HSDMA_DESC_PLEN_GET(x) (((x) >> 16) & MTK_HSDMA_PLEN_MASK) | ||
| 49 | |||
| 50 | /* Registers for underlying ring manipulation */ | ||
| 51 | #define MTK_HSDMA_TX_BASE 0x0 | ||
| 52 | #define MTK_HSDMA_TX_CNT 0x4 | ||
| 53 | #define MTK_HSDMA_TX_CPU 0x8 | ||
| 54 | #define MTK_HSDMA_TX_DMA 0xc | ||
| 55 | #define MTK_HSDMA_RX_BASE 0x100 | ||
| 56 | #define MTK_HSDMA_RX_CNT 0x104 | ||
| 57 | #define MTK_HSDMA_RX_CPU 0x108 | ||
| 58 | #define MTK_HSDMA_RX_DMA 0x10c | ||
| 59 | |||
| 60 | /* Registers for global setup */ | ||
| 61 | #define MTK_HSDMA_GLO 0x204 | ||
| 62 | #define MTK_HSDMA_GLO_MULTI_DMA BIT(10) | ||
| 63 | #define MTK_HSDMA_TX_WB_DDONE BIT(6) | ||
| 64 | #define MTK_HSDMA_BURST_64BYTES (0x2 << 4) | ||
| 65 | #define MTK_HSDMA_GLO_RX_BUSY BIT(3) | ||
| 66 | #define MTK_HSDMA_GLO_RX_DMA BIT(2) | ||
| 67 | #define MTK_HSDMA_GLO_TX_BUSY BIT(1) | ||
| 68 | #define MTK_HSDMA_GLO_TX_DMA BIT(0) | ||
| 69 | #define MTK_HSDMA_GLO_DMA (MTK_HSDMA_GLO_TX_DMA | \ | ||
| 70 | MTK_HSDMA_GLO_RX_DMA) | ||
| 71 | #define MTK_HSDMA_GLO_BUSY (MTK_HSDMA_GLO_RX_BUSY | \ | ||
| 72 | MTK_HSDMA_GLO_TX_BUSY) | ||
| 73 | #define MTK_HSDMA_GLO_DEFAULT (MTK_HSDMA_GLO_TX_DMA | \ | ||
| 74 | MTK_HSDMA_GLO_RX_DMA | \ | ||
| 75 | MTK_HSDMA_TX_WB_DDONE | \ | ||
| 76 | MTK_HSDMA_BURST_64BYTES | \ | ||
| 77 | MTK_HSDMA_GLO_MULTI_DMA) | ||
| 78 | |||
| 79 | /* Registers for reset */ | ||
| 80 | #define MTK_HSDMA_RESET 0x208 | ||
| 81 | #define MTK_HSDMA_RST_TX BIT(0) | ||
| 82 | #define MTK_HSDMA_RST_RX BIT(16) | ||
| 83 | |||
| 84 | /* Registers for interrupt control */ | ||
| 85 | #define MTK_HSDMA_DLYINT 0x20c | ||
| 86 | #define MTK_HSDMA_RXDLY_INT_EN BIT(15) | ||
| 87 | |||
| 88 | /* Interrupt fires when the pending number's more than the specified */ | ||
| 89 | #define MTK_HSDMA_RXMAX_PINT(x) (((x) & 0x7f) << 8) | ||
| 90 | |||
| 91 | /* Interrupt fires when the pending time's more than the specified in 20 us */ | ||
| 92 | #define MTK_HSDMA_RXMAX_PTIME(x) ((x) & 0x7f) | ||
| 93 | #define MTK_HSDMA_DLYINT_DEFAULT (MTK_HSDMA_RXDLY_INT_EN | \ | ||
| 94 | MTK_HSDMA_RXMAX_PINT(20) | \ | ||
| 95 | MTK_HSDMA_RXMAX_PTIME(20)) | ||
| 96 | #define MTK_HSDMA_INT_STATUS 0x220 | ||
| 97 | #define MTK_HSDMA_INT_ENABLE 0x228 | ||
| 98 | #define MTK_HSDMA_INT_RXDONE BIT(16) | ||
| 99 | |||
| 100 | enum mtk_hsdma_vdesc_flag { | ||
| 101 | MTK_HSDMA_VDESC_FINISHED = 0x01, | ||
| 102 | }; | ||
| 103 | |||
| 104 | #define IS_MTK_HSDMA_VDESC_FINISHED(x) ((x) == MTK_HSDMA_VDESC_FINISHED) | ||
| 105 | |||
| 106 | /** | ||
| 107 | * struct mtk_hsdma_pdesc - This is the struct holding info describing physical | ||
| 108 | * descriptor (PD) and its placement must be kept at | ||
| 109 | * 4-bytes alignment in little endian order. | ||
| 110 | * @desc[1-4]: The control pad used to indicate hardware how to | ||
| 111 | * deal with the descriptor such as source and | ||
| 112 | * destination address and data length. The maximum | ||
| 113 | * data length each pdesc can handle is 0x3f80 bytes | ||
| 114 | */ | ||
| 115 | struct mtk_hsdma_pdesc { | ||
| 116 | __le32 desc1; | ||
| 117 | __le32 desc2; | ||
| 118 | __le32 desc3; | ||
| 119 | __le32 desc4; | ||
| 120 | } __packed __aligned(4); | ||
| 121 | |||
| 122 | /** | ||
| 123 | * struct mtk_hsdma_vdesc - This is the struct holding info describing virtual | ||
| 124 | * descriptor (VD) | ||
| 125 | * @vd: An instance for struct virt_dma_desc | ||
| 126 | * @len: The total data size device wants to move | ||
| 127 | * @residue: The remaining data size device will move | ||
| 128 | * @dest: The destination address device wants to move to | ||
| 129 | * @src: The source address device wants to move from | ||
| 130 | */ | ||
| 131 | struct mtk_hsdma_vdesc { | ||
| 132 | struct virt_dma_desc vd; | ||
| 133 | size_t len; | ||
| 134 | size_t residue; | ||
| 135 | dma_addr_t dest; | ||
| 136 | dma_addr_t src; | ||
| 137 | }; | ||
| 138 | |||
| 139 | /** | ||
| 140 | * struct mtk_hsdma_cb - This is the struct holding extra info required for RX | ||
| 141 | * ring to know what relevant VD the the PD is being | ||
| 142 | * mapped to. | ||
| 143 | * @vd: Pointer to the relevant VD. | ||
| 144 | * @flag: Flag indicating what action should be taken when VD | ||
| 145 | * is completed. | ||
| 146 | */ | ||
| 147 | struct mtk_hsdma_cb { | ||
| 148 | struct virt_dma_desc *vd; | ||
| 149 | enum mtk_hsdma_vdesc_flag flag; | ||
| 150 | }; | ||
| 151 | |||
| 152 | /** | ||
| 153 | * struct mtk_hsdma_ring - This struct holds info describing underlying ring | ||
| 154 | * space | ||
| 155 | * @txd: The descriptor TX ring which describes DMA source | ||
| 156 | * information | ||
| 157 | * @rxd: The descriptor RX ring which describes DMA | ||
| 158 | * destination information | ||
| 159 | * @cb: The extra information pointed at by RX ring | ||
| 160 | * @tphys: The physical addr of TX ring | ||
| 161 | * @rphys: The physical addr of RX ring | ||
| 162 | * @cur_tptr: Pointer to the next free descriptor used by the host | ||
| 163 | * @cur_rptr: Pointer to the last done descriptor by the device | ||
| 164 | */ | ||
| 165 | struct mtk_hsdma_ring { | ||
| 166 | struct mtk_hsdma_pdesc *txd; | ||
| 167 | struct mtk_hsdma_pdesc *rxd; | ||
| 168 | struct mtk_hsdma_cb *cb; | ||
| 169 | dma_addr_t tphys; | ||
| 170 | dma_addr_t rphys; | ||
| 171 | u16 cur_tptr; | ||
| 172 | u16 cur_rptr; | ||
| 173 | }; | ||
| 174 | |||
| 175 | /** | ||
| 176 | * struct mtk_hsdma_pchan - This is the struct holding info describing physical | ||
| 177 | * channel (PC) | ||
| 178 | * @ring: An instance for the underlying ring | ||
| 179 | * @sz_ring: Total size allocated for the ring | ||
| 180 | * @nr_free: Total number of free rooms in the ring. It would | ||
| 181 | * be accessed and updated frequently between IRQ | ||
| 182 | * context and user context to reflect whether ring | ||
| 183 | * can accept requests from VD. | ||
| 184 | */ | ||
| 185 | struct mtk_hsdma_pchan { | ||
| 186 | struct mtk_hsdma_ring ring; | ||
| 187 | size_t sz_ring; | ||
| 188 | atomic_t nr_free; | ||
| 189 | }; | ||
| 190 | |||
| 191 | /** | ||
| 192 | * struct mtk_hsdma_vchan - This is the struct holding info describing virtual | ||
| 193 | * channel (VC) | ||
| 194 | * @vc: An instance for struct virt_dma_chan | ||
| 195 | * @issue_completion: The wait for all issued descriptors completited | ||
| 196 | * @issue_synchronize: Bool indicating channel synchronization starts | ||
| 197 | * @desc_hw_processing: List those descriptors the hardware is processing, | ||
| 198 | * which is protected by vc.lock | ||
| 199 | */ | ||
| 200 | struct mtk_hsdma_vchan { | ||
| 201 | struct virt_dma_chan vc; | ||
| 202 | struct completion issue_completion; | ||
| 203 | bool issue_synchronize; | ||
| 204 | struct list_head desc_hw_processing; | ||
| 205 | }; | ||
| 206 | |||
| 207 | /** | ||
| 208 | * struct mtk_hsdma_soc - This is the struct holding differences among SoCs | ||
| 209 | * @ddone: Bit mask for DDONE | ||
| 210 | * @ls0: Bit mask for LS0 | ||
| 211 | */ | ||
| 212 | struct mtk_hsdma_soc { | ||
| 213 | __le32 ddone; | ||
| 214 | __le32 ls0; | ||
| 215 | }; | ||
| 216 | |||
| 217 | /** | ||
| 218 | * struct mtk_hsdma_device - This is the struct holding info describing HSDMA | ||
| 219 | * device | ||
| 220 | * @ddev: An instance for struct dma_device | ||
| 221 | * @base: The mapped register I/O base | ||
| 222 | * @clk: The clock that device internal is using | ||
| 223 | * @irq: The IRQ that device are using | ||
| 224 | * @dma_requests: The number of VCs the device supports to | ||
| 225 | * @vc: The pointer to all available VCs | ||
| 226 | * @pc: The pointer to the underlying PC | ||
| 227 | * @pc_refcnt: Track how many VCs are using the PC | ||
| 228 | * @lock: Lock protect agaisting multiple VCs access PC | ||
| 229 | * @soc: The pointer to area holding differences among | ||
| 230 | * vaious platform | ||
| 231 | */ | ||
| 232 | struct mtk_hsdma_device { | ||
| 233 | struct dma_device ddev; | ||
| 234 | void __iomem *base; | ||
| 235 | struct clk *clk; | ||
| 236 | u32 irq; | ||
| 237 | |||
| 238 | u32 dma_requests; | ||
| 239 | struct mtk_hsdma_vchan *vc; | ||
| 240 | struct mtk_hsdma_pchan *pc; | ||
| 241 | refcount_t pc_refcnt; | ||
| 242 | |||
| 243 | /* Lock used to protect against multiple VCs access PC */ | ||
| 244 | spinlock_t lock; | ||
| 245 | |||
| 246 | const struct mtk_hsdma_soc *soc; | ||
| 247 | }; | ||
| 248 | |||
| 249 | static struct mtk_hsdma_device *to_hsdma_dev(struct dma_chan *chan) | ||
| 250 | { | ||
| 251 | return container_of(chan->device, struct mtk_hsdma_device, ddev); | ||
| 252 | } | ||
| 253 | |||
| 254 | static inline struct mtk_hsdma_vchan *to_hsdma_vchan(struct dma_chan *chan) | ||
| 255 | { | ||
| 256 | return container_of(chan, struct mtk_hsdma_vchan, vc.chan); | ||
| 257 | } | ||
| 258 | |||
| 259 | static struct mtk_hsdma_vdesc *to_hsdma_vdesc(struct virt_dma_desc *vd) | ||
| 260 | { | ||
| 261 | return container_of(vd, struct mtk_hsdma_vdesc, vd); | ||
| 262 | } | ||
| 263 | |||
| 264 | static struct device *hsdma2dev(struct mtk_hsdma_device *hsdma) | ||
| 265 | { | ||
| 266 | return hsdma->ddev.dev; | ||
| 267 | } | ||
| 268 | |||
| 269 | static u32 mtk_dma_read(struct mtk_hsdma_device *hsdma, u32 reg) | ||
| 270 | { | ||
| 271 | return readl(hsdma->base + reg); | ||
| 272 | } | ||
| 273 | |||
| 274 | static void mtk_dma_write(struct mtk_hsdma_device *hsdma, u32 reg, u32 val) | ||
| 275 | { | ||
| 276 | writel(val, hsdma->base + reg); | ||
| 277 | } | ||
| 278 | |||
| 279 | static void mtk_dma_rmw(struct mtk_hsdma_device *hsdma, u32 reg, | ||
| 280 | u32 mask, u32 set) | ||
| 281 | { | ||
| 282 | u32 val; | ||
| 283 | |||
| 284 | val = mtk_dma_read(hsdma, reg); | ||
| 285 | val &= ~mask; | ||
| 286 | val |= set; | ||
| 287 | mtk_dma_write(hsdma, reg, val); | ||
| 288 | } | ||
| 289 | |||
| 290 | static void mtk_dma_set(struct mtk_hsdma_device *hsdma, u32 reg, u32 val) | ||
| 291 | { | ||
| 292 | mtk_dma_rmw(hsdma, reg, 0, val); | ||
| 293 | } | ||
| 294 | |||
| 295 | static void mtk_dma_clr(struct mtk_hsdma_device *hsdma, u32 reg, u32 val) | ||
| 296 | { | ||
| 297 | mtk_dma_rmw(hsdma, reg, val, 0); | ||
| 298 | } | ||
| 299 | |||
| 300 | static void mtk_hsdma_vdesc_free(struct virt_dma_desc *vd) | ||
| 301 | { | ||
| 302 | kfree(container_of(vd, struct mtk_hsdma_vdesc, vd)); | ||
| 303 | } | ||
| 304 | |||
| 305 | static int mtk_hsdma_busy_wait(struct mtk_hsdma_device *hsdma) | ||
| 306 | { | ||
| 307 | u32 status = 0; | ||
| 308 | |||
| 309 | return readl_poll_timeout(hsdma->base + MTK_HSDMA_GLO, status, | ||
| 310 | !(status & MTK_HSDMA_GLO_BUSY), | ||
| 311 | MTK_HSDMA_USEC_POLL, | ||
| 312 | MTK_HSDMA_TIMEOUT_POLL); | ||
| 313 | } | ||
| 314 | |||
| 315 | static int mtk_hsdma_alloc_pchan(struct mtk_hsdma_device *hsdma, | ||
| 316 | struct mtk_hsdma_pchan *pc) | ||
| 317 | { | ||
| 318 | struct mtk_hsdma_ring *ring = &pc->ring; | ||
| 319 | int err; | ||
| 320 | |||
| 321 | memset(pc, 0, sizeof(*pc)); | ||
| 322 | |||
| 323 | /* | ||
| 324 | * Allocate ring space where [0 ... MTK_DMA_SIZE - 1] is for TX ring | ||
| 325 | * and [MTK_DMA_SIZE ... 2 * MTK_DMA_SIZE - 1] is for RX ring. | ||
| 326 | */ | ||
| 327 | pc->sz_ring = 2 * MTK_DMA_SIZE * sizeof(*ring->txd); | ||
| 328 | ring->txd = dma_zalloc_coherent(hsdma2dev(hsdma), pc->sz_ring, | ||
| 329 | &ring->tphys, GFP_NOWAIT); | ||
| 330 | if (!ring->txd) | ||
| 331 | return -ENOMEM; | ||
| 332 | |||
| 333 | ring->rxd = &ring->txd[MTK_DMA_SIZE]; | ||
| 334 | ring->rphys = ring->tphys + MTK_DMA_SIZE * sizeof(*ring->txd); | ||
| 335 | ring->cur_tptr = 0; | ||
| 336 | ring->cur_rptr = MTK_DMA_SIZE - 1; | ||
| 337 | |||
| 338 | ring->cb = kcalloc(MTK_DMA_SIZE, sizeof(*ring->cb), GFP_NOWAIT); | ||
| 339 | if (!ring->cb) { | ||
| 340 | err = -ENOMEM; | ||
| 341 | goto err_free_dma; | ||
| 342 | } | ||
| 343 | |||
| 344 | atomic_set(&pc->nr_free, MTK_DMA_SIZE - 1); | ||
| 345 | |||
| 346 | /* Disable HSDMA and wait for the completion */ | ||
| 347 | mtk_dma_clr(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA); | ||
| 348 | err = mtk_hsdma_busy_wait(hsdma); | ||
| 349 | if (err) | ||
| 350 | goto err_free_cb; | ||
| 351 | |||
| 352 | /* Reset */ | ||
| 353 | mtk_dma_set(hsdma, MTK_HSDMA_RESET, | ||
| 354 | MTK_HSDMA_RST_TX | MTK_HSDMA_RST_RX); | ||
| 355 | mtk_dma_clr(hsdma, MTK_HSDMA_RESET, | ||
| 356 | MTK_HSDMA_RST_TX | MTK_HSDMA_RST_RX); | ||
| 357 | |||
| 358 | /* Setup HSDMA initial pointer in the ring */ | ||
| 359 | mtk_dma_write(hsdma, MTK_HSDMA_TX_BASE, ring->tphys); | ||
| 360 | mtk_dma_write(hsdma, MTK_HSDMA_TX_CNT, MTK_DMA_SIZE); | ||
| 361 | mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, ring->cur_tptr); | ||
| 362 | mtk_dma_write(hsdma, MTK_HSDMA_TX_DMA, 0); | ||
| 363 | mtk_dma_write(hsdma, MTK_HSDMA_RX_BASE, ring->rphys); | ||
| 364 | mtk_dma_write(hsdma, MTK_HSDMA_RX_CNT, MTK_DMA_SIZE); | ||
| 365 | mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, ring->cur_rptr); | ||
| 366 | mtk_dma_write(hsdma, MTK_HSDMA_RX_DMA, 0); | ||
| 367 | |||
| 368 | /* Enable HSDMA */ | ||
| 369 | mtk_dma_set(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA); | ||
| 370 | |||
| 371 | /* Setup delayed interrupt */ | ||
| 372 | mtk_dma_write(hsdma, MTK_HSDMA_DLYINT, MTK_HSDMA_DLYINT_DEFAULT); | ||
| 373 | |||
| 374 | /* Enable interrupt */ | ||
| 375 | mtk_dma_set(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE); | ||
| 376 | |||
| 377 | return 0; | ||
| 378 | |||
| 379 | err_free_cb: | ||
| 380 | kfree(ring->cb); | ||
| 381 | |||
| 382 | err_free_dma: | ||
| 383 | dma_free_coherent(hsdma2dev(hsdma), | ||
| 384 | pc->sz_ring, ring->txd, ring->tphys); | ||
| 385 | return err; | ||
| 386 | } | ||
| 387 | |||
| 388 | static void mtk_hsdma_free_pchan(struct mtk_hsdma_device *hsdma, | ||
| 389 | struct mtk_hsdma_pchan *pc) | ||
| 390 | { | ||
| 391 | struct mtk_hsdma_ring *ring = &pc->ring; | ||
| 392 | |||
| 393 | /* Disable HSDMA and then wait for the completion */ | ||
| 394 | mtk_dma_clr(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DMA); | ||
| 395 | mtk_hsdma_busy_wait(hsdma); | ||
| 396 | |||
| 397 | /* Reset pointer in the ring */ | ||
| 398 | mtk_dma_clr(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE); | ||
| 399 | mtk_dma_write(hsdma, MTK_HSDMA_TX_BASE, 0); | ||
| 400 | mtk_dma_write(hsdma, MTK_HSDMA_TX_CNT, 0); | ||
| 401 | mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, 0); | ||
| 402 | mtk_dma_write(hsdma, MTK_HSDMA_RX_BASE, 0); | ||
| 403 | mtk_dma_write(hsdma, MTK_HSDMA_RX_CNT, 0); | ||
| 404 | mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, MTK_DMA_SIZE - 1); | ||
| 405 | |||
| 406 | kfree(ring->cb); | ||
| 407 | |||
| 408 | dma_free_coherent(hsdma2dev(hsdma), | ||
| 409 | pc->sz_ring, ring->txd, ring->tphys); | ||
| 410 | } | ||
| 411 | |||
/*
 * Map (part of) a virtual descriptor (VD) onto free physical descriptors
 * (PDs) of the shared ring and kick the hardware.
 *
 * @hsdma: the HSDMA device
 * @pc:    the (single) physical channel whose ring is used
 * @hvd:   the virtual descriptor to issue; hvd->src/dest/len are advanced
 *         by however much was mapped
 *
 * Returns 0 when at least one PD was queued (hvd->len > 0 afterwards means
 * the VD was only partially mapped), or -ENOSPC when the ring is full.
 * May be called from both user and IRQ context.
 */
static int mtk_hsdma_issue_pending_vdesc(struct mtk_hsdma_device *hsdma,
					 struct mtk_hsdma_pchan *pc,
					 struct mtk_hsdma_vdesc *hvd)
{
	struct mtk_hsdma_ring *ring = &pc->ring;
	struct mtk_hsdma_pdesc *txd, *rxd;
	u16 reserved, prev, tlen, num_sgs;
	unsigned long flags;

	/* Protect against the PC being accessed by multiple VCs simultaneously */
	spin_lock_irqsave(&hsdma->lock, flags);

	/*
	 * Reserve rooms, where pc->nr_free is used to track how many free
	 * rooms in the ring are left; it is updated both here (user context)
	 * and in the IRQ reclaim path.
	 */
	num_sgs = DIV_ROUND_UP(hvd->len, MTK_HSDMA_MAX_LEN);
	reserved = min_t(u16, num_sgs, atomic_read(&pc->nr_free));

	if (!reserved) {
		spin_unlock_irqrestore(&hsdma->lock, flags);
		return -ENOSPC;
	}

	atomic_sub(reserved, &pc->nr_free);

	while (reserved--) {
		/* Limit size by PD capability for valid data moving */
		tlen = (hvd->len > MTK_HSDMA_MAX_LEN) ?
		       MTK_HSDMA_MAX_LEN : hvd->len;

		/*
		 * Setup PDs using the remaining VD info mapped on those
		 * reserved rooms. And since RXD is shared memory between the
		 * host and the device allocated by dma_alloc_coherent call,
		 * the helper macro WRITE_ONCE can ensure the data written to
		 * RAM really happens.
		 */
		txd = &ring->txd[ring->cur_tptr];
		WRITE_ONCE(txd->desc1, hvd->src);
		WRITE_ONCE(txd->desc2,
			   hsdma->soc->ls0 | MTK_HSDMA_DESC_PLEN(tlen));

		rxd = &ring->rxd[ring->cur_tptr];
		WRITE_ONCE(rxd->desc1, hvd->dest);
		WRITE_ONCE(rxd->desc2, MTK_HSDMA_DESC_PLEN(tlen));

		/* Associate the VD this PD belongs to (used at completion) */
		ring->cb[ring->cur_tptr].vd = &hvd->vd;

		/* Move forward the pointer of TX ring */
		ring->cur_tptr = MTK_HSDMA_NEXT_DESP_IDX(ring->cur_tptr,
							 MTK_DMA_SIZE);

		/* Update VD with remaining data */
		hvd->src  += tlen;
		hvd->dest += tlen;
		hvd->len  -= tlen;
	}

	/*
	 * Tag the last PD mapped for this VD; the IRQ path uses that flag to
	 * know when the whole VD can be completed.
	 */
	if (!hvd->len) {
		prev = MTK_HSDMA_LAST_DESP_IDX(ring->cur_tptr, MTK_DMA_SIZE);
		ring->cb[prev].flag = MTK_HSDMA_VDESC_FINISHED;
	}

	/* Ensure all descriptor stores are visible before the doorbell write */
	wmb();

	/*
	 * Updating into hardware the pointer of TX ring lets HSDMA take
	 * action for those pending PDs.
	 */
	mtk_dma_write(hsdma, MTK_HSDMA_TX_CPU, ring->cur_tptr);

	spin_unlock_irqrestore(&hsdma->lock, flags);

	return 0;
}
| 494 | |||
/*
 * Push a VC's issued descriptors onto the single shared PC.
 *
 * Caller must hold hvc->vc.lock (asserted below). Called from
 * mtk_hsdma_issue_pending() (user context) and from the IRQ reclaim path
 * once ring rooms have been freed.
 */
static void mtk_hsdma_issue_vchan_pending(struct mtk_hsdma_device *hsdma,
					  struct mtk_hsdma_vchan *hvc)
{
	struct virt_dma_desc *vd, *vd2;
	int err;

	lockdep_assert_held(&hvc->vc.lock);

	list_for_each_entry_safe(vd, vd2, &hvc->vc.desc_issued, node) {
		struct mtk_hsdma_vdesc *hvd;

		hvd = to_hsdma_vdesc(vd);

		/* Map VD into PC; all VCs share a single PC */
		err = mtk_hsdma_issue_pending_vdesc(hsdma, hsdma->pc, hvd);

		/*
		 * Move VD from desc_issued to desc_hw_processing only when
		 * the entire VD fit into available PDs. Otherwise, the
		 * uncompleted VDs stay in list desc_issued and processing
		 * restarts as soon as possible once underlying ring space
		 * gets freed.
		 */
		if (err == -ENOSPC || hvd->len > 0)
			break;

		/*
		 * The extra list desc_hw_processing is used because
		 * hardware can't provide sufficient information allowing us
		 * to know what VDs are still working on the underlying ring.
		 * Through the additional list, it helps us implement
		 * terminate_all, residue calculation and anything else that
		 * needs detailed descriptor status on the hardware.
		 */
		list_move_tail(&vd->node, &hvc->desc_hw_processing);
	}
}
| 532 | |||
| 533 | static void mtk_hsdma_free_rooms_in_ring(struct mtk_hsdma_device *hsdma) | ||
| 534 | { | ||
| 535 | struct mtk_hsdma_vchan *hvc; | ||
| 536 | struct mtk_hsdma_pdesc *rxd; | ||
| 537 | struct mtk_hsdma_vdesc *hvd; | ||
| 538 | struct mtk_hsdma_pchan *pc; | ||
| 539 | struct mtk_hsdma_cb *cb; | ||
| 540 | int i = MTK_DMA_SIZE; | ||
| 541 | __le32 desc2; | ||
| 542 | u32 status; | ||
| 543 | u16 next; | ||
| 544 | |||
| 545 | /* Read IRQ status */ | ||
| 546 | status = mtk_dma_read(hsdma, MTK_HSDMA_INT_STATUS); | ||
| 547 | if (unlikely(!(status & MTK_HSDMA_INT_RXDONE))) | ||
| 548 | goto rx_done; | ||
| 549 | |||
| 550 | pc = hsdma->pc; | ||
| 551 | |||
| 552 | /* | ||
| 553 | * Using a fail-safe loop with iterations of up to MTK_DMA_SIZE to | ||
| 554 | * reclaim these finished descriptors: The most number of PDs the ISR | ||
| 555 | * can handle at one time shouldn't be more than MTK_DMA_SIZE so we | ||
| 556 | * take it as limited count instead of just using a dangerous infinite | ||
| 557 | * poll. | ||
| 558 | */ | ||
| 559 | while (i--) { | ||
| 560 | next = MTK_HSDMA_NEXT_DESP_IDX(pc->ring.cur_rptr, | ||
| 561 | MTK_DMA_SIZE); | ||
| 562 | rxd = &pc->ring.rxd[next]; | ||
| 563 | |||
| 564 | /* | ||
| 565 | * If MTK_HSDMA_DESC_DDONE is no specified, that means data | ||
| 566 | * moving for the PD is still under going. | ||
| 567 | */ | ||
| 568 | desc2 = READ_ONCE(rxd->desc2); | ||
| 569 | if (!(desc2 & hsdma->soc->ddone)) | ||
| 570 | break; | ||
| 571 | |||
| 572 | cb = &pc->ring.cb[next]; | ||
| 573 | if (unlikely(!cb->vd)) { | ||
| 574 | dev_err(hsdma2dev(hsdma), "cb->vd cannot be null\n"); | ||
| 575 | break; | ||
| 576 | } | ||
| 577 | |||
| 578 | /* Update residue of VD the associated PD belonged to */ | ||
| 579 | hvd = to_hsdma_vdesc(cb->vd); | ||
| 580 | hvd->residue -= MTK_HSDMA_DESC_PLEN_GET(rxd->desc2); | ||
| 581 | |||
| 582 | /* Complete VD until the relevant last PD is finished */ | ||
| 583 | if (IS_MTK_HSDMA_VDESC_FINISHED(cb->flag)) { | ||
| 584 | hvc = to_hsdma_vchan(cb->vd->tx.chan); | ||
| 585 | |||
| 586 | spin_lock(&hvc->vc.lock); | ||
| 587 | |||
| 588 | /* Remove VD from list desc_hw_processing */ | ||
| 589 | list_del(&cb->vd->node); | ||
| 590 | |||
| 591 | /* Add VD into list desc_completed */ | ||
| 592 | vchan_cookie_complete(cb->vd); | ||
| 593 | |||
| 594 | if (hvc->issue_synchronize && | ||
| 595 | list_empty(&hvc->desc_hw_processing)) { | ||
| 596 | complete(&hvc->issue_completion); | ||
| 597 | hvc->issue_synchronize = false; | ||
| 598 | } | ||
| 599 | spin_unlock(&hvc->vc.lock); | ||
| 600 | |||
| 601 | cb->flag = 0; | ||
| 602 | } | ||
| 603 | |||
| 604 | cb->vd = 0; | ||
| 605 | |||
| 606 | /* | ||
| 607 | * Recycle the RXD with the helper WRITE_ONCE that can ensure | ||
| 608 | * data written into RAM would really happens. | ||
| 609 | */ | ||
| 610 | WRITE_ONCE(rxd->desc1, 0); | ||
| 611 | WRITE_ONCE(rxd->desc2, 0); | ||
| 612 | pc->ring.cur_rptr = next; | ||
| 613 | |||
| 614 | /* Release rooms */ | ||
| 615 | atomic_inc(&pc->nr_free); | ||
| 616 | } | ||
| 617 | |||
| 618 | /* Ensure all changes indeed done before we're going on */ | ||
| 619 | wmb(); | ||
| 620 | |||
| 621 | /* Update CPU pointer for those completed PDs */ | ||
| 622 | mtk_dma_write(hsdma, MTK_HSDMA_RX_CPU, pc->ring.cur_rptr); | ||
| 623 | |||
| 624 | /* | ||
| 625 | * Acking the pending IRQ allows hardware no longer to keep the used | ||
| 626 | * IRQ line in certain trigger state when software has completed all | ||
| 627 | * the finished physical descriptors. | ||
| 628 | */ | ||
| 629 | if (atomic_read(&pc->nr_free) >= MTK_DMA_SIZE - 1) | ||
| 630 | mtk_dma_write(hsdma, MTK_HSDMA_INT_STATUS, status); | ||
| 631 | |||
| 632 | /* ASAP handles pending VDs in all VCs after freeing some rooms */ | ||
| 633 | for (i = 0; i < hsdma->dma_requests; i++) { | ||
| 634 | hvc = &hsdma->vc[i]; | ||
| 635 | spin_lock(&hvc->vc.lock); | ||
| 636 | mtk_hsdma_issue_vchan_pending(hsdma, hvc); | ||
| 637 | spin_unlock(&hvc->vc.lock); | ||
| 638 | } | ||
| 639 | |||
| 640 | rx_done: | ||
| 641 | /* All completed PDs are cleaned up, so enable interrupt again */ | ||
| 642 | mtk_dma_set(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE); | ||
| 643 | } | ||
| 644 | |||
/*
 * Hard-IRQ handler: mask RX-done, reclaim finished descriptors, and let
 * mtk_hsdma_free_rooms_in_ring() re-enable the interrupt when done.
 */
static irqreturn_t mtk_hsdma_irq(int irq, void *devid)
{
	struct mtk_hsdma_device *hsdma = devid;

	/*
	 * Disable interrupt until all completed PDs are cleaned up in
	 * mtk_hsdma_free_rooms call.
	 */
	mtk_dma_clr(hsdma, MTK_HSDMA_INT_ENABLE, MTK_HSDMA_INT_RXDONE);

	mtk_hsdma_free_rooms_in_ring(hsdma);

	return IRQ_HANDLED;
}
| 659 | |||
| 660 | static struct virt_dma_desc *mtk_hsdma_find_active_desc(struct dma_chan *c, | ||
| 661 | dma_cookie_t cookie) | ||
| 662 | { | ||
| 663 | struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c); | ||
| 664 | struct virt_dma_desc *vd; | ||
| 665 | |||
| 666 | list_for_each_entry(vd, &hvc->desc_hw_processing, node) | ||
| 667 | if (vd->tx.cookie == cookie) | ||
| 668 | return vd; | ||
| 669 | |||
| 670 | list_for_each_entry(vd, &hvc->vc.desc_issued, node) | ||
| 671 | if (vd->tx.cookie == cookie) | ||
| 672 | return vd; | ||
| 673 | |||
| 674 | return NULL; | ||
| 675 | } | ||
| 676 | |||
| 677 | static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c, | ||
| 678 | dma_cookie_t cookie, | ||
| 679 | struct dma_tx_state *txstate) | ||
| 680 | { | ||
| 681 | struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c); | ||
| 682 | struct mtk_hsdma_vdesc *hvd; | ||
| 683 | struct virt_dma_desc *vd; | ||
| 684 | enum dma_status ret; | ||
| 685 | unsigned long flags; | ||
| 686 | size_t bytes = 0; | ||
| 687 | |||
| 688 | ret = dma_cookie_status(c, cookie, txstate); | ||
| 689 | if (ret == DMA_COMPLETE || !txstate) | ||
| 690 | return ret; | ||
| 691 | |||
| 692 | spin_lock_irqsave(&hvc->vc.lock, flags); | ||
| 693 | vd = mtk_hsdma_find_active_desc(c, cookie); | ||
| 694 | spin_unlock_irqrestore(&hvc->vc.lock, flags); | ||
| 695 | |||
| 696 | if (vd) { | ||
| 697 | hvd = to_hsdma_vdesc(vd); | ||
| 698 | bytes = hvd->residue; | ||
| 699 | } | ||
| 700 | |||
| 701 | dma_set_residue(txstate, bytes); | ||
| 702 | |||
| 703 | return ret; | ||
| 704 | } | ||
| 705 | |||
/*
 * dmaengine device_issue_pending callback: move submitted descriptors to
 * the issued list and try to map them onto the shared PC right away.
 */
static void mtk_hsdma_issue_pending(struct dma_chan *c)
{
	struct mtk_hsdma_device *hsdma = to_hsdma_dev(c);
	struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c);
	unsigned long flags;

	spin_lock_irqsave(&hvc->vc.lock, flags);

	/* Only push to hardware if something was actually newly issued */
	if (vchan_issue_pending(&hvc->vc))
		mtk_hsdma_issue_vchan_pending(hsdma, hvc);

	spin_unlock_irqrestore(&hvc->vc.lock, flags);
}
| 719 | |||
| 720 | static struct dma_async_tx_descriptor * | ||
| 721 | mtk_hsdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, | ||
| 722 | dma_addr_t src, size_t len, unsigned long flags) | ||
| 723 | { | ||
| 724 | struct mtk_hsdma_vdesc *hvd; | ||
| 725 | |||
| 726 | hvd = kzalloc(sizeof(*hvd), GFP_NOWAIT); | ||
| 727 | if (!hvd) | ||
| 728 | return NULL; | ||
| 729 | |||
| 730 | hvd->len = len; | ||
| 731 | hvd->residue = len; | ||
| 732 | hvd->src = src; | ||
| 733 | hvd->dest = dest; | ||
| 734 | |||
| 735 | return vchan_tx_prep(to_virt_chan(c), &hvd->vd, flags); | ||
| 736 | } | ||
| 737 | |||
| 738 | static int mtk_hsdma_free_inactive_desc(struct dma_chan *c) | ||
| 739 | { | ||
| 740 | struct virt_dma_chan *vc = to_virt_chan(c); | ||
| 741 | unsigned long flags; | ||
| 742 | LIST_HEAD(head); | ||
| 743 | |||
| 744 | spin_lock_irqsave(&vc->lock, flags); | ||
| 745 | list_splice_tail_init(&vc->desc_allocated, &head); | ||
| 746 | list_splice_tail_init(&vc->desc_submitted, &head); | ||
| 747 | list_splice_tail_init(&vc->desc_issued, &head); | ||
| 748 | spin_unlock_irqrestore(&vc->lock, flags); | ||
| 749 | |||
| 750 | /* At the point, we don't expect users put descriptor into VC again */ | ||
| 751 | vchan_dma_desc_free_list(vc, &head); | ||
| 752 | |||
| 753 | return 0; | ||
| 754 | } | ||
| 755 | |||
/*
 * Wait until every descriptor of this VC that already reached the hardware
 * ring has been processed, then free the completed ones.
 *
 * The hardware offers no way to abort in-flight PDs, so "terminate" for
 * active work means waiting for natural completion (signalled via
 * issue_completion from the IRQ reclaim path).
 */
static void mtk_hsdma_free_active_desc(struct dma_chan *c)
{
	struct mtk_hsdma_vchan *hvc = to_hsdma_vchan(c);
	bool sync_needed = false;

	/*
	 * Once issue_synchronize is set, as soon as the hardware consumes
	 * all descriptors for the channel in the ring, the synchronization
	 * must be notified immediately it is completed.
	 */
	spin_lock(&hvc->vc.lock);
	if (!list_empty(&hvc->desc_hw_processing)) {
		hvc->issue_synchronize = true;
		sync_needed = true;
	}
	spin_unlock(&hvc->vc.lock);

	if (sync_needed)
		wait_for_completion(&hvc->issue_completion);
	/*
	 * At this point, we expect that all remaining descriptors in the
	 * ring for the channel have been fully processed.
	 */
	WARN_ONCE(!list_empty(&hvc->desc_hw_processing),
		  "Desc pending still in list desc_hw_processing\n");

	/* Free all descriptors in list desc_completed */
	vchan_synchronize(&hvc->vc);

	WARN_ONCE(!list_empty(&hvc->vc.desc_completed),
		  "Desc pending still in list desc_completed\n");
}
| 788 | |||
/*
 * dmaengine device_terminate_all callback.
 *
 * Always returns 0. Note that active descriptors cannot be aborted; see
 * mtk_hsdma_free_active_desc() — this callback may therefore sleep and
 * must not be called from atomic context.
 */
static int mtk_hsdma_terminate_all(struct dma_chan *c)
{
	/*
	 * Free pending descriptors not processed yet by hardware that have
	 * previously been submitted to the channel.
	 */
	mtk_hsdma_free_inactive_desc(c);

	/*
	 * However, the DMA engine doesn't provide any way to stop the
	 * descriptors currently being processed by hardware. The only way is
	 * to wait until those descriptors are all processed completely
	 * through the mtk_hsdma_free_active_desc call.
	 */
	mtk_hsdma_free_active_desc(c);

	return 0;
}
| 807 | |||
| 808 | static int mtk_hsdma_alloc_chan_resources(struct dma_chan *c) | ||
| 809 | { | ||
| 810 | struct mtk_hsdma_device *hsdma = to_hsdma_dev(c); | ||
| 811 | int err; | ||
| 812 | |||
| 813 | /* | ||
| 814 | * Since HSDMA has only one PC, the resource for PC is being allocated | ||
| 815 | * when the first VC is being created and the other VCs would run on | ||
| 816 | * the same PC. | ||
| 817 | */ | ||
| 818 | if (!refcount_read(&hsdma->pc_refcnt)) { | ||
| 819 | err = mtk_hsdma_alloc_pchan(hsdma, hsdma->pc); | ||
| 820 | if (err) | ||
| 821 | return err; | ||
| 822 | /* | ||
| 823 | * refcount_inc would complain increment on 0; use-after-free. | ||
| 824 | * Thus, we need to explicitly set it as 1 initially. | ||
| 825 | */ | ||
| 826 | refcount_set(&hsdma->pc_refcnt, 1); | ||
| 827 | } else { | ||
| 828 | refcount_inc(&hsdma->pc_refcnt); | ||
| 829 | } | ||
| 830 | |||
| 831 | return 0; | ||
| 832 | } | ||
| 833 | |||
| 834 | static void mtk_hsdma_free_chan_resources(struct dma_chan *c) | ||
| 835 | { | ||
| 836 | struct mtk_hsdma_device *hsdma = to_hsdma_dev(c); | ||
| 837 | |||
| 838 | /* Free all descriptors in all lists on the VC */ | ||
| 839 | mtk_hsdma_terminate_all(c); | ||
| 840 | |||
| 841 | /* The resource for PC is not freed until all the VCs are destroyed */ | ||
| 842 | if (!refcount_dec_and_test(&hsdma->pc_refcnt)) | ||
| 843 | return; | ||
| 844 | |||
| 845 | mtk_hsdma_free_pchan(hsdma, hsdma->pc); | ||
| 846 | } | ||
| 847 | |||
| 848 | static int mtk_hsdma_hw_init(struct mtk_hsdma_device *hsdma) | ||
| 849 | { | ||
| 850 | int err; | ||
| 851 | |||
| 852 | pm_runtime_enable(hsdma2dev(hsdma)); | ||
| 853 | pm_runtime_get_sync(hsdma2dev(hsdma)); | ||
| 854 | |||
| 855 | err = clk_prepare_enable(hsdma->clk); | ||
| 856 | if (err) | ||
| 857 | return err; | ||
| 858 | |||
| 859 | mtk_dma_write(hsdma, MTK_HSDMA_INT_ENABLE, 0); | ||
| 860 | mtk_dma_write(hsdma, MTK_HSDMA_GLO, MTK_HSDMA_GLO_DEFAULT); | ||
| 861 | |||
| 862 | return 0; | ||
| 863 | } | ||
| 864 | |||
| 865 | static int mtk_hsdma_hw_deinit(struct mtk_hsdma_device *hsdma) | ||
| 866 | { | ||
| 867 | mtk_dma_write(hsdma, MTK_HSDMA_GLO, 0); | ||
| 868 | |||
| 869 | clk_disable_unprepare(hsdma->clk); | ||
| 870 | |||
| 871 | pm_runtime_put_sync(hsdma2dev(hsdma)); | ||
| 872 | pm_runtime_disable(hsdma2dev(hsdma)); | ||
| 873 | |||
| 874 | return 0; | ||
| 875 | } | ||
| 876 | |||
/*
 * Per-SoC descriptor bit layout: the DDONE (descriptor done) and LS0
 * (last segment) flag positions differ between generations.
 */
static const struct mtk_hsdma_soc mt7623_soc = {
	.ddone = BIT(31),
	.ls0 = BIT(30),
};

static const struct mtk_hsdma_soc mt7622_soc = {
	.ddone = BIT(15),
	.ls0 = BIT(14),
};

/* Devicetree match table; .data supplies the per-SoC bit layout above */
static const struct of_device_id mtk_hsdma_match[] = {
	{ .compatible = "mediatek,mt7623-hsdma", .data = &mt7623_soc},
	{ .compatible = "mediatek,mt7622-hsdma", .data = &mt7622_soc},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_hsdma_match);
| 893 | |||
| 894 | static int mtk_hsdma_probe(struct platform_device *pdev) | ||
| 895 | { | ||
| 896 | struct mtk_hsdma_device *hsdma; | ||
| 897 | struct mtk_hsdma_vchan *vc; | ||
| 898 | struct dma_device *dd; | ||
| 899 | struct resource *res; | ||
| 900 | int i, err; | ||
| 901 | |||
| 902 | hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL); | ||
| 903 | if (!hsdma) | ||
| 904 | return -ENOMEM; | ||
| 905 | |||
| 906 | dd = &hsdma->ddev; | ||
| 907 | |||
| 908 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 909 | hsdma->base = devm_ioremap_resource(&pdev->dev, res); | ||
| 910 | if (IS_ERR(hsdma->base)) | ||
| 911 | return PTR_ERR(hsdma->base); | ||
| 912 | |||
| 913 | hsdma->soc = of_device_get_match_data(&pdev->dev); | ||
| 914 | if (!hsdma->soc) { | ||
| 915 | dev_err(&pdev->dev, "No device match found\n"); | ||
| 916 | return -ENODEV; | ||
| 917 | } | ||
| 918 | |||
| 919 | hsdma->clk = devm_clk_get(&pdev->dev, "hsdma"); | ||
| 920 | if (IS_ERR(hsdma->clk)) { | ||
| 921 | dev_err(&pdev->dev, "No clock for %s\n", | ||
| 922 | dev_name(&pdev->dev)); | ||
| 923 | return PTR_ERR(hsdma->clk); | ||
| 924 | } | ||
| 925 | |||
| 926 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
| 927 | if (!res) { | ||
| 928 | dev_err(&pdev->dev, "No irq resource for %s\n", | ||
| 929 | dev_name(&pdev->dev)); | ||
| 930 | return -EINVAL; | ||
| 931 | } | ||
| 932 | hsdma->irq = res->start; | ||
| 933 | |||
| 934 | refcount_set(&hsdma->pc_refcnt, 0); | ||
| 935 | spin_lock_init(&hsdma->lock); | ||
| 936 | |||
| 937 | dma_cap_set(DMA_MEMCPY, dd->cap_mask); | ||
| 938 | |||
| 939 | dd->copy_align = MTK_HSDMA_ALIGN_SIZE; | ||
| 940 | dd->device_alloc_chan_resources = mtk_hsdma_alloc_chan_resources; | ||
| 941 | dd->device_free_chan_resources = mtk_hsdma_free_chan_resources; | ||
| 942 | dd->device_tx_status = mtk_hsdma_tx_status; | ||
| 943 | dd->device_issue_pending = mtk_hsdma_issue_pending; | ||
| 944 | dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy; | ||
| 945 | dd->device_terminate_all = mtk_hsdma_terminate_all; | ||
| 946 | dd->src_addr_widths = MTK_HSDMA_DMA_BUSWIDTHS; | ||
| 947 | dd->dst_addr_widths = MTK_HSDMA_DMA_BUSWIDTHS; | ||
| 948 | dd->directions = BIT(DMA_MEM_TO_MEM); | ||
| 949 | dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; | ||
| 950 | dd->dev = &pdev->dev; | ||
| 951 | INIT_LIST_HEAD(&dd->channels); | ||
| 952 | |||
| 953 | hsdma->dma_requests = MTK_HSDMA_NR_VCHANS; | ||
| 954 | if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node, | ||
| 955 | "dma-requests", | ||
| 956 | &hsdma->dma_requests)) { | ||
| 957 | dev_info(&pdev->dev, | ||
| 958 | "Using %u as missing dma-requests property\n", | ||
| 959 | MTK_HSDMA_NR_VCHANS); | ||
| 960 | } | ||
| 961 | |||
| 962 | hsdma->pc = devm_kcalloc(&pdev->dev, MTK_HSDMA_NR_MAX_PCHANS, | ||
| 963 | sizeof(*hsdma->pc), GFP_KERNEL); | ||
| 964 | if (!hsdma->pc) | ||
| 965 | return -ENOMEM; | ||
| 966 | |||
| 967 | hsdma->vc = devm_kcalloc(&pdev->dev, hsdma->dma_requests, | ||
| 968 | sizeof(*hsdma->vc), GFP_KERNEL); | ||
| 969 | if (!hsdma->vc) | ||
| 970 | return -ENOMEM; | ||
| 971 | |||
| 972 | for (i = 0; i < hsdma->dma_requests; i++) { | ||
| 973 | vc = &hsdma->vc[i]; | ||
| 974 | vc->vc.desc_free = mtk_hsdma_vdesc_free; | ||
| 975 | vchan_init(&vc->vc, dd); | ||
| 976 | init_completion(&vc->issue_completion); | ||
| 977 | INIT_LIST_HEAD(&vc->desc_hw_processing); | ||
| 978 | } | ||
| 979 | |||
| 980 | err = dma_async_device_register(dd); | ||
| 981 | if (err) | ||
| 982 | return err; | ||
| 983 | |||
| 984 | err = of_dma_controller_register(pdev->dev.of_node, | ||
| 985 | of_dma_xlate_by_chan_id, hsdma); | ||
| 986 | if (err) { | ||
| 987 | dev_err(&pdev->dev, | ||
| 988 | "MediaTek HSDMA OF registration failed %d\n", err); | ||
| 989 | goto err_unregister; | ||
| 990 | } | ||
| 991 | |||
| 992 | mtk_hsdma_hw_init(hsdma); | ||
| 993 | |||
| 994 | err = devm_request_irq(&pdev->dev, hsdma->irq, | ||
| 995 | mtk_hsdma_irq, 0, | ||
| 996 | dev_name(&pdev->dev), hsdma); | ||
| 997 | if (err) { | ||
| 998 | dev_err(&pdev->dev, | ||
| 999 | "request_irq failed with err %d\n", err); | ||
| 1000 | goto err_unregister; | ||
| 1001 | } | ||
| 1002 | |||
| 1003 | platform_set_drvdata(pdev, hsdma); | ||
| 1004 | |||
| 1005 | dev_info(&pdev->dev, "MediaTek HSDMA driver registered\n"); | ||
| 1006 | |||
| 1007 | return 0; | ||
| 1008 | |||
| 1009 | err_unregister: | ||
| 1010 | dma_async_device_unregister(dd); | ||
| 1011 | |||
| 1012 | return err; | ||
| 1013 | } | ||
| 1014 | |||
/*
 * Remove: stop every VC tasklet, quiesce and mask the hardware interrupt,
 * power the controller down and undo the dmaengine/OF registrations.
 *
 * Always returns 0.
 */
static int mtk_hsdma_remove(struct platform_device *pdev)
{
	struct mtk_hsdma_device *hsdma = platform_get_drvdata(pdev);
	struct mtk_hsdma_vchan *vc;
	int i;

	/* Kill VC task */
	for (i = 0; i < hsdma->dma_requests; i++) {
		vc = &hsdma->vc[i];

		list_del(&vc->vc.chan.device_node);
		tasklet_kill(&vc->vc.task);
	}

	/* Disable DMA interrupt */
	mtk_dma_write(hsdma, MTK_HSDMA_INT_ENABLE, 0);

	/* Wait for any pending IRQ handlers to complete */
	synchronize_irq(hsdma->irq);

	/* Disable hardware */
	mtk_hsdma_hw_deinit(hsdma);

	dma_async_device_unregister(&hsdma->ddev);
	of_dma_controller_free(pdev->dev.of_node);

	return 0;
}
| 1043 | |||
/* Platform driver glue; matching is purely devicetree-based */
static struct platform_driver mtk_hsdma_driver = {
	.probe = mtk_hsdma_probe,
	.remove = mtk_hsdma_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = mtk_hsdma_match,
	},
};
module_platform_driver(mtk_hsdma_driver);

MODULE_DESCRIPTION("MediaTek High-Speed DMA Controller Driver");
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index d7327fd5f445..de1fd59fe136 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
| @@ -1510,7 +1510,7 @@ static void pl330_dotask(unsigned long data) | |||
| 1510 | /* Returns 1 if state was updated, 0 otherwise */ | 1510 | /* Returns 1 if state was updated, 0 otherwise */ |
| 1511 | static int pl330_update(struct pl330_dmac *pl330) | 1511 | static int pl330_update(struct pl330_dmac *pl330) |
| 1512 | { | 1512 | { |
| 1513 | struct dma_pl330_desc *descdone, *tmp; | 1513 | struct dma_pl330_desc *descdone; |
| 1514 | unsigned long flags; | 1514 | unsigned long flags; |
| 1515 | void __iomem *regs; | 1515 | void __iomem *regs; |
| 1516 | u32 val; | 1516 | u32 val; |
| @@ -1588,7 +1588,9 @@ static int pl330_update(struct pl330_dmac *pl330) | |||
| 1588 | } | 1588 | } |
| 1589 | 1589 | ||
| 1590 | /* Now that we are in no hurry, do the callbacks */ | 1590 | /* Now that we are in no hurry, do the callbacks */ |
| 1591 | list_for_each_entry_safe(descdone, tmp, &pl330->req_done, rqd) { | 1591 | while (!list_empty(&pl330->req_done)) { |
| 1592 | descdone = list_first_entry(&pl330->req_done, | ||
| 1593 | struct dma_pl330_desc, rqd); | ||
| 1592 | list_del(&descdone->rqd); | 1594 | list_del(&descdone->rqd); |
| 1593 | spin_unlock_irqrestore(&pl330->lock, flags); | 1595 | spin_unlock_irqrestore(&pl330->lock, flags); |
| 1594 | dma_pl330_rqcb(descdone, PL330_ERR_NONE); | 1596 | dma_pl330_rqcb(descdone, PL330_ERR_NONE); |
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index d076940e0c69..d29275b97e84 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c | |||
| @@ -393,6 +393,7 @@ struct bam_device { | |||
| 393 | struct device_dma_parameters dma_parms; | 393 | struct device_dma_parameters dma_parms; |
| 394 | struct bam_chan *channels; | 394 | struct bam_chan *channels; |
| 395 | u32 num_channels; | 395 | u32 num_channels; |
| 396 | u32 num_ees; | ||
| 396 | 397 | ||
| 397 | /* execution environment ID, from DT */ | 398 | /* execution environment ID, from DT */ |
| 398 | u32 ee; | 399 | u32 ee; |
| @@ -934,12 +935,15 @@ static void bam_apply_new_config(struct bam_chan *bchan, | |||
| 934 | struct bam_device *bdev = bchan->bdev; | 935 | struct bam_device *bdev = bchan->bdev; |
| 935 | u32 maxburst; | 936 | u32 maxburst; |
| 936 | 937 | ||
| 937 | if (dir == DMA_DEV_TO_MEM) | 938 | if (!bdev->controlled_remotely) { |
| 938 | maxburst = bchan->slave.src_maxburst; | 939 | if (dir == DMA_DEV_TO_MEM) |
| 939 | else | 940 | maxburst = bchan->slave.src_maxburst; |
| 940 | maxburst = bchan->slave.dst_maxburst; | 941 | else |
| 942 | maxburst = bchan->slave.dst_maxburst; | ||
| 941 | 943 | ||
| 942 | writel_relaxed(maxburst, bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD)); | 944 | writel_relaxed(maxburst, |
| 945 | bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD)); | ||
| 946 | } | ||
| 943 | 947 | ||
| 944 | bchan->reconfigure = 0; | 948 | bchan->reconfigure = 0; |
| 945 | } | 949 | } |
| @@ -1128,15 +1132,19 @@ static int bam_init(struct bam_device *bdev) | |||
| 1128 | u32 val; | 1132 | u32 val; |
| 1129 | 1133 | ||
| 1130 | /* read revision and configuration information */ | 1134 | /* read revision and configuration information */ |
| 1131 | val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)) >> NUM_EES_SHIFT; | 1135 | if (!bdev->num_ees) { |
| 1132 | val &= NUM_EES_MASK; | 1136 | val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)); |
| 1137 | bdev->num_ees = (val >> NUM_EES_SHIFT) & NUM_EES_MASK; | ||
| 1138 | } | ||
| 1133 | 1139 | ||
| 1134 | /* check that configured EE is within range */ | 1140 | /* check that configured EE is within range */ |
| 1135 | if (bdev->ee >= val) | 1141 | if (bdev->ee >= bdev->num_ees) |
| 1136 | return -EINVAL; | 1142 | return -EINVAL; |
| 1137 | 1143 | ||
| 1138 | val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES)); | 1144 | if (!bdev->num_channels) { |
| 1139 | bdev->num_channels = val & BAM_NUM_PIPES_MASK; | 1145 | val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES)); |
| 1146 | bdev->num_channels = val & BAM_NUM_PIPES_MASK; | ||
| 1147 | } | ||
| 1140 | 1148 | ||
| 1141 | if (bdev->controlled_remotely) | 1149 | if (bdev->controlled_remotely) |
| 1142 | return 0; | 1150 | return 0; |
| @@ -1232,9 +1240,25 @@ static int bam_dma_probe(struct platform_device *pdev) | |||
| 1232 | bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node, | 1240 | bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node, |
| 1233 | "qcom,controlled-remotely"); | 1241 | "qcom,controlled-remotely"); |
| 1234 | 1242 | ||
| 1243 | if (bdev->controlled_remotely) { | ||
| 1244 | ret = of_property_read_u32(pdev->dev.of_node, "num-channels", | ||
| 1245 | &bdev->num_channels); | ||
| 1246 | if (ret) | ||
| 1247 | dev_err(bdev->dev, "num-channels unspecified in dt\n"); | ||
| 1248 | |||
| 1249 | ret = of_property_read_u32(pdev->dev.of_node, "qcom,num-ees", | ||
| 1250 | &bdev->num_ees); | ||
| 1251 | if (ret) | ||
| 1252 | dev_err(bdev->dev, "num-ees unspecified in dt\n"); | ||
| 1253 | } | ||
| 1254 | |||
| 1235 | bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk"); | 1255 | bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk"); |
| 1236 | if (IS_ERR(bdev->bamclk)) | 1256 | if (IS_ERR(bdev->bamclk)) { |
| 1237 | return PTR_ERR(bdev->bamclk); | 1257 | if (!bdev->controlled_remotely) |
| 1258 | return PTR_ERR(bdev->bamclk); | ||
| 1259 | |||
| 1260 | bdev->bamclk = NULL; | ||
| 1261 | } | ||
| 1238 | 1262 | ||
| 1239 | ret = clk_prepare_enable(bdev->bamclk); | 1263 | ret = clk_prepare_enable(bdev->bamclk); |
| 1240 | if (ret) { | 1264 | if (ret) { |
| @@ -1309,6 +1333,11 @@ static int bam_dma_probe(struct platform_device *pdev) | |||
| 1309 | if (ret) | 1333 | if (ret) |
| 1310 | goto err_unregister_dma; | 1334 | goto err_unregister_dma; |
| 1311 | 1335 | ||
| 1336 | if (bdev->controlled_remotely) { | ||
| 1337 | pm_runtime_disable(&pdev->dev); | ||
| 1338 | return 0; | ||
| 1339 | } | ||
| 1340 | |||
| 1312 | pm_runtime_irq_safe(&pdev->dev); | 1341 | pm_runtime_irq_safe(&pdev->dev); |
| 1313 | pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY); | 1342 | pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY); |
| 1314 | pm_runtime_use_autosuspend(&pdev->dev); | 1343 | pm_runtime_use_autosuspend(&pdev->dev); |
| @@ -1392,7 +1421,8 @@ static int __maybe_unused bam_dma_suspend(struct device *dev) | |||
| 1392 | { | 1421 | { |
| 1393 | struct bam_device *bdev = dev_get_drvdata(dev); | 1422 | struct bam_device *bdev = dev_get_drvdata(dev); |
| 1394 | 1423 | ||
| 1395 | pm_runtime_force_suspend(dev); | 1424 | if (!bdev->controlled_remotely) |
| 1425 | pm_runtime_force_suspend(dev); | ||
| 1396 | 1426 | ||
| 1397 | clk_unprepare(bdev->bamclk); | 1427 | clk_unprepare(bdev->bamclk); |
| 1398 | 1428 | ||
| @@ -1408,7 +1438,8 @@ static int __maybe_unused bam_dma_resume(struct device *dev) | |||
| 1408 | if (ret) | 1438 | if (ret) |
| 1409 | return ret; | 1439 | return ret; |
| 1410 | 1440 | ||
| 1411 | pm_runtime_force_resume(dev); | 1441 | if (!bdev->controlled_remotely) |
| 1442 | pm_runtime_force_resume(dev); | ||
| 1412 | 1443 | ||
| 1413 | return 0; | 1444 | return 0; |
| 1414 | } | 1445 | } |
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index d0cacdb0713e..2a2ccd9c78e4 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c | |||
| @@ -1301,8 +1301,17 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, | |||
| 1301 | * If the cookie doesn't correspond to the currently running transfer | 1301 | * If the cookie doesn't correspond to the currently running transfer |
| 1302 | * then the descriptor hasn't been processed yet, and the residue is | 1302 | * then the descriptor hasn't been processed yet, and the residue is |
| 1303 | * equal to the full descriptor size. | 1303 | * equal to the full descriptor size. |
| 1304 | * Also, a client driver is possible to call this function before | ||
| 1305 | * rcar_dmac_isr_channel_thread() runs. In this case, the "desc.running" | ||
| 1306 | * will be the next descriptor, and the done list will appear. So, if | ||
| 1307 | * the argument cookie matches the done list's cookie, we can assume | ||
| 1308 | * the residue is zero. | ||
| 1304 | */ | 1309 | */ |
| 1305 | if (cookie != desc->async_tx.cookie) { | 1310 | if (cookie != desc->async_tx.cookie) { |
| 1311 | list_for_each_entry(desc, &chan->desc.done, node) { | ||
| 1312 | if (cookie == desc->async_tx.cookie) | ||
| 1313 | return 0; | ||
| 1314 | } | ||
| 1306 | list_for_each_entry(desc, &chan->desc.pending, node) { | 1315 | list_for_each_entry(desc, &chan->desc.pending, node) { |
| 1307 | if (cookie == desc->async_tx.cookie) | 1316 | if (cookie == desc->async_tx.cookie) |
| 1308 | return desc->size; | 1317 | return desc->size; |
| @@ -1677,8 +1686,8 @@ static const struct dev_pm_ops rcar_dmac_pm = { | |||
| 1677 | * - Wait for the current transfer to complete and stop the device, | 1686 | * - Wait for the current transfer to complete and stop the device, |
| 1678 | * - Resume transfers, if any. | 1687 | * - Resume transfers, if any. |
| 1679 | */ | 1688 | */ |
| 1680 | SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, | 1689 | SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, |
| 1681 | pm_runtime_force_resume) | 1690 | pm_runtime_force_resume) |
| 1682 | SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume, | 1691 | SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume, |
| 1683 | NULL) | 1692 | NULL) |
| 1684 | }; | 1693 | }; |
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index 786fc8fcc38e..8c5807362a25 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | * | 5 | * |
| 6 | * Copyright (C) M'boumba Cedric Madianga 2015 | 6 | * Copyright (C) M'boumba Cedric Madianga 2015 |
| 7 | * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com> | 7 | * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com> |
| 8 | * Pierre-Yves Mordret <pierre-yves.mordret@st.com> | ||
| 8 | * | 9 | * |
| 9 | * License terms: GNU General Public License (GPL), version 2 | 10 | * License terms: GNU General Public License (GPL), version 2 |
| 10 | */ | 11 | */ |
| @@ -33,9 +34,14 @@ | |||
| 33 | #define STM32_DMA_LIFCR 0x0008 /* DMA Low Int Flag Clear Reg */ | 34 | #define STM32_DMA_LIFCR 0x0008 /* DMA Low Int Flag Clear Reg */ |
| 34 | #define STM32_DMA_HIFCR 0x000c /* DMA High Int Flag Clear Reg */ | 35 | #define STM32_DMA_HIFCR 0x000c /* DMA High Int Flag Clear Reg */ |
| 35 | #define STM32_DMA_TCI BIT(5) /* Transfer Complete Interrupt */ | 36 | #define STM32_DMA_TCI BIT(5) /* Transfer Complete Interrupt */ |
| 37 | #define STM32_DMA_HTI BIT(4) /* Half Transfer Interrupt */ | ||
| 36 | #define STM32_DMA_TEI BIT(3) /* Transfer Error Interrupt */ | 38 | #define STM32_DMA_TEI BIT(3) /* Transfer Error Interrupt */ |
| 37 | #define STM32_DMA_DMEI BIT(2) /* Direct Mode Error Interrupt */ | 39 | #define STM32_DMA_DMEI BIT(2) /* Direct Mode Error Interrupt */ |
| 38 | #define STM32_DMA_FEI BIT(0) /* FIFO Error Interrupt */ | 40 | #define STM32_DMA_FEI BIT(0) /* FIFO Error Interrupt */ |
| 41 | #define STM32_DMA_MASKI (STM32_DMA_TCI \ | ||
| 42 | | STM32_DMA_TEI \ | ||
| 43 | | STM32_DMA_DMEI \ | ||
| 44 | | STM32_DMA_FEI) | ||
| 39 | 45 | ||
| 40 | /* DMA Stream x Configuration Register */ | 46 | /* DMA Stream x Configuration Register */ |
| 41 | #define STM32_DMA_SCR(x) (0x0010 + 0x18 * (x)) /* x = 0..7 */ | 47 | #define STM32_DMA_SCR(x) (0x0010 + 0x18 * (x)) /* x = 0..7 */ |
| @@ -60,7 +66,8 @@ | |||
| 60 | #define STM32_DMA_SCR_PINC BIT(9) /* Peripheral increment mode */ | 66 | #define STM32_DMA_SCR_PINC BIT(9) /* Peripheral increment mode */ |
| 61 | #define STM32_DMA_SCR_CIRC BIT(8) /* Circular mode */ | 67 | #define STM32_DMA_SCR_CIRC BIT(8) /* Circular mode */ |
| 62 | #define STM32_DMA_SCR_PFCTRL BIT(5) /* Peripheral Flow Controller */ | 68 | #define STM32_DMA_SCR_PFCTRL BIT(5) /* Peripheral Flow Controller */ |
| 63 | #define STM32_DMA_SCR_TCIE BIT(4) /* Transfer Cplete Int Enable*/ | 69 | #define STM32_DMA_SCR_TCIE BIT(4) /* Transfer Complete Int Enable |
| 70 | */ | ||
| 64 | #define STM32_DMA_SCR_TEIE BIT(2) /* Transfer Error Int Enable */ | 71 | #define STM32_DMA_SCR_TEIE BIT(2) /* Transfer Error Int Enable */ |
| 65 | #define STM32_DMA_SCR_DMEIE BIT(1) /* Direct Mode Err Int Enable */ | 72 | #define STM32_DMA_SCR_DMEIE BIT(1) /* Direct Mode Err Int Enable */ |
| 66 | #define STM32_DMA_SCR_EN BIT(0) /* Stream Enable */ | 73 | #define STM32_DMA_SCR_EN BIT(0) /* Stream Enable */ |
| @@ -111,11 +118,24 @@ | |||
| 111 | #define STM32_DMA_FIFO_THRESHOLD_FULL 0x03 | 118 | #define STM32_DMA_FIFO_THRESHOLD_FULL 0x03 |
| 112 | 119 | ||
| 113 | #define STM32_DMA_MAX_DATA_ITEMS 0xffff | 120 | #define STM32_DMA_MAX_DATA_ITEMS 0xffff |
| 121 | /* | ||
| 122 | * Valid transfer starts from @0 to @0xFFFE leading to unaligned scatter | ||
| 123 | * gather at boundary. Thus it's safer to round down this value on FIFO | ||
| 124 | * size (16 Bytes) | ||
| 125 | */ | ||
| 126 | #define STM32_DMA_ALIGNED_MAX_DATA_ITEMS \ | ||
| 127 | ALIGN_DOWN(STM32_DMA_MAX_DATA_ITEMS, 16) | ||
| 114 | #define STM32_DMA_MAX_CHANNELS 0x08 | 128 | #define STM32_DMA_MAX_CHANNELS 0x08 |
| 115 | #define STM32_DMA_MAX_REQUEST_ID 0x08 | 129 | #define STM32_DMA_MAX_REQUEST_ID 0x08 |
| 116 | #define STM32_DMA_MAX_DATA_PARAM 0x03 | 130 | #define STM32_DMA_MAX_DATA_PARAM 0x03 |
| 131 | #define STM32_DMA_FIFO_SIZE 16 /* FIFO is 16 bytes */ | ||
| 132 | #define STM32_DMA_MIN_BURST 4 | ||
| 117 | #define STM32_DMA_MAX_BURST 16 | 133 | #define STM32_DMA_MAX_BURST 16 |
| 118 | 134 | ||
| 135 | /* DMA Features */ | ||
| 136 | #define STM32_DMA_THRESHOLD_FTR_MASK GENMASK(1, 0) | ||
| 137 | #define STM32_DMA_THRESHOLD_FTR_GET(n) ((n) & STM32_DMA_THRESHOLD_FTR_MASK) | ||
| 138 | |||
| 119 | enum stm32_dma_width { | 139 | enum stm32_dma_width { |
| 120 | STM32_DMA_BYTE, | 140 | STM32_DMA_BYTE, |
| 121 | STM32_DMA_HALF_WORD, | 141 | STM32_DMA_HALF_WORD, |
| @@ -129,11 +149,18 @@ enum stm32_dma_burst_size { | |||
| 129 | STM32_DMA_BURST_INCR16, | 149 | STM32_DMA_BURST_INCR16, |
| 130 | }; | 150 | }; |
| 131 | 151 | ||
| 152 | /** | ||
| 153 | * struct stm32_dma_cfg - STM32 DMA custom configuration | ||
| 154 | * @channel_id: channel ID | ||
| 155 | * @request_line: DMA request | ||
| 156 | * @stream_config: 32bit mask specifying the DMA channel configuration | ||
| 157 | * @features: 32bit mask specifying the DMA Feature list | ||
| 158 | */ | ||
| 132 | struct stm32_dma_cfg { | 159 | struct stm32_dma_cfg { |
| 133 | u32 channel_id; | 160 | u32 channel_id; |
| 134 | u32 request_line; | 161 | u32 request_line; |
| 135 | u32 stream_config; | 162 | u32 stream_config; |
| 136 | u32 threshold; | 163 | u32 features; |
| 137 | }; | 164 | }; |
| 138 | 165 | ||
| 139 | struct stm32_dma_chan_reg { | 166 | struct stm32_dma_chan_reg { |
| @@ -171,6 +198,9 @@ struct stm32_dma_chan { | |||
| 171 | u32 next_sg; | 198 | u32 next_sg; |
| 172 | struct dma_slave_config dma_sconfig; | 199 | struct dma_slave_config dma_sconfig; |
| 173 | struct stm32_dma_chan_reg chan_reg; | 200 | struct stm32_dma_chan_reg chan_reg; |
| 201 | u32 threshold; | ||
| 202 | u32 mem_burst; | ||
| 203 | u32 mem_width; | ||
| 174 | }; | 204 | }; |
| 175 | 205 | ||
| 176 | struct stm32_dma_device { | 206 | struct stm32_dma_device { |
| @@ -235,6 +265,85 @@ static int stm32_dma_get_width(struct stm32_dma_chan *chan, | |||
| 235 | } | 265 | } |
| 236 | } | 266 | } |
| 237 | 267 | ||
| 268 | static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len, | ||
| 269 | u32 threshold) | ||
| 270 | { | ||
| 271 | enum dma_slave_buswidth max_width; | ||
| 272 | |||
| 273 | if (threshold == STM32_DMA_FIFO_THRESHOLD_FULL) | ||
| 274 | max_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
| 275 | else | ||
| 276 | max_width = DMA_SLAVE_BUSWIDTH_2_BYTES; | ||
| 277 | |||
| 278 | while ((buf_len < max_width || buf_len % max_width) && | ||
| 279 | max_width > DMA_SLAVE_BUSWIDTH_1_BYTE) | ||
| 280 | max_width = max_width >> 1; | ||
| 281 | |||
| 282 | return max_width; | ||
| 283 | } | ||
| 284 | |||
| 285 | static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold, | ||
| 286 | enum dma_slave_buswidth width) | ||
| 287 | { | ||
| 288 | u32 remaining; | ||
| 289 | |||
| 290 | if (width != DMA_SLAVE_BUSWIDTH_UNDEFINED) { | ||
| 291 | if (burst != 0) { | ||
| 292 | /* | ||
| 293 | * If number of beats fit in several whole bursts | ||
| 294 | * this configuration is allowed. | ||
| 295 | */ | ||
| 296 | remaining = ((STM32_DMA_FIFO_SIZE / width) * | ||
| 297 | (threshold + 1) / 4) % burst; | ||
| 298 | |||
| 299 | if (remaining == 0) | ||
| 300 | return true; | ||
| 301 | } else { | ||
| 302 | return true; | ||
| 303 | } | ||
| 304 | } | ||
| 305 | |||
| 306 | return false; | ||
| 307 | } | ||
| 308 | |||
| 309 | static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold) | ||
| 310 | { | ||
| 311 | switch (threshold) { | ||
| 312 | case STM32_DMA_FIFO_THRESHOLD_FULL: | ||
| 313 | if (buf_len >= STM32_DMA_MAX_BURST) | ||
| 314 | return true; | ||
| 315 | else | ||
| 316 | return false; | ||
| 317 | case STM32_DMA_FIFO_THRESHOLD_HALFFULL: | ||
| 318 | if (buf_len >= STM32_DMA_MAX_BURST / 2) | ||
| 319 | return true; | ||
| 320 | else | ||
| 321 | return false; | ||
| 322 | default: | ||
| 323 | return false; | ||
| 324 | } | ||
| 325 | } | ||
| 326 | |||
| 327 | static u32 stm32_dma_get_best_burst(u32 buf_len, u32 max_burst, u32 threshold, | ||
| 328 | enum dma_slave_buswidth width) | ||
| 329 | { | ||
| 330 | u32 best_burst = max_burst; | ||
| 331 | |||
| 332 | if (best_burst == 1 || !stm32_dma_is_burst_possible(buf_len, threshold)) | ||
| 333 | return 0; | ||
| 334 | |||
| 335 | while ((buf_len < best_burst * width && best_burst > 1) || | ||
| 336 | !stm32_dma_fifo_threshold_is_allowed(best_burst, threshold, | ||
| 337 | width)) { | ||
| 338 | if (best_burst > STM32_DMA_MIN_BURST) | ||
| 339 | best_burst = best_burst >> 1; | ||
| 340 | else | ||
| 341 | best_burst = 0; | ||
| 342 | } | ||
| 343 | |||
| 344 | return best_burst; | ||
| 345 | } | ||
| 346 | |||
| 238 | static int stm32_dma_get_burst(struct stm32_dma_chan *chan, u32 maxburst) | 347 | static int stm32_dma_get_burst(struct stm32_dma_chan *chan, u32 maxburst) |
| 239 | { | 348 | { |
| 240 | switch (maxburst) { | 349 | switch (maxburst) { |
| @@ -254,12 +363,12 @@ static int stm32_dma_get_burst(struct stm32_dma_chan *chan, u32 maxburst) | |||
| 254 | } | 363 | } |
| 255 | 364 | ||
| 256 | static void stm32_dma_set_fifo_config(struct stm32_dma_chan *chan, | 365 | static void stm32_dma_set_fifo_config(struct stm32_dma_chan *chan, |
| 257 | u32 src_maxburst, u32 dst_maxburst) | 366 | u32 src_burst, u32 dst_burst) |
| 258 | { | 367 | { |
| 259 | chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_MASK; | 368 | chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_MASK; |
| 260 | chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_DMEIE; | 369 | chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_DMEIE; |
| 261 | 370 | ||
| 262 | if ((!src_maxburst) && (!dst_maxburst)) { | 371 | if (!src_burst && !dst_burst) { |
| 263 | /* Using direct mode */ | 372 | /* Using direct mode */ |
| 264 | chan->chan_reg.dma_scr |= STM32_DMA_SCR_DMEIE; | 373 | chan->chan_reg.dma_scr |= STM32_DMA_SCR_DMEIE; |
| 265 | } else { | 374 | } else { |
| @@ -300,7 +409,7 @@ static u32 stm32_dma_irq_status(struct stm32_dma_chan *chan) | |||
| 300 | 409 | ||
| 301 | flags = dma_isr >> (((chan->id & 2) << 3) | ((chan->id & 1) * 6)); | 410 | flags = dma_isr >> (((chan->id & 2) << 3) | ((chan->id & 1) * 6)); |
| 302 | 411 | ||
| 303 | return flags; | 412 | return flags & STM32_DMA_MASKI; |
| 304 | } | 413 | } |
| 305 | 414 | ||
| 306 | static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags) | 415 | static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags) |
| @@ -315,6 +424,7 @@ static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags) | |||
| 315 | * If (ch % 4) is 2 or 3, left shift the mask by 16 bits. | 424 | * If (ch % 4) is 2 or 3, left shift the mask by 16 bits. |
| 316 | * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits. | 425 | * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits. |
| 317 | */ | 426 | */ |
| 427 | flags &= STM32_DMA_MASKI; | ||
| 318 | dma_ifcr = flags << (((chan->id & 2) << 3) | ((chan->id & 1) * 6)); | 428 | dma_ifcr = flags << (((chan->id & 2) << 3) | ((chan->id & 1) * 6)); |
| 319 | 429 | ||
| 320 | if (chan->id & 4) | 430 | if (chan->id & 4) |
| @@ -429,6 +539,8 @@ static void stm32_dma_dump_reg(struct stm32_dma_chan *chan) | |||
| 429 | dev_dbg(chan2dev(chan), "SFCR: 0x%08x\n", sfcr); | 539 | dev_dbg(chan2dev(chan), "SFCR: 0x%08x\n", sfcr); |
| 430 | } | 540 | } |
| 431 | 541 | ||
| 542 | static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan); | ||
| 543 | |||
| 432 | static void stm32_dma_start_transfer(struct stm32_dma_chan *chan) | 544 | static void stm32_dma_start_transfer(struct stm32_dma_chan *chan) |
| 433 | { | 545 | { |
| 434 | struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); | 546 | struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); |
| @@ -471,6 +583,9 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan) | |||
| 471 | if (status) | 583 | if (status) |
| 472 | stm32_dma_irq_clear(chan, status); | 584 | stm32_dma_irq_clear(chan, status); |
| 473 | 585 | ||
| 586 | if (chan->desc->cyclic) | ||
| 587 | stm32_dma_configure_next_sg(chan); | ||
| 588 | |||
| 474 | stm32_dma_dump_reg(chan); | 589 | stm32_dma_dump_reg(chan); |
| 475 | 590 | ||
| 476 | /* Start DMA */ | 591 | /* Start DMA */ |
| @@ -541,13 +656,29 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid) | |||
| 541 | status = stm32_dma_irq_status(chan); | 656 | status = stm32_dma_irq_status(chan); |
| 542 | scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id)); | 657 | scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id)); |
| 543 | 658 | ||
| 544 | if ((status & STM32_DMA_TCI) && (scr & STM32_DMA_SCR_TCIE)) { | 659 | if (status & STM32_DMA_TCI) { |
| 545 | stm32_dma_irq_clear(chan, STM32_DMA_TCI); | 660 | stm32_dma_irq_clear(chan, STM32_DMA_TCI); |
| 546 | stm32_dma_handle_chan_done(chan); | 661 | if (scr & STM32_DMA_SCR_TCIE) |
| 547 | 662 | stm32_dma_handle_chan_done(chan); | |
| 548 | } else { | 663 | status &= ~STM32_DMA_TCI; |
| 664 | } | ||
| 665 | if (status & STM32_DMA_HTI) { | ||
| 666 | stm32_dma_irq_clear(chan, STM32_DMA_HTI); | ||
| 667 | status &= ~STM32_DMA_HTI; | ||
| 668 | } | ||
| 669 | if (status & STM32_DMA_FEI) { | ||
| 670 | stm32_dma_irq_clear(chan, STM32_DMA_FEI); | ||
| 671 | status &= ~STM32_DMA_FEI; | ||
| 672 | if (!(scr & STM32_DMA_SCR_EN)) | ||
| 673 | dev_err(chan2dev(chan), "FIFO Error\n"); | ||
| 674 | else | ||
| 675 | dev_dbg(chan2dev(chan), "FIFO over/underrun\n"); | ||
| 676 | } | ||
| 677 | if (status) { | ||
| 549 | stm32_dma_irq_clear(chan, status); | 678 | stm32_dma_irq_clear(chan, status); |
| 550 | dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status); | 679 | dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status); |
| 680 | if (!(scr & STM32_DMA_SCR_EN)) | ||
| 681 | dev_err(chan2dev(chan), "chan disabled by HW\n"); | ||
| 551 | } | 682 | } |
| 552 | 683 | ||
| 553 | spin_unlock(&chan->vchan.lock); | 684 | spin_unlock(&chan->vchan.lock); |
| @@ -564,45 +695,59 @@ static void stm32_dma_issue_pending(struct dma_chan *c) | |||
| 564 | if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) { | 695 | if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) { |
| 565 | dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan); | 696 | dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan); |
| 566 | stm32_dma_start_transfer(chan); | 697 | stm32_dma_start_transfer(chan); |
| 567 | if (chan->desc->cyclic) | 698 | |
| 568 | stm32_dma_configure_next_sg(chan); | ||
| 569 | } | 699 | } |
| 570 | spin_unlock_irqrestore(&chan->vchan.lock, flags); | 700 | spin_unlock_irqrestore(&chan->vchan.lock, flags); |
| 571 | } | 701 | } |
| 572 | 702 | ||
| 573 | static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan, | 703 | static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan, |
| 574 | enum dma_transfer_direction direction, | 704 | enum dma_transfer_direction direction, |
| 575 | enum dma_slave_buswidth *buswidth) | 705 | enum dma_slave_buswidth *buswidth, |
| 706 | u32 buf_len) | ||
| 576 | { | 707 | { |
| 577 | enum dma_slave_buswidth src_addr_width, dst_addr_width; | 708 | enum dma_slave_buswidth src_addr_width, dst_addr_width; |
| 578 | int src_bus_width, dst_bus_width; | 709 | int src_bus_width, dst_bus_width; |
| 579 | int src_burst_size, dst_burst_size; | 710 | int src_burst_size, dst_burst_size; |
| 580 | u32 src_maxburst, dst_maxburst; | 711 | u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst; |
| 581 | u32 dma_scr = 0; | 712 | u32 dma_scr, threshold; |
| 582 | 713 | ||
| 583 | src_addr_width = chan->dma_sconfig.src_addr_width; | 714 | src_addr_width = chan->dma_sconfig.src_addr_width; |
| 584 | dst_addr_width = chan->dma_sconfig.dst_addr_width; | 715 | dst_addr_width = chan->dma_sconfig.dst_addr_width; |
| 585 | src_maxburst = chan->dma_sconfig.src_maxburst; | 716 | src_maxburst = chan->dma_sconfig.src_maxburst; |
| 586 | dst_maxburst = chan->dma_sconfig.dst_maxburst; | 717 | dst_maxburst = chan->dma_sconfig.dst_maxburst; |
| 718 | threshold = chan->threshold; | ||
| 587 | 719 | ||
| 588 | switch (direction) { | 720 | switch (direction) { |
| 589 | case DMA_MEM_TO_DEV: | 721 | case DMA_MEM_TO_DEV: |
| 722 | /* Set device data size */ | ||
| 590 | dst_bus_width = stm32_dma_get_width(chan, dst_addr_width); | 723 | dst_bus_width = stm32_dma_get_width(chan, dst_addr_width); |
| 591 | if (dst_bus_width < 0) | 724 | if (dst_bus_width < 0) |
| 592 | return dst_bus_width; | 725 | return dst_bus_width; |
| 593 | 726 | ||
| 594 | dst_burst_size = stm32_dma_get_burst(chan, dst_maxburst); | 727 | /* Set device burst size */ |
| 728 | dst_best_burst = stm32_dma_get_best_burst(buf_len, | ||
| 729 | dst_maxburst, | ||
| 730 | threshold, | ||
| 731 | dst_addr_width); | ||
| 732 | |||
| 733 | dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst); | ||
| 595 | if (dst_burst_size < 0) | 734 | if (dst_burst_size < 0) |
| 596 | return dst_burst_size; | 735 | return dst_burst_size; |
| 597 | 736 | ||
| 598 | if (!src_addr_width) | 737 | /* Set memory data size */ |
| 599 | src_addr_width = dst_addr_width; | 738 | src_addr_width = stm32_dma_get_max_width(buf_len, threshold); |
| 600 | 739 | chan->mem_width = src_addr_width; | |
| 601 | src_bus_width = stm32_dma_get_width(chan, src_addr_width); | 740 | src_bus_width = stm32_dma_get_width(chan, src_addr_width); |
| 602 | if (src_bus_width < 0) | 741 | if (src_bus_width < 0) |
| 603 | return src_bus_width; | 742 | return src_bus_width; |
| 604 | 743 | ||
| 605 | src_burst_size = stm32_dma_get_burst(chan, src_maxburst); | 744 | /* Set memory burst size */ |
| 745 | src_maxburst = STM32_DMA_MAX_BURST; | ||
| 746 | src_best_burst = stm32_dma_get_best_burst(buf_len, | ||
| 747 | src_maxburst, | ||
| 748 | threshold, | ||
| 749 | src_addr_width); | ||
| 750 | src_burst_size = stm32_dma_get_burst(chan, src_best_burst); | ||
| 606 | if (src_burst_size < 0) | 751 | if (src_burst_size < 0) |
| 607 | return src_burst_size; | 752 | return src_burst_size; |
| 608 | 753 | ||
| @@ -612,27 +757,46 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan, | |||
| 612 | STM32_DMA_SCR_PBURST(dst_burst_size) | | 757 | STM32_DMA_SCR_PBURST(dst_burst_size) | |
| 613 | STM32_DMA_SCR_MBURST(src_burst_size); | 758 | STM32_DMA_SCR_MBURST(src_burst_size); |
| 614 | 759 | ||
| 760 | /* Set FIFO threshold */ | ||
| 761 | chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK; | ||
| 762 | chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold); | ||
| 763 | |||
| 764 | /* Set peripheral address */ | ||
| 615 | chan->chan_reg.dma_spar = chan->dma_sconfig.dst_addr; | 765 | chan->chan_reg.dma_spar = chan->dma_sconfig.dst_addr; |
| 616 | *buswidth = dst_addr_width; | 766 | *buswidth = dst_addr_width; |
| 617 | break; | 767 | break; |
| 618 | 768 | ||
| 619 | case DMA_DEV_TO_MEM: | 769 | case DMA_DEV_TO_MEM: |
| 770 | /* Set device data size */ | ||
| 620 | src_bus_width = stm32_dma_get_width(chan, src_addr_width); | 771 | src_bus_width = stm32_dma_get_width(chan, src_addr_width); |
| 621 | if (src_bus_width < 0) | 772 | if (src_bus_width < 0) |
| 622 | return src_bus_width; | 773 | return src_bus_width; |
| 623 | 774 | ||
| 624 | src_burst_size = stm32_dma_get_burst(chan, src_maxburst); | 775 | /* Set device burst size */ |
| 776 | src_best_burst = stm32_dma_get_best_burst(buf_len, | ||
| 777 | src_maxburst, | ||
| 778 | threshold, | ||
| 779 | src_addr_width); | ||
| 780 | chan->mem_burst = src_best_burst; | ||
| 781 | src_burst_size = stm32_dma_get_burst(chan, src_best_burst); | ||
| 625 | if (src_burst_size < 0) | 782 | if (src_burst_size < 0) |
| 626 | return src_burst_size; | 783 | return src_burst_size; |
| 627 | 784 | ||
| 628 | if (!dst_addr_width) | 785 | /* Set memory data size */ |
| 629 | dst_addr_width = src_addr_width; | 786 | dst_addr_width = stm32_dma_get_max_width(buf_len, threshold); |
| 630 | 787 | chan->mem_width = dst_addr_width; | |
| 631 | dst_bus_width = stm32_dma_get_width(chan, dst_addr_width); | 788 | dst_bus_width = stm32_dma_get_width(chan, dst_addr_width); |
| 632 | if (dst_bus_width < 0) | 789 | if (dst_bus_width < 0) |
| 633 | return dst_bus_width; | 790 | return dst_bus_width; |
| 634 | 791 | ||
| 635 | dst_burst_size = stm32_dma_get_burst(chan, dst_maxburst); | 792 | /* Set memory burst size */ |
| 793 | dst_maxburst = STM32_DMA_MAX_BURST; | ||
| 794 | dst_best_burst = stm32_dma_get_best_burst(buf_len, | ||
| 795 | dst_maxburst, | ||
| 796 | threshold, | ||
| 797 | dst_addr_width); | ||
| 798 | chan->mem_burst = dst_best_burst; | ||
| 799 | dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst); | ||
| 636 | if (dst_burst_size < 0) | 800 | if (dst_burst_size < 0) |
| 637 | return dst_burst_size; | 801 | return dst_burst_size; |
| 638 | 802 | ||
| @@ -642,6 +806,11 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan, | |||
| 642 | STM32_DMA_SCR_PBURST(src_burst_size) | | 806 | STM32_DMA_SCR_PBURST(src_burst_size) | |
| 643 | STM32_DMA_SCR_MBURST(dst_burst_size); | 807 | STM32_DMA_SCR_MBURST(dst_burst_size); |
| 644 | 808 | ||
| 809 | /* Set FIFO threshold */ | ||
| 810 | chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK; | ||
| 811 | chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold); | ||
| 812 | |||
| 813 | /* Set peripheral address */ | ||
| 645 | chan->chan_reg.dma_spar = chan->dma_sconfig.src_addr; | 814 | chan->chan_reg.dma_spar = chan->dma_sconfig.src_addr; |
| 646 | *buswidth = chan->dma_sconfig.src_addr_width; | 815 | *buswidth = chan->dma_sconfig.src_addr_width; |
| 647 | break; | 816 | break; |
| @@ -651,8 +820,9 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan, | |||
| 651 | return -EINVAL; | 820 | return -EINVAL; |
| 652 | } | 821 | } |
| 653 | 822 | ||
| 654 | stm32_dma_set_fifo_config(chan, src_maxburst, dst_maxburst); | 823 | stm32_dma_set_fifo_config(chan, src_best_burst, dst_best_burst); |
| 655 | 824 | ||
| 825 | /* Set DMA control register */ | ||
| 656 | chan->chan_reg.dma_scr &= ~(STM32_DMA_SCR_DIR_MASK | | 826 | chan->chan_reg.dma_scr &= ~(STM32_DMA_SCR_DIR_MASK | |
| 657 | STM32_DMA_SCR_PSIZE_MASK | STM32_DMA_SCR_MSIZE_MASK | | 827 | STM32_DMA_SCR_PSIZE_MASK | STM32_DMA_SCR_MSIZE_MASK | |
| 658 | STM32_DMA_SCR_PBURST_MASK | STM32_DMA_SCR_MBURST_MASK); | 828 | STM32_DMA_SCR_PBURST_MASK | STM32_DMA_SCR_MBURST_MASK); |
| @@ -692,10 +862,6 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg( | |||
| 692 | if (!desc) | 862 | if (!desc) |
| 693 | return NULL; | 863 | return NULL; |
| 694 | 864 | ||
| 695 | ret = stm32_dma_set_xfer_param(chan, direction, &buswidth); | ||
| 696 | if (ret < 0) | ||
| 697 | goto err; | ||
| 698 | |||
| 699 | /* Set peripheral flow controller */ | 865 | /* Set peripheral flow controller */ |
| 700 | if (chan->dma_sconfig.device_fc) | 866 | if (chan->dma_sconfig.device_fc) |
| 701 | chan->chan_reg.dma_scr |= STM32_DMA_SCR_PFCTRL; | 867 | chan->chan_reg.dma_scr |= STM32_DMA_SCR_PFCTRL; |
| @@ -703,10 +869,15 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg( | |||
| 703 | chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL; | 869 | chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL; |
| 704 | 870 | ||
| 705 | for_each_sg(sgl, sg, sg_len, i) { | 871 | for_each_sg(sgl, sg, sg_len, i) { |
| 872 | ret = stm32_dma_set_xfer_param(chan, direction, &buswidth, | ||
| 873 | sg_dma_len(sg)); | ||
| 874 | if (ret < 0) | ||
| 875 | goto err; | ||
| 876 | |||
| 706 | desc->sg_req[i].len = sg_dma_len(sg); | 877 | desc->sg_req[i].len = sg_dma_len(sg); |
| 707 | 878 | ||
| 708 | nb_data_items = desc->sg_req[i].len / buswidth; | 879 | nb_data_items = desc->sg_req[i].len / buswidth; |
| 709 | if (nb_data_items > STM32_DMA_MAX_DATA_ITEMS) { | 880 | if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) { |
| 710 | dev_err(chan2dev(chan), "nb items not supported\n"); | 881 | dev_err(chan2dev(chan), "nb items not supported\n"); |
| 711 | goto err; | 882 | goto err; |
| 712 | } | 883 | } |
| @@ -767,12 +938,12 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic( | |||
| 767 | return NULL; | 938 | return NULL; |
| 768 | } | 939 | } |
| 769 | 940 | ||
| 770 | ret = stm32_dma_set_xfer_param(chan, direction, &buswidth); | 941 | ret = stm32_dma_set_xfer_param(chan, direction, &buswidth, period_len); |
| 771 | if (ret < 0) | 942 | if (ret < 0) |
| 772 | return NULL; | 943 | return NULL; |
| 773 | 944 | ||
| 774 | nb_data_items = period_len / buswidth; | 945 | nb_data_items = period_len / buswidth; |
| 775 | if (nb_data_items > STM32_DMA_MAX_DATA_ITEMS) { | 946 | if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) { |
| 776 | dev_err(chan2dev(chan), "number of items not supported\n"); | 947 | dev_err(chan2dev(chan), "number of items not supported\n"); |
| 777 | return NULL; | 948 | return NULL; |
| 778 | } | 949 | } |
| @@ -816,35 +987,45 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy( | |||
| 816 | dma_addr_t src, size_t len, unsigned long flags) | 987 | dma_addr_t src, size_t len, unsigned long flags) |
| 817 | { | 988 | { |
| 818 | struct stm32_dma_chan *chan = to_stm32_dma_chan(c); | 989 | struct stm32_dma_chan *chan = to_stm32_dma_chan(c); |
| 819 | u32 num_sgs; | 990 | enum dma_slave_buswidth max_width; |
| 820 | struct stm32_dma_desc *desc; | 991 | struct stm32_dma_desc *desc; |
| 821 | size_t xfer_count, offset; | 992 | size_t xfer_count, offset; |
| 993 | u32 num_sgs, best_burst, dma_burst, threshold; | ||
| 822 | int i; | 994 | int i; |
| 823 | 995 | ||
| 824 | num_sgs = DIV_ROUND_UP(len, STM32_DMA_MAX_DATA_ITEMS); | 996 | num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS); |
| 825 | desc = stm32_dma_alloc_desc(num_sgs); | 997 | desc = stm32_dma_alloc_desc(num_sgs); |
| 826 | if (!desc) | 998 | if (!desc) |
| 827 | return NULL; | 999 | return NULL; |
| 828 | 1000 | ||
| 1001 | threshold = chan->threshold; | ||
| 1002 | |||
| 829 | for (offset = 0, i = 0; offset < len; offset += xfer_count, i++) { | 1003 | for (offset = 0, i = 0; offset < len; offset += xfer_count, i++) { |
| 830 | xfer_count = min_t(size_t, len - offset, | 1004 | xfer_count = min_t(size_t, len - offset, |
| 831 | STM32_DMA_MAX_DATA_ITEMS); | 1005 | STM32_DMA_ALIGNED_MAX_DATA_ITEMS); |
| 832 | 1006 | ||
| 833 | desc->sg_req[i].len = xfer_count; | 1007 | /* Compute best burst size */ |
| 1008 | max_width = DMA_SLAVE_BUSWIDTH_1_BYTE; | ||
| 1009 | best_burst = stm32_dma_get_best_burst(len, STM32_DMA_MAX_BURST, | ||
| 1010 | threshold, max_width); | ||
| 1011 | dma_burst = stm32_dma_get_burst(chan, best_burst); | ||
| 834 | 1012 | ||
| 835 | stm32_dma_clear_reg(&desc->sg_req[i].chan_reg); | 1013 | stm32_dma_clear_reg(&desc->sg_req[i].chan_reg); |
| 836 | desc->sg_req[i].chan_reg.dma_scr = | 1014 | desc->sg_req[i].chan_reg.dma_scr = |
| 837 | STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_MEM) | | 1015 | STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_MEM) | |
| 1016 | STM32_DMA_SCR_PBURST(dma_burst) | | ||
| 1017 | STM32_DMA_SCR_MBURST(dma_burst) | | ||
| 838 | STM32_DMA_SCR_MINC | | 1018 | STM32_DMA_SCR_MINC | |
| 839 | STM32_DMA_SCR_PINC | | 1019 | STM32_DMA_SCR_PINC | |
| 840 | STM32_DMA_SCR_TCIE | | 1020 | STM32_DMA_SCR_TCIE | |
| 841 | STM32_DMA_SCR_TEIE; | 1021 | STM32_DMA_SCR_TEIE; |
| 842 | desc->sg_req[i].chan_reg.dma_sfcr = STM32_DMA_SFCR_DMDIS | | 1022 | desc->sg_req[i].chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK; |
| 843 | STM32_DMA_SFCR_FTH(STM32_DMA_FIFO_THRESHOLD_FULL) | | 1023 | desc->sg_req[i].chan_reg.dma_sfcr |= |
| 844 | STM32_DMA_SFCR_FEIE; | 1024 | STM32_DMA_SFCR_FTH(threshold); |
| 845 | desc->sg_req[i].chan_reg.dma_spar = src + offset; | 1025 | desc->sg_req[i].chan_reg.dma_spar = src + offset; |
| 846 | desc->sg_req[i].chan_reg.dma_sm0ar = dest + offset; | 1026 | desc->sg_req[i].chan_reg.dma_sm0ar = dest + offset; |
| 847 | desc->sg_req[i].chan_reg.dma_sndtr = xfer_count; | 1027 | desc->sg_req[i].chan_reg.dma_sndtr = xfer_count; |
| 1028 | desc->sg_req[i].len = xfer_count; | ||
| 848 | } | 1029 | } |
| 849 | 1030 | ||
| 850 | desc->num_sgs = num_sgs; | 1031 | desc->num_sgs = num_sgs; |
| @@ -869,6 +1050,7 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan, | |||
| 869 | struct stm32_dma_desc *desc, | 1050 | struct stm32_dma_desc *desc, |
| 870 | u32 next_sg) | 1051 | u32 next_sg) |
| 871 | { | 1052 | { |
| 1053 | u32 modulo, burst_size; | ||
| 872 | u32 residue = 0; | 1054 | u32 residue = 0; |
| 873 | int i; | 1055 | int i; |
| 874 | 1056 | ||
| @@ -876,8 +1058,10 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan, | |||
| 876 | * In cyclic mode, for the last period, residue = remaining bytes from | 1058 | * In cyclic mode, for the last period, residue = remaining bytes from |
| 877 | * NDTR | 1059 | * NDTR |
| 878 | */ | 1060 | */ |
| 879 | if (chan->desc->cyclic && next_sg == 0) | 1061 | if (chan->desc->cyclic && next_sg == 0) { |
| 880 | return stm32_dma_get_remaining_bytes(chan); | 1062 | residue = stm32_dma_get_remaining_bytes(chan); |
| 1063 | goto end; | ||
| 1064 | } | ||
| 881 | 1065 | ||
| 882 | /* | 1066 | /* |
| 883 | * For all other periods in cyclic mode, and in sg mode, | 1067 | * For all other periods in cyclic mode, and in sg mode, |
| @@ -888,6 +1072,15 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan, | |||
| 888 | residue += desc->sg_req[i].len; | 1072 | residue += desc->sg_req[i].len; |
| 889 | residue += stm32_dma_get_remaining_bytes(chan); | 1073 | residue += stm32_dma_get_remaining_bytes(chan); |
| 890 | 1074 | ||
| 1075 | end: | ||
| 1076 | if (!chan->mem_burst) | ||
| 1077 | return residue; | ||
| 1078 | |||
| 1079 | burst_size = chan->mem_burst * chan->mem_width; | ||
| 1080 | modulo = residue % burst_size; | ||
| 1081 | if (modulo) | ||
| 1082 | residue = residue - modulo + burst_size; | ||
| 1083 | |||
| 891 | return residue; | 1084 | return residue; |
| 892 | } | 1085 | } |
| 893 | 1086 | ||
| @@ -902,7 +1095,7 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c, | |||
| 902 | u32 residue = 0; | 1095 | u32 residue = 0; |
| 903 | 1096 | ||
| 904 | status = dma_cookie_status(c, cookie, state); | 1097 | status = dma_cookie_status(c, cookie, state); |
| 905 | if ((status == DMA_COMPLETE) || (!state)) | 1098 | if (status == DMA_COMPLETE || !state) |
| 906 | return status; | 1099 | return status; |
| 907 | 1100 | ||
| 908 | spin_lock_irqsave(&chan->vchan.lock, flags); | 1101 | spin_lock_irqsave(&chan->vchan.lock, flags); |
| @@ -966,7 +1159,7 @@ static void stm32_dma_desc_free(struct virt_dma_desc *vdesc) | |||
| 966 | } | 1159 | } |
| 967 | 1160 | ||
| 968 | static void stm32_dma_set_config(struct stm32_dma_chan *chan, | 1161 | static void stm32_dma_set_config(struct stm32_dma_chan *chan, |
| 969 | struct stm32_dma_cfg *cfg) | 1162 | struct stm32_dma_cfg *cfg) |
| 970 | { | 1163 | { |
| 971 | stm32_dma_clear_reg(&chan->chan_reg); | 1164 | stm32_dma_clear_reg(&chan->chan_reg); |
| 972 | 1165 | ||
| @@ -976,7 +1169,7 @@ static void stm32_dma_set_config(struct stm32_dma_chan *chan, | |||
| 976 | /* Enable Interrupts */ | 1169 | /* Enable Interrupts */ |
| 977 | chan->chan_reg.dma_scr |= STM32_DMA_SCR_TEIE | STM32_DMA_SCR_TCIE; | 1170 | chan->chan_reg.dma_scr |= STM32_DMA_SCR_TEIE | STM32_DMA_SCR_TCIE; |
| 978 | 1171 | ||
| 979 | chan->chan_reg.dma_sfcr = cfg->threshold & STM32_DMA_SFCR_FTH_MASK; | 1172 | chan->threshold = STM32_DMA_THRESHOLD_FTR_GET(cfg->features); |
| 980 | } | 1173 | } |
| 981 | 1174 | ||
| 982 | static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec, | 1175 | static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec, |
| @@ -996,10 +1189,10 @@ static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec, | |||
| 996 | cfg.channel_id = dma_spec->args[0]; | 1189 | cfg.channel_id = dma_spec->args[0]; |
| 997 | cfg.request_line = dma_spec->args[1]; | 1190 | cfg.request_line = dma_spec->args[1]; |
| 998 | cfg.stream_config = dma_spec->args[2]; | 1191 | cfg.stream_config = dma_spec->args[2]; |
| 999 | cfg.threshold = dma_spec->args[3]; | 1192 | cfg.features = dma_spec->args[3]; |
| 1000 | 1193 | ||
| 1001 | if ((cfg.channel_id >= STM32_DMA_MAX_CHANNELS) || | 1194 | if (cfg.channel_id >= STM32_DMA_MAX_CHANNELS || |
| 1002 | (cfg.request_line >= STM32_DMA_MAX_REQUEST_ID)) { | 1195 | cfg.request_line >= STM32_DMA_MAX_REQUEST_ID) { |
| 1003 | dev_err(dev, "Bad channel and/or request id\n"); | 1196 | dev_err(dev, "Bad channel and/or request id\n"); |
| 1004 | return NULL; | 1197 | return NULL; |
| 1005 | } | 1198 | } |
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index f838764993eb..861be5cab1df 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
| @@ -470,7 +470,11 @@ typedef void (*dma_async_tx_callback_result)(void *dma_async_param, | |||
| 470 | const struct dmaengine_result *result); | 470 | const struct dmaengine_result *result); |
| 471 | 471 | ||
| 472 | struct dmaengine_unmap_data { | 472 | struct dmaengine_unmap_data { |
| 473 | #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID) | ||
| 474 | u16 map_cnt; | ||
| 475 | #else | ||
| 473 | u8 map_cnt; | 476 | u8 map_cnt; |
| 477 | #endif | ||
| 474 | u8 to_cnt; | 478 | u8 to_cnt; |
| 475 | u8 from_cnt; | 479 | u8 from_cnt; |
| 476 | u8 bidi_cnt; | 480 | u8 bidi_cnt; |
