35 files changed, 3865 insertions, 320 deletions
diff --git a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt index 1be6941ac1e5..f3d1f151ba80 100644 --- a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt +++ b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt | |||
@@ -3,6 +3,8 @@ | |||
3 | Required Properties: | 3 | Required Properties: |
4 | -compatible: "renesas,<soctype>-usb-dmac", "renesas,usb-dmac" as fallback. | 4 | -compatible: "renesas,<soctype>-usb-dmac", "renesas,usb-dmac" as fallback. |
5 | Examples with soctypes are: | 5 | Examples with soctypes are: |
6 | - "renesas,r8a7743-usb-dmac" (RZ/G1M) | ||
7 | - "renesas,r8a7745-usb-dmac" (RZ/G1E) | ||
6 | - "renesas,r8a7790-usb-dmac" (R-Car H2) | 8 | - "renesas,r8a7790-usb-dmac" (R-Car H2) |
7 | - "renesas,r8a7791-usb-dmac" (R-Car M2-W) | 9 | - "renesas,r8a7791-usb-dmac" (R-Car M2-W) |
8 | - "renesas,r8a7793-usb-dmac" (R-Car M2-N) | 10 | - "renesas,r8a7793-usb-dmac" (R-Car M2-N) |
diff --git a/Documentation/devicetree/bindings/dma/sprd-dma.txt b/Documentation/devicetree/bindings/dma/sprd-dma.txt new file mode 100644 index 000000000000..7a10fea2e51b --- /dev/null +++ b/Documentation/devicetree/bindings/dma/sprd-dma.txt | |||
@@ -0,0 +1,41 @@ | |||
1 | * Spreadtrum DMA controller | ||
2 | |||
3 | This binding follows the generic DMA bindings defined in dma.txt. | ||
4 | |||
5 | Required properties: | ||
6 | - compatible: Should be "sprd,sc9860-dma". | ||
7 | - reg: Should contain DMA registers location and length. | ||
8 | - interrupts: Should contain one interrupt shared by all channels. | ||
9 | - #dma-cells: must be <1>. Used to represent the number of integer | ||
10 | cells in the dmas property of client device. | ||
11 | - #dma-channels : Number of DMA channels supported. Should be 32. | ||
12 | - clock-names: Should contain the clock of the DMA controller. | ||
13 | - clocks: Should contain a clock specifier for each entry in clock-names. | ||
14 | |||
15 | Example: | ||
16 | |||
17 | Controller: | ||
18 | apdma: dma-controller@20100000 { | ||
19 | compatible = "sprd,sc9860-dma"; | ||
20 | reg = <0x20100000 0x4000>; | ||
21 | interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>; | ||
22 | #dma-cells = <1>; | ||
23 | #dma-channels = <32>; | ||
24 | clock-names = "enable"; | ||
25 | clocks = <&clk_ap_ahb_gates 5>; | ||
26 | }; | ||
27 | |||
28 | |||
29 | Client: | ||
30 | DMA clients connected to the Spreadtrum DMA controller must use the format | ||
31 | described in the dma.txt file, using a two-cell specifier for each channel. | ||
32 | The two cells in order are: | ||
33 | 1. A phandle pointing to the DMA controller. | ||
34 | 2. The channel id. | ||
35 | |||
36 | spi0: spi@70a00000{ | ||
37 | ... | ||
38 | dma-names = "rx_chn", "tx_chn"; | ||
39 | dmas = <&apdma 11>, <&apdma 12>; | ||
40 | ... | ||
41 | }; | ||
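
The client node above only declares the channels; on the kernel side a peripheral driver obtains them through the standard dmaengine API. A minimal sketch of that consumer path, assuming a hypothetical driver and the "rx_chn" name from the example (the slave-config values are illustrative, not taken from the binding):

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>

/* Hypothetical consumer: request the RX channel named in "dma-names"
 * and start a single device-to-memory transfer. */
static int example_start_rx(struct device *dev, dma_addr_t buf, size_t len)
{
	struct dma_slave_config cfg = {
		.direction = DMA_DEV_TO_MEM,
		/* Illustrative values; a real driver uses its own FIFO address */
		.src_addr = 0x70a00000,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = 4,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	chan = dma_request_chan(dev, "rx_chn");	/* matches dma-names above */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	dmaengine_slave_config(chan, &cfg);

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return dma_submit_error(cookie);
}
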
diff --git a/Documentation/devicetree/bindings/dma/stm32-dma.txt b/Documentation/devicetree/bindings/dma/stm32-dma.txt index 6f44df94101c..0b55718bf889 100644 --- a/Documentation/devicetree/bindings/dma/stm32-dma.txt +++ b/Documentation/devicetree/bindings/dma/stm32-dma.txt | |||
@@ -13,6 +13,7 @@ Required properties: | |||
13 | - #dma-cells : Must be <4>. See DMA client paragraph for more details. | 13 | - #dma-cells : Must be <4>. See DMA client paragraph for more details. |
14 | 14 | ||
15 | Optional properties: | 15 | Optional properties: |
16 | - dma-requests : Number of DMA requests supported. | ||
16 | - resets: Reference to a reset controller asserting the DMA controller | 17 | - resets: Reference to a reset controller asserting the DMA controller |
17 | - st,mem2mem: boolean; if defined, it indicates that the controller supports | 18 | - st,mem2mem: boolean; if defined, it indicates that the controller supports |
18 | memory-to-memory transfer | 19 | memory-to-memory transfer |
@@ -34,12 +35,13 @@ Example: | |||
34 | #dma-cells = <4>; | 35 | #dma-cells = <4>; |
35 | st,mem2mem; | 36 | st,mem2mem; |
36 | resets = <&rcc 150>; | 37 | resets = <&rcc 150>; |
38 | dma-requests = <8>; | ||
37 | }; | 39 | }; |
38 | 40 | ||
39 | * DMA client | 41 | * DMA client |
40 | 42 | ||
41 | DMA clients connected to the STM32 DMA controller must use the format | 43 | DMA clients connected to the STM32 DMA controller must use the format |
42 | described in the dma.txt file, using a five-cell specifier for each | 44 | described in the dma.txt file, using a four-cell specifier for each |
43 | channel: a phandle to the DMA controller plus the following four integer cells: | 45 | channel: a phandle to the DMA controller plus the following four integer cells: |
44 | 46 | ||
45 | 1. The channel id | 47 | 1. The channel id |
diff --git a/Documentation/devicetree/bindings/dma/stm32-dmamux.txt b/Documentation/devicetree/bindings/dma/stm32-dmamux.txt new file mode 100644 index 000000000000..1b893b235507 --- /dev/null +++ b/Documentation/devicetree/bindings/dma/stm32-dmamux.txt | |||
@@ -0,0 +1,84 @@ | |||
1 | STM32 DMA MUX (DMA request router) | ||
2 | |||
3 | Required properties: | ||
4 | - compatible: "st,stm32h7-dmamux" | ||
5 | - reg: Memory map for accessing module | ||
6 | - #dma-cells: Should be set to <3>. | ||
7 | First parameter is request line number. | ||
8 | Second is DMA channel configuration | ||
9 | Third is the FIFO threshold | ||
10 | For more details about the three cells, please see | ||
11 | stm32-dma.txt documentation binding file | ||
12 | - dma-masters: Phandle pointing to the DMA controllers. | ||
13 | Several controllers are allowed. Only "st,stm32-dma" DMA | ||
14 | compatible controllers are supported. | ||
15 | |||
16 | Optional properties: | ||
17 | - dma-channels : Number of DMA channels supported. | ||
18 | - dma-requests : Number of DMAMUX requests supported. | ||
19 | - resets: Reference to a reset controller asserting the DMA controller | ||
20 | - clocks: Input clock of the DMAMUX instance. | ||
21 | |||
22 | Example: | ||
23 | |||
24 | /* DMA controller 1 */ | ||
25 | dma1: dma-controller@40020000 { | ||
26 | compatible = "st,stm32-dma"; | ||
27 | reg = <0x40020000 0x400>; | ||
28 | interrupts = <11>, | ||
29 | <12>, | ||
30 | <13>, | ||
31 | <14>, | ||
32 | <15>, | ||
33 | <16>, | ||
34 | <17>, | ||
35 | <47>; | ||
36 | clocks = <&timer_clk>; | ||
37 | #dma-cells = <4>; | ||
38 | st,mem2mem; | ||
39 | resets = <&rcc 150>; | ||
40 | dma-channels = <8>; | ||
41 | dma-requests = <8>; | ||
42 | }; | ||
43 | |||
44 | /* DMA controller 2 */ | ||
45 | dma2: dma@40020400 { | ||
46 | compatible = "st,stm32-dma"; | ||
47 | reg = <0x40020400 0x400>; | ||
48 | interrupts = <56>, | ||
49 | <57>, | ||
50 | <58>, | ||
51 | <59>, | ||
52 | <60>, | ||
53 | <68>, | ||
54 | <69>, | ||
55 | <70>; | ||
56 | clocks = <&timer_clk>; | ||
57 | #dma-cells = <4>; | ||
58 | st,mem2mem; | ||
59 | resets = <&rcc 150>; | ||
60 | dma-channels = <8>; | ||
61 | dma-requests = <8>; | ||
62 | }; | ||
63 | |||
64 | /* DMA mux */ | ||
65 | dmamux1: dma-router@40020800 { | ||
66 | compatible = "st,stm32h7-dmamux"; | ||
67 | reg = <0x40020800 0x3c>; | ||
68 | #dma-cells = <3>; | ||
69 | dma-requests = <128>; | ||
70 | dma-channels = <16>; | ||
71 | dma-masters = <&dma1 &dma2>; | ||
72 | clocks = <&timer_clk>; | ||
73 | }; | ||
74 | |||
75 | /* DMA client */ | ||
76 | usart1: serial@40011000 { | ||
77 | compatible = "st,stm32-usart", "st,stm32-uart"; | ||
78 | reg = <0x40011000 0x400>; | ||
79 | interrupts = <37>; | ||
80 | clocks = <&timer_clk>; | ||
81 | dmas = <&dmamux1 41 0x414 0>, | ||
82 | <&dmamux1 42 0x414 0>; | ||
83 | dma-names = "rx", "tx"; | ||
84 | }; | ||
diff --git a/Documentation/devicetree/bindings/dma/stm32-mdma.txt b/Documentation/devicetree/bindings/dma/stm32-mdma.txt new file mode 100644 index 000000000000..d18772d6bc65 --- /dev/null +++ b/Documentation/devicetree/bindings/dma/stm32-mdma.txt | |||
@@ -0,0 +1,94 @@ | |||
1 | * STMicroelectronics STM32 MDMA controller | ||
2 | |||
3 | The STM32 MDMA is a general-purpose direct memory access controller capable of | ||
4 | supporting 64 independent DMA channels with 256 HW requests. | ||
5 | |||
6 | Required properties: | ||
7 | - compatible: Should be "st,stm32h7-mdma" | ||
8 | - reg: Should contain MDMA registers location and length. This should include | ||
9 | all of the per-channel registers. | ||
10 | - interrupts: Should contain the MDMA interrupt. | ||
11 | - clocks: Should contain the input clock of the DMA instance. | ||
12 | - resets: Reference to a reset controller asserting the DMA controller. | ||
13 | - #dma-cells : Must be <5>. See DMA client paragraph for more details. | ||
14 | |||
15 | Optional properties: | ||
16 | - dma-channels: Number of DMA channels supported by the controller. | ||
17 | - dma-requests: Number of DMA request signals supported by the controller. | ||
18 | - st,ahb-addr-masks: Array of u32 mask to list memory devices addressed via | ||
19 | AHB bus. | ||
20 | |||
21 | Example: | ||
22 | |||
23 | mdma1: dma@52000000 { | ||
24 | compatible = "st,stm32h7-mdma"; | ||
25 | reg = <0x52000000 0x1000>; | ||
26 | interrupts = <122>; | ||
27 | clocks = <&timer_clk>; | ||
28 | resets = <&rcc 992>; | ||
29 | #dma-cells = <5>; | ||
30 | dma-channels = <16>; | ||
31 | dma-requests = <32>; | ||
32 | st,ahb-addr-masks = <0x20000000>, <0x00000000>; | ||
33 | }; | ||
34 | |||
35 | * DMA client | ||
36 | |||
37 | DMA clients connected to the STM32 MDMA controller must use the format | ||
38 | described in the dma.txt file, using a five-cell specifier for each channel: | ||
39 | a phandle to the MDMA controller plus the following five integer cells: | ||
40 | |||
41 | 1. The request line number | ||
42 | 2. The priority level | ||
43 | 0x00: Low | ||
44 | 0x01: Medium | ||
45 | 0x10: High | ||
46 | 0x11: Very high | ||
47 | 3. A 32bit mask specifying the DMA channel configuration | ||
48 | -bit 0-1: Source increment mode | ||
49 | 0x00: Source address pointer is fixed | ||
50 | 0x10: Source address pointer is incremented after each data transfer | ||
51 | 0x11: Source address pointer is decremented after each data transfer | ||
52 | -bit 2-3: Destination increment mode | ||
53 | 0x00: Destination address pointer is fixed | ||
54 | 0x10: Destination address pointer is incremented after each data | ||
55 | transfer | ||
56 | 0x11: Destination address pointer is decremented after each data | ||
57 | transfer | ||
58 | -bit 8-9: Source increment offset size | ||
59 | 0x00: byte (8bit) | ||
60 | 0x01: half-word (16bit) | ||
61 | 0x10: word (32bit) | ||
62 | 0x11: double-word (64bit) | ||
63 | -bit 10-11: Destination increment offset size | ||
64 | 0x00: byte (8bit) | ||
65 | 0x01: half-word (16bit) | ||
66 | 0x10: word (32bit) | ||
67 | 0x11: double-word (64bit) | ||
68 | -bit 18-25: The number of bytes to be transferred in a single transfer | ||
69 | (min = 1 byte, max = 128 bytes) | ||
70 | -bit 28-29: Trigger Mode | ||
71 | 0x00: Each MDMA request triggers a buffer transfer (max 128 bytes) | ||
72 | 0x01: Each MDMA request triggers a block transfer (max 64K bytes) | ||
73 | 0x10: Each MDMA request triggers a repeated block transfer | ||
74 | 0x11: Each MDMA request triggers a linked list transfer | ||
75 | 4. A 32bit value specifying the register to be used to acknowledge the request | ||
76 | if no HW ack signal is used by the MDMA client | ||
77 | 5. A 32bit mask specifying the value to be written to acknowledge the request | ||
78 | if no HW ack signal is used by the MDMA client | ||
79 | |||
80 | Example: | ||
81 | |||
82 | i2c4: i2c@5c002000 { | ||
83 | compatible = "st,stm32f7-i2c"; | ||
84 | reg = <0x5c002000 0x400>; | ||
85 | interrupts = <95>, | ||
86 | <96>; | ||
87 | clocks = <&timer_clk>; | ||
88 | #address-cells = <1>; | ||
89 | #size-cells = <0>; | ||
90 | dmas = <&mdma1 36 0x0 0x40008 0x0 0x0>, | ||
91 | <&mdma1 37 0x0 0x40002 0x0 0x0>; | ||
92 | dma-names = "rx", "tx"; | ||
93 | status = "disabled"; | ||
94 | }; | ||
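
The third cell of the specifier packs several fields into one 32-bit word. A minimal sketch of how a client could assemble it from the bit layout listed above; the macro names are hypothetical, and the two-bit codes written as 0x00/0x01/0x10/0x11 in the binding are treated as binary values 0-3:

#include <linux/types.h>

/*
 * Hypothetical helpers for building the third dmas cell of the MDMA
 * specifier.  Field positions follow the list above.
 */
#define EXAMPLE_MDMA_SRC_INC(x)		(((x) & 0x3) << 0)	/* bits 0-1 */
#define EXAMPLE_MDMA_DST_INC(x)		(((x) & 0x3) << 2)	/* bits 2-3 */
#define EXAMPLE_MDMA_SRC_INCSZ(x)	(((x) & 0x3) << 8)	/* bits 8-9 */
#define EXAMPLE_MDMA_DST_INCSZ(x)	(((x) & 0x3) << 10)	/* bits 10-11 */
#define EXAMPLE_MDMA_BUF_LEN(x)		(((x) & 0xff) << 18)	/* bits 18-25 */
#define EXAMPLE_MDMA_TRIG_MODE(x)	(((x) & 0x3) << 28)	/* bits 28-29 */

/* Incrementing word-wide source and destination, 16 bytes per single
 * transfer (illustrative; check the driver for the exact encoding),
 * buffer-transfer trigger mode. */
static inline u32 example_mdma_channel_config(void)
{
	return EXAMPLE_MDMA_SRC_INC(2) |	/* source incremented */
	       EXAMPLE_MDMA_DST_INC(2) |	/* destination incremented */
	       EXAMPLE_MDMA_SRC_INCSZ(2) |	/* 32-bit source step */
	       EXAMPLE_MDMA_DST_INCSZ(2) |	/* 32-bit destination step */
	       EXAMPLE_MDMA_BUF_LEN(16) |	/* bytes per request */
	       EXAMPLE_MDMA_TRIG_MODE(0);	/* buffer transfer */
}
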
diff --git a/Documentation/devicetree/bindings/dma/sun6i-dma.txt b/Documentation/devicetree/bindings/dma/sun6i-dma.txt index 98fbe1a5c6dd..9700b1d00fed 100644 --- a/Documentation/devicetree/bindings/dma/sun6i-dma.txt +++ b/Documentation/devicetree/bindings/dma/sun6i-dma.txt | |||
@@ -27,6 +27,32 @@ Example: | |||
27 | #dma-cells = <1>; | 27 | #dma-cells = <1>; |
28 | }; | 28 | }; |
29 | 29 | ||
30 | ------------------------------------------------------------------------------ | ||
31 | For A64 DMA controller: | ||
32 | |||
33 | Required properties: | ||
34 | - compatible: "allwinner,sun50i-a64-dma" | ||
35 | - dma-channels: Number of DMA channels supported by the controller. | ||
36 | Refer to Documentation/devicetree/bindings/dma/dma.txt | ||
37 | - all properties above, i.e. reg, interrupts, clocks, resets and #dma-cells | ||
38 | |||
39 | Optional properties: | ||
40 | - dma-requests: Number of DMA request signals supported by the controller. | ||
41 | Refer to Documentation/devicetree/bindings/dma/dma.txt | ||
42 | |||
43 | Example: | ||
44 | dma: dma-controller@1c02000 { | ||
45 | compatible = "allwinner,sun50i-a64-dma"; | ||
46 | reg = <0x01c02000 0x1000>; | ||
47 | interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>; | ||
48 | clocks = <&ccu CLK_BUS_DMA>; | ||
49 | dma-channels = <8>; | ||
50 | dma-requests = <27>; | ||
51 | resets = <&ccu RST_BUS_DMA>; | ||
52 | #dma-cells = <1>; | ||
53 | }; | ||
54 | ------------------------------------------------------------------------------ | ||
55 | |||
30 | Clients: | 56 | Clients: |
31 | 57 | ||
32 | DMA clients connected to the A31 DMA controller must use the format | 58 | DMA clients connected to the A31 DMA controller must use the format |
diff --git a/MAINTAINERS b/MAINTAINERS index 8604cf64a169..2a121c4c990d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -12947,7 +12947,7 @@ F: Documentation/devicetree/bindings/arc/axs10* | |||
12947 | 12947 | ||
12948 | SYNOPSYS DESIGNWARE DMAC DRIVER | 12948 | SYNOPSYS DESIGNWARE DMAC DRIVER |
12949 | M: Viresh Kumar <vireshk@kernel.org> | 12949 | M: Viresh Kumar <vireshk@kernel.org> |
12950 | M: Andy Shevchenko <andriy.shevchenko@linux.intel.com> | 12950 | R: Andy Shevchenko <andriy.shevchenko@linux.intel.com> |
12951 | S: Maintained | 12951 | S: Maintained |
12952 | F: include/linux/dma/dw.h | 12952 | F: include/linux/dma/dw.h |
12953 | F: include/linux/platform_data/dma-dw.h | 12953 | F: include/linux/platform_data/dma-dw.h |
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index fadc4d8783bd..27df3e2837fd 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
@@ -115,7 +115,7 @@ config BCM_SBA_RAID | |||
115 | select DMA_ENGINE_RAID | 115 | select DMA_ENGINE_RAID |
116 | select ASYNC_TX_DISABLE_XOR_VAL_DMA | 116 | select ASYNC_TX_DISABLE_XOR_VAL_DMA |
117 | select ASYNC_TX_DISABLE_PQ_VAL_DMA | 117 | select ASYNC_TX_DISABLE_PQ_VAL_DMA |
118 | default ARCH_BCM_IPROC | 118 | default m if ARCH_BCM_IPROC |
119 | help | 119 | help |
120 | Enable support for Broadcom SBA RAID Engine. The SBA RAID | 120 | Enable support for Broadcom SBA RAID Engine. The SBA RAID |
121 | engine is available on most of the Broadcom iProc SoCs. It | 121 | engine is available on most of the Broadcom iProc SoCs. It |
@@ -483,6 +483,35 @@ config STM32_DMA | |||
483 | If you have a board based on such a MCU and wish to use DMA say Y | 483 | If you have a board based on such a MCU and wish to use DMA say Y |
484 | here. | 484 | here. |
485 | 485 | ||
486 | config STM32_DMAMUX | ||
487 | bool "STMicroelectronics STM32 dma multiplexer support" | ||
488 | depends on STM32_DMA || COMPILE_TEST | ||
489 | help | ||
490 | Enable support for the on-chip DMA multiplexer on STMicroelectronics | ||
491 | STM32 MCUs. | ||
492 | If you have a board based on such a MCU and wish to use DMAMUX say Y | ||
493 | here. | ||
494 | |||
495 | config STM32_MDMA | ||
496 | bool "STMicroelectronics STM32 master dma support" | ||
497 | depends on ARCH_STM32 || COMPILE_TEST | ||
498 | depends on OF | ||
499 | select DMA_ENGINE | ||
500 | select DMA_VIRTUAL_CHANNELS | ||
501 | help | ||
502 | Enable support for the on-chip MDMA controller on STMicroelectronics | ||
503 | STM32 platforms. | ||
504 | If you have a board based on an STM32 SoC and wish to use the master DMA | ||
505 | say Y here. | ||
506 | |||
507 | config SPRD_DMA | ||
508 | tristate "Spreadtrum DMA support" | ||
509 | depends on ARCH_SPRD || COMPILE_TEST | ||
510 | select DMA_ENGINE | ||
511 | select DMA_VIRTUAL_CHANNELS | ||
512 | help | ||
513 | Enable support for the on-chip DMA controller on the Spreadtrum platform. | ||
514 | |||
486 | config S3C24XX_DMAC | 515 | config S3C24XX_DMAC |
487 | bool "Samsung S3C24XX DMA support" | 516 | bool "Samsung S3C24XX DMA support" |
488 | depends on ARCH_S3C24XX || COMPILE_TEST | 517 | depends on ARCH_S3C24XX || COMPILE_TEST |
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 9d0156b50294..b9dca8a0e142 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile | |||
@@ -60,6 +60,9 @@ obj-$(CONFIG_RENESAS_DMA) += sh/ | |||
60 | obj-$(CONFIG_SIRF_DMA) += sirf-dma.o | 60 | obj-$(CONFIG_SIRF_DMA) += sirf-dma.o |
61 | obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o | 61 | obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o |
62 | obj-$(CONFIG_STM32_DMA) += stm32-dma.o | 62 | obj-$(CONFIG_STM32_DMA) += stm32-dma.o |
63 | obj-$(CONFIG_STM32_DMAMUX) += stm32-dmamux.o | ||
64 | obj-$(CONFIG_STM32_MDMA) += stm32-mdma.o | ||
65 | obj-$(CONFIG_SPRD_DMA) += sprd-dma.o | ||
63 | obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o | 66 | obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o |
64 | obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o | 67 | obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o |
65 | obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o | 68 | obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o |
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index 7f58f06157f6..ef3f227ce3e6 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h | |||
@@ -385,7 +385,7 @@ static void vdbg_dump_regs(struct at_dma_chan *atchan) {} | |||
385 | static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli) | 385 | static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli) |
386 | { | 386 | { |
387 | dev_crit(chan2dev(&atchan->chan_common), | 387 | dev_crit(chan2dev(&atchan->chan_common), |
388 | " desc: s%pad d%pad ctrl0x%x:0x%x l0x%pad\n", | 388 | "desc: s%pad d%pad ctrl0x%x:0x%x l%pad\n", |
389 | &lli->saddr, &lli->daddr, | 389 | &lli->saddr, &lli->daddr, |
390 | lli->ctrla, lli->ctrlb, &lli->dscr); | 390 | lli->ctrla, lli->ctrlb, &lli->dscr); |
391 | } | 391 | } |
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c index 6c2c44724637..3956a018bf5a 100644 --- a/drivers/dma/bcm-sba-raid.c +++ b/drivers/dma/bcm-sba-raid.c | |||
@@ -1,9 +1,14 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2017 Broadcom | 2 | * Copyright (C) 2017 Broadcom |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or |
5 | * it under the terms of the GNU General Public License version 2 as | 5 | * modify it under the terms of the GNU General Public License as |
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation version 2. |
7 | * | ||
8 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
9 | * kind, whether express or implied; without even the implied warranty | ||
10 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
7 | */ | 12 | */ |
8 | 13 | ||
9 | /* | 14 | /* |
@@ -25,11 +30,8 @@ | |||
25 | * | 30 | * |
26 | * The Broadcom SBA RAID driver does not require any register programming | 31 | * The Broadcom SBA RAID driver does not require any register programming |
27 | * except submitting request to SBA hardware device via mailbox channels. | 32 | * except submitting request to SBA hardware device via mailbox channels. |
28 | * This driver implements a DMA device with one DMA channel using a set | 33 | * This driver implements a DMA device with one DMA channel using a single |
29 | * of mailbox channels provided by Broadcom SoC specific ring manager | 34 | * mailbox channel provided by Broadcom SoC specific ring manager driver. |
30 | * driver. To exploit parallelism (as described above), all DMA request | ||
31 | * coming to SBA RAID DMA channel are broken down to smaller requests | ||
32 | * and submitted to multiple mailbox channels in round-robin fashion. | ||
33 | * For having more SBA DMA channels, we can create more SBA device nodes | 35 | * For having more SBA DMA channels, we can create more SBA device nodes |
34 | * in Broadcom SoC specific DTS based on number of hardware rings supported | 36 | * in Broadcom SoC specific DTS based on number of hardware rings supported |
35 | * by Broadcom SoC ring manager. | 37 | * by Broadcom SoC ring manager. |
@@ -85,6 +87,7 @@ | |||
85 | #define SBA_CMD_GALOIS 0xe | 87 | #define SBA_CMD_GALOIS 0xe |
86 | 88 | ||
87 | #define SBA_MAX_REQ_PER_MBOX_CHANNEL 8192 | 89 | #define SBA_MAX_REQ_PER_MBOX_CHANNEL 8192 |
90 | #define SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL 8 | ||
88 | 91 | ||
89 | /* Driver helper macros */ | 92 | /* Driver helper macros */ |
90 | #define to_sba_request(tx) \ | 93 | #define to_sba_request(tx) \ |
@@ -142,9 +145,7 @@ struct sba_device { | |||
142 | u32 max_cmds_pool_size; | 145 | u32 max_cmds_pool_size; |
143 | /* Mailbox client and Mailbox channels */ | 146 | /* Mailbox client and Mailbox channels */ |
144 | struct mbox_client client; | 147 | struct mbox_client client; |
145 | int mchans_count; | 148 | struct mbox_chan *mchan; |
146 | atomic_t mchans_current; | ||
147 | struct mbox_chan **mchans; | ||
148 | struct device *mbox_dev; | 149 | struct device *mbox_dev; |
149 | /* DMA device and DMA channel */ | 150 | /* DMA device and DMA channel */ |
150 | struct dma_device dma_dev; | 151 | struct dma_device dma_dev; |
@@ -200,14 +201,6 @@ static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0) | |||
200 | 201 | ||
201 | /* ====== General helper routines ===== */ | 202 | /* ====== General helper routines ===== */ |
202 | 203 | ||
203 | static void sba_peek_mchans(struct sba_device *sba) | ||
204 | { | ||
205 | int mchan_idx; | ||
206 | |||
207 | for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++) | ||
208 | mbox_client_peek_data(sba->mchans[mchan_idx]); | ||
209 | } | ||
210 | |||
211 | static struct sba_request *sba_alloc_request(struct sba_device *sba) | 204 | static struct sba_request *sba_alloc_request(struct sba_device *sba) |
212 | { | 205 | { |
213 | bool found = false; | 206 | bool found = false; |
@@ -231,7 +224,7 @@ static struct sba_request *sba_alloc_request(struct sba_device *sba) | |||
231 | * would have completed which will create more | 224 | * would have completed which will create more |
232 | * room for new requests. | 225 | * room for new requests. |
233 | */ | 226 | */ |
234 | sba_peek_mchans(sba); | 227 | mbox_client_peek_data(sba->mchan); |
235 | return NULL; | 228 | return NULL; |
236 | } | 229 | } |
237 | 230 | ||
@@ -369,15 +362,11 @@ static void sba_cleanup_pending_requests(struct sba_device *sba) | |||
369 | static int sba_send_mbox_request(struct sba_device *sba, | 362 | static int sba_send_mbox_request(struct sba_device *sba, |
370 | struct sba_request *req) | 363 | struct sba_request *req) |
371 | { | 364 | { |
372 | int mchans_idx, ret = 0; | 365 | int ret = 0; |
373 | |||
374 | /* Select mailbox channel in round-robin fashion */ | ||
375 | mchans_idx = atomic_inc_return(&sba->mchans_current); | ||
376 | mchans_idx = mchans_idx % sba->mchans_count; | ||
377 | 366 | ||
378 | /* Send message for the request */ | 367 | /* Send message for the request */ |
379 | req->msg.error = 0; | 368 | req->msg.error = 0; |
380 | ret = mbox_send_message(sba->mchans[mchans_idx], &req->msg); | 369 | ret = mbox_send_message(sba->mchan, &req->msg); |
381 | if (ret < 0) { | 370 | if (ret < 0) { |
382 | dev_err(sba->dev, "send message failed with error %d", ret); | 371 | dev_err(sba->dev, "send message failed with error %d", ret); |
383 | return ret; | 372 | return ret; |
@@ -390,7 +379,7 @@ static int sba_send_mbox_request(struct sba_device *sba, | |||
390 | } | 379 | } |
391 | 380 | ||
392 | /* Signal txdone for mailbox channel */ | 381 | /* Signal txdone for mailbox channel */ |
393 | mbox_client_txdone(sba->mchans[mchans_idx], ret); | 382 | mbox_client_txdone(sba->mchan, ret); |
394 | 383 | ||
395 | return ret; | 384 | return ret; |
396 | } | 385 | } |
@@ -402,13 +391,8 @@ static void _sba_process_pending_requests(struct sba_device *sba) | |||
402 | u32 count; | 391 | u32 count; |
403 | struct sba_request *req; | 392 | struct sba_request *req; |
404 | 393 | ||
405 | /* | 394 | /* Process few pending requests */ |
406 | * Process few pending requests | 395 | count = SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL; |
407 | * | ||
408 | * For now, we process (<number_of_mailbox_channels> * 8) | ||
409 | * number of requests at a time. | ||
410 | */ | ||
411 | count = sba->mchans_count * 8; | ||
412 | while (!list_empty(&sba->reqs_pending_list) && count) { | 396 | while (!list_empty(&sba->reqs_pending_list) && count) { |
413 | /* Get the first pending request */ | 397 | /* Get the first pending request */ |
414 | req = list_first_entry(&sba->reqs_pending_list, | 398 | req = list_first_entry(&sba->reqs_pending_list, |
@@ -442,7 +426,9 @@ static void sba_process_received_request(struct sba_device *sba, | |||
442 | 426 | ||
443 | WARN_ON(tx->cookie < 0); | 427 | WARN_ON(tx->cookie < 0); |
444 | if (tx->cookie > 0) { | 428 | if (tx->cookie > 0) { |
429 | spin_lock_irqsave(&sba->reqs_lock, flags); | ||
445 | dma_cookie_complete(tx); | 430 | dma_cookie_complete(tx); |
431 | spin_unlock_irqrestore(&sba->reqs_lock, flags); | ||
446 | dmaengine_desc_get_callback_invoke(tx, NULL); | 432 | dmaengine_desc_get_callback_invoke(tx, NULL); |
447 | dma_descriptor_unmap(tx); | 433 | dma_descriptor_unmap(tx); |
448 | tx->callback = NULL; | 434 | tx->callback = NULL; |
@@ -570,7 +556,7 @@ static enum dma_status sba_tx_status(struct dma_chan *dchan, | |||
570 | if (ret == DMA_COMPLETE) | 556 | if (ret == DMA_COMPLETE) |
571 | return ret; | 557 | return ret; |
572 | 558 | ||
573 | sba_peek_mchans(sba); | 559 | mbox_client_peek_data(sba->mchan); |
574 | 560 | ||
575 | return dma_cookie_status(dchan, cookie, txstate); | 561 | return dma_cookie_status(dchan, cookie, txstate); |
576 | } | 562 | } |
@@ -1637,7 +1623,7 @@ static int sba_async_register(struct sba_device *sba) | |||
1637 | 1623 | ||
1638 | static int sba_probe(struct platform_device *pdev) | 1624 | static int sba_probe(struct platform_device *pdev) |
1639 | { | 1625 | { |
1640 | int i, ret = 0, mchans_count; | 1626 | int ret = 0; |
1641 | struct sba_device *sba; | 1627 | struct sba_device *sba; |
1642 | struct platform_device *mbox_pdev; | 1628 | struct platform_device *mbox_pdev; |
1643 | struct of_phandle_args args; | 1629 | struct of_phandle_args args; |
@@ -1650,12 +1636,11 @@ static int sba_probe(struct platform_device *pdev) | |||
1650 | sba->dev = &pdev->dev; | 1636 | sba->dev = &pdev->dev; |
1651 | platform_set_drvdata(pdev, sba); | 1637 | platform_set_drvdata(pdev, sba); |
1652 | 1638 | ||
1653 | /* Number of channels equals number of mailbox channels */ | 1639 | /* Number of mailbox channels should be at least 1 */ |
1654 | ret = of_count_phandle_with_args(pdev->dev.of_node, | 1640 | ret = of_count_phandle_with_args(pdev->dev.of_node, |
1655 | "mboxes", "#mbox-cells"); | 1641 | "mboxes", "#mbox-cells"); |
1656 | if (ret <= 0) | 1642 | if (ret <= 0) |
1657 | return -ENODEV; | 1643 | return -ENODEV; |
1658 | mchans_count = ret; | ||
1659 | 1644 | ||
1660 | /* Determine SBA version from DT compatible string */ | 1645 | /* Determine SBA version from DT compatible string */ |
1661 | if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba")) | 1646 | if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba")) |
@@ -1688,7 +1673,7 @@ static int sba_probe(struct platform_device *pdev) | |||
1688 | default: | 1673 | default: |
1689 | return -EINVAL; | 1674 | return -EINVAL; |
1690 | } | 1675 | } |
1691 | sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL * mchans_count; | 1676 | sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL; |
1692 | sba->max_cmd_per_req = sba->max_pq_srcs + 3; | 1677 | sba->max_cmd_per_req = sba->max_pq_srcs + 3; |
1693 | sba->max_xor_srcs = sba->max_cmd_per_req - 1; | 1678 | sba->max_xor_srcs = sba->max_cmd_per_req - 1; |
1694 | sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size; | 1679 | sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size; |
@@ -1702,55 +1687,30 @@ static int sba_probe(struct platform_device *pdev) | |||
1702 | sba->client.knows_txdone = true; | 1687 | sba->client.knows_txdone = true; |
1703 | sba->client.tx_tout = 0; | 1688 | sba->client.tx_tout = 0; |
1704 | 1689 | ||
1705 | /* Allocate mailbox channel array */ | 1690 | /* Request mailbox channel */ |
1706 | sba->mchans = devm_kcalloc(&pdev->dev, mchans_count, | 1691 | sba->mchan = mbox_request_channel(&sba->client, 0); |
1707 | sizeof(*sba->mchans), GFP_KERNEL); | 1692 | if (IS_ERR(sba->mchan)) { |
1708 | if (!sba->mchans) | 1693 | ret = PTR_ERR(sba->mchan); |
1709 | return -ENOMEM; | 1694 | goto fail_free_mchan; |
1710 | |||
1711 | /* Request mailbox channels */ | ||
1712 | sba->mchans_count = 0; | ||
1713 | for (i = 0; i < mchans_count; i++) { | ||
1714 | sba->mchans[i] = mbox_request_channel(&sba->client, i); | ||
1715 | if (IS_ERR(sba->mchans[i])) { | ||
1716 | ret = PTR_ERR(sba->mchans[i]); | ||
1717 | goto fail_free_mchans; | ||
1718 | } | ||
1719 | sba->mchans_count++; | ||
1720 | } | 1695 | } |
1721 | atomic_set(&sba->mchans_current, 0); | ||
1722 | 1696 | ||
1723 | /* Find-out underlying mailbox device */ | 1697 | /* Find-out underlying mailbox device */ |
1724 | ret = of_parse_phandle_with_args(pdev->dev.of_node, | 1698 | ret = of_parse_phandle_with_args(pdev->dev.of_node, |
1725 | "mboxes", "#mbox-cells", 0, &args); | 1699 | "mboxes", "#mbox-cells", 0, &args); |
1726 | if (ret) | 1700 | if (ret) |
1727 | goto fail_free_mchans; | 1701 | goto fail_free_mchan; |
1728 | mbox_pdev = of_find_device_by_node(args.np); | 1702 | mbox_pdev = of_find_device_by_node(args.np); |
1729 | of_node_put(args.np); | 1703 | of_node_put(args.np); |
1730 | if (!mbox_pdev) { | 1704 | if (!mbox_pdev) { |
1731 | ret = -ENODEV; | 1705 | ret = -ENODEV; |
1732 | goto fail_free_mchans; | 1706 | goto fail_free_mchan; |
1733 | } | 1707 | } |
1734 | sba->mbox_dev = &mbox_pdev->dev; | 1708 | sba->mbox_dev = &mbox_pdev->dev; |
1735 | 1709 | ||
1736 | /* All mailbox channels should be of same ring manager device */ | ||
1737 | for (i = 1; i < mchans_count; i++) { | ||
1738 | ret = of_parse_phandle_with_args(pdev->dev.of_node, | ||
1739 | "mboxes", "#mbox-cells", i, &args); | ||
1740 | if (ret) | ||
1741 | goto fail_free_mchans; | ||
1742 | mbox_pdev = of_find_device_by_node(args.np); | ||
1743 | of_node_put(args.np); | ||
1744 | if (sba->mbox_dev != &mbox_pdev->dev) { | ||
1745 | ret = -EINVAL; | ||
1746 | goto fail_free_mchans; | ||
1747 | } | ||
1748 | } | ||
1749 | |||
1750 | /* Prealloc channel resource */ | 1710 | /* Prealloc channel resource */ |
1751 | ret = sba_prealloc_channel_resources(sba); | 1711 | ret = sba_prealloc_channel_resources(sba); |
1752 | if (ret) | 1712 | if (ret) |
1753 | goto fail_free_mchans; | 1713 | goto fail_free_mchan; |
1754 | 1714 | ||
1755 | /* Check availability of debugfs */ | 1715 | /* Check availability of debugfs */ |
1756 | if (!debugfs_initialized()) | 1716 | if (!debugfs_initialized()) |
@@ -1777,24 +1737,22 @@ skip_debugfs: | |||
1777 | goto fail_free_resources; | 1737 | goto fail_free_resources; |
1778 | 1738 | ||
1779 | /* Print device info */ | 1739 | /* Print device info */ |
1780 | dev_info(sba->dev, "%s using SBAv%d and %d mailbox channels", | 1740 | dev_info(sba->dev, "%s using SBAv%d mailbox channel from %s", |
1781 | dma_chan_name(&sba->dma_chan), sba->ver+1, | 1741 | dma_chan_name(&sba->dma_chan), sba->ver+1, |
1782 | sba->mchans_count); | 1742 | dev_name(sba->mbox_dev)); |
1783 | 1743 | ||
1784 | return 0; | 1744 | return 0; |
1785 | 1745 | ||
1786 | fail_free_resources: | 1746 | fail_free_resources: |
1787 | debugfs_remove_recursive(sba->root); | 1747 | debugfs_remove_recursive(sba->root); |
1788 | sba_freeup_channel_resources(sba); | 1748 | sba_freeup_channel_resources(sba); |
1789 | fail_free_mchans: | 1749 | fail_free_mchan: |
1790 | for (i = 0; i < sba->mchans_count; i++) | 1750 | mbox_free_channel(sba->mchan); |
1791 | mbox_free_channel(sba->mchans[i]); | ||
1792 | return ret; | 1751 | return ret; |
1793 | } | 1752 | } |
1794 | 1753 | ||
1795 | static int sba_remove(struct platform_device *pdev) | 1754 | static int sba_remove(struct platform_device *pdev) |
1796 | { | 1755 | { |
1797 | int i; | ||
1798 | struct sba_device *sba = platform_get_drvdata(pdev); | 1756 | struct sba_device *sba = platform_get_drvdata(pdev); |
1799 | 1757 | ||
1800 | dma_async_device_unregister(&sba->dma_dev); | 1758 | dma_async_device_unregister(&sba->dma_dev); |
@@ -1803,8 +1761,7 @@ static int sba_remove(struct platform_device *pdev) | |||
1803 | 1761 | ||
1804 | sba_freeup_channel_resources(sba); | 1762 | sba_freeup_channel_resources(sba); |
1805 | 1763 | ||
1806 | for (i = 0; i < sba->mchans_count; i++) | 1764 | mbox_free_channel(sba->mchan); |
1807 | mbox_free_channel(sba->mchans[i]); | ||
1808 | 1765 | ||
1809 | return 0; | 1766 | return 0; |
1810 | } | 1767 | } |
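
The bcm-sba-raid conversion above boils down to the standard mailbox client API with exactly one channel per SBA device. A stand-alone sketch of that usage pattern, with hypothetical names (the real driver keeps the channel in struct sba_device, as the diff shows):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>

struct example_dev {
	struct mbox_client client;
	struct mbox_chan *mchan;
};

static int example_mbox_setup(struct example_dev *ed, struct device *dev)
{
	/* Client configuration mirrors what sba_probe() sets up */
	ed->client.dev = dev;
	ed->client.rx_callback = NULL;	/* real driver: its receive handler */
	ed->client.tx_block = false;
	ed->client.knows_txdone = true;
	ed->client.tx_tout = 0;

	/* Single channel: index 0 of the "mboxes" phandle list */
	ed->mchan = mbox_request_channel(&ed->client, 0);
	return PTR_ERR_OR_ZERO(ed->mchan);
}

static int example_mbox_send(struct example_dev *ed, void *msg)
{
	int ret = mbox_send_message(ed->mchan, msg);

	if (ret < 0)
		return ret;
	/* The client tracks completion itself, so signal txdone right away */
	mbox_client_txdone(ed->mchan, ret);
	return 0;
}
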
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index 74794c9859f6..da74fd74636b 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c | |||
@@ -1319,8 +1319,8 @@ static void coh901318_list_print(struct coh901318_chan *cohc, | |||
1319 | int i = 0; | 1319 | int i = 0; |
1320 | 1320 | ||
1321 | while (l) { | 1321 | while (l) { |
1322 | dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%pad" | 1322 | dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src %pad" |
1323 | ", dst 0x%pad, link 0x%pad virt_link_addr 0x%p\n", | 1323 | ", dst %pad, link %pad virt_link_addr 0x%p\n", |
1324 | i, l, l->control, &l->src_addr, &l->dst_addr, | 1324 | i, l, l->control, &l->src_addr, &l->dst_addr, |
1325 | &l->link_addr, l->virt_link_addr); | 1325 | &l->link_addr, l->virt_link_addr); |
1326 | i++; | 1326 | i++; |
@@ -2231,7 +2231,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
2231 | spin_lock_irqsave(&cohc->lock, flg); | 2231 | spin_lock_irqsave(&cohc->lock, flg); |
2232 | 2232 | ||
2233 | dev_vdbg(COHC_2_DEV(cohc), | 2233 | dev_vdbg(COHC_2_DEV(cohc), |
2234 | "[%s] channel %d src 0x%pad dest 0x%pad size %zu\n", | 2234 | "[%s] channel %d src %pad dest %pad size %zu\n", |
2235 | __func__, cohc->id, &src, &dest, size); | 2235 | __func__, cohc->id, &src, &dest, size); |
2236 | 2236 | ||
2237 | if (flags & DMA_PREP_INTERRUPT) | 2237 | if (flags & DMA_PREP_INTERRUPT) |
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index 7f0b9aa15867..2419fe524daa 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c | |||
@@ -72,6 +72,9 @@ | |||
72 | 72 | ||
73 | #define AXI_DMAC_FLAG_CYCLIC BIT(0) | 73 | #define AXI_DMAC_FLAG_CYCLIC BIT(0) |
74 | 74 | ||
75 | /* The maximum ID allocated by the hardware is 31 */ | ||
76 | #define AXI_DMAC_SG_UNUSED 32U | ||
77 | |||
75 | struct axi_dmac_sg { | 78 | struct axi_dmac_sg { |
76 | dma_addr_t src_addr; | 79 | dma_addr_t src_addr; |
77 | dma_addr_t dest_addr; | 80 | dma_addr_t dest_addr; |
@@ -80,6 +83,7 @@ struct axi_dmac_sg { | |||
80 | unsigned int dest_stride; | 83 | unsigned int dest_stride; |
81 | unsigned int src_stride; | 84 | unsigned int src_stride; |
82 | unsigned int id; | 85 | unsigned int id; |
86 | bool schedule_when_free; | ||
83 | }; | 87 | }; |
84 | 88 | ||
85 | struct axi_dmac_desc { | 89 | struct axi_dmac_desc { |
@@ -200,11 +204,21 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan) | |||
200 | } | 204 | } |
201 | sg = &desc->sg[desc->num_submitted]; | 205 | sg = &desc->sg[desc->num_submitted]; |
202 | 206 | ||
207 | /* Already queued in cyclic mode. Wait for it to finish */ | ||
208 | if (sg->id != AXI_DMAC_SG_UNUSED) { | ||
209 | sg->schedule_when_free = true; | ||
210 | return; | ||
211 | } | ||
212 | |||
203 | desc->num_submitted++; | 213 | desc->num_submitted++; |
204 | if (desc->num_submitted == desc->num_sgs) | 214 | if (desc->num_submitted == desc->num_sgs) { |
205 | chan->next_desc = NULL; | 215 | if (desc->cyclic) |
206 | else | 216 | desc->num_submitted = 0; /* Start again */ |
217 | else | ||
218 | chan->next_desc = NULL; | ||
219 | } else { | ||
207 | chan->next_desc = desc; | 220 | chan->next_desc = desc; |
221 | } | ||
208 | 222 | ||
209 | sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID); | 223 | sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID); |
210 | 224 | ||
@@ -220,9 +234,11 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan) | |||
220 | 234 | ||
221 | /* | 235 | /* |
222 | * If the hardware supports cyclic transfers and there is no callback to | 236 | * If the hardware supports cyclic transfers and there is no callback to |
223 | * call, enable hw cyclic mode to avoid unnecessary interrupts. | 237 | * call and only a single segment, enable hw cyclic mode to avoid |
238 | * unnecessary interrupts. | ||
224 | */ | 239 | */ |
225 | if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback) | 240 | if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback && |
241 | desc->num_sgs == 1) | ||
226 | flags |= AXI_DMAC_FLAG_CYCLIC; | 242 | flags |= AXI_DMAC_FLAG_CYCLIC; |
227 | 243 | ||
228 | axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1); | 244 | axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1); |
@@ -237,37 +253,52 @@ static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan) | |||
237 | struct axi_dmac_desc, vdesc.node); | 253 | struct axi_dmac_desc, vdesc.node); |
238 | } | 254 | } |
239 | 255 | ||
240 | static void axi_dmac_transfer_done(struct axi_dmac_chan *chan, | 256 | static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan, |
241 | unsigned int completed_transfers) | 257 | unsigned int completed_transfers) |
242 | { | 258 | { |
243 | struct axi_dmac_desc *active; | 259 | struct axi_dmac_desc *active; |
244 | struct axi_dmac_sg *sg; | 260 | struct axi_dmac_sg *sg; |
261 | bool start_next = false; | ||
245 | 262 | ||
246 | active = axi_dmac_active_desc(chan); | 263 | active = axi_dmac_active_desc(chan); |
247 | if (!active) | 264 | if (!active) |
248 | return; | 265 | return false; |
249 | 266 | ||
250 | if (active->cyclic) { | 267 | do { |
251 | vchan_cyclic_callback(&active->vdesc); | 268 | sg = &active->sg[active->num_completed]; |
252 | } else { | 269 | if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */ |
253 | do { | 270 | break; |
254 | sg = &active->sg[active->num_completed]; | 271 | if (!(BIT(sg->id) & completed_transfers)) |
255 | if (!(BIT(sg->id) & completed_transfers)) | 272 | break; |
256 | break; | 273 | active->num_completed++; |
257 | active->num_completed++; | 274 | sg->id = AXI_DMAC_SG_UNUSED; |
258 | if (active->num_completed == active->num_sgs) { | 275 | if (sg->schedule_when_free) { |
276 | sg->schedule_when_free = false; | ||
277 | start_next = true; | ||
278 | } | ||
279 | |||
280 | if (active->cyclic) | ||
281 | vchan_cyclic_callback(&active->vdesc); | ||
282 | |||
283 | if (active->num_completed == active->num_sgs) { | ||
284 | if (active->cyclic) { | ||
285 | active->num_completed = 0; /* wrap around */ | ||
286 | } else { | ||
259 | list_del(&active->vdesc.node); | 287 | list_del(&active->vdesc.node); |
260 | vchan_cookie_complete(&active->vdesc); | 288 | vchan_cookie_complete(&active->vdesc); |
261 | active = axi_dmac_active_desc(chan); | 289 | active = axi_dmac_active_desc(chan); |
262 | } | 290 | } |
263 | } while (active); | 291 | } |
264 | } | 292 | } while (active); |
293 | |||
294 | return start_next; | ||
265 | } | 295 | } |
266 | 296 | ||
267 | static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid) | 297 | static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid) |
268 | { | 298 | { |
269 | struct axi_dmac *dmac = devid; | 299 | struct axi_dmac *dmac = devid; |
270 | unsigned int pending; | 300 | unsigned int pending; |
301 | bool start_next = false; | ||
271 | 302 | ||
272 | pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING); | 303 | pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING); |
273 | if (!pending) | 304 | if (!pending) |
@@ -281,10 +312,10 @@ static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid) | |||
281 | unsigned int completed; | 312 | unsigned int completed; |
282 | 313 | ||
283 | completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE); | 314 | completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE); |
284 | axi_dmac_transfer_done(&dmac->chan, completed); | 315 | start_next = axi_dmac_transfer_done(&dmac->chan, completed); |
285 | } | 316 | } |
286 | /* Space has become available in the descriptor queue */ | 317 | /* Space has become available in the descriptor queue */ |
287 | if (pending & AXI_DMAC_IRQ_SOT) | 318 | if ((pending & AXI_DMAC_IRQ_SOT) || start_next) |
288 | axi_dmac_start_transfer(&dmac->chan); | 319 | axi_dmac_start_transfer(&dmac->chan); |
289 | spin_unlock(&dmac->chan.vchan.lock); | 320 | spin_unlock(&dmac->chan.vchan.lock); |
290 | 321 | ||
@@ -334,12 +365,16 @@ static void axi_dmac_issue_pending(struct dma_chan *c) | |||
334 | static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs) | 365 | static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs) |
335 | { | 366 | { |
336 | struct axi_dmac_desc *desc; | 367 | struct axi_dmac_desc *desc; |
368 | unsigned int i; | ||
337 | 369 | ||
338 | desc = kzalloc(sizeof(struct axi_dmac_desc) + | 370 | desc = kzalloc(sizeof(struct axi_dmac_desc) + |
339 | sizeof(struct axi_dmac_sg) * num_sgs, GFP_NOWAIT); | 371 | sizeof(struct axi_dmac_sg) * num_sgs, GFP_NOWAIT); |
340 | if (!desc) | 372 | if (!desc) |
341 | return NULL; | 373 | return NULL; |
342 | 374 | ||
375 | for (i = 0; i < num_sgs; i++) | ||
376 | desc->sg[i].id = AXI_DMAC_SG_UNUSED; | ||
377 | |||
343 | desc->num_sgs = num_sgs; | 378 | desc->num_sgs = num_sgs; |
344 | 379 | ||
345 | return desc; | 380 | return desc; |
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 34ff53290b03..47edc7fbf91f 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c | |||
@@ -702,6 +702,7 @@ static int dmatest_func(void *data) | |||
702 | * free it this time?" dancing. For now, just | 702 | * free it this time?" dancing. For now, just |
703 | * leave it dangling. | 703 | * leave it dangling. |
704 | */ | 704 | */ |
705 | WARN(1, "dmatest: Kernel stack may be corrupted!!\n"); | ||
705 | dmaengine_unmap_put(um); | 706 | dmaengine_unmap_put(um); |
706 | result("test timed out", total_tests, src_off, dst_off, | 707 | result("test timed out", total_tests, src_off, dst_off, |
707 | len, 0); | 708 | len, 0); |
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index a7ea20e7b8e9..9364a3ed345a 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c | |||
@@ -891,6 +891,10 @@ static int edma_slave_config(struct dma_chan *chan, | |||
891 | cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) | 891 | cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) |
892 | return -EINVAL; | 892 | return -EINVAL; |
893 | 893 | ||
894 | if (cfg->src_maxburst > chan->device->max_burst || | ||
895 | cfg->dst_maxburst > chan->device->max_burst) | ||
896 | return -EINVAL; | ||
897 | |||
894 | memcpy(&echan->cfg, cfg, sizeof(echan->cfg)); | 898 | memcpy(&echan->cfg, cfg, sizeof(echan->cfg)); |
895 | 899 | ||
896 | return 0; | 900 | return 0; |
@@ -1868,6 +1872,7 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode) | |||
1868 | s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS; | 1872 | s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS; |
1869 | s_ddev->directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV)); | 1873 | s_ddev->directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV)); |
1870 | s_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | 1874 | s_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; |
1875 | s_ddev->max_burst = SZ_32K - 1; /* CIDX: 16bit signed */ | ||
1871 | 1876 | ||
1872 | s_ddev->dev = ecc->dev; | 1877 | s_ddev->dev = ecc->dev; |
1873 | INIT_LIST_HEAD(&s_ddev->channels); | 1878 | INIT_LIST_HEAD(&s_ddev->channels); |
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c index 54db1411ce73..0391f930aecc 100644 --- a/drivers/dma/img-mdc-dma.c +++ b/drivers/dma/img-mdc-dma.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/of_device.h> | 23 | #include <linux/of_device.h> |
24 | #include <linux/of_dma.h> | 24 | #include <linux/of_dma.h> |
25 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
26 | #include <linux/pm_runtime.h> | ||
26 | #include <linux/regmap.h> | 27 | #include <linux/regmap.h> |
27 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
28 | #include <linux/spinlock.h> | 29 | #include <linux/spinlock.h> |
@@ -730,14 +731,23 @@ static int mdc_slave_config(struct dma_chan *chan, | |||
730 | return 0; | 731 | return 0; |
731 | } | 732 | } |
732 | 733 | ||
734 | static int mdc_alloc_chan_resources(struct dma_chan *chan) | ||
735 | { | ||
736 | struct mdc_chan *mchan = to_mdc_chan(chan); | ||
737 | struct device *dev = mdma2dev(mchan->mdma); | ||
738 | |||
739 | return pm_runtime_get_sync(dev); | ||
740 | } | ||
741 | |||
733 | static void mdc_free_chan_resources(struct dma_chan *chan) | 742 | static void mdc_free_chan_resources(struct dma_chan *chan) |
734 | { | 743 | { |
735 | struct mdc_chan *mchan = to_mdc_chan(chan); | 744 | struct mdc_chan *mchan = to_mdc_chan(chan); |
736 | struct mdc_dma *mdma = mchan->mdma; | 745 | struct mdc_dma *mdma = mchan->mdma; |
746 | struct device *dev = mdma2dev(mdma); | ||
737 | 747 | ||
738 | mdc_terminate_all(chan); | 748 | mdc_terminate_all(chan); |
739 | |||
740 | mdma->soc->disable_chan(mchan); | 749 | mdma->soc->disable_chan(mchan); |
750 | pm_runtime_put(dev); | ||
741 | } | 751 | } |
742 | 752 | ||
743 | static irqreturn_t mdc_chan_irq(int irq, void *dev_id) | 753 | static irqreturn_t mdc_chan_irq(int irq, void *dev_id) |
@@ -854,6 +864,22 @@ static const struct of_device_id mdc_dma_of_match[] = { | |||
854 | }; | 864 | }; |
855 | MODULE_DEVICE_TABLE(of, mdc_dma_of_match); | 865 | MODULE_DEVICE_TABLE(of, mdc_dma_of_match); |
856 | 866 | ||
867 | static int img_mdc_runtime_suspend(struct device *dev) | ||
868 | { | ||
869 | struct mdc_dma *mdma = dev_get_drvdata(dev); | ||
870 | |||
871 | clk_disable_unprepare(mdma->clk); | ||
872 | |||
873 | return 0; | ||
874 | } | ||
875 | |||
876 | static int img_mdc_runtime_resume(struct device *dev) | ||
877 | { | ||
878 | struct mdc_dma *mdma = dev_get_drvdata(dev); | ||
879 | |||
880 | return clk_prepare_enable(mdma->clk); | ||
881 | } | ||
882 | |||
857 | static int mdc_dma_probe(struct platform_device *pdev) | 883 | static int mdc_dma_probe(struct platform_device *pdev) |
858 | { | 884 | { |
859 | struct mdc_dma *mdma; | 885 | struct mdc_dma *mdma; |
@@ -883,10 +909,6 @@ static int mdc_dma_probe(struct platform_device *pdev) | |||
883 | if (IS_ERR(mdma->clk)) | 909 | if (IS_ERR(mdma->clk)) |
884 | return PTR_ERR(mdma->clk); | 910 | return PTR_ERR(mdma->clk); |
885 | 911 | ||
886 | ret = clk_prepare_enable(mdma->clk); | ||
887 | if (ret) | ||
888 | return ret; | ||
889 | |||
890 | dma_cap_zero(mdma->dma_dev.cap_mask); | 912 | dma_cap_zero(mdma->dma_dev.cap_mask); |
891 | dma_cap_set(DMA_SLAVE, mdma->dma_dev.cap_mask); | 913 | dma_cap_set(DMA_SLAVE, mdma->dma_dev.cap_mask); |
892 | dma_cap_set(DMA_PRIVATE, mdma->dma_dev.cap_mask); | 914 | dma_cap_set(DMA_PRIVATE, mdma->dma_dev.cap_mask); |
@@ -919,12 +941,13 @@ static int mdc_dma_probe(struct platform_device *pdev) | |||
919 | "img,max-burst-multiplier", | 941 | "img,max-burst-multiplier", |
920 | &mdma->max_burst_mult); | 942 | &mdma->max_burst_mult); |
921 | if (ret) | 943 | if (ret) |
922 | goto disable_clk; | 944 | return ret; |
923 | 945 | ||
924 | mdma->dma_dev.dev = &pdev->dev; | 946 | mdma->dma_dev.dev = &pdev->dev; |
925 | mdma->dma_dev.device_prep_slave_sg = mdc_prep_slave_sg; | 947 | mdma->dma_dev.device_prep_slave_sg = mdc_prep_slave_sg; |
926 | mdma->dma_dev.device_prep_dma_cyclic = mdc_prep_dma_cyclic; | 948 | mdma->dma_dev.device_prep_dma_cyclic = mdc_prep_dma_cyclic; |
927 | mdma->dma_dev.device_prep_dma_memcpy = mdc_prep_dma_memcpy; | 949 | mdma->dma_dev.device_prep_dma_memcpy = mdc_prep_dma_memcpy; |
950 | mdma->dma_dev.device_alloc_chan_resources = mdc_alloc_chan_resources; | ||
928 | mdma->dma_dev.device_free_chan_resources = mdc_free_chan_resources; | 951 | mdma->dma_dev.device_free_chan_resources = mdc_free_chan_resources; |
929 | mdma->dma_dev.device_tx_status = mdc_tx_status; | 952 | mdma->dma_dev.device_tx_status = mdc_tx_status; |
930 | mdma->dma_dev.device_issue_pending = mdc_issue_pending; | 953 | mdma->dma_dev.device_issue_pending = mdc_issue_pending; |
@@ -945,15 +968,14 @@ static int mdc_dma_probe(struct platform_device *pdev) | |||
945 | mchan->mdma = mdma; | 968 | mchan->mdma = mdma; |
946 | mchan->chan_nr = i; | 969 | mchan->chan_nr = i; |
947 | mchan->irq = platform_get_irq(pdev, i); | 970 | mchan->irq = platform_get_irq(pdev, i); |
948 | if (mchan->irq < 0) { | 971 | if (mchan->irq < 0) |
949 | ret = mchan->irq; | 972 | return mchan->irq; |
950 | goto disable_clk; | 973 | |
951 | } | ||
952 | ret = devm_request_irq(&pdev->dev, mchan->irq, mdc_chan_irq, | 974 | ret = devm_request_irq(&pdev->dev, mchan->irq, mdc_chan_irq, |
953 | IRQ_TYPE_LEVEL_HIGH, | 975 | IRQ_TYPE_LEVEL_HIGH, |
954 | dev_name(&pdev->dev), mchan); | 976 | dev_name(&pdev->dev), mchan); |
955 | if (ret < 0) | 977 | if (ret < 0) |
956 | goto disable_clk; | 978 | return ret; |
957 | 979 | ||
958 | mchan->vc.desc_free = mdc_desc_free; | 980 | mchan->vc.desc_free = mdc_desc_free; |
959 | vchan_init(&mchan->vc, &mdma->dma_dev); | 981 | vchan_init(&mchan->vc, &mdma->dma_dev); |
@@ -962,14 +984,19 @@ static int mdc_dma_probe(struct platform_device *pdev) | |||
962 | mdma->desc_pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev, | 984 | mdma->desc_pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev, |
963 | sizeof(struct mdc_hw_list_desc), | 985 | sizeof(struct mdc_hw_list_desc), |
964 | 4, 0); | 986 | 4, 0); |
965 | if (!mdma->desc_pool) { | 987 | if (!mdma->desc_pool) |
966 | ret = -ENOMEM; | 988 | return -ENOMEM; |
967 | goto disable_clk; | 989 | |
990 | pm_runtime_enable(&pdev->dev); | ||
991 | if (!pm_runtime_enabled(&pdev->dev)) { | ||
992 | ret = img_mdc_runtime_resume(&pdev->dev); | ||
993 | if (ret) | ||
994 | return ret; | ||
968 | } | 995 | } |
969 | 996 | ||
970 | ret = dma_async_device_register(&mdma->dma_dev); | 997 | ret = dma_async_device_register(&mdma->dma_dev); |
971 | if (ret) | 998 | if (ret) |
972 | goto disable_clk; | 999 | goto suspend; |
973 | 1000 | ||
974 | ret = of_dma_controller_register(pdev->dev.of_node, mdc_of_xlate, mdma); | 1001 | ret = of_dma_controller_register(pdev->dev.of_node, mdc_of_xlate, mdma); |
975 | if (ret) | 1002 | if (ret) |
@@ -982,8 +1009,10 @@ static int mdc_dma_probe(struct platform_device *pdev) | |||
982 | 1009 | ||
983 | unregister: | 1010 | unregister: |
984 | dma_async_device_unregister(&mdma->dma_dev); | 1011 | dma_async_device_unregister(&mdma->dma_dev); |
985 | disable_clk: | 1012 | suspend: |
986 | clk_disable_unprepare(mdma->clk); | 1013 | if (!pm_runtime_enabled(&pdev->dev)) |
1014 | img_mdc_runtime_suspend(&pdev->dev); | ||
1015 | pm_runtime_disable(&pdev->dev); | ||
987 | return ret; | 1016 | return ret; |
988 | } | 1017 | } |
989 | 1018 | ||
@@ -1004,14 +1033,47 @@ static int mdc_dma_remove(struct platform_device *pdev) | |||
1004 | tasklet_kill(&mchan->vc.task); | 1033 | tasklet_kill(&mchan->vc.task); |
1005 | } | 1034 | } |
1006 | 1035 | ||
1007 | clk_disable_unprepare(mdma->clk); | 1036 | pm_runtime_disable(&pdev->dev); |
1037 | if (!pm_runtime_status_suspended(&pdev->dev)) | ||
1038 | img_mdc_runtime_suspend(&pdev->dev); | ||
1008 | 1039 | ||
1009 | return 0; | 1040 | return 0; |
1010 | } | 1041 | } |
1011 | 1042 | ||
1043 | #ifdef CONFIG_PM_SLEEP | ||
1044 | static int img_mdc_suspend_late(struct device *dev) | ||
1045 | { | ||
1046 | struct mdc_dma *mdma = dev_get_drvdata(dev); | ||
1047 | int i; | ||
1048 | |||
1049 | /* Check that all channels are idle */ | ||
1050 | for (i = 0; i < mdma->nr_channels; i++) { | ||
1051 | struct mdc_chan *mchan = &mdma->channels[i]; | ||
1052 | |||
1053 | if (unlikely(mchan->desc)) | ||
1054 | return -EBUSY; | ||
1055 | } | ||
1056 | |||
1057 | return pm_runtime_force_suspend(dev); | ||
1058 | } | ||
1059 | |||
1060 | static int img_mdc_resume_early(struct device *dev) | ||
1061 | { | ||
1062 | return pm_runtime_force_resume(dev); | ||
1063 | } | ||
1064 | #endif /* CONFIG_PM_SLEEP */ | ||
1065 | |||
1066 | static const struct dev_pm_ops img_mdc_pm_ops = { | ||
1067 | SET_RUNTIME_PM_OPS(img_mdc_runtime_suspend, | ||
1068 | img_mdc_runtime_resume, NULL) | ||
1069 | SET_LATE_SYSTEM_SLEEP_PM_OPS(img_mdc_suspend_late, | ||
1070 | img_mdc_resume_early) | ||
1071 | }; | ||
1072 | |||
1012 | static struct platform_driver mdc_dma_driver = { | 1073 | static struct platform_driver mdc_dma_driver = { |
1013 | .driver = { | 1074 | .driver = { |
1014 | .name = "img-mdc-dma", | 1075 | .name = "img-mdc-dma", |
1076 | .pm = &img_mdc_pm_ops, | ||
1015 | .of_match_table = of_match_ptr(mdc_dma_of_match), | 1077 | .of_match_table = of_match_ptr(mdc_dma_of_match), |
1016 | }, | 1078 | }, |
1017 | .probe = mdc_dma_probe, | 1079 | .probe = mdc_dma_probe, |
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index f681df8f0ed3..331f863c605e 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c | |||
@@ -364,9 +364,9 @@ static void imxdma_disable_hw(struct imxdma_channel *imxdmac) | |||
364 | local_irq_restore(flags); | 364 | local_irq_restore(flags); |
365 | } | 365 | } |
366 | 366 | ||
367 | static void imxdma_watchdog(unsigned long data) | 367 | static void imxdma_watchdog(struct timer_list *t) |
368 | { | 368 | { |
369 | struct imxdma_channel *imxdmac = (struct imxdma_channel *)data; | 369 | struct imxdma_channel *imxdmac = from_timer(imxdmac, t, watchdog); |
370 | struct imxdma_engine *imxdma = imxdmac->imxdma; | 370 | struct imxdma_engine *imxdma = imxdmac->imxdma; |
371 | int channel = imxdmac->channel; | 371 | int channel = imxdmac->channel; |
372 | 372 | ||
@@ -1153,9 +1153,7 @@ static int __init imxdma_probe(struct platform_device *pdev) | |||
1153 | } | 1153 | } |
1154 | 1154 | ||
1155 | imxdmac->irq = irq + i; | 1155 | imxdmac->irq = irq + i; |
1156 | init_timer(&imxdmac->watchdog); | 1156 | timer_setup(&imxdmac->watchdog, imxdma_watchdog, 0); |
1157 | imxdmac->watchdog.function = &imxdma_watchdog; | ||
1158 | imxdmac->watchdog.data = (unsigned long)imxdmac; | ||
1159 | } | 1157 | } |
1160 | 1158 | ||
1161 | imxdmac->imxdma = imxdma; | 1159 | imxdmac->imxdma = imxdma; |
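
The imx-dma hunk is part of the tree-wide timer API conversion: the callback now receives the timer_list pointer and recovers its container with from_timer() instead of casting an unsigned long. A minimal sketch of the pattern with hypothetical names:

#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/timer.h>

struct example_chan {
	int channel;
	struct timer_list watchdog;	/* embedded timer, as in imxdma_channel */
};

/* New-style callback: receives the timer, not an opaque unsigned long */
static void example_watchdog(struct timer_list *t)
{
	struct example_chan *chan = from_timer(chan, t, watchdog);

	pr_warn("channel %d watchdog expired\n", chan->channel);
}

static void example_chan_init(struct example_chan *chan)
{
	/* Replaces init_timer() plus .function/.data assignments */
	timer_setup(&chan->watchdog, example_watchdog, 0);
	mod_timer(&chan->watchdog, jiffies + msecs_to_jiffies(1000));
}
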
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index a67ec1bdc4e0..2184881afe76 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
@@ -178,6 +178,14 @@ | |||
178 | #define SDMA_WATERMARK_LEVEL_HWE BIT(29) | 178 | #define SDMA_WATERMARK_LEVEL_HWE BIT(29) |
179 | #define SDMA_WATERMARK_LEVEL_CONT BIT(31) | 179 | #define SDMA_WATERMARK_LEVEL_CONT BIT(31) |
180 | 180 | ||
181 | #define SDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ | ||
182 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ | ||
183 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) | ||
184 | |||
185 | #define SDMA_DMA_DIRECTIONS (BIT(DMA_DEV_TO_MEM) | \ | ||
186 | BIT(DMA_MEM_TO_DEV) | \ | ||
187 | BIT(DMA_DEV_TO_DEV)) | ||
188 | |||
181 | /* | 189 | /* |
182 | * Mode/Count of data node descriptors - IPCv2 | 190 | * Mode/Count of data node descriptors - IPCv2 |
183 | */ | 191 | */ |
@@ -1851,9 +1859,9 @@ static int sdma_probe(struct platform_device *pdev) | |||
1851 | sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; | 1859 | sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; |
1852 | sdma->dma_device.device_config = sdma_config; | 1860 | sdma->dma_device.device_config = sdma_config; |
1853 | sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay; | 1861 | sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay; |
1854 | sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); | 1862 | sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS; |
1855 | sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); | 1863 | sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS; |
1856 | sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | 1864 | sdma->dma_device.directions = SDMA_DMA_DIRECTIONS; |
1857 | sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; | 1865 | sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; |
1858 | sdma->dma_device.device_issue_pending = sdma_issue_pending; | 1866 | sdma->dma_device.device_issue_pending = sdma_issue_pending; |
1859 | sdma->dma_device.dev->dma_parms = &sdma->dma_parms; | 1867 | sdma->dma_device.dev->dma_parms = &sdma->dma_parms; |
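Editor's note: the imx-sdma hunk widens the advertised capabilities from a single 4-byte bus width to 1-, 2- and 4-byte widths and adds DEV_TO_DEV to the supported directions. Clients can discover these masks through dma_get_slave_caps(); a hedged sketch of such a query, assuming chan was obtained earlier from dma_request_chan():

	#include <linux/bits.h>
	#include <linux/dmaengine.h>

	/* True if the channel accepts 2-byte DEV_TO_MEM slave transfers. */
	static bool foo_supports_16bit_rx(struct dma_chan *chan)
	{
		struct dma_slave_caps caps;

		if (dma_get_slave_caps(chan, &caps))
			return false;

		return (caps.src_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)) &&
		       (caps.directions & BIT(DMA_DEV_TO_MEM));
	}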
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index f70cc74032ea..58d4ccd33672 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c | |||
@@ -474,7 +474,7 @@ int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs) | |||
474 | if (time_is_before_jiffies(ioat_chan->timer.expires) | 474 | if (time_is_before_jiffies(ioat_chan->timer.expires) |
475 | && timer_pending(&ioat_chan->timer)) { | 475 | && timer_pending(&ioat_chan->timer)) { |
476 | mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); | 476 | mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT); |
477 | ioat_timer_event((unsigned long)ioat_chan); | 477 | ioat_timer_event(&ioat_chan->timer); |
478 | } | 478 | } |
479 | 479 | ||
480 | return -ENOMEM; | 480 | return -ENOMEM; |
@@ -862,9 +862,9 @@ static void check_active(struct ioatdma_chan *ioat_chan) | |||
862 | mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); | 862 | mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT); |
863 | } | 863 | } |
864 | 864 | ||
865 | void ioat_timer_event(unsigned long data) | 865 | void ioat_timer_event(struct timer_list *t) |
866 | { | 866 | { |
867 | struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data); | 867 | struct ioatdma_chan *ioat_chan = from_timer(ioat_chan, t, timer); |
868 | dma_addr_t phys_complete; | 868 | dma_addr_t phys_complete; |
869 | u64 status; | 869 | u64 status; |
870 | 870 | ||
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 56200eefcf5e..1ab42ec2b7ff 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h | |||
@@ -406,10 +406,9 @@ enum dma_status | |||
406 | ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie, | 406 | ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie, |
407 | struct dma_tx_state *txstate); | 407 | struct dma_tx_state *txstate); |
408 | void ioat_cleanup_event(unsigned long data); | 408 | void ioat_cleanup_event(unsigned long data); |
409 | void ioat_timer_event(unsigned long data); | 409 | void ioat_timer_event(struct timer_list *t); |
410 | int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs); | 410 | int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs); |
411 | void ioat_issue_pending(struct dma_chan *chan); | 411 | void ioat_issue_pending(struct dma_chan *chan); |
412 | void ioat_timer_event(unsigned long data); | ||
413 | 412 | ||
414 | /* IOAT Init functions */ | 413 | /* IOAT Init functions */ |
415 | bool is_bwd_ioat(struct pci_dev *pdev); | 414 | bool is_bwd_ioat(struct pci_dev *pdev); |
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index 93e006c3441d..2f31d3d0caa6 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c | |||
@@ -760,7 +760,7 @@ ioat_init_channel(struct ioatdma_device *ioat_dma, | |||
760 | dma_cookie_init(&ioat_chan->dma_chan); | 760 | dma_cookie_init(&ioat_chan->dma_chan); |
761 | list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels); | 761 | list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels); |
762 | ioat_dma->idx[idx] = ioat_chan; | 762 | ioat_dma->idx[idx] = ioat_chan; |
763 | setup_timer(&ioat_chan->timer, ioat_timer_event, data); | 763 | timer_setup(&ioat_chan->timer, ioat_timer_event, 0); |
764 | tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data); | 764 | tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data); |
765 | } | 765 | } |
766 | 766 | ||
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c index d3f918a9ee76..50559338239b 100644 --- a/drivers/dma/nbpfaxi.c +++ b/drivers/dma/nbpfaxi.c | |||
@@ -1286,7 +1286,6 @@ MODULE_DEVICE_TABLE(of, nbpf_match); | |||
1286 | static int nbpf_probe(struct platform_device *pdev) | 1286 | static int nbpf_probe(struct platform_device *pdev) |
1287 | { | 1287 | { |
1288 | struct device *dev = &pdev->dev; | 1288 | struct device *dev = &pdev->dev; |
1289 | const struct of_device_id *of_id = of_match_device(nbpf_match, dev); | ||
1290 | struct device_node *np = dev->of_node; | 1289 | struct device_node *np = dev->of_node; |
1291 | struct nbpf_device *nbpf; | 1290 | struct nbpf_device *nbpf; |
1292 | struct dma_device *dma_dev; | 1291 | struct dma_device *dma_dev; |
@@ -1300,10 +1299,10 @@ static int nbpf_probe(struct platform_device *pdev) | |||
1300 | BUILD_BUG_ON(sizeof(struct nbpf_desc_page) > PAGE_SIZE); | 1299 | BUILD_BUG_ON(sizeof(struct nbpf_desc_page) > PAGE_SIZE); |
1301 | 1300 | ||
1302 | /* DT only */ | 1301 | /* DT only */ |
1303 | if (!np || !of_id || !of_id->data) | 1302 | if (!np) |
1304 | return -ENODEV; | 1303 | return -ENODEV; |
1305 | 1304 | ||
1306 | cfg = of_id->data; | 1305 | cfg = of_device_get_match_data(dev); |
1307 | num_channels = cfg->num_channels; | 1306 | num_channels = cfg->num_channels; |
1308 | 1307 | ||
1309 | nbpf = devm_kzalloc(dev, sizeof(*nbpf) + num_channels * | 1308 | nbpf = devm_kzalloc(dev, sizeof(*nbpf) + num_channels * |
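Editor's note: the nbpfaxi cleanup drops the explicit of_match_device() lookup; of_device_get_match_data() returns the .data pointer of the matching of_device_id entry directly, or NULL when there is no OF node or no match. A minimal sketch of the idiom with a hypothetical foo_cfg type (the driver's own config type is not reproduced here):

	#include <linux/of_device.h>
	#include <linux/platform_device.h>

	struct foo_cfg {
		unsigned int num_channels;
	};

	static int foo_probe(struct platform_device *pdev)
	{
		const struct foo_cfg *cfg;

		/* NULL when there is no OF node or no matching .data. */
		cfg = of_device_get_match_data(&pdev->dev);
		if (!cfg)
			return -ENODEV;

		dev_info(&pdev->dev, "%u channels\n", cfg->num_channels);
		return 0;
	}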
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index 8c1665c8fe33..f6dd849159d8 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c | |||
@@ -1288,6 +1288,10 @@ static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config | |||
1288 | cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) | 1288 | cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) |
1289 | return -EINVAL; | 1289 | return -EINVAL; |
1290 | 1290 | ||
1291 | if (cfg->src_maxburst > chan->device->max_burst || | ||
1292 | cfg->dst_maxburst > chan->device->max_burst) | ||
1293 | return -EINVAL; | ||
1294 | |||
1291 | memcpy(&c->cfg, cfg, sizeof(c->cfg)); | 1295 | memcpy(&c->cfg, cfg, sizeof(c->cfg)); |
1292 | 1296 | ||
1293 | return 0; | 1297 | return 0; |
@@ -1482,6 +1486,7 @@ static int omap_dma_probe(struct platform_device *pdev) | |||
1482 | od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS; | 1486 | od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS; |
1483 | od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | 1487 | od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); |
1484 | od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | 1488 | od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; |
1489 | od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */ | ||
1485 | od->ddev.dev = &pdev->dev; | 1490 | od->ddev.dev = &pdev->dev; |
1486 | INIT_LIST_HEAD(&od->ddev.channels); | 1491 | INIT_LIST_HEAD(&od->ddev.channels); |
1487 | spin_lock_init(&od->lock); | 1492 | spin_lock_init(&od->lock); |
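Editor's note: omap-dma now exports its 24-bit CCEN limit as max_burst and rejects slave configs that exceed it. The same limit is visible to clients through dma_get_slave_caps(), so a peripheral driver can clamp its request before calling dmaengine_slave_config(). A hedged sketch (the foo_* name is illustrative, not an existing client):

	#include <linux/dmaengine.h>
	#include <linux/kernel.h>

	static int foo_apply_config(struct dma_chan *chan,
				    struct dma_slave_config *cfg)
	{
		struct dma_slave_caps caps;

		/* Clamp the burst to whatever limit the engine advertises. */
		if (!dma_get_slave_caps(chan, &caps) && caps.max_burst) {
			cfg->src_maxburst = min(cfg->src_maxburst, caps.max_burst);
			cfg->dst_maxburst = min(cfg->dst_maxburst, caps.max_burst);
		}

		return dmaengine_slave_config(chan, cfg);
	}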
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index f9028e9d0dfc..afd8f27bda96 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c | |||
@@ -123,7 +123,7 @@ struct pch_dma_chan { | |||
123 | struct pch_dma { | 123 | struct pch_dma { |
124 | struct dma_device dma; | 124 | struct dma_device dma; |
125 | void __iomem *membase; | 125 | void __iomem *membase; |
126 | struct pci_pool *pool; | 126 | struct dma_pool *pool; |
127 | struct pch_dma_regs regs; | 127 | struct pch_dma_regs regs; |
128 | struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR]; | 128 | struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR]; |
129 | struct pch_dma_chan channels[MAX_CHAN_NR]; | 129 | struct pch_dma_chan channels[MAX_CHAN_NR]; |
@@ -437,7 +437,7 @@ static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags) | |||
437 | struct pch_dma *pd = to_pd(chan->device); | 437 | struct pch_dma *pd = to_pd(chan->device); |
438 | dma_addr_t addr; | 438 | dma_addr_t addr; |
439 | 439 | ||
440 | desc = pci_pool_zalloc(pd->pool, flags, &addr); | 440 | desc = dma_pool_zalloc(pd->pool, flags, &addr); |
441 | if (desc) { | 441 | if (desc) { |
442 | INIT_LIST_HEAD(&desc->tx_list); | 442 | INIT_LIST_HEAD(&desc->tx_list); |
443 | dma_async_tx_descriptor_init(&desc->txd, chan); | 443 | dma_async_tx_descriptor_init(&desc->txd, chan); |
@@ -549,7 +549,7 @@ static void pd_free_chan_resources(struct dma_chan *chan) | |||
549 | spin_unlock_irq(&pd_chan->lock); | 549 | spin_unlock_irq(&pd_chan->lock); |
550 | 550 | ||
551 | list_for_each_entry_safe(desc, _d, &tmp_list, desc_node) | 551 | list_for_each_entry_safe(desc, _d, &tmp_list, desc_node) |
552 | pci_pool_free(pd->pool, desc, desc->txd.phys); | 552 | dma_pool_free(pd->pool, desc, desc->txd.phys); |
553 | 553 | ||
554 | pdc_enable_irq(chan, 0); | 554 | pdc_enable_irq(chan, 0); |
555 | } | 555 | } |
@@ -880,7 +880,7 @@ static int pch_dma_probe(struct pci_dev *pdev, | |||
880 | goto err_iounmap; | 880 | goto err_iounmap; |
881 | } | 881 | } |
882 | 882 | ||
883 | pd->pool = pci_pool_create("pch_dma_desc_pool", pdev, | 883 | pd->pool = dma_pool_create("pch_dma_desc_pool", &pdev->dev, |
884 | sizeof(struct pch_dma_desc), 4, 0); | 884 | sizeof(struct pch_dma_desc), 4, 0); |
885 | if (!pd->pool) { | 885 | if (!pd->pool) { |
886 | dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n"); | 886 | dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n"); |
@@ -931,7 +931,7 @@ static int pch_dma_probe(struct pci_dev *pdev, | |||
931 | return 0; | 931 | return 0; |
932 | 932 | ||
933 | err_free_pool: | 933 | err_free_pool: |
934 | pci_pool_destroy(pd->pool); | 934 | dma_pool_destroy(pd->pool); |
935 | err_free_irq: | 935 | err_free_irq: |
936 | free_irq(pdev->irq, pd); | 936 | free_irq(pdev->irq, pd); |
937 | err_iounmap: | 937 | err_iounmap: |
@@ -963,7 +963,7 @@ static void pch_dma_remove(struct pci_dev *pdev) | |||
963 | tasklet_kill(&pd_chan->tasklet); | 963 | tasklet_kill(&pd_chan->tasklet); |
964 | } | 964 | } |
965 | 965 | ||
966 | pci_pool_destroy(pd->pool); | 966 | dma_pool_destroy(pd->pool); |
967 | pci_iounmap(pdev, pd->membase); | 967 | pci_iounmap(pdev, pd->membase); |
968 | pci_release_regions(pdev); | 968 | pci_release_regions(pdev); |
969 | pci_disable_device(pdev); | 969 | pci_disable_device(pdev); |
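Editor's note: the pch_dma conversion is mechanical; the deprecated pci_pool_* wrappers become the underlying dma_pool_* calls, and the pool is created against &pdev->dev instead of the pci_dev. A small self-contained sketch of the generic API (foo_desc is a stand-in for the driver's descriptor type):

	#include <linux/dmapool.h>
	#include <linux/gfp.h>
	#include <linux/types.h>

	struct foo_desc {
		u32 ctl;
		u32 next;
	};

	static int foo_pool_demo(struct device *dev)
	{
		struct dma_pool *pool;
		struct foo_desc *desc;
		dma_addr_t phys;

		/* name, device, block size, alignment, boundary */
		pool = dma_pool_create("foo_desc_pool", dev,
				       sizeof(struct foo_desc), 4, 0);
		if (!pool)
			return -ENOMEM;

		/* Zeroed, DMA-coherent block plus its bus address. */
		desc = dma_pool_zalloc(pool, GFP_KERNEL, &phys);
		if (desc)
			dma_pool_free(pool, desc, phys);

		dma_pool_destroy(pool);
		return 0;
	}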
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index f122c2a7b9f0..d7327fd5f445 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -2390,7 +2390,8 @@ static inline void _init_desc(struct dma_pl330_desc *desc) | |||
2390 | } | 2390 | } |
2391 | 2391 | ||
2392 | /* Returns the number of descriptors added to the DMAC pool */ | 2392 | /* Returns the number of descriptors added to the DMAC pool */ |
2393 | static int add_desc(struct pl330_dmac *pl330, gfp_t flg, int count) | 2393 | static int add_desc(struct list_head *pool, spinlock_t *lock, |
2394 | gfp_t flg, int count) | ||
2394 | { | 2395 | { |
2395 | struct dma_pl330_desc *desc; | 2396 | struct dma_pl330_desc *desc; |
2396 | unsigned long flags; | 2397 | unsigned long flags; |
@@ -2400,27 +2401,28 @@ static int add_desc(struct pl330_dmac *pl330, gfp_t flg, int count) | |||
2400 | if (!desc) | 2401 | if (!desc) |
2401 | return 0; | 2402 | return 0; |
2402 | 2403 | ||
2403 | spin_lock_irqsave(&pl330->pool_lock, flags); | 2404 | spin_lock_irqsave(lock, flags); |
2404 | 2405 | ||
2405 | for (i = 0; i < count; i++) { | 2406 | for (i = 0; i < count; i++) { |
2406 | _init_desc(&desc[i]); | 2407 | _init_desc(&desc[i]); |
2407 | list_add_tail(&desc[i].node, &pl330->desc_pool); | 2408 | list_add_tail(&desc[i].node, pool); |
2408 | } | 2409 | } |
2409 | 2410 | ||
2410 | spin_unlock_irqrestore(&pl330->pool_lock, flags); | 2411 | spin_unlock_irqrestore(lock, flags); |
2411 | 2412 | ||
2412 | return count; | 2413 | return count; |
2413 | } | 2414 | } |
2414 | 2415 | ||
2415 | static struct dma_pl330_desc *pluck_desc(struct pl330_dmac *pl330) | 2416 | static struct dma_pl330_desc *pluck_desc(struct list_head *pool, |
2417 | spinlock_t *lock) | ||
2416 | { | 2418 | { |
2417 | struct dma_pl330_desc *desc = NULL; | 2419 | struct dma_pl330_desc *desc = NULL; |
2418 | unsigned long flags; | 2420 | unsigned long flags; |
2419 | 2421 | ||
2420 | spin_lock_irqsave(&pl330->pool_lock, flags); | 2422 | spin_lock_irqsave(lock, flags); |
2421 | 2423 | ||
2422 | if (!list_empty(&pl330->desc_pool)) { | 2424 | if (!list_empty(pool)) { |
2423 | desc = list_entry(pl330->desc_pool.next, | 2425 | desc = list_entry(pool->next, |
2424 | struct dma_pl330_desc, node); | 2426 | struct dma_pl330_desc, node); |
2425 | 2427 | ||
2426 | list_del_init(&desc->node); | 2428 | list_del_init(&desc->node); |
@@ -2429,7 +2431,7 @@ static struct dma_pl330_desc *pluck_desc(struct pl330_dmac *pl330) | |||
2429 | desc->txd.callback = NULL; | 2431 | desc->txd.callback = NULL; |
2430 | } | 2432 | } |
2431 | 2433 | ||
2432 | spin_unlock_irqrestore(&pl330->pool_lock, flags); | 2434 | spin_unlock_irqrestore(lock, flags); |
2433 | 2435 | ||
2434 | return desc; | 2436 | return desc; |
2435 | } | 2437 | } |
@@ -2441,20 +2443,18 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) | |||
2441 | struct dma_pl330_desc *desc; | 2443 | struct dma_pl330_desc *desc; |
2442 | 2444 | ||
2443 | /* Pluck one desc from the pool of DMAC */ | 2445 | /* Pluck one desc from the pool of DMAC */ |
2444 | desc = pluck_desc(pl330); | 2446 | desc = pluck_desc(&pl330->desc_pool, &pl330->pool_lock); |
2445 | 2447 | ||
2446 | /* If the DMAC pool is empty, alloc new */ | 2448 | /* If the DMAC pool is empty, alloc new */ |
2447 | if (!desc) { | 2449 | if (!desc) { |
2448 | if (!add_desc(pl330, GFP_ATOMIC, 1)) | 2450 | DEFINE_SPINLOCK(lock); |
2449 | return NULL; | 2451 | LIST_HEAD(pool); |
2450 | 2452 | ||
2451 | /* Try again */ | 2453 | if (!add_desc(&pool, &lock, GFP_ATOMIC, 1)) |
2452 | desc = pluck_desc(pl330); | ||
2453 | if (!desc) { | ||
2454 | dev_err(pch->dmac->ddma.dev, | ||
2455 | "%s:%d ALERT!\n", __func__, __LINE__); | ||
2456 | return NULL; | 2454 | return NULL; |
2457 | } | 2455 | |
2456 | desc = pluck_desc(&pool, &lock); | ||
2457 | WARN_ON(!desc || !list_empty(&pool)); | ||
2458 | } | 2458 | } |
2459 | 2459 | ||
2460 | /* Initialize the descriptor */ | 2460 | /* Initialize the descriptor */ |
@@ -2868,7 +2868,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2868 | spin_lock_init(&pl330->pool_lock); | 2868 | spin_lock_init(&pl330->pool_lock); |
2869 | 2869 | ||
2870 | /* Create a descriptor pool of default size */ | 2870 | /* Create a descriptor pool of default size */ |
2871 | if (!add_desc(pl330, GFP_KERNEL, NR_DEFAULT_DESC)) | 2871 | if (!add_desc(&pl330->desc_pool, &pl330->pool_lock, |
2872 | GFP_KERNEL, NR_DEFAULT_DESC)) | ||
2872 | dev_warn(&adev->dev, "unable to allocate desc\n"); | 2873 | dev_warn(&adev->dev, "unable to allocate desc\n"); |
2873 | 2874 | ||
2874 | INIT_LIST_HEAD(&pd->channels); | 2875 | INIT_LIST_HEAD(&pd->channels); |
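Editor's note: the pl330 refactor makes add_desc() and pluck_desc() operate on a caller-supplied list and lock rather than always on pl330->desc_pool, which lets the GFP_ATOMIC fallback in pl330_get_desc() allocate into a private on-stack pool. A reduced sketch of that shape; all foo_* names are illustrative, not the driver's:

	#include <linux/gfp.h>
	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct foo_desc {
		struct list_head node;
	};

	struct foo_dmac {
		struct list_head desc_pool;
		spinlock_t pool_lock;
	};

	static int foo_add_desc(struct list_head *pool, spinlock_t *lock,
				gfp_t flg, int count)
	{
		struct foo_desc *desc = kcalloc(count, sizeof(*desc), flg);
		unsigned long flags;
		int i;

		if (!desc)
			return 0;

		spin_lock_irqsave(lock, flags);
		for (i = 0; i < count; i++)
			list_add_tail(&desc[i].node, pool);
		spin_unlock_irqrestore(lock, flags);

		return count;
	}

	static struct foo_desc *foo_pluck_desc(struct list_head *pool,
					       spinlock_t *lock)
	{
		struct foo_desc *desc;
		unsigned long flags;

		spin_lock_irqsave(lock, flags);
		desc = list_first_entry_or_null(pool, struct foo_desc, node);
		if (desc)
			list_del_init(&desc->node);
		spin_unlock_irqrestore(lock, flags);

		return desc;
	}

	static struct foo_desc *foo_get_desc(struct foo_dmac *d)
	{
		struct foo_desc *desc = foo_pluck_desc(&d->desc_pool, &d->pool_lock);

		if (!desc) {
			/* Shared pool empty: allocate into a throwaway local pool. */
			DEFINE_SPINLOCK(lock);
			LIST_HEAD(pool);

			if (foo_add_desc(&pool, &lock, GFP_ATOMIC, 1))
				desc = foo_pluck_desc(&pool, &lock);
		}
		return desc;
	}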
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index 6d89fb6a6a92..d076940e0c69 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include <linux/of_address.h> | 46 | #include <linux/of_address.h> |
47 | #include <linux/of_irq.h> | 47 | #include <linux/of_irq.h> |
48 | #include <linux/of_dma.h> | 48 | #include <linux/of_dma.h> |
49 | #include <linux/circ_buf.h> | ||
49 | #include <linux/clk.h> | 50 | #include <linux/clk.h> |
50 | #include <linux/dmaengine.h> | 51 | #include <linux/dmaengine.h> |
51 | #include <linux/pm_runtime.h> | 52 | #include <linux/pm_runtime.h> |
@@ -78,6 +79,8 @@ struct bam_async_desc { | |||
78 | 79 | ||
79 | struct bam_desc_hw *curr_desc; | 80 | struct bam_desc_hw *curr_desc; |
80 | 81 | ||
82 | /* list node for the desc in the bam_chan list of descriptors */ | ||
83 | struct list_head desc_node; | ||
81 | enum dma_transfer_direction dir; | 84 | enum dma_transfer_direction dir; |
82 | size_t length; | 85 | size_t length; |
83 | struct bam_desc_hw desc[0]; | 86 | struct bam_desc_hw desc[0]; |
@@ -347,6 +350,8 @@ static const struct reg_offset_data bam_v1_7_reg_info[] = { | |||
347 | #define BAM_DESC_FIFO_SIZE SZ_32K | 350 | #define BAM_DESC_FIFO_SIZE SZ_32K |
348 | #define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1) | 351 | #define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1) |
349 | #define BAM_FIFO_SIZE (SZ_32K - 8) | 352 | #define BAM_FIFO_SIZE (SZ_32K - 8) |
353 | #define IS_BUSY(chan) (CIRC_SPACE(bchan->tail, bchan->head,\ | ||
354 | MAX_DESCRIPTORS + 1) == 0) | ||
350 | 355 | ||
351 | struct bam_chan { | 356 | struct bam_chan { |
352 | struct virt_dma_chan vc; | 357 | struct virt_dma_chan vc; |
@@ -356,8 +361,6 @@ struct bam_chan { | |||
356 | /* configuration from device tree */ | 361 | /* configuration from device tree */ |
357 | u32 id; | 362 | u32 id; |
358 | 363 | ||
359 | struct bam_async_desc *curr_txd; /* current running dma */ | ||
360 | |||
361 | /* runtime configuration */ | 364 | /* runtime configuration */ |
362 | struct dma_slave_config slave; | 365 | struct dma_slave_config slave; |
363 | 366 | ||
@@ -372,6 +375,8 @@ struct bam_chan { | |||
372 | unsigned int initialized; /* is the channel hw initialized? */ | 375 | unsigned int initialized; /* is the channel hw initialized? */ |
373 | unsigned int paused; /* is the channel paused? */ | 376 | unsigned int paused; /* is the channel paused? */ |
374 | unsigned int reconfigure; /* new slave config? */ | 377 | unsigned int reconfigure; /* new slave config? */ |
378 | /* list of descriptors currently processed */ | ||
379 | struct list_head desc_list; | ||
375 | 380 | ||
376 | struct list_head node; | 381 | struct list_head node; |
377 | }; | 382 | }; |
@@ -539,7 +544,7 @@ static void bam_free_chan(struct dma_chan *chan) | |||
539 | 544 | ||
540 | vchan_free_chan_resources(to_virt_chan(chan)); | 545 | vchan_free_chan_resources(to_virt_chan(chan)); |
541 | 546 | ||
542 | if (bchan->curr_txd) { | 547 | if (!list_empty(&bchan->desc_list)) { |
543 | dev_err(bchan->bdev->dev, "Cannot free busy channel\n"); | 548 | dev_err(bchan->bdev->dev, "Cannot free busy channel\n"); |
544 | goto err; | 549 | goto err; |
545 | } | 550 | } |
@@ -632,8 +637,6 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan, | |||
632 | 637 | ||
633 | if (flags & DMA_PREP_INTERRUPT) | 638 | if (flags & DMA_PREP_INTERRUPT) |
634 | async_desc->flags |= DESC_FLAG_EOT; | 639 | async_desc->flags |= DESC_FLAG_EOT; |
635 | else | ||
636 | async_desc->flags |= DESC_FLAG_INT; | ||
637 | 640 | ||
638 | async_desc->num_desc = num_alloc; | 641 | async_desc->num_desc = num_alloc; |
639 | async_desc->curr_desc = async_desc->desc; | 642 | async_desc->curr_desc = async_desc->desc; |
@@ -684,14 +687,16 @@ err_out: | |||
684 | static int bam_dma_terminate_all(struct dma_chan *chan) | 687 | static int bam_dma_terminate_all(struct dma_chan *chan) |
685 | { | 688 | { |
686 | struct bam_chan *bchan = to_bam_chan(chan); | 689 | struct bam_chan *bchan = to_bam_chan(chan); |
690 | struct bam_async_desc *async_desc, *tmp; | ||
687 | unsigned long flag; | 691 | unsigned long flag; |
688 | LIST_HEAD(head); | 692 | LIST_HEAD(head); |
689 | 693 | ||
690 | /* remove all transactions, including active transaction */ | 694 | /* remove all transactions, including active transaction */ |
691 | spin_lock_irqsave(&bchan->vc.lock, flag); | 695 | spin_lock_irqsave(&bchan->vc.lock, flag); |
692 | if (bchan->curr_txd) { | 696 | list_for_each_entry_safe(async_desc, tmp, |
693 | list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued); | 697 | &bchan->desc_list, desc_node) { |
694 | bchan->curr_txd = NULL; | 698 | list_add(&async_desc->vd.node, &bchan->vc.desc_issued); |
699 | list_del(&async_desc->desc_node); | ||
695 | } | 700 | } |
696 | 701 | ||
697 | vchan_get_all_descriptors(&bchan->vc, &head); | 702 | vchan_get_all_descriptors(&bchan->vc, &head); |
@@ -763,9 +768,9 @@ static int bam_resume(struct dma_chan *chan) | |||
763 | */ | 768 | */ |
764 | static u32 process_channel_irqs(struct bam_device *bdev) | 769 | static u32 process_channel_irqs(struct bam_device *bdev) |
765 | { | 770 | { |
766 | u32 i, srcs, pipe_stts; | 771 | u32 i, srcs, pipe_stts, offset, avail; |
767 | unsigned long flags; | 772 | unsigned long flags; |
768 | struct bam_async_desc *async_desc; | 773 | struct bam_async_desc *async_desc, *tmp; |
769 | 774 | ||
770 | srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE)); | 775 | srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE)); |
771 | 776 | ||
@@ -785,27 +790,40 @@ static u32 process_channel_irqs(struct bam_device *bdev) | |||
785 | writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR)); | 790 | writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR)); |
786 | 791 | ||
787 | spin_lock_irqsave(&bchan->vc.lock, flags); | 792 | spin_lock_irqsave(&bchan->vc.lock, flags); |
788 | async_desc = bchan->curr_txd; | ||
789 | 793 | ||
790 | if (async_desc) { | 794 | offset = readl_relaxed(bam_addr(bdev, i, BAM_P_SW_OFSTS)) & |
791 | async_desc->num_desc -= async_desc->xfer_len; | 795 | P_SW_OFSTS_MASK; |
792 | async_desc->curr_desc += async_desc->xfer_len; | 796 | offset /= sizeof(struct bam_desc_hw); |
793 | bchan->curr_txd = NULL; | 797 | |
798 | /* Number of bytes available to read */ | ||
799 | avail = CIRC_CNT(offset, bchan->head, MAX_DESCRIPTORS + 1); | ||
800 | |||
801 | list_for_each_entry_safe(async_desc, tmp, | ||
802 | &bchan->desc_list, desc_node) { | ||
803 | /* Not enough data to read */ | ||
804 | if (avail < async_desc->xfer_len) | ||
805 | break; | ||
794 | 806 | ||
795 | /* manage FIFO */ | 807 | /* manage FIFO */ |
796 | bchan->head += async_desc->xfer_len; | 808 | bchan->head += async_desc->xfer_len; |
797 | bchan->head %= MAX_DESCRIPTORS; | 809 | bchan->head %= MAX_DESCRIPTORS; |
798 | 810 | ||
811 | async_desc->num_desc -= async_desc->xfer_len; | ||
812 | async_desc->curr_desc += async_desc->xfer_len; | ||
813 | avail -= async_desc->xfer_len; | ||
814 | |||
799 | /* | 815 | /* |
800 | * if complete, process cookie. Otherwise | 816 | * if complete, process cookie. Otherwise |
801 | * push back to front of desc_issued so that | 817 | * push back to front of desc_issued so that |
802 | * it gets restarted by the tasklet | 818 | * it gets restarted by the tasklet |
803 | */ | 819 | */ |
804 | if (!async_desc->num_desc) | 820 | if (!async_desc->num_desc) { |
805 | vchan_cookie_complete(&async_desc->vd); | 821 | vchan_cookie_complete(&async_desc->vd); |
806 | else | 822 | } else { |
807 | list_add(&async_desc->vd.node, | 823 | list_add(&async_desc->vd.node, |
808 | &bchan->vc.desc_issued); | 824 | &bchan->vc.desc_issued); |
825 | } | ||
826 | list_del(&async_desc->desc_node); | ||
809 | } | 827 | } |
810 | 828 | ||
811 | spin_unlock_irqrestore(&bchan->vc.lock, flags); | 829 | spin_unlock_irqrestore(&bchan->vc.lock, flags); |
@@ -867,6 +885,7 @@ static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | |||
867 | struct dma_tx_state *txstate) | 885 | struct dma_tx_state *txstate) |
868 | { | 886 | { |
869 | struct bam_chan *bchan = to_bam_chan(chan); | 887 | struct bam_chan *bchan = to_bam_chan(chan); |
888 | struct bam_async_desc *async_desc; | ||
870 | struct virt_dma_desc *vd; | 889 | struct virt_dma_desc *vd; |
871 | int ret; | 890 | int ret; |
872 | size_t residue = 0; | 891 | size_t residue = 0; |
@@ -882,11 +901,17 @@ static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | |||
882 | 901 | ||
883 | spin_lock_irqsave(&bchan->vc.lock, flags); | 902 | spin_lock_irqsave(&bchan->vc.lock, flags); |
884 | vd = vchan_find_desc(&bchan->vc, cookie); | 903 | vd = vchan_find_desc(&bchan->vc, cookie); |
885 | if (vd) | 904 | if (vd) { |
886 | residue = container_of(vd, struct bam_async_desc, vd)->length; | 905 | residue = container_of(vd, struct bam_async_desc, vd)->length; |
887 | else if (bchan->curr_txd && bchan->curr_txd->vd.tx.cookie == cookie) | 906 | } else { |
888 | for (i = 0; i < bchan->curr_txd->num_desc; i++) | 907 | list_for_each_entry(async_desc, &bchan->desc_list, desc_node) { |
889 | residue += bchan->curr_txd->curr_desc[i].size; | 908 | if (async_desc->vd.tx.cookie != cookie) |
909 | continue; | ||
910 | |||
911 | for (i = 0; i < async_desc->num_desc; i++) | ||
912 | residue += async_desc->curr_desc[i].size; | ||
913 | } | ||
914 | } | ||
890 | 915 | ||
891 | spin_unlock_irqrestore(&bchan->vc.lock, flags); | 916 | spin_unlock_irqrestore(&bchan->vc.lock, flags); |
892 | 917 | ||
@@ -927,63 +952,86 @@ static void bam_start_dma(struct bam_chan *bchan) | |||
927 | { | 952 | { |
928 | struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc); | 953 | struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc); |
929 | struct bam_device *bdev = bchan->bdev; | 954 | struct bam_device *bdev = bchan->bdev; |
930 | struct bam_async_desc *async_desc; | 955 | struct bam_async_desc *async_desc = NULL; |
931 | struct bam_desc_hw *desc; | 956 | struct bam_desc_hw *desc; |
932 | struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt, | 957 | struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt, |
933 | sizeof(struct bam_desc_hw)); | 958 | sizeof(struct bam_desc_hw)); |
934 | int ret; | 959 | int ret; |
960 | unsigned int avail; | ||
961 | struct dmaengine_desc_callback cb; | ||
935 | 962 | ||
936 | lockdep_assert_held(&bchan->vc.lock); | 963 | lockdep_assert_held(&bchan->vc.lock); |
937 | 964 | ||
938 | if (!vd) | 965 | if (!vd) |
939 | return; | 966 | return; |
940 | 967 | ||
941 | list_del(&vd->node); | ||
942 | |||
943 | async_desc = container_of(vd, struct bam_async_desc, vd); | ||
944 | bchan->curr_txd = async_desc; | ||
945 | |||
946 | ret = pm_runtime_get_sync(bdev->dev); | 968 | ret = pm_runtime_get_sync(bdev->dev); |
947 | if (ret < 0) | 969 | if (ret < 0) |
948 | return; | 970 | return; |
949 | 971 | ||
950 | /* on first use, initialize the channel hardware */ | 972 | while (vd && !IS_BUSY(bchan)) { |
951 | if (!bchan->initialized) | 973 | list_del(&vd->node); |
952 | bam_chan_init_hw(bchan, async_desc->dir); | ||
953 | 974 | ||
954 | /* apply new slave config changes, if necessary */ | 975 | async_desc = container_of(vd, struct bam_async_desc, vd); |
955 | if (bchan->reconfigure) | ||
956 | bam_apply_new_config(bchan, async_desc->dir); | ||
957 | 976 | ||
958 | desc = bchan->curr_txd->curr_desc; | 977 | /* on first use, initialize the channel hardware */ |
978 | if (!bchan->initialized) | ||
979 | bam_chan_init_hw(bchan, async_desc->dir); | ||
959 | 980 | ||
960 | if (async_desc->num_desc > MAX_DESCRIPTORS) | 981 | /* apply new slave config changes, if necessary */ |
961 | async_desc->xfer_len = MAX_DESCRIPTORS; | 982 | if (bchan->reconfigure) |
962 | else | 983 | bam_apply_new_config(bchan, async_desc->dir); |
963 | async_desc->xfer_len = async_desc->num_desc; | ||
964 | 984 | ||
965 | /* set any special flags on the last descriptor */ | 985 | desc = async_desc->curr_desc; |
966 | if (async_desc->num_desc == async_desc->xfer_len) | 986 | avail = CIRC_SPACE(bchan->tail, bchan->head, |
967 | desc[async_desc->xfer_len - 1].flags |= | 987 | MAX_DESCRIPTORS + 1); |
968 | cpu_to_le16(async_desc->flags); | 988 | |
969 | else | 989 | if (async_desc->num_desc > avail) |
970 | desc[async_desc->xfer_len - 1].flags |= | 990 | async_desc->xfer_len = avail; |
971 | cpu_to_le16(DESC_FLAG_INT); | 991 | else |
992 | async_desc->xfer_len = async_desc->num_desc; | ||
993 | |||
994 | /* set any special flags on the last descriptor */ | ||
995 | if (async_desc->num_desc == async_desc->xfer_len) | ||
996 | desc[async_desc->xfer_len - 1].flags |= | ||
997 | cpu_to_le16(async_desc->flags); | ||
972 | 998 | ||
973 | if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) { | 999 | vd = vchan_next_desc(&bchan->vc); |
974 | u32 partial = MAX_DESCRIPTORS - bchan->tail; | ||
975 | 1000 | ||
976 | memcpy(&fifo[bchan->tail], desc, | 1001 | dmaengine_desc_get_callback(&async_desc->vd.tx, &cb); |
977 | partial * sizeof(struct bam_desc_hw)); | 1002 | |
978 | memcpy(fifo, &desc[partial], (async_desc->xfer_len - partial) * | 1003 | /* |
1004 | * An interrupt is generated at this desc, if | ||
1005 | * - FIFO is FULL. | ||
1006 | * - No more descriptors to add. | ||
1007 | * - A completion callback was requested for this desc. | ||
1008 | * In that case, BAM delivers the completion callback | ||
1009 | * for this desc and continues processing the next desc. | ||
1010 | */ | ||
1011 | if (((avail <= async_desc->xfer_len) || !vd || | ||
1012 | dmaengine_desc_callback_valid(&cb)) && | ||
1013 | !(async_desc->flags & DESC_FLAG_EOT)) | ||
1014 | desc[async_desc->xfer_len - 1].flags |= | ||
1015 | cpu_to_le16(DESC_FLAG_INT); | ||
1016 | |||
1017 | if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) { | ||
1018 | u32 partial = MAX_DESCRIPTORS - bchan->tail; | ||
1019 | |||
1020 | memcpy(&fifo[bchan->tail], desc, | ||
1021 | partial * sizeof(struct bam_desc_hw)); | ||
1022 | memcpy(fifo, &desc[partial], | ||
1023 | (async_desc->xfer_len - partial) * | ||
979 | sizeof(struct bam_desc_hw)); | 1024 | sizeof(struct bam_desc_hw)); |
980 | } else { | 1025 | } else { |
981 | memcpy(&fifo[bchan->tail], desc, | 1026 | memcpy(&fifo[bchan->tail], desc, |
982 | async_desc->xfer_len * sizeof(struct bam_desc_hw)); | 1027 | async_desc->xfer_len * |
983 | } | 1028 | sizeof(struct bam_desc_hw)); |
1029 | } | ||
984 | 1030 | ||
985 | bchan->tail += async_desc->xfer_len; | 1031 | bchan->tail += async_desc->xfer_len; |
986 | bchan->tail %= MAX_DESCRIPTORS; | 1032 | bchan->tail %= MAX_DESCRIPTORS; |
1033 | list_add_tail(&async_desc->desc_node, &bchan->desc_list); | ||
1034 | } | ||
987 | 1035 | ||
988 | /* ensure descriptor writes and dma start not reordered */ | 1036 | /* ensure descriptor writes and dma start not reordered */ |
989 | wmb(); | 1037 | wmb(); |
@@ -1012,7 +1060,7 @@ static void dma_tasklet(unsigned long data) | |||
1012 | bchan = &bdev->channels[i]; | 1060 | bchan = &bdev->channels[i]; |
1013 | spin_lock_irqsave(&bchan->vc.lock, flags); | 1061 | spin_lock_irqsave(&bchan->vc.lock, flags); |
1014 | 1062 | ||
1015 | if (!list_empty(&bchan->vc.desc_issued) && !bchan->curr_txd) | 1063 | if (!list_empty(&bchan->vc.desc_issued) && !IS_BUSY(bchan)) |
1016 | bam_start_dma(bchan); | 1064 | bam_start_dma(bchan); |
1017 | spin_unlock_irqrestore(&bchan->vc.lock, flags); | 1065 | spin_unlock_irqrestore(&bchan->vc.lock, flags); |
1018 | } | 1066 | } |
@@ -1033,7 +1081,7 @@ static void bam_issue_pending(struct dma_chan *chan) | |||
1033 | spin_lock_irqsave(&bchan->vc.lock, flags); | 1081 | spin_lock_irqsave(&bchan->vc.lock, flags); |
1034 | 1082 | ||
1035 | /* if work pending and idle, start a transaction */ | 1083 | /* if work pending and idle, start a transaction */ |
1036 | if (vchan_issue_pending(&bchan->vc) && !bchan->curr_txd) | 1084 | if (vchan_issue_pending(&bchan->vc) && !IS_BUSY(bchan)) |
1037 | bam_start_dma(bchan); | 1085 | bam_start_dma(bchan); |
1038 | 1086 | ||
1039 | spin_unlock_irqrestore(&bchan->vc.lock, flags); | 1087 | spin_unlock_irqrestore(&bchan->vc.lock, flags); |
@@ -1133,6 +1181,7 @@ static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan, | |||
1133 | 1181 | ||
1134 | vchan_init(&bchan->vc, &bdev->common); | 1182 | vchan_init(&bchan->vc, &bdev->common); |
1135 | bchan->vc.desc_free = bam_dma_free_desc; | 1183 | bchan->vc.desc_free = bam_dma_free_desc; |
1184 | INIT_LIST_HEAD(&bchan->desc_list); | ||
1136 | } | 1185 | } |
1137 | 1186 | ||
1138 | static const struct of_device_id bam_of_match[] = { | 1187 | static const struct of_device_id bam_of_match[] = { |
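Editor's note: the bam_dma rework replaces the single curr_txd pointer with a per-channel desc_list and tracks FIFO occupancy with the <linux/circ_buf.h> helpers; IS_BUSY() simply means "no space left", and the IRQ path drains as many descriptors as CIRC_CNT() reports complete against the hardware offset. Note that the driver passes its write index (tail) as the first macro argument and its read index (head) as the second, matching the macros' producer/consumer order. A tiny sketch of the two helpers on a power-of-two ring (the macros assume a power-of-two size):

	#include <linux/circ_buf.h>

	#define FOO_RING_SIZE	4096	/* must be a power of two */

	struct foo_ring {
		unsigned int head;	/* read (consumer) index */
		unsigned int tail;	/* write (producer) index */
	};

	/* Slots the producer may still fill without overwriting. */
	static unsigned int foo_ring_space(const struct foo_ring *r)
	{
		return CIRC_SPACE(r->tail, r->head, FOO_RING_SIZE);
	}

	/* Entries the consumer has yet to process. */
	static unsigned int foo_ring_count(const struct foo_ring *r)
	{
		return CIRC_CNT(r->tail, r->head, FOO_RING_SIZE);
	}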
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index 1adeb3265085..c7a89c22890e 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c | |||
@@ -823,6 +823,13 @@ static const struct sa11x0_dma_channel_desc chan_desc[] = { | |||
823 | CD(Ser4SSPRc, DDAR_RW), | 823 | CD(Ser4SSPRc, DDAR_RW), |
824 | }; | 824 | }; |
825 | 825 | ||
826 | static const struct dma_slave_map sa11x0_dma_map[] = { | ||
827 | { "sa11x0-ir", "tx", "Ser2ICPTr" }, | ||
828 | { "sa11x0-ir", "rx", "Ser2ICPRc" }, | ||
829 | { "sa11x0-ssp", "tx", "Ser4SSPTr" }, | ||
830 | { "sa11x0-ssp", "rx", "Ser4SSPRc" }, | ||
831 | }; | ||
832 | |||
826 | static int sa11x0_dma_init_dmadev(struct dma_device *dmadev, | 833 | static int sa11x0_dma_init_dmadev(struct dma_device *dmadev, |
827 | struct device *dev) | 834 | struct device *dev) |
828 | { | 835 | { |
@@ -909,6 +916,10 @@ static int sa11x0_dma_probe(struct platform_device *pdev) | |||
909 | spin_lock_init(&d->lock); | 916 | spin_lock_init(&d->lock); |
910 | INIT_LIST_HEAD(&d->chan_pending); | 917 | INIT_LIST_HEAD(&d->chan_pending); |
911 | 918 | ||
919 | d->slave.filter.fn = sa11x0_dma_filter_fn; | ||
920 | d->slave.filter.mapcnt = ARRAY_SIZE(sa11x0_dma_map); | ||
921 | d->slave.filter.map = sa11x0_dma_map; | ||
922 | |||
912 | d->base = ioremap(res->start, resource_size(res)); | 923 | d->base = ioremap(res->start, resource_size(res)); |
913 | if (!d->base) { | 924 | if (!d->base) { |
914 | ret = -ENOMEM; | 925 | ret = -ENOMEM; |
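Editor's note: with a dma_slave_map table registered, legacy (non-DT) sa11x0 clients no longer need to open-code a filter function; the dmaengine core matches the requesting device name and channel role against the map. On the client side the request then collapses to dma_request_chan(). A hedged sketch, reusing the "rx" role from the table above (the foo_* wrapper is illustrative):

	#include <linux/dmaengine.h>
	#include <linux/err.h>

	static struct dma_chan *foo_get_rx_chan(struct device *dev)
	{
		/* Matches (dev_name(dev), "rx") against the controller's slave map. */
		struct dma_chan *chan = dma_request_chan(dev, "rx");

		return IS_ERR(chan) ? NULL : chan;
	}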
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c new file mode 100644 index 000000000000..b652071a2096 --- /dev/null +++ b/drivers/dma/sprd-dma.c | |||
@@ -0,0 +1,988 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2017 Spreadtrum Communications Inc. | ||
3 | * | ||
4 | * SPDX-License-Identifier: GPL-2.0 | ||
5 | */ | ||
6 | |||
7 | #include <linux/clk.h> | ||
8 | #include <linux/dma-mapping.h> | ||
9 | #include <linux/errno.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/io.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/of.h> | ||
16 | #include <linux/of_dma.h> | ||
17 | #include <linux/of_device.h> | ||
18 | #include <linux/pm_runtime.h> | ||
19 | #include <linux/slab.h> | ||
20 | |||
21 | #include "virt-dma.h" | ||
22 | |||
23 | #define SPRD_DMA_CHN_REG_OFFSET 0x1000 | ||
24 | #define SPRD_DMA_CHN_REG_LENGTH 0x40 | ||
25 | #define SPRD_DMA_MEMCPY_MIN_SIZE 64 | ||
26 | |||
27 | /* DMA global registers definition */ | ||
28 | #define SPRD_DMA_GLB_PAUSE 0x0 | ||
29 | #define SPRD_DMA_GLB_FRAG_WAIT 0x4 | ||
30 | #define SPRD_DMA_GLB_REQ_PEND0_EN 0x8 | ||
31 | #define SPRD_DMA_GLB_REQ_PEND1_EN 0xc | ||
32 | #define SPRD_DMA_GLB_INT_RAW_STS 0x10 | ||
33 | #define SPRD_DMA_GLB_INT_MSK_STS 0x14 | ||
34 | #define SPRD_DMA_GLB_REQ_STS 0x18 | ||
35 | #define SPRD_DMA_GLB_CHN_EN_STS 0x1c | ||
36 | #define SPRD_DMA_GLB_DEBUG_STS 0x20 | ||
37 | #define SPRD_DMA_GLB_ARB_SEL_STS 0x24 | ||
38 | #define SPRD_DMA_GLB_REQ_UID(uid) (0x4 * ((uid) - 1)) | ||
39 | #define SPRD_DMA_GLB_REQ_UID_OFFSET 0x2000 | ||
40 | |||
41 | /* DMA channel registers definition */ | ||
42 | #define SPRD_DMA_CHN_PAUSE 0x0 | ||
43 | #define SPRD_DMA_CHN_REQ 0x4 | ||
44 | #define SPRD_DMA_CHN_CFG 0x8 | ||
45 | #define SPRD_DMA_CHN_INTC 0xc | ||
46 | #define SPRD_DMA_CHN_SRC_ADDR 0x10 | ||
47 | #define SPRD_DMA_CHN_DES_ADDR 0x14 | ||
48 | #define SPRD_DMA_CHN_FRG_LEN 0x18 | ||
49 | #define SPRD_DMA_CHN_BLK_LEN 0x1c | ||
50 | #define SPRD_DMA_CHN_TRSC_LEN 0x20 | ||
51 | #define SPRD_DMA_CHN_TRSF_STEP 0x24 | ||
52 | #define SPRD_DMA_CHN_WARP_PTR 0x28 | ||
53 | #define SPRD_DMA_CHN_WARP_TO 0x2c | ||
54 | #define SPRD_DMA_CHN_LLIST_PTR 0x30 | ||
55 | #define SPRD_DMA_CHN_FRAG_STEP 0x34 | ||
56 | #define SPRD_DMA_CHN_SRC_BLK_STEP 0x38 | ||
57 | #define SPRD_DMA_CHN_DES_BLK_STEP 0x3c | ||
58 | |||
59 | /* SPRD_DMA_CHN_INTC register definition */ | ||
60 | #define SPRD_DMA_INT_MASK GENMASK(4, 0) | ||
61 | #define SPRD_DMA_INT_CLR_OFFSET 24 | ||
62 | #define SPRD_DMA_FRAG_INT_EN BIT(0) | ||
63 | #define SPRD_DMA_BLK_INT_EN BIT(1) | ||
64 | #define SPRD_DMA_TRANS_INT_EN BIT(2) | ||
65 | #define SPRD_DMA_LIST_INT_EN BIT(3) | ||
66 | #define SPRD_DMA_CFG_ERR_INT_EN BIT(4) | ||
67 | |||
68 | /* SPRD_DMA_CHN_CFG register definition */ | ||
69 | #define SPRD_DMA_CHN_EN BIT(0) | ||
70 | #define SPRD_DMA_WAIT_BDONE_OFFSET 24 | ||
71 | #define SPRD_DMA_DONOT_WAIT_BDONE 1 | ||
72 | |||
73 | /* SPRD_DMA_CHN_REQ register definition */ | ||
74 | #define SPRD_DMA_REQ_EN BIT(0) | ||
75 | |||
76 | /* SPRD_DMA_CHN_PAUSE register definition */ | ||
77 | #define SPRD_DMA_PAUSE_EN BIT(0) | ||
78 | #define SPRD_DMA_PAUSE_STS BIT(2) | ||
79 | #define SPRD_DMA_PAUSE_CNT 0x2000 | ||
80 | |||
81 | /* DMA_CHN_WARP_* register definition */ | ||
82 | #define SPRD_DMA_HIGH_ADDR_MASK GENMASK(31, 28) | ||
83 | #define SPRD_DMA_LOW_ADDR_MASK GENMASK(31, 0) | ||
84 | #define SPRD_DMA_HIGH_ADDR_OFFSET 4 | ||
85 | |||
86 | /* SPRD_DMA_CHN_INTC register definition */ | ||
87 | #define SPRD_DMA_FRAG_INT_STS BIT(16) | ||
88 | #define SPRD_DMA_BLK_INT_STS BIT(17) | ||
89 | #define SPRD_DMA_TRSC_INT_STS BIT(18) | ||
90 | #define SPRD_DMA_LIST_INT_STS BIT(19) | ||
91 | #define SPRD_DMA_CFGERR_INT_STS BIT(20) | ||
92 | #define SPRD_DMA_CHN_INT_STS \ | ||
93 | (SPRD_DMA_FRAG_INT_STS | SPRD_DMA_BLK_INT_STS | \ | ||
94 | SPRD_DMA_TRSC_INT_STS | SPRD_DMA_LIST_INT_STS | \ | ||
95 | SPRD_DMA_CFGERR_INT_STS) | ||
96 | |||
97 | /* SPRD_DMA_CHN_FRG_LEN register definition */ | ||
98 | #define SPRD_DMA_SRC_DATAWIDTH_OFFSET 30 | ||
99 | #define SPRD_DMA_DES_DATAWIDTH_OFFSET 28 | ||
100 | #define SPRD_DMA_SWT_MODE_OFFSET 26 | ||
101 | #define SPRD_DMA_REQ_MODE_OFFSET 24 | ||
102 | #define SPRD_DMA_REQ_MODE_MASK GENMASK(1, 0) | ||
103 | #define SPRD_DMA_FIX_SEL_OFFSET 21 | ||
104 | #define SPRD_DMA_FIX_EN_OFFSET 20 | ||
105 | #define SPRD_DMA_LLIST_END_OFFSET 19 | ||
106 | #define SPRD_DMA_FRG_LEN_MASK GENMASK(16, 0) | ||
107 | |||
108 | /* SPRD_DMA_CHN_BLK_LEN register definition */ | ||
109 | #define SPRD_DMA_BLK_LEN_MASK GENMASK(16, 0) | ||
110 | |||
111 | /* SPRD_DMA_CHN_TRSC_LEN register definition */ | ||
112 | #define SPRD_DMA_TRSC_LEN_MASK GENMASK(27, 0) | ||
113 | |||
114 | /* SPRD_DMA_CHN_TRSF_STEP register definition */ | ||
115 | #define SPRD_DMA_DEST_TRSF_STEP_OFFSET 16 | ||
116 | #define SPRD_DMA_SRC_TRSF_STEP_OFFSET 0 | ||
117 | #define SPRD_DMA_TRSF_STEP_MASK GENMASK(15, 0) | ||
118 | |||
119 | #define SPRD_DMA_SOFTWARE_UID 0 | ||
120 | |||
121 | /* | ||
122 | * enum sprd_dma_req_mode: define the DMA request mode | ||
123 | * @SPRD_DMA_FRAG_REQ: fragment request mode | ||
124 | * @SPRD_DMA_BLK_REQ: block request mode | ||
125 | * @SPRD_DMA_TRANS_REQ: transaction request mode | ||
126 | * @SPRD_DMA_LIST_REQ: link-list request mode | ||
127 | * | ||
128 | * We have 4 types of request mode: fragment mode, block mode, transaction | ||
129 | * mode and link-list mode. One transaction can contain several blocks, and | ||
130 | * one block can contain several fragments. Link-list mode means we can save | ||
131 | * several DMA configurations into one reserved memory area, then the DMA | ||
132 | * engine can fetch each configuration automatically to start the transfer. | ||
133 | */ | ||
134 | enum sprd_dma_req_mode { | ||
135 | SPRD_DMA_FRAG_REQ, | ||
136 | SPRD_DMA_BLK_REQ, | ||
137 | SPRD_DMA_TRANS_REQ, | ||
138 | SPRD_DMA_LIST_REQ, | ||
139 | }; | ||
140 | |||
141 | /* | ||
142 | * enum sprd_dma_int_type: define the DMA interrupt type | ||
143 | * @SPRD_DMA_NO_INT: do not need to generate DMA interrupts. | ||
144 | * @SPRD_DMA_FRAG_INT: fragment done interrupt when one fragment request | ||
145 | * is done. | ||
146 | * @SPRD_DMA_BLK_INT: block done interrupt when one block request is done. | ||
147 | * @SPRD_DMA_BLK_FRAG_INT: block and fragment interrupt when one fragment | ||
148 | * or one block request is done. | ||
149 | * @SPRD_DMA_TRANS_INT: transaction done interrupt when one transaction | ||
150 | * request is done. | ||
151 | * @SPRD_DMA_TRANS_FRAG_INT: transaction and fragment interrupt when one | ||
152 | * transaction request or fragment request is done. | ||
153 | * @SPRD_DMA_TRANS_BLK_INT: transaction and block interrupt when one | ||
154 | * transaction request or block request is done. | ||
155 | * @SPRD_DMA_LIST_INT: link-list done interrupt when one link-list request | ||
156 | * is done. | ||
157 | * @SPRD_DMA_CFGERR_INT: configure error interrupt when configuration is | ||
158 | * incorrect. | ||
159 | */ | ||
160 | enum sprd_dma_int_type { | ||
161 | SPRD_DMA_NO_INT, | ||
162 | SPRD_DMA_FRAG_INT, | ||
163 | SPRD_DMA_BLK_INT, | ||
164 | SPRD_DMA_BLK_FRAG_INT, | ||
165 | SPRD_DMA_TRANS_INT, | ||
166 | SPRD_DMA_TRANS_FRAG_INT, | ||
167 | SPRD_DMA_TRANS_BLK_INT, | ||
168 | SPRD_DMA_LIST_INT, | ||
169 | SPRD_DMA_CFGERR_INT, | ||
170 | }; | ||
171 | |||
172 | /* dma channel hardware configuration */ | ||
173 | struct sprd_dma_chn_hw { | ||
174 | u32 pause; | ||
175 | u32 req; | ||
176 | u32 cfg; | ||
177 | u32 intc; | ||
178 | u32 src_addr; | ||
179 | u32 des_addr; | ||
180 | u32 frg_len; | ||
181 | u32 blk_len; | ||
182 | u32 trsc_len; | ||
183 | u32 trsf_step; | ||
184 | u32 wrap_ptr; | ||
185 | u32 wrap_to; | ||
186 | u32 llist_ptr; | ||
187 | u32 frg_step; | ||
188 | u32 src_blk_step; | ||
189 | u32 des_blk_step; | ||
190 | }; | ||
191 | |||
192 | /* dma request description */ | ||
193 | struct sprd_dma_desc { | ||
194 | struct virt_dma_desc vd; | ||
195 | struct sprd_dma_chn_hw chn_hw; | ||
196 | }; | ||
197 | |||
198 | /* dma channel description */ | ||
199 | struct sprd_dma_chn { | ||
200 | struct virt_dma_chan vc; | ||
201 | void __iomem *chn_base; | ||
202 | u32 chn_num; | ||
203 | u32 dev_id; | ||
204 | struct sprd_dma_desc *cur_desc; | ||
205 | }; | ||
206 | |||
207 | /* SPRD dma device */ | ||
208 | struct sprd_dma_dev { | ||
209 | struct dma_device dma_dev; | ||
210 | void __iomem *glb_base; | ||
211 | struct clk *clk; | ||
212 | struct clk *ashb_clk; | ||
213 | int irq; | ||
214 | u32 total_chns; | ||
215 | struct sprd_dma_chn channels[0]; | ||
216 | }; | ||
217 | |||
218 | static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param); | ||
219 | static struct of_dma_filter_info sprd_dma_info = { | ||
220 | .filter_fn = sprd_dma_filter_fn, | ||
221 | }; | ||
222 | |||
223 | static inline struct sprd_dma_chn *to_sprd_dma_chan(struct dma_chan *c) | ||
224 | { | ||
225 | return container_of(c, struct sprd_dma_chn, vc.chan); | ||
226 | } | ||
227 | |||
228 | static inline struct sprd_dma_dev *to_sprd_dma_dev(struct dma_chan *c) | ||
229 | { | ||
230 | struct sprd_dma_chn *schan = to_sprd_dma_chan(c); | ||
231 | |||
232 | return container_of(schan, struct sprd_dma_dev, channels[c->chan_id]); | ||
233 | } | ||
234 | |||
235 | static inline struct sprd_dma_desc *to_sprd_dma_desc(struct virt_dma_desc *vd) | ||
236 | { | ||
237 | return container_of(vd, struct sprd_dma_desc, vd); | ||
238 | } | ||
239 | |||
240 | static void sprd_dma_chn_update(struct sprd_dma_chn *schan, u32 reg, | ||
241 | u32 mask, u32 val) | ||
242 | { | ||
243 | u32 orig = readl(schan->chn_base + reg); | ||
244 | u32 tmp; | ||
245 | |||
246 | tmp = (orig & ~mask) | val; | ||
247 | writel(tmp, schan->chn_base + reg); | ||
248 | } | ||
249 | |||
250 | static int sprd_dma_enable(struct sprd_dma_dev *sdev) | ||
251 | { | ||
252 | int ret; | ||
253 | |||
254 | ret = clk_prepare_enable(sdev->clk); | ||
255 | if (ret) | ||
256 | return ret; | ||
257 | |||
258 | /* | ||
259 | * The ashb_clk is optional and only used by the AGCP DMA controller, so | ||
260 | * we need to check whether the ashb_clk needs to be enabled. | ||
261 | */ | ||
262 | if (!IS_ERR(sdev->ashb_clk)) | ||
263 | ret = clk_prepare_enable(sdev->ashb_clk); | ||
264 | |||
265 | return ret; | ||
266 | } | ||
267 | |||
268 | static void sprd_dma_disable(struct sprd_dma_dev *sdev) | ||
269 | { | ||
270 | clk_disable_unprepare(sdev->clk); | ||
271 | |||
272 | /* | ||
273 | * Check whether we need to disable the optional ashb_clk for AGCP DMA. | ||
274 | */ | ||
275 | if (!IS_ERR(sdev->ashb_clk)) | ||
276 | clk_disable_unprepare(sdev->ashb_clk); | ||
277 | } | ||
278 | |||
279 | static void sprd_dma_set_uid(struct sprd_dma_chn *schan) | ||
280 | { | ||
281 | struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan); | ||
282 | u32 dev_id = schan->dev_id; | ||
283 | |||
284 | if (dev_id != SPRD_DMA_SOFTWARE_UID) { | ||
285 | u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET + | ||
286 | SPRD_DMA_GLB_REQ_UID(dev_id); | ||
287 | |||
288 | writel(schan->chn_num + 1, sdev->glb_base + uid_offset); | ||
289 | } | ||
290 | } | ||
291 | |||
292 | static void sprd_dma_unset_uid(struct sprd_dma_chn *schan) | ||
293 | { | ||
294 | struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan); | ||
295 | u32 dev_id = schan->dev_id; | ||
296 | |||
297 | if (dev_id != SPRD_DMA_SOFTWARE_UID) { | ||
298 | u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET + | ||
299 | SPRD_DMA_GLB_REQ_UID(dev_id); | ||
300 | |||
301 | writel(0, sdev->glb_base + uid_offset); | ||
302 | } | ||
303 | } | ||
304 | |||
305 | static void sprd_dma_clear_int(struct sprd_dma_chn *schan) | ||
306 | { | ||
307 | sprd_dma_chn_update(schan, SPRD_DMA_CHN_INTC, | ||
308 | SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET, | ||
309 | SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET); | ||
310 | } | ||
311 | |||
312 | static void sprd_dma_enable_chn(struct sprd_dma_chn *schan) | ||
313 | { | ||
314 | sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN, | ||
315 | SPRD_DMA_CHN_EN); | ||
316 | } | ||
317 | |||
318 | static void sprd_dma_disable_chn(struct sprd_dma_chn *schan) | ||
319 | { | ||
320 | sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN, 0); | ||
321 | } | ||
322 | |||
323 | static void sprd_dma_soft_request(struct sprd_dma_chn *schan) | ||
324 | { | ||
325 | sprd_dma_chn_update(schan, SPRD_DMA_CHN_REQ, SPRD_DMA_REQ_EN, | ||
326 | SPRD_DMA_REQ_EN); | ||
327 | } | ||
328 | |||
329 | static void sprd_dma_pause_resume(struct sprd_dma_chn *schan, bool enable) | ||
330 | { | ||
331 | struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan); | ||
332 | u32 pause, timeout = SPRD_DMA_PAUSE_CNT; | ||
333 | |||
334 | if (enable) { | ||
335 | sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE, | ||
336 | SPRD_DMA_PAUSE_EN, SPRD_DMA_PAUSE_EN); | ||
337 | |||
338 | do { | ||
339 | pause = readl(schan->chn_base + SPRD_DMA_CHN_PAUSE); | ||
340 | if (pause & SPRD_DMA_PAUSE_STS) | ||
341 | break; | ||
342 | |||
343 | cpu_relax(); | ||
344 | } while (--timeout > 0); | ||
345 | |||
346 | if (!timeout) | ||
347 | dev_warn(sdev->dma_dev.dev, | ||
348 | "pause dma controller timeout\n"); | ||
349 | } else { | ||
350 | sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE, | ||
351 | SPRD_DMA_PAUSE_EN, 0); | ||
352 | } | ||
353 | } | ||
354 | |||
355 | static void sprd_dma_stop_and_disable(struct sprd_dma_chn *schan) | ||
356 | { | ||
357 | u32 cfg = readl(schan->chn_base + SPRD_DMA_CHN_CFG); | ||
358 | |||
359 | if (!(cfg & SPRD_DMA_CHN_EN)) | ||
360 | return; | ||
361 | |||
362 | sprd_dma_pause_resume(schan, true); | ||
363 | sprd_dma_disable_chn(schan); | ||
364 | } | ||
365 | |||
366 | static unsigned long sprd_dma_get_dst_addr(struct sprd_dma_chn *schan) | ||
367 | { | ||
368 | unsigned long addr, addr_high; | ||
369 | |||
370 | addr = readl(schan->chn_base + SPRD_DMA_CHN_DES_ADDR); | ||
371 | addr_high = readl(schan->chn_base + SPRD_DMA_CHN_WARP_TO) & | ||
372 | SPRD_DMA_HIGH_ADDR_MASK; | ||
373 | |||
374 | return addr | (addr_high << SPRD_DMA_HIGH_ADDR_OFFSET); | ||
375 | } | ||
376 | |||
377 | static enum sprd_dma_int_type sprd_dma_get_int_type(struct sprd_dma_chn *schan) | ||
378 | { | ||
379 | struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan); | ||
380 | u32 intc_sts = readl(schan->chn_base + SPRD_DMA_CHN_INTC) & | ||
381 | SPRD_DMA_CHN_INT_STS; | ||
382 | |||
383 | switch (intc_sts) { | ||
384 | case SPRD_DMA_CFGERR_INT_STS: | ||
385 | return SPRD_DMA_CFGERR_INT; | ||
386 | |||
387 | case SPRD_DMA_LIST_INT_STS: | ||
388 | return SPRD_DMA_LIST_INT; | ||
389 | |||
390 | case SPRD_DMA_TRSC_INT_STS: | ||
391 | return SPRD_DMA_TRANS_INT; | ||
392 | |||
393 | case SPRD_DMA_BLK_INT_STS: | ||
394 | return SPRD_DMA_BLK_INT; | ||
395 | |||
396 | case SPRD_DMA_FRAG_INT_STS: | ||
397 | return SPRD_DMA_FRAG_INT; | ||
398 | |||
399 | default: | ||
400 | dev_warn(sdev->dma_dev.dev, "incorrect dma interrupt type\n"); | ||
401 | return SPRD_DMA_NO_INT; | ||
402 | } | ||
403 | } | ||
404 | |||
405 | static enum sprd_dma_req_mode sprd_dma_get_req_type(struct sprd_dma_chn *schan) | ||
406 | { | ||
407 | u32 frag_reg = readl(schan->chn_base + SPRD_DMA_CHN_FRG_LEN); | ||
408 | |||
409 | return (frag_reg >> SPRD_DMA_REQ_MODE_OFFSET) & SPRD_DMA_REQ_MODE_MASK; | ||
410 | } | ||
411 | |||
412 | static void sprd_dma_set_chn_config(struct sprd_dma_chn *schan, | ||
413 | struct sprd_dma_desc *sdesc) | ||
414 | { | ||
415 | struct sprd_dma_chn_hw *cfg = &sdesc->chn_hw; | ||
416 | |||
417 | writel(cfg->pause, schan->chn_base + SPRD_DMA_CHN_PAUSE); | ||
418 | writel(cfg->cfg, schan->chn_base + SPRD_DMA_CHN_CFG); | ||
419 | writel(cfg->intc, schan->chn_base + SPRD_DMA_CHN_INTC); | ||
420 | writel(cfg->src_addr, schan->chn_base + SPRD_DMA_CHN_SRC_ADDR); | ||
421 | writel(cfg->des_addr, schan->chn_base + SPRD_DMA_CHN_DES_ADDR); | ||
422 | writel(cfg->frg_len, schan->chn_base + SPRD_DMA_CHN_FRG_LEN); | ||
423 | writel(cfg->blk_len, schan->chn_base + SPRD_DMA_CHN_BLK_LEN); | ||
424 | writel(cfg->trsc_len, schan->chn_base + SPRD_DMA_CHN_TRSC_LEN); | ||
425 | writel(cfg->trsf_step, schan->chn_base + SPRD_DMA_CHN_TRSF_STEP); | ||
426 | writel(cfg->wrap_ptr, schan->chn_base + SPRD_DMA_CHN_WARP_PTR); | ||
427 | writel(cfg->wrap_to, schan->chn_base + SPRD_DMA_CHN_WARP_TO); | ||
428 | writel(cfg->llist_ptr, schan->chn_base + SPRD_DMA_CHN_LLIST_PTR); | ||
429 | writel(cfg->frg_step, schan->chn_base + SPRD_DMA_CHN_FRAG_STEP); | ||
430 | writel(cfg->src_blk_step, schan->chn_base + SPRD_DMA_CHN_SRC_BLK_STEP); | ||
431 | writel(cfg->des_blk_step, schan->chn_base + SPRD_DMA_CHN_DES_BLK_STEP); | ||
432 | writel(cfg->req, schan->chn_base + SPRD_DMA_CHN_REQ); | ||
433 | } | ||
434 | |||
435 | static void sprd_dma_start(struct sprd_dma_chn *schan) | ||
436 | { | ||
437 | struct virt_dma_desc *vd = vchan_next_desc(&schan->vc); | ||
438 | |||
439 | if (!vd) | ||
440 | return; | ||
441 | |||
442 | list_del(&vd->node); | ||
443 | schan->cur_desc = to_sprd_dma_desc(vd); | ||
444 | |||
445 | /* | ||
446 | * Copy the DMA configuration from DMA descriptor to this hardware | ||
447 | * channel. | ||
448 | */ | ||
449 | sprd_dma_set_chn_config(schan, schan->cur_desc); | ||
450 | sprd_dma_set_uid(schan); | ||
451 | sprd_dma_enable_chn(schan); | ||
452 | |||
453 | if (schan->dev_id == SPRD_DMA_SOFTWARE_UID) | ||
454 | sprd_dma_soft_request(schan); | ||
455 | } | ||
456 | |||
457 | static void sprd_dma_stop(struct sprd_dma_chn *schan) | ||
458 | { | ||
459 | sprd_dma_stop_and_disable(schan); | ||
460 | sprd_dma_unset_uid(schan); | ||
461 | sprd_dma_clear_int(schan); | ||
462 | } | ||
463 | |||
464 | static bool sprd_dma_check_trans_done(struct sprd_dma_desc *sdesc, | ||
465 | enum sprd_dma_int_type int_type, | ||
466 | enum sprd_dma_req_mode req_mode) | ||
467 | { | ||
468 | if (int_type == SPRD_DMA_NO_INT) | ||
469 | return false; | ||
470 | |||
471 | if (int_type >= req_mode + 1) | ||
472 | return true; | ||
473 | else | ||
474 | return false; | ||
475 | } | ||
476 | |||
477 | static irqreturn_t dma_irq_handle(int irq, void *dev_id) | ||
478 | { | ||
479 | struct sprd_dma_dev *sdev = (struct sprd_dma_dev *)dev_id; | ||
480 | u32 irq_status = readl(sdev->glb_base + SPRD_DMA_GLB_INT_MSK_STS); | ||
481 | struct sprd_dma_chn *schan; | ||
482 | struct sprd_dma_desc *sdesc; | ||
483 | enum sprd_dma_req_mode req_type; | ||
484 | enum sprd_dma_int_type int_type; | ||
485 | bool trans_done = false; | ||
486 | u32 i; | ||
487 | |||
488 | while (irq_status) { | ||
489 | i = __ffs(irq_status); | ||
490 | irq_status &= (irq_status - 1); | ||
491 | schan = &sdev->channels[i]; | ||
492 | |||
493 | spin_lock(&schan->vc.lock); | ||
494 | int_type = sprd_dma_get_int_type(schan); | ||
495 | req_type = sprd_dma_get_req_type(schan); | ||
496 | sprd_dma_clear_int(schan); | ||
497 | |||
498 | sdesc = schan->cur_desc; | ||
499 | |||
500 | /* Check if the dma request descriptor is done. */ | ||
501 | trans_done = sprd_dma_check_trans_done(sdesc, int_type, | ||
502 | req_type); | ||
503 | if (trans_done == true) { | ||
504 | vchan_cookie_complete(&sdesc->vd); | ||
505 | schan->cur_desc = NULL; | ||
506 | sprd_dma_start(schan); | ||
507 | } | ||
508 | spin_unlock(&schan->vc.lock); | ||
509 | } | ||
510 | |||
511 | return IRQ_HANDLED; | ||
512 | } | ||
513 | |||
514 | static int sprd_dma_alloc_chan_resources(struct dma_chan *chan) | ||
515 | { | ||
516 | struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); | ||
517 | int ret; | ||
518 | |||
519 | ret = pm_runtime_get_sync(chan->device->dev); | ||
520 | if (ret < 0) | ||
521 | return ret; | ||
522 | |||
523 | schan->dev_id = SPRD_DMA_SOFTWARE_UID; | ||
524 | return 0; | ||
525 | } | ||
526 | |||
527 | static void sprd_dma_free_chan_resources(struct dma_chan *chan) | ||
528 | { | ||
529 | struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); | ||
530 | unsigned long flags; | ||
531 | |||
532 | spin_lock_irqsave(&schan->vc.lock, flags); | ||
533 | sprd_dma_stop(schan); | ||
534 | spin_unlock_irqrestore(&schan->vc.lock, flags); | ||
535 | |||
536 | vchan_free_chan_resources(&schan->vc); | ||
537 | pm_runtime_put(chan->device->dev); | ||
538 | } | ||
539 | |||
540 | static enum dma_status sprd_dma_tx_status(struct dma_chan *chan, | ||
541 | dma_cookie_t cookie, | ||
542 | struct dma_tx_state *txstate) | ||
543 | { | ||
544 | struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); | ||
545 | struct virt_dma_desc *vd; | ||
546 | unsigned long flags; | ||
547 | enum dma_status ret; | ||
548 | u32 pos; | ||
549 | |||
550 | ret = dma_cookie_status(chan, cookie, txstate); | ||
551 | if (ret == DMA_COMPLETE || !txstate) | ||
552 | return ret; | ||
553 | |||
554 | spin_lock_irqsave(&schan->vc.lock, flags); | ||
555 | vd = vchan_find_desc(&schan->vc, cookie); | ||
556 | if (vd) { | ||
557 | struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd); | ||
558 | struct sprd_dma_chn_hw *hw = &sdesc->chn_hw; | ||
559 | |||
560 | if (hw->trsc_len > 0) | ||
561 | pos = hw->trsc_len; | ||
562 | else if (hw->blk_len > 0) | ||
563 | pos = hw->blk_len; | ||
564 | else if (hw->frg_len > 0) | ||
565 | pos = hw->frg_len; | ||
566 | else | ||
567 | pos = 0; | ||
568 | } else if (schan->cur_desc && schan->cur_desc->vd.tx.cookie == cookie) { | ||
569 | pos = sprd_dma_get_dst_addr(schan); | ||
570 | } else { | ||
571 | pos = 0; | ||
572 | } | ||
573 | spin_unlock_irqrestore(&schan->vc.lock, flags); | ||
574 | |||
575 | dma_set_residue(txstate, pos); | ||
576 | return ret; | ||
577 | } | ||
578 | |||
579 | static void sprd_dma_issue_pending(struct dma_chan *chan) | ||
580 | { | ||
581 | struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); | ||
582 | unsigned long flags; | ||
583 | |||
584 | spin_lock_irqsave(&schan->vc.lock, flags); | ||
585 | if (vchan_issue_pending(&schan->vc) && !schan->cur_desc) | ||
586 | sprd_dma_start(schan); | ||
587 | spin_unlock_irqrestore(&schan->vc.lock, flags); | ||
588 | } | ||
589 | |||
590 | static int sprd_dma_config(struct dma_chan *chan, struct sprd_dma_desc *sdesc, | ||
591 | dma_addr_t dest, dma_addr_t src, size_t len) | ||
592 | { | ||
593 | struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan); | ||
594 | struct sprd_dma_chn_hw *hw = &sdesc->chn_hw; | ||
595 | u32 datawidth, src_step, des_step, fragment_len; | ||
596 | u32 block_len, req_mode, irq_mode, transcation_len; | ||
597 | u32 fix_mode = 0, fix_en = 0; | ||
598 | |||
599 | if (IS_ALIGNED(len, 4)) { | ||
600 | datawidth = 2; | ||
601 | src_step = 4; | ||
602 | des_step = 4; | ||
603 | } else if (IS_ALIGNED(len, 2)) { | ||
604 | datawidth = 1; | ||
605 | src_step = 2; | ||
606 | des_step = 2; | ||
607 | } else { | ||
608 | datawidth = 0; | ||
609 | src_step = 1; | ||
610 | des_step = 1; | ||
611 | } | ||
612 | |||
613 | fragment_len = SPRD_DMA_MEMCPY_MIN_SIZE; | ||
614 | if (len <= SPRD_DMA_BLK_LEN_MASK) { | ||
615 | block_len = len; | ||
616 | transcation_len = 0; | ||
617 | req_mode = SPRD_DMA_BLK_REQ; | ||
618 | irq_mode = SPRD_DMA_BLK_INT; | ||
619 | } else { | ||
620 | block_len = SPRD_DMA_MEMCPY_MIN_SIZE; | ||
621 | transcation_len = len; | ||
622 | req_mode = SPRD_DMA_TRANS_REQ; | ||
623 | irq_mode = SPRD_DMA_TRANS_INT; | ||
624 | } | ||
625 | |||
626 | hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET; | ||
627 | hw->wrap_ptr = (u32)((src >> SPRD_DMA_HIGH_ADDR_OFFSET) & | ||
628 | SPRD_DMA_HIGH_ADDR_MASK); | ||
629 | hw->wrap_to = (u32)((dest >> SPRD_DMA_HIGH_ADDR_OFFSET) & | ||
630 | SPRD_DMA_HIGH_ADDR_MASK); | ||
631 | |||
632 | hw->src_addr = (u32)(src & SPRD_DMA_LOW_ADDR_MASK); | ||
633 | hw->des_addr = (u32)(dest & SPRD_DMA_LOW_ADDR_MASK); | ||
634 | |||
635 | if ((src_step != 0 && des_step != 0) || (src_step | des_step) == 0) { | ||
636 | fix_en = 0; | ||
637 | } else { | ||
638 | fix_en = 1; | ||
639 | if (src_step) | ||
640 | fix_mode = 1; | ||
641 | else | ||
642 | fix_mode = 0; | ||
643 | } | ||
644 | |||
645 | hw->frg_len = datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET | | ||
646 | datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET | | ||
647 | req_mode << SPRD_DMA_REQ_MODE_OFFSET | | ||
648 | fix_mode << SPRD_DMA_FIX_SEL_OFFSET | | ||
649 | fix_en << SPRD_DMA_FIX_EN_OFFSET | | ||
650 | (fragment_len & SPRD_DMA_FRG_LEN_MASK); | ||
651 | hw->blk_len = block_len & SPRD_DMA_BLK_LEN_MASK; | ||
652 | |||
653 | hw->intc = SPRD_DMA_CFG_ERR_INT_EN; | ||
654 | |||
655 | switch (irq_mode) { | ||
656 | case SPRD_DMA_NO_INT: | ||
657 | break; | ||
658 | |||
659 | case SPRD_DMA_FRAG_INT: | ||
660 | hw->intc |= SPRD_DMA_FRAG_INT_EN; | ||
661 | break; | ||
662 | |||
663 | case SPRD_DMA_BLK_INT: | ||
664 | hw->intc |= SPRD_DMA_BLK_INT_EN; | ||
665 | break; | ||
666 | |||
667 | case SPRD_DMA_BLK_FRAG_INT: | ||
668 | hw->intc |= SPRD_DMA_BLK_INT_EN | SPRD_DMA_FRAG_INT_EN; | ||
669 | break; | ||
670 | |||
671 | case SPRD_DMA_TRANS_INT: | ||
672 | hw->intc |= SPRD_DMA_TRANS_INT_EN; | ||
673 | break; | ||
674 | |||
675 | case SPRD_DMA_TRANS_FRAG_INT: | ||
676 | hw->intc |= SPRD_DMA_TRANS_INT_EN | SPRD_DMA_FRAG_INT_EN; | ||
677 | break; | ||
678 | |||
679 | case SPRD_DMA_TRANS_BLK_INT: | ||
680 | hw->intc |= SPRD_DMA_TRANS_INT_EN | SPRD_DMA_BLK_INT_EN; | ||
681 | break; | ||
682 | |||
683 | case SPRD_DMA_LIST_INT: | ||
684 | hw->intc |= SPRD_DMA_LIST_INT_EN; | ||
685 | break; | ||
686 | |||
687 | case SPRD_DMA_CFGERR_INT: | ||
688 | hw->intc |= SPRD_DMA_CFG_ERR_INT_EN; | ||
689 | break; | ||
690 | |||
691 | default: | ||
692 | dev_err(sdev->dma_dev.dev, "invalid irq mode\n"); | ||
693 | return -EINVAL; | ||
694 | } | ||
695 | |||
696 | if (transaction_len == 0) | ||
697 | hw->trsc_len = block_len & SPRD_DMA_TRSC_LEN_MASK; | ||
698 | else | ||
699 | hw->trsc_len = transaction_len & SPRD_DMA_TRSC_LEN_MASK; | ||
700 | |||
701 | hw->trsf_step = (des_step & SPRD_DMA_TRSF_STEP_MASK) << | ||
702 | SPRD_DMA_DEST_TRSF_STEP_OFFSET | | ||
703 | (src_step & SPRD_DMA_TRSF_STEP_MASK) << | ||
704 | SPRD_DMA_SRC_TRSF_STEP_OFFSET; | ||
705 | |||
706 | hw->frg_step = 0; | ||
707 | hw->src_blk_step = 0; | ||
708 | hw->des_blk_step = 0; | ||
709 | hw->src_blk_step = 0; | ||
710 | return 0; | ||
711 | } | ||
712 | |||
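Editor's note: the width/step and request-mode decisions made in sprd_dma_config() above are pure arithmetic, so they can be illustrated outside the kernel. The following stand-alone sketch is not driver code; BLK_LEN_MASK is only an assumed stand-in for SPRD_DMA_BLK_LEN_MASK and the lengths are example values. It reproduces the two choices: data width/step from the alignment of the transfer length, and block vs. transaction request mode from its size.

/*
 * Editorial sketch, not part of the driver: mimics the width/step and
 * request-mode selection in sprd_dma_config().  BLK_LEN_MASK is an assumed
 * stand-in for SPRD_DMA_BLK_LEN_MASK.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define BLK_LEN_MASK	0xffffU		/* assumption, not the real macro */

static void pick_width(size_t len, unsigned *datawidth, unsigned *step)
{
	if ((len & 0x3) == 0) {		/* word-aligned length */
		*datawidth = 2;
		*step = 4;
	} else if ((len & 0x1) == 0) {	/* half-word-aligned length */
		*datawidth = 1;
		*step = 2;
	} else {			/* fall back to byte accesses */
		*datawidth = 0;
		*step = 1;
	}
}

int main(void)
{
	size_t lens[] = { 64, 66, 67, 0x20000 };
	unsigned i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		unsigned dw, step;

		pick_width(lens[i], &dw, &step);
		printf("len=%zu: datawidth=%u step=%u mode=%s\n",
		       lens[i], dw, step,
		       lens[i] <= BLK_LEN_MASK ? "block" : "transaction");
	}
	return 0;
}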
713 | struct dma_async_tx_descriptor * | ||
714 | sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | ||
715 | size_t len, unsigned long flags) | ||
716 | { | ||
717 | struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); | ||
718 | struct sprd_dma_desc *sdesc; | ||
719 | int ret; | ||
720 | |||
721 | sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT); | ||
722 | if (!sdesc) | ||
723 | return NULL; | ||
724 | |||
725 | ret = sprd_dma_config(chan, sdesc, dest, src, len); | ||
726 | if (ret) { | ||
727 | kfree(sdesc); | ||
728 | return NULL; | ||
729 | } | ||
730 | |||
731 | return vchan_tx_prep(&schan->vc, &sdesc->vd, flags); | ||
732 | } | ||
733 | |||
734 | static int sprd_dma_pause(struct dma_chan *chan) | ||
735 | { | ||
736 | struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); | ||
737 | unsigned long flags; | ||
738 | |||
739 | spin_lock_irqsave(&schan->vc.lock, flags); | ||
740 | sprd_dma_pause_resume(schan, true); | ||
741 | spin_unlock_irqrestore(&schan->vc.lock, flags); | ||
742 | |||
743 | return 0; | ||
744 | } | ||
745 | |||
746 | static int sprd_dma_resume(struct dma_chan *chan) | ||
747 | { | ||
748 | struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); | ||
749 | unsigned long flags; | ||
750 | |||
751 | spin_lock_irqsave(&schan->vc.lock, flags); | ||
752 | sprd_dma_pause_resume(schan, false); | ||
753 | spin_unlock_irqrestore(&schan->vc.lock, flags); | ||
754 | |||
755 | return 0; | ||
756 | } | ||
757 | |||
758 | static int sprd_dma_terminate_all(struct dma_chan *chan) | ||
759 | { | ||
760 | struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); | ||
761 | unsigned long flags; | ||
762 | LIST_HEAD(head); | ||
763 | |||
764 | spin_lock_irqsave(&schan->vc.lock, flags); | ||
765 | sprd_dma_stop(schan); | ||
766 | |||
767 | vchan_get_all_descriptors(&schan->vc, &head); | ||
768 | spin_unlock_irqrestore(&schan->vc.lock, flags); | ||
769 | |||
770 | vchan_dma_desc_free_list(&schan->vc, &head); | ||
771 | return 0; | ||
772 | } | ||
773 | |||
774 | static void sprd_dma_free_desc(struct virt_dma_desc *vd) | ||
775 | { | ||
776 | struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd); | ||
777 | |||
778 | kfree(sdesc); | ||
779 | } | ||
780 | |||
781 | static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param) | ||
782 | { | ||
783 | struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); | ||
784 | struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan); | ||
785 | u32 req = *(u32 *)param; | ||
786 | |||
787 | if (req < sdev->total_chns) | ||
788 | return req == schan->chn_num + 1; | ||
789 | else | ||
790 | return false; | ||
791 | } | ||
792 | |||
793 | static int sprd_dma_probe(struct platform_device *pdev) | ||
794 | { | ||
795 | struct device_node *np = pdev->dev.of_node; | ||
796 | struct sprd_dma_dev *sdev; | ||
797 | struct sprd_dma_chn *dma_chn; | ||
798 | struct resource *res; | ||
799 | u32 chn_count; | ||
800 | int ret, i; | ||
801 | |||
802 | ret = device_property_read_u32(&pdev->dev, "#dma-channels", &chn_count); | ||
803 | if (ret) { | ||
804 | dev_err(&pdev->dev, "get dma channels count failed\n"); | ||
805 | return ret; | ||
806 | } | ||
807 | |||
808 | sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev) + | ||
809 | sizeof(*dma_chn) * chn_count, | ||
810 | GFP_KERNEL); | ||
811 | if (!sdev) | ||
812 | return -ENOMEM; | ||
813 | |||
814 | sdev->clk = devm_clk_get(&pdev->dev, "enable"); | ||
815 | if (IS_ERR(sdev->clk)) { | ||
816 | dev_err(&pdev->dev, "get enable clock failed\n"); | ||
817 | return PTR_ERR(sdev->clk); | ||
818 | } | ||
819 | |||
820 | /* ashb clock is optional for AGCP DMA */ | ||
821 | sdev->ashb_clk = devm_clk_get(&pdev->dev, "ashb_eb"); | ||
822 | if (IS_ERR(sdev->ashb_clk)) | ||
823 | dev_warn(&pdev->dev, "no optional ashb eb clock\n"); | ||
824 | |||
825 | /* | ||
826 | * We have three DMA controllers: AP DMA, AON DMA and AGCP DMA. The | ||
827 | * AGCP DMA controller may or may not request the irq: when it does | ||
828 | * not request the irq, DMA interrupts cannot resume the system, | ||
829 | * which saves system power. Thus the DMA interrupts property | ||
830 | * should be optional. | ||
831 | */ | ||
832 | sdev->irq = platform_get_irq(pdev, 0); | ||
833 | if (sdev->irq > 0) { | ||
834 | ret = devm_request_irq(&pdev->dev, sdev->irq, dma_irq_handle, | ||
835 | 0, "sprd_dma", (void *)sdev); | ||
836 | if (ret < 0) { | ||
837 | dev_err(&pdev->dev, "request dma irq failed\n"); | ||
838 | return ret; | ||
839 | } | ||
840 | } else { | ||
841 | dev_warn(&pdev->dev, "no interrupts for the dma controller\n"); | ||
842 | } | ||
843 | |||
844 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
845 | sdev->glb_base = devm_ioremap_nocache(&pdev->dev, res->start, | ||
846 | resource_size(res)); | ||
847 | if (!sdev->glb_base) | ||
848 | return -ENOMEM; | ||
849 | |||
850 | dma_cap_set(DMA_MEMCPY, sdev->dma_dev.cap_mask); | ||
851 | sdev->total_chns = chn_count; | ||
852 | sdev->dma_dev.chancnt = chn_count; | ||
853 | INIT_LIST_HEAD(&sdev->dma_dev.channels); | ||
854 | INIT_LIST_HEAD(&sdev->dma_dev.global_node); | ||
855 | sdev->dma_dev.dev = &pdev->dev; | ||
856 | sdev->dma_dev.device_alloc_chan_resources = sprd_dma_alloc_chan_resources; | ||
857 | sdev->dma_dev.device_free_chan_resources = sprd_dma_free_chan_resources; | ||
858 | sdev->dma_dev.device_tx_status = sprd_dma_tx_status; | ||
859 | sdev->dma_dev.device_issue_pending = sprd_dma_issue_pending; | ||
860 | sdev->dma_dev.device_prep_dma_memcpy = sprd_dma_prep_dma_memcpy; | ||
861 | sdev->dma_dev.device_pause = sprd_dma_pause; | ||
862 | sdev->dma_dev.device_resume = sprd_dma_resume; | ||
863 | sdev->dma_dev.device_terminate_all = sprd_dma_terminate_all; | ||
864 | |||
865 | for (i = 0; i < chn_count; i++) { | ||
866 | dma_chn = &sdev->channels[i]; | ||
867 | dma_chn->chn_num = i; | ||
868 | dma_chn->cur_desc = NULL; | ||
869 | /* Get each channel's register base address. */ | ||
870 | dma_chn->chn_base = sdev->glb_base + SPRD_DMA_CHN_REG_OFFSET + | ||
871 | SPRD_DMA_CHN_REG_LENGTH * i; | ||
872 | |||
873 | dma_chn->vc.desc_free = sprd_dma_free_desc; | ||
874 | vchan_init(&dma_chn->vc, &sdev->dma_dev); | ||
875 | } | ||
876 | |||
877 | platform_set_drvdata(pdev, sdev); | ||
878 | ret = sprd_dma_enable(sdev); | ||
879 | if (ret) | ||
880 | return ret; | ||
881 | |||
882 | pm_runtime_set_active(&pdev->dev); | ||
883 | pm_runtime_enable(&pdev->dev); | ||
884 | |||
885 | ret = pm_runtime_get_sync(&pdev->dev); | ||
886 | if (ret < 0) | ||
887 | goto err_rpm; | ||
888 | |||
889 | ret = dma_async_device_register(&sdev->dma_dev); | ||
890 | if (ret < 0) { | ||
891 | dev_err(&pdev->dev, "register dma device failed:%d\n", ret); | ||
892 | goto err_register; | ||
893 | } | ||
894 | |||
895 | sprd_dma_info.dma_cap = sdev->dma_dev.cap_mask; | ||
896 | ret = of_dma_controller_register(np, of_dma_simple_xlate, | ||
897 | &sprd_dma_info); | ||
898 | if (ret) | ||
899 | goto err_of_register; | ||
900 | |||
901 | pm_runtime_put(&pdev->dev); | ||
902 | return 0; | ||
903 | |||
904 | err_of_register: | ||
905 | dma_async_device_unregister(&sdev->dma_dev); | ||
906 | err_register: | ||
907 | pm_runtime_put_noidle(&pdev->dev); | ||
908 | pm_runtime_disable(&pdev->dev); | ||
909 | err_rpm: | ||
910 | sprd_dma_disable(sdev); | ||
911 | return ret; | ||
912 | } | ||
913 | |||
914 | static int sprd_dma_remove(struct platform_device *pdev) | ||
915 | { | ||
916 | struct sprd_dma_dev *sdev = platform_get_drvdata(pdev); | ||
917 | struct sprd_dma_chn *c, *cn; | ||
918 | int ret; | ||
919 | |||
920 | ret = pm_runtime_get_sync(&pdev->dev); | ||
921 | if (ret < 0) | ||
922 | return ret; | ||
923 | |||
924 | /* explicitly free the irq */ | ||
925 | if (sdev->irq > 0) | ||
926 | devm_free_irq(&pdev->dev, sdev->irq, sdev); | ||
927 | |||
928 | list_for_each_entry_safe(c, cn, &sdev->dma_dev.channels, | ||
929 | vc.chan.device_node) { | ||
930 | list_del(&c->vc.chan.device_node); | ||
931 | tasklet_kill(&c->vc.task); | ||
932 | } | ||
933 | |||
934 | of_dma_controller_free(pdev->dev.of_node); | ||
935 | dma_async_device_unregister(&sdev->dma_dev); | ||
936 | sprd_dma_disable(sdev); | ||
937 | |||
938 | pm_runtime_put_noidle(&pdev->dev); | ||
939 | pm_runtime_disable(&pdev->dev); | ||
940 | return 0; | ||
941 | } | ||
942 | |||
943 | static const struct of_device_id sprd_dma_match[] = { | ||
944 | { .compatible = "sprd,sc9860-dma", }, | ||
945 | {}, | ||
946 | }; | ||
947 | |||
948 | static int __maybe_unused sprd_dma_runtime_suspend(struct device *dev) | ||
949 | { | ||
950 | struct sprd_dma_dev *sdev = dev_get_drvdata(dev); | ||
951 | |||
952 | sprd_dma_disable(sdev); | ||
953 | return 0; | ||
954 | } | ||
955 | |||
956 | static int __maybe_unused sprd_dma_runtime_resume(struct device *dev) | ||
957 | { | ||
958 | struct sprd_dma_dev *sdev = dev_get_drvdata(dev); | ||
959 | int ret; | ||
960 | |||
961 | ret = sprd_dma_enable(sdev); | ||
962 | if (ret) | ||
963 | dev_err(sdev->dma_dev.dev, "enable dma failed\n"); | ||
964 | |||
965 | return ret; | ||
966 | } | ||
967 | |||
968 | static const struct dev_pm_ops sprd_dma_pm_ops = { | ||
969 | SET_RUNTIME_PM_OPS(sprd_dma_runtime_suspend, | ||
970 | sprd_dma_runtime_resume, | ||
971 | NULL) | ||
972 | }; | ||
973 | |||
974 | static struct platform_driver sprd_dma_driver = { | ||
975 | .probe = sprd_dma_probe, | ||
976 | .remove = sprd_dma_remove, | ||
977 | .driver = { | ||
978 | .name = "sprd-dma", | ||
979 | .of_match_table = sprd_dma_match, | ||
980 | .pm = &sprd_dma_pm_ops, | ||
981 | }, | ||
982 | }; | ||
983 | module_platform_driver(sprd_dma_driver); | ||
984 | |||
985 | MODULE_LICENSE("GPL v2"); | ||
986 | MODULE_DESCRIPTION("DMA driver for Spreadtrum"); | ||
987 | MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>"); | ||
988 | MODULE_ALIAS("platform:sprd-dma"); | ||
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c new file mode 100644 index 000000000000..d5db0f6e1ff8 --- /dev/null +++ b/drivers/dma/stm32-dmamux.c | |||
@@ -0,0 +1,327 @@ | |||
1 | /* | ||
2 | * | ||
3 | * Copyright (C) STMicroelectronics SA 2017 | ||
4 | * Author(s): M'boumba Cedric Madianga <cedric.madianga@gmail.com> | ||
5 | * Pierre-Yves Mordret <pierre-yves.mordret@st.com> | ||
6 | * | ||
7 | * License terms: GPL V2.0. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of the GNU General Public License version 2 as published by | ||
11 | * the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but | ||
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more | ||
16 | * details. | ||
17 | * | ||
18 | * DMA Router driver for STM32 DMA MUX | ||
19 | * | ||
20 | * Based on TI DMA Crossbar driver | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #include <linux/clk.h> | ||
25 | #include <linux/delay.h> | ||
26 | #include <linux/err.h> | ||
27 | #include <linux/init.h> | ||
28 | #include <linux/module.h> | ||
29 | #include <linux/of_device.h> | ||
30 | #include <linux/of_dma.h> | ||
31 | #include <linux/reset.h> | ||
32 | #include <linux/slab.h> | ||
33 | #include <linux/spinlock.h> | ||
34 | |||
35 | #define STM32_DMAMUX_CCR(x) (0x4 * (x)) | ||
36 | #define STM32_DMAMUX_MAX_DMA_REQUESTS 32 | ||
37 | #define STM32_DMAMUX_MAX_REQUESTS 255 | ||
38 | |||
39 | struct stm32_dmamux { | ||
40 | u32 master; | ||
41 | u32 request; | ||
42 | u32 chan_id; | ||
43 | }; | ||
44 | |||
45 | struct stm32_dmamux_data { | ||
46 | struct dma_router dmarouter; | ||
47 | struct clk *clk; | ||
48 | struct reset_control *rst; | ||
49 | void __iomem *iomem; | ||
50 | u32 dma_requests; /* Number of DMA requests connected to DMAMUX */ | ||
51 | u32 dmamux_requests; /* Number of DMA requests routed toward DMAs */ | ||
52 | spinlock_t lock; /* Protects register access */ | ||
53 | unsigned long *dma_inuse; /* Used DMA channel */ | ||
54 | u32 dma_reqs[]; /* Number of DMA requests per DMA master. | ||
55 | * [0] holds the number of DMA masters. | ||
56 | * To be kept at the very end of this structure. | ||
57 | */ | ||
58 | }; | ||
59 | |||
60 | static inline u32 stm32_dmamux_read(void __iomem *iomem, u32 reg) | ||
61 | { | ||
62 | return readl_relaxed(iomem + reg); | ||
63 | } | ||
64 | |||
65 | static inline void stm32_dmamux_write(void __iomem *iomem, u32 reg, u32 val) | ||
66 | { | ||
67 | writel_relaxed(val, iomem + reg); | ||
68 | } | ||
69 | |||
70 | static void stm32_dmamux_free(struct device *dev, void *route_data) | ||
71 | { | ||
72 | struct stm32_dmamux_data *dmamux = dev_get_drvdata(dev); | ||
73 | struct stm32_dmamux *mux = route_data; | ||
74 | unsigned long flags; | ||
75 | |||
76 | /* Clear dma request */ | ||
77 | spin_lock_irqsave(&dmamux->lock, flags); | ||
78 | |||
79 | stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id), 0); | ||
80 | clear_bit(mux->chan_id, dmamux->dma_inuse); | ||
81 | |||
82 | if (!IS_ERR(dmamux->clk)) | ||
83 | clk_disable(dmamux->clk); | ||
84 | |||
85 | spin_unlock_irqrestore(&dmamux->lock, flags); | ||
86 | |||
87 | dev_dbg(dev, "Unmapping DMAMUX(%u) to DMA%u(%u)\n", | ||
88 | mux->request, mux->master, mux->chan_id); | ||
89 | |||
90 | kfree(mux); | ||
91 | } | ||
92 | |||
93 | static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec, | ||
94 | struct of_dma *ofdma) | ||
95 | { | ||
96 | struct platform_device *pdev = of_find_device_by_node(ofdma->of_node); | ||
97 | struct stm32_dmamux_data *dmamux = platform_get_drvdata(pdev); | ||
98 | struct stm32_dmamux *mux; | ||
99 | u32 i, min, max; | ||
100 | int ret; | ||
101 | unsigned long flags; | ||
102 | |||
103 | if (dma_spec->args_count != 3) { | ||
104 | dev_err(&pdev->dev, "invalid number of dma mux args\n"); | ||
105 | return ERR_PTR(-EINVAL); | ||
106 | } | ||
107 | |||
108 | if (dma_spec->args[0] > dmamux->dmamux_requests) { | ||
109 | dev_err(&pdev->dev, "invalid mux request number: %d\n", | ||
110 | dma_spec->args[0]); | ||
111 | return ERR_PTR(-EINVAL); | ||
112 | } | ||
113 | |||
114 | mux = kzalloc(sizeof(*mux), GFP_KERNEL); | ||
115 | if (!mux) | ||
116 | return ERR_PTR(-ENOMEM); | ||
117 | |||
118 | spin_lock_irqsave(&dmamux->lock, flags); | ||
119 | mux->chan_id = find_first_zero_bit(dmamux->dma_inuse, | ||
120 | dmamux->dma_requests); | ||
121 | set_bit(mux->chan_id, dmamux->dma_inuse); | ||
122 | spin_unlock_irqrestore(&dmamux->lock, flags); | ||
123 | |||
124 | if (mux->chan_id == dmamux->dma_requests) { | ||
125 | dev_err(&pdev->dev, "Run out of free DMA requests\n"); | ||
126 | ret = -ENOMEM; | ||
127 | goto error; | ||
128 | } | ||
129 | |||
130 | /* Look for DMA Master */ | ||
131 | for (i = 1, min = 0, max = dmamux->dma_reqs[i]; | ||
132 | i <= dmamux->dma_reqs[0]; | ||
133 | min += dmamux->dma_reqs[i], max += dmamux->dma_reqs[++i]) | ||
134 | if (mux->chan_id < max) | ||
135 | break; | ||
136 | mux->master = i - 1; | ||
137 | |||
138 | /* The of_node_put() will be done in of_dma_router_xlate function */ | ||
139 | dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", i - 1); | ||
140 | if (!dma_spec->np) { | ||
141 | dev_err(&pdev->dev, "can't get dma master\n"); | ||
142 | ret = -EINVAL; | ||
143 | goto error; | ||
144 | } | ||
145 | |||
146 | /* Set dma request */ | ||
147 | spin_lock_irqsave(&dmamux->lock, flags); | ||
148 | if (!IS_ERR(dmamux->clk)) { | ||
149 | ret = clk_enable(dmamux->clk); | ||
150 | if (ret < 0) { | ||
151 | spin_unlock_irqrestore(&dmamux->lock, flags); | ||
152 | dev_err(&pdev->dev, "clk_enable issue: %d\n", ret); | ||
153 | goto error; | ||
154 | } | ||
155 | } | ||
156 | spin_unlock_irqrestore(&dmamux->lock, flags); | ||
157 | |||
158 | mux->request = dma_spec->args[0]; | ||
159 | |||
160 | /* craft DMA spec */ | ||
161 | dma_spec->args[3] = dma_spec->args[2]; | ||
162 | dma_spec->args[2] = dma_spec->args[1]; | ||
163 | dma_spec->args[1] = 0; | ||
164 | dma_spec->args[0] = mux->chan_id - min; | ||
165 | dma_spec->args_count = 4; | ||
166 | |||
167 | stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id), | ||
168 | mux->request); | ||
169 | dev_dbg(&pdev->dev, "Mapping DMAMUX(%u) to DMA%u(%u)\n", | ||
170 | mux->request, mux->master, mux->chan_id); | ||
171 | |||
172 | return mux; | ||
173 | |||
174 | error: | ||
175 | clear_bit(mux->chan_id, dmamux->dma_inuse); | ||
176 | kfree(mux); | ||
177 | return ERR_PTR(ret); | ||
178 | } | ||
179 | |||
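Editor's note: the master lookup above walks dma_reqs[], where dma_reqs[0] is the number of masters and dma_reqs[1..n] the per-master request counts, to find which DMA controller the allocated channel belongs to and its channel number local to that master. A stand-alone sketch of that walk follows; the request counts are invented example values, not taken from any SoC.

/*
 * Editorial sketch of the DMA-master lookup in stm32_dmamux_route_allocate():
 * dma_reqs[0] holds the number of masters, dma_reqs[1..] the request count of
 * each master.  The counts below are example values only; chan_id must stay
 * below the total request count, as guaranteed by dma_requests in the driver.
 */
#include <stdio.h>

static void lookup(const unsigned *dma_reqs, unsigned chan_id,
		   unsigned *master, unsigned *local)
{
	unsigned i, min, max;

	for (i = 1, min = 0, max = dma_reqs[i];
	     i <= dma_reqs[0];
	     min += dma_reqs[i], max += dma_reqs[++i])
		if (chan_id < max)
			break;

	*master = i - 1;	/* zero-based master index */
	*local = chan_id - min;	/* channel number local to that master */
}

int main(void)
{
	/* two masters, 8 requests each (example values) */
	unsigned dma_reqs[] = { 2, 8, 8 };
	unsigned chan_id, master, local;

	for (chan_id = 0; chan_id < 16; chan_id++) {
		lookup(dma_reqs, chan_id, &master, &local);
		printf("chan_id=%2u -> DMA%u channel %u\n",
		       chan_id, master, local);
	}
	return 0;
}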
180 | static const struct of_device_id stm32_stm32dma_master_match[] = { | ||
181 | { .compatible = "st,stm32-dma", }, | ||
182 | {}, | ||
183 | }; | ||
184 | |||
185 | static int stm32_dmamux_probe(struct platform_device *pdev) | ||
186 | { | ||
187 | struct device_node *node = pdev->dev.of_node; | ||
188 | const struct of_device_id *match; | ||
189 | struct device_node *dma_node; | ||
190 | struct stm32_dmamux_data *stm32_dmamux; | ||
191 | struct resource *res; | ||
192 | void __iomem *iomem; | ||
193 | int i, count, ret; | ||
194 | u32 dma_req; | ||
195 | |||
196 | if (!node) | ||
197 | return -ENODEV; | ||
198 | |||
199 | count = device_property_read_u32_array(&pdev->dev, "dma-masters", | ||
200 | NULL, 0); | ||
201 | if (count < 0) { | ||
202 | dev_err(&pdev->dev, "Can't get DMA master(s) node\n"); | ||
203 | return -ENODEV; | ||
204 | } | ||
205 | |||
206 | stm32_dmamux = devm_kzalloc(&pdev->dev, sizeof(*stm32_dmamux) + | ||
207 | sizeof(u32) * (count + 1), GFP_KERNEL); | ||
208 | if (!stm32_dmamux) | ||
209 | return -ENOMEM; | ||
210 | |||
211 | dma_req = 0; | ||
212 | for (i = 1; i <= count; i++) { | ||
213 | dma_node = of_parse_phandle(node, "dma-masters", i - 1); | ||
214 | |||
215 | match = of_match_node(stm32_stm32dma_master_match, dma_node); | ||
216 | if (!match) { | ||
217 | dev_err(&pdev->dev, "DMA master is not supported\n"); | ||
218 | of_node_put(dma_node); | ||
219 | return -EINVAL; | ||
220 | } | ||
221 | |||
222 | if (of_property_read_u32(dma_node, "dma-requests", | ||
223 | &stm32_dmamux->dma_reqs[i])) { | ||
224 | dev_info(&pdev->dev, | ||
225 | "Missing MUX output information, using %u.\n", | ||
226 | STM32_DMAMUX_MAX_DMA_REQUESTS); | ||
227 | stm32_dmamux->dma_reqs[i] = | ||
228 | STM32_DMAMUX_MAX_DMA_REQUESTS; | ||
229 | } | ||
230 | dma_req += stm32_dmamux->dma_reqs[i]; | ||
231 | of_node_put(dma_node); | ||
232 | } | ||
233 | |||
234 | if (dma_req > STM32_DMAMUX_MAX_DMA_REQUESTS) { | ||
235 | dev_err(&pdev->dev, "Too many DMA Master Requests to manage\n"); | ||
236 | return -ENODEV; | ||
237 | } | ||
238 | |||
239 | stm32_dmamux->dma_requests = dma_req; | ||
240 | stm32_dmamux->dma_reqs[0] = count; | ||
241 | stm32_dmamux->dma_inuse = devm_kcalloc(&pdev->dev, | ||
242 | BITS_TO_LONGS(dma_req), | ||
243 | sizeof(unsigned long), | ||
244 | GFP_KERNEL); | ||
245 | if (!stm32_dmamux->dma_inuse) | ||
246 | return -ENOMEM; | ||
247 | |||
248 | if (device_property_read_u32(&pdev->dev, "dma-requests", | ||
249 | &stm32_dmamux->dmamux_requests)) { | ||
250 | stm32_dmamux->dmamux_requests = STM32_DMAMUX_MAX_REQUESTS; | ||
251 | dev_warn(&pdev->dev, "DMAMUX defaulting on %u requests\n", | ||
252 | stm32_dmamux->dmamux_requests); | ||
253 | } | ||
254 | |||
255 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
256 | if (!res) | ||
257 | return -ENODEV; | ||
258 | |||
259 | iomem = devm_ioremap_resource(&pdev->dev, res); | ||
260 | if (IS_ERR(iomem)) | ||
261 | return PTR_ERR(iomem); | ||
262 | |||
263 | spin_lock_init(&stm32_dmamux->lock); | ||
264 | |||
265 | stm32_dmamux->clk = devm_clk_get(&pdev->dev, NULL); | ||
266 | if (IS_ERR(stm32_dmamux->clk)) { | ||
267 | ret = PTR_ERR(stm32_dmamux->clk); | ||
268 | if (ret == -EPROBE_DEFER) | ||
269 | dev_info(&pdev->dev, "Missing controller clock\n"); | ||
270 | return ret; | ||
271 | } | ||
272 | |||
273 | stm32_dmamux->rst = devm_reset_control_get(&pdev->dev, NULL); | ||
274 | if (!IS_ERR(stm32_dmamux->rst)) { | ||
275 | reset_control_assert(stm32_dmamux->rst); | ||
276 | udelay(2); | ||
277 | reset_control_deassert(stm32_dmamux->rst); | ||
278 | } | ||
279 | |||
280 | stm32_dmamux->iomem = iomem; | ||
281 | stm32_dmamux->dmarouter.dev = &pdev->dev; | ||
282 | stm32_dmamux->dmarouter.route_free = stm32_dmamux_free; | ||
283 | |||
284 | platform_set_drvdata(pdev, stm32_dmamux); | ||
285 | |||
286 | if (!IS_ERR(stm32_dmamux->clk)) { | ||
287 | ret = clk_prepare_enable(stm32_dmamux->clk); | ||
288 | if (ret < 0) { | ||
289 | dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret); | ||
290 | return ret; | ||
291 | } | ||
292 | } | ||
293 | |||
294 | /* Reset the dmamux */ | ||
295 | for (i = 0; i < stm32_dmamux->dma_requests; i++) | ||
296 | stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i), 0); | ||
297 | |||
298 | if (!IS_ERR(stm32_dmamux->clk)) | ||
299 | clk_disable(stm32_dmamux->clk); | ||
300 | |||
301 | return of_dma_router_register(node, stm32_dmamux_route_allocate, | ||
302 | &stm32_dmamux->dmarouter); | ||
303 | } | ||
304 | |||
305 | static const struct of_device_id stm32_dmamux_match[] = { | ||
306 | { .compatible = "st,stm32h7-dmamux" }, | ||
307 | {}, | ||
308 | }; | ||
309 | |||
310 | static struct platform_driver stm32_dmamux_driver = { | ||
311 | .probe = stm32_dmamux_probe, | ||
312 | .driver = { | ||
313 | .name = "stm32-dmamux", | ||
314 | .of_match_table = stm32_dmamux_match, | ||
315 | }, | ||
316 | }; | ||
317 | |||
318 | static int __init stm32_dmamux_init(void) | ||
319 | { | ||
320 | return platform_driver_register(&stm32_dmamux_driver); | ||
321 | } | ||
322 | arch_initcall(stm32_dmamux_init); | ||
323 | |||
324 | MODULE_DESCRIPTION("DMA Router driver for STM32 DMA MUX"); | ||
325 | MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>"); | ||
326 | MODULE_AUTHOR("Pierre-Yves Mordret <pierre-yves.mordret@st.com>"); | ||
327 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c new file mode 100644 index 000000000000..daa1602eb9f5 --- /dev/null +++ b/drivers/dma/stm32-mdma.c | |||
@@ -0,0 +1,1682 @@ | |||
1 | /* | ||
2 | * | ||
3 | * Copyright (C) STMicroelectronics SA 2017 | ||
4 | * Author(s): M'boumba Cedric Madianga <cedric.madianga@gmail.com> | ||
5 | * Pierre-Yves Mordret <pierre-yves.mordret@st.com> | ||
6 | * | ||
7 | * License terms: GPL V2.0. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of the GNU General Public License version 2 as published by | ||
11 | * the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but | ||
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more | ||
16 | * details. | ||
17 | * | ||
18 | * Driver for STM32 MDMA controller | ||
19 | * | ||
20 | * Inspired by stm32-dma.c and dma-jz4780.c | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #include <linux/clk.h> | ||
25 | #include <linux/delay.h> | ||
26 | #include <linux/dmaengine.h> | ||
27 | #include <linux/dma-mapping.h> | ||
28 | #include <linux/dmapool.h> | ||
29 | #include <linux/err.h> | ||
30 | #include <linux/init.h> | ||
31 | #include <linux/iopoll.h> | ||
32 | #include <linux/jiffies.h> | ||
33 | #include <linux/list.h> | ||
34 | #include <linux/log2.h> | ||
35 | #include <linux/module.h> | ||
36 | #include <linux/of.h> | ||
37 | #include <linux/of_device.h> | ||
38 | #include <linux/of_dma.h> | ||
39 | #include <linux/platform_device.h> | ||
40 | #include <linux/reset.h> | ||
41 | #include <linux/slab.h> | ||
42 | |||
43 | #include "virt-dma.h" | ||
44 | |||
45 | /* MDMA Generic getter/setter */ | ||
46 | #define STM32_MDMA_SHIFT(n) (ffs(n) - 1) | ||
47 | #define STM32_MDMA_SET(n, mask) (((n) << STM32_MDMA_SHIFT(mask)) & \ | ||
48 | (mask)) | ||
49 | #define STM32_MDMA_GET(n, mask) (((n) & (mask)) >> \ | ||
50 | STM32_MDMA_SHIFT(mask)) | ||
51 | |||
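Editor's note: STM32_MDMA_SET()/STM32_MDMA_GET() derive the field position from the mask itself, since ffs(mask) - 1 is the bit index of the mask's lowest set bit. A stand-alone sketch follows; FIELD_SET/FIELD_GET and the example priority-level mask value are illustrative names, not part of the driver.

/*
 * Editorial sketch of the STM32_MDMA_SET()/STM32_MDMA_GET() helpers: the
 * shift is computed from the mask via ffs(), so only the mask needs to be
 * defined per field.
 */
#include <stdio.h>
#include <stdint.h>

#define FIELD_SHIFT(mask)	(__builtin_ffs(mask) - 1)
#define FIELD_SET(n, mask)	(((n) << FIELD_SHIFT(mask)) & (mask))
#define FIELD_GET(n, mask)	(((n) & (mask)) >> FIELD_SHIFT(mask))

#define CCR_PL_MASK	0x000000c0U	/* priority level field, bits 7:6 */

int main(void)
{
	uint32_t ccr = 0;

	ccr |= FIELD_SET(2, CCR_PL_MASK);	/* program priority level 2 */
	printf("ccr = 0x%08x, PL = %u\n", ccr,
	       (unsigned)FIELD_GET(ccr, CCR_PL_MASK));
	return 0;
}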
52 | #define STM32_MDMA_GISR0 0x0000 /* MDMA Int Status Reg 1 */ | ||
53 | #define STM32_MDMA_GISR1 0x0004 /* MDMA Int Status Reg 2 */ | ||
54 | |||
55 | /* MDMA Channel x interrupt/status register */ | ||
56 | #define STM32_MDMA_CISR(x) (0x40 + 0x40 * (x)) /* x = 0..62 */ | ||
57 | #define STM32_MDMA_CISR_CRQA BIT(16) | ||
58 | #define STM32_MDMA_CISR_TCIF BIT(4) | ||
59 | #define STM32_MDMA_CISR_BTIF BIT(3) | ||
60 | #define STM32_MDMA_CISR_BRTIF BIT(2) | ||
61 | #define STM32_MDMA_CISR_CTCIF BIT(1) | ||
62 | #define STM32_MDMA_CISR_TEIF BIT(0) | ||
63 | |||
64 | /* MDMA Channel x interrupt flag clear register */ | ||
65 | #define STM32_MDMA_CIFCR(x) (0x44 + 0x40 * (x)) | ||
66 | #define STM32_MDMA_CIFCR_CLTCIF BIT(4) | ||
67 | #define STM32_MDMA_CIFCR_CBTIF BIT(3) | ||
68 | #define STM32_MDMA_CIFCR_CBRTIF BIT(2) | ||
69 | #define STM32_MDMA_CIFCR_CCTCIF BIT(1) | ||
70 | #define STM32_MDMA_CIFCR_CTEIF BIT(0) | ||
71 | #define STM32_MDMA_CIFCR_CLEAR_ALL (STM32_MDMA_CIFCR_CLTCIF \ | ||
72 | | STM32_MDMA_CIFCR_CBTIF \ | ||
73 | | STM32_MDMA_CIFCR_CBRTIF \ | ||
74 | | STM32_MDMA_CIFCR_CCTCIF \ | ||
75 | | STM32_MDMA_CIFCR_CTEIF) | ||
76 | |||
77 | /* MDMA Channel x error status register */ | ||
78 | #define STM32_MDMA_CESR(x) (0x48 + 0x40 * (x)) | ||
79 | #define STM32_MDMA_CESR_BSE BIT(11) | ||
80 | #define STM32_MDMA_CESR_ASR BIT(10) | ||
81 | #define STM32_MDMA_CESR_TEMD BIT(9) | ||
82 | #define STM32_MDMA_CESR_TELD BIT(8) | ||
83 | #define STM32_MDMA_CESR_TED BIT(7) | ||
84 | #define STM32_MDMA_CESR_TEA_MASK GENMASK(6, 0) | ||
85 | |||
86 | /* MDMA Channel x control register */ | ||
87 | #define STM32_MDMA_CCR(x) (0x4C + 0x40 * (x)) | ||
88 | #define STM32_MDMA_CCR_SWRQ BIT(16) | ||
89 | #define STM32_MDMA_CCR_WEX BIT(14) | ||
90 | #define STM32_MDMA_CCR_HEX BIT(13) | ||
91 | #define STM32_MDMA_CCR_BEX BIT(12) | ||
92 | #define STM32_MDMA_CCR_PL_MASK GENMASK(7, 6) | ||
93 | #define STM32_MDMA_CCR_PL(n) STM32_MDMA_SET(n, \ | ||
94 | STM32_MDMA_CCR_PL_MASK) | ||
95 | #define STM32_MDMA_CCR_TCIE BIT(5) | ||
96 | #define STM32_MDMA_CCR_BTIE BIT(4) | ||
97 | #define STM32_MDMA_CCR_BRTIE BIT(3) | ||
98 | #define STM32_MDMA_CCR_CTCIE BIT(2) | ||
99 | #define STM32_MDMA_CCR_TEIE BIT(1) | ||
100 | #define STM32_MDMA_CCR_EN BIT(0) | ||
101 | #define STM32_MDMA_CCR_IRQ_MASK (STM32_MDMA_CCR_TCIE \ | ||
102 | | STM32_MDMA_CCR_BTIE \ | ||
103 | | STM32_MDMA_CCR_BRTIE \ | ||
104 | | STM32_MDMA_CCR_CTCIE \ | ||
105 | | STM32_MDMA_CCR_TEIE) | ||
106 | |||
107 | /* MDMA Channel x transfer configuration register */ | ||
108 | #define STM32_MDMA_CTCR(x) (0x50 + 0x40 * (x)) | ||
109 | #define STM32_MDMA_CTCR_BWM BIT(31) | ||
110 | #define STM32_MDMA_CTCR_SWRM BIT(30) | ||
111 | #define STM32_MDMA_CTCR_TRGM_MSK GENMASK(29, 28) | ||
112 | #define STM32_MDMA_CTCR_TRGM(n) STM32_MDMA_SET((n), \ | ||
113 | STM32_MDMA_CTCR_TRGM_MSK) | ||
114 | #define STM32_MDMA_CTCR_TRGM_GET(n) STM32_MDMA_GET((n), \ | ||
115 | STM32_MDMA_CTCR_TRGM_MSK) | ||
116 | #define STM32_MDMA_CTCR_PAM_MASK GENMASK(27, 26) | ||
117 | #define STM32_MDMA_CTCR_PAM(n) STM32_MDMA_SET(n, \ | ||
118 | STM32_MDMA_CTCR_PAM_MASK) | ||
119 | #define STM32_MDMA_CTCR_PKE BIT(25) | ||
120 | #define STM32_MDMA_CTCR_TLEN_MSK GENMASK(24, 18) | ||
121 | #define STM32_MDMA_CTCR_TLEN(n) STM32_MDMA_SET((n), \ | ||
122 | STM32_MDMA_CTCR_TLEN_MSK) | ||
123 | #define STM32_MDMA_CTCR_TLEN_GET(n) STM32_MDMA_GET((n), \ | ||
124 | STM32_MDMA_CTCR_TLEN_MSK) | ||
125 | #define STM32_MDMA_CTCR_LEN2_MSK GENMASK(25, 18) | ||
126 | #define STM32_MDMA_CTCR_LEN2(n) STM32_MDMA_SET((n), \ | ||
127 | STM32_MDMA_CTCR_LEN2_MSK) | ||
128 | #define STM32_MDMA_CTCR_LEN2_GET(n) STM32_MDMA_GET((n), \ | ||
129 | STM32_MDMA_CTCR_LEN2_MSK) | ||
130 | #define STM32_MDMA_CTCR_DBURST_MASK GENMASK(17, 15) | ||
131 | #define STM32_MDMA_CTCR_DBURST(n) STM32_MDMA_SET(n, \ | ||
132 | STM32_MDMA_CTCR_DBURST_MASK) | ||
133 | #define STM32_MDMA_CTCR_SBURST_MASK GENMASK(14, 12) | ||
134 | #define STM32_MDMA_CTCR_SBURST(n) STM32_MDMA_SET(n, \ | ||
135 | STM32_MDMA_CTCR_SBURST_MASK) | ||
136 | #define STM32_MDMA_CTCR_DINCOS_MASK GENMASK(11, 10) | ||
137 | #define STM32_MDMA_CTCR_DINCOS(n) STM32_MDMA_SET((n), \ | ||
138 | STM32_MDMA_CTCR_DINCOS_MASK) | ||
139 | #define STM32_MDMA_CTCR_SINCOS_MASK GENMASK(9, 8) | ||
140 | #define STM32_MDMA_CTCR_SINCOS(n) STM32_MDMA_SET((n), \ | ||
141 | STM32_MDMA_CTCR_SINCOS_MASK) | ||
142 | #define STM32_MDMA_CTCR_DSIZE_MASK GENMASK(7, 6) | ||
143 | #define STM32_MDMA_CTCR_DSIZE(n) STM32_MDMA_SET(n, \ | ||
144 | STM32_MDMA_CTCR_DSIZE_MASK) | ||
145 | #define STM32_MDMA_CTCR_SSIZE_MASK GENMASK(5, 4) | ||
146 | #define STM32_MDMA_CTCR_SSIZE(n) STM32_MDMA_SET(n, \ | ||
147 | STM32_MDMA_CTCR_SSIZE_MASK) | ||
148 | #define STM32_MDMA_CTCR_DINC_MASK GENMASK(3, 2) | ||
149 | #define STM32_MDMA_CTCR_DINC(n) STM32_MDMA_SET((n), \ | ||
150 | STM32_MDMA_CTCR_DINC_MASK) | ||
151 | #define STM32_MDMA_CTCR_SINC_MASK GENMASK(1, 0) | ||
152 | #define STM32_MDMA_CTCR_SINC(n) STM32_MDMA_SET((n), \ | ||
153 | STM32_MDMA_CTCR_SINC_MASK) | ||
154 | #define STM32_MDMA_CTCR_CFG_MASK (STM32_MDMA_CTCR_SINC_MASK \ | ||
155 | | STM32_MDMA_CTCR_DINC_MASK \ | ||
156 | | STM32_MDMA_CTCR_SINCOS_MASK \ | ||
157 | | STM32_MDMA_CTCR_DINCOS_MASK \ | ||
158 | | STM32_MDMA_CTCR_LEN2_MSK \ | ||
159 | | STM32_MDMA_CTCR_TRGM_MSK) | ||
160 | |||
161 | /* MDMA Channel x block number of data register */ | ||
162 | #define STM32_MDMA_CBNDTR(x) (0x54 + 0x40 * (x)) | ||
163 | #define STM32_MDMA_CBNDTR_BRC_MK GENMASK(31, 20) | ||
164 | #define STM32_MDMA_CBNDTR_BRC(n) STM32_MDMA_SET(n, \ | ||
165 | STM32_MDMA_CBNDTR_BRC_MK) | ||
166 | #define STM32_MDMA_CBNDTR_BRC_GET(n) STM32_MDMA_GET((n), \ | ||
167 | STM32_MDMA_CBNDTR_BRC_MK) | ||
168 | |||
169 | #define STM32_MDMA_CBNDTR_BRDUM BIT(19) | ||
170 | #define STM32_MDMA_CBNDTR_BRSUM BIT(18) | ||
171 | #define STM32_MDMA_CBNDTR_BNDT_MASK GENMASK(16, 0) | ||
172 | #define STM32_MDMA_CBNDTR_BNDT(n) STM32_MDMA_SET(n, \ | ||
173 | STM32_MDMA_CBNDTR_BNDT_MASK) | ||
174 | |||
175 | /* MDMA Channel x source address register */ | ||
176 | #define STM32_MDMA_CSAR(x) (0x58 + 0x40 * (x)) | ||
177 | |||
178 | /* MDMA Channel x destination address register */ | ||
179 | #define STM32_MDMA_CDAR(x) (0x5C + 0x40 * (x)) | ||
180 | |||
181 | /* MDMA Channel x block repeat address update register */ | ||
182 | #define STM32_MDMA_CBRUR(x) (0x60 + 0x40 * (x)) | ||
183 | #define STM32_MDMA_CBRUR_DUV_MASK GENMASK(31, 16) | ||
184 | #define STM32_MDMA_CBRUR_DUV(n) STM32_MDMA_SET(n, \ | ||
185 | STM32_MDMA_CBRUR_DUV_MASK) | ||
186 | #define STM32_MDMA_CBRUR_SUV_MASK GENMASK(15, 0) | ||
187 | #define STM32_MDMA_CBRUR_SUV(n) STM32_MDMA_SET(n, \ | ||
188 | STM32_MDMA_CBRUR_SUV_MASK) | ||
189 | |||
190 | /* MDMA Channel x link address register */ | ||
191 | #define STM32_MDMA_CLAR(x) (0x64 + 0x40 * (x)) | ||
192 | |||
193 | /* MDMA Channel x trigger and bus selection register */ | ||
194 | #define STM32_MDMA_CTBR(x) (0x68 + 0x40 * (x)) | ||
195 | #define STM32_MDMA_CTBR_DBUS BIT(17) | ||
196 | #define STM32_MDMA_CTBR_SBUS BIT(16) | ||
197 | #define STM32_MDMA_CTBR_TSEL_MASK GENMASK(7, 0) | ||
198 | #define STM32_MDMA_CTBR_TSEL(n) STM32_MDMA_SET(n, \ | ||
199 | STM32_MDMA_CTBR_TSEL_MASK) | ||
200 | |||
201 | /* MDMA Channel x mask address register */ | ||
202 | #define STM32_MDMA_CMAR(x) (0x70 + 0x40 * (x)) | ||
203 | |||
204 | /* MDMA Channel x mask data register */ | ||
205 | #define STM32_MDMA_CMDR(x) (0x74 + 0x40 * (x)) | ||
206 | |||
207 | #define STM32_MDMA_MAX_BUF_LEN 128 | ||
208 | #define STM32_MDMA_MAX_BLOCK_LEN 65536 | ||
209 | #define STM32_MDMA_MAX_CHANNELS 63 | ||
210 | #define STM32_MDMA_MAX_REQUESTS 256 | ||
211 | #define STM32_MDMA_MAX_BURST 128 | ||
212 | #define STM32_MDMA_VERY_HIGH_PRIORITY 0x11 | ||
213 | |||
214 | enum stm32_mdma_trigger_mode { | ||
215 | STM32_MDMA_BUFFER, | ||
216 | STM32_MDMA_BLOCK, | ||
217 | STM32_MDMA_BLOCK_REP, | ||
218 | STM32_MDMA_LINKED_LIST, | ||
219 | }; | ||
220 | |||
221 | enum stm32_mdma_width { | ||
222 | STM32_MDMA_BYTE, | ||
223 | STM32_MDMA_HALF_WORD, | ||
224 | STM32_MDMA_WORD, | ||
225 | STM32_MDMA_DOUBLE_WORD, | ||
226 | }; | ||
227 | |||
228 | enum stm32_mdma_inc_mode { | ||
229 | STM32_MDMA_FIXED = 0, | ||
230 | STM32_MDMA_INC = 2, | ||
231 | STM32_MDMA_DEC = 3, | ||
232 | }; | ||
233 | |||
234 | struct stm32_mdma_chan_config { | ||
235 | u32 request; | ||
236 | u32 priority_level; | ||
237 | u32 transfer_config; | ||
238 | u32 mask_addr; | ||
239 | u32 mask_data; | ||
240 | }; | ||
241 | |||
242 | struct stm32_mdma_hwdesc { | ||
243 | u32 ctcr; | ||
244 | u32 cbndtr; | ||
245 | u32 csar; | ||
246 | u32 cdar; | ||
247 | u32 cbrur; | ||
248 | u32 clar; | ||
249 | u32 ctbr; | ||
250 | u32 dummy; | ||
251 | u32 cmar; | ||
252 | u32 cmdr; | ||
253 | } __aligned(64); | ||
254 | |||
255 | struct stm32_mdma_desc { | ||
256 | struct virt_dma_desc vdesc; | ||
257 | u32 ccr; | ||
258 | struct stm32_mdma_hwdesc *hwdesc; | ||
259 | dma_addr_t hwdesc_phys; | ||
260 | bool cyclic; | ||
261 | u32 count; | ||
262 | }; | ||
263 | |||
264 | struct stm32_mdma_chan { | ||
265 | struct virt_dma_chan vchan; | ||
266 | struct dma_pool *desc_pool; | ||
267 | u32 id; | ||
268 | struct stm32_mdma_desc *desc; | ||
269 | u32 curr_hwdesc; | ||
270 | struct dma_slave_config dma_config; | ||
271 | struct stm32_mdma_chan_config chan_config; | ||
272 | bool busy; | ||
273 | u32 mem_burst; | ||
274 | u32 mem_width; | ||
275 | }; | ||
276 | |||
277 | struct stm32_mdma_device { | ||
278 | struct dma_device ddev; | ||
279 | void __iomem *base; | ||
280 | struct clk *clk; | ||
281 | int irq; | ||
282 | struct reset_control *rst; | ||
283 | u32 nr_channels; | ||
284 | u32 nr_requests; | ||
285 | u32 nr_ahb_addr_masks; | ||
286 | struct stm32_mdma_chan chan[STM32_MDMA_MAX_CHANNELS]; | ||
287 | u32 ahb_addr_masks[]; | ||
288 | }; | ||
289 | |||
290 | static struct stm32_mdma_device *stm32_mdma_get_dev( | ||
291 | struct stm32_mdma_chan *chan) | ||
292 | { | ||
293 | return container_of(chan->vchan.chan.device, struct stm32_mdma_device, | ||
294 | ddev); | ||
295 | } | ||
296 | |||
297 | static struct stm32_mdma_chan *to_stm32_mdma_chan(struct dma_chan *c) | ||
298 | { | ||
299 | return container_of(c, struct stm32_mdma_chan, vchan.chan); | ||
300 | } | ||
301 | |||
302 | static struct stm32_mdma_desc *to_stm32_mdma_desc(struct virt_dma_desc *vdesc) | ||
303 | { | ||
304 | return container_of(vdesc, struct stm32_mdma_desc, vdesc); | ||
305 | } | ||
306 | |||
307 | static struct device *chan2dev(struct stm32_mdma_chan *chan) | ||
308 | { | ||
309 | return &chan->vchan.chan.dev->device; | ||
310 | } | ||
311 | |||
312 | static struct device *mdma2dev(struct stm32_mdma_device *mdma_dev) | ||
313 | { | ||
314 | return mdma_dev->ddev.dev; | ||
315 | } | ||
316 | |||
317 | static u32 stm32_mdma_read(struct stm32_mdma_device *dmadev, u32 reg) | ||
318 | { | ||
319 | return readl_relaxed(dmadev->base + reg); | ||
320 | } | ||
321 | |||
322 | static void stm32_mdma_write(struct stm32_mdma_device *dmadev, u32 reg, u32 val) | ||
323 | { | ||
324 | writel_relaxed(val, dmadev->base + reg); | ||
325 | } | ||
326 | |||
327 | static void stm32_mdma_set_bits(struct stm32_mdma_device *dmadev, u32 reg, | ||
328 | u32 mask) | ||
329 | { | ||
330 | void __iomem *addr = dmadev->base + reg; | ||
331 | |||
332 | writel_relaxed(readl_relaxed(addr) | mask, addr); | ||
333 | } | ||
334 | |||
335 | static void stm32_mdma_clr_bits(struct stm32_mdma_device *dmadev, u32 reg, | ||
336 | u32 mask) | ||
337 | { | ||
338 | void __iomem *addr = dmadev->base + reg; | ||
339 | |||
340 | writel_relaxed(readl_relaxed(addr) & ~mask, addr); | ||
341 | } | ||
342 | |||
343 | static struct stm32_mdma_desc *stm32_mdma_alloc_desc( | ||
344 | struct stm32_mdma_chan *chan, u32 count) | ||
345 | { | ||
346 | struct stm32_mdma_desc *desc; | ||
347 | |||
348 | desc = kzalloc(sizeof(*desc), GFP_NOWAIT); | ||
349 | if (!desc) | ||
350 | return NULL; | ||
351 | |||
352 | desc->hwdesc = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, | ||
353 | &desc->hwdesc_phys); | ||
354 | if (!desc->hwdesc) { | ||
355 | dev_err(chan2dev(chan), "Failed to allocate descriptor\n"); | ||
356 | kfree(desc); | ||
357 | return NULL; | ||
358 | } | ||
359 | |||
360 | desc->count = count; | ||
361 | |||
362 | return desc; | ||
363 | } | ||
364 | |||
365 | static void stm32_mdma_desc_free(struct virt_dma_desc *vdesc) | ||
366 | { | ||
367 | struct stm32_mdma_desc *desc = to_stm32_mdma_desc(vdesc); | ||
368 | struct stm32_mdma_chan *chan = to_stm32_mdma_chan(vdesc->tx.chan); | ||
369 | |||
370 | dma_pool_free(chan->desc_pool, desc->hwdesc, desc->hwdesc_phys); | ||
371 | kfree(desc); | ||
372 | } | ||
373 | |||
374 | static int stm32_mdma_get_width(struct stm32_mdma_chan *chan, | ||
375 | enum dma_slave_buswidth width) | ||
376 | { | ||
377 | switch (width) { | ||
378 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
379 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
380 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
381 | case DMA_SLAVE_BUSWIDTH_8_BYTES: | ||
382 | return ffs(width) - 1; | ||
383 | default: | ||
384 | dev_err(chan2dev(chan), "Dma bus width %i not supported\n", | ||
385 | width); | ||
386 | return -EINVAL; | ||
387 | } | ||
388 | } | ||
389 | |||
390 | static enum dma_slave_buswidth stm32_mdma_get_max_width(dma_addr_t addr, | ||
391 | u32 buf_len, u32 tlen) | ||
392 | { | ||
393 | enum dma_slave_buswidth max_width = DMA_SLAVE_BUSWIDTH_8_BYTES; | ||
394 | |||
395 | for (max_width = DMA_SLAVE_BUSWIDTH_8_BYTES; | ||
396 | max_width > DMA_SLAVE_BUSWIDTH_1_BYTE; | ||
397 | max_width >>= 1) { | ||
398 | /* | ||
399 | * Address and buffer length both have to be aligned on | ||
400 | * bus width | ||
401 | */ | ||
402 | if ((((buf_len | addr) & (max_width - 1)) == 0) && | ||
403 | tlen >= max_width) | ||
404 | break; | ||
405 | } | ||
406 | |||
407 | return max_width; | ||
408 | } | ||
409 | |||
410 | static u32 stm32_mdma_get_best_burst(u32 buf_len, u32 tlen, u32 max_burst, | ||
411 | enum dma_slave_buswidth width) | ||
412 | { | ||
413 | u32 best_burst = max_burst; | ||
414 | u32 burst_len = best_burst * width; | ||
415 | |||
416 | while ((burst_len > 0) && (tlen % burst_len)) { | ||
417 | best_burst = best_burst >> 1; | ||
418 | burst_len = best_burst * width; | ||
419 | } | ||
420 | |||
421 | return (best_burst > 0) ? best_burst : 1; | ||
422 | } | ||
423 | |||
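Editor's note: the two helpers above reduce to alignment arithmetic: the widest usable bus width is the largest power of two that divides both the address and the length and fits in the buffer transfer length, and the best burst is the largest power-of-two burst whose byte length divides the buffer transfer length. A stand-alone sketch with plain integers instead of the dmaengine enums (the address and lengths are example values):

/*
 * Editorial sketch of stm32_mdma_get_max_width() and
 * stm32_mdma_get_best_burst() using plain integers.
 */
#include <stdio.h>
#include <stdint.h>

static unsigned get_max_width(uint64_t addr, unsigned buf_len, unsigned tlen)
{
	unsigned width;

	/* try 8, 4, 2 bytes; fall back to 1-byte accesses */
	for (width = 8; width > 1; width >>= 1)
		if ((((buf_len | addr) & (width - 1)) == 0) && tlen >= width)
			break;
	return width;
}

static unsigned get_best_burst(unsigned tlen, unsigned max_burst,
			       unsigned width)
{
	unsigned best_burst = max_burst;
	unsigned burst_len = best_burst * width;

	/* halve the burst until its byte length divides the transfer length */
	while (burst_len > 0 && (tlen % burst_len)) {
		best_burst >>= 1;
		burst_len = best_burst * width;
	}
	return best_burst > 0 ? best_burst : 1;
}

int main(void)
{
	unsigned width = get_max_width(0x20001000, 4096, 128);
	unsigned burst = get_best_burst(128, 16, width);

	printf("width=%u bytes, burst=%u beats\n", width, burst);
	return 0;
}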
424 | static int stm32_mdma_disable_chan(struct stm32_mdma_chan *chan) | ||
425 | { | ||
426 | struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); | ||
427 | u32 ccr, cisr, id, reg; | ||
428 | int ret; | ||
429 | |||
430 | id = chan->id; | ||
431 | reg = STM32_MDMA_CCR(id); | ||
432 | |||
433 | /* Disable interrupts */ | ||
434 | stm32_mdma_clr_bits(dmadev, reg, STM32_MDMA_CCR_IRQ_MASK); | ||
435 | |||
436 | ccr = stm32_mdma_read(dmadev, reg); | ||
437 | if (ccr & STM32_MDMA_CCR_EN) { | ||
438 | stm32_mdma_clr_bits(dmadev, reg, STM32_MDMA_CCR_EN); | ||
439 | |||
440 | /* Ensure that any ongoing transfer has been completed */ | ||
441 | ret = readl_relaxed_poll_timeout_atomic( | ||
442 | dmadev->base + STM32_MDMA_CISR(id), cisr, | ||
443 | (cisr & STM32_MDMA_CISR_CTCIF), 10, 1000); | ||
444 | if (ret) { | ||
445 | dev_err(chan2dev(chan), "%s: timeout!\n", __func__); | ||
446 | return -EBUSY; | ||
447 | } | ||
448 | } | ||
449 | |||
450 | return 0; | ||
451 | } | ||
452 | |||
453 | static void stm32_mdma_stop(struct stm32_mdma_chan *chan) | ||
454 | { | ||
455 | struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); | ||
456 | u32 status; | ||
457 | int ret; | ||
458 | |||
459 | /* Disable DMA */ | ||
460 | ret = stm32_mdma_disable_chan(chan); | ||
461 | if (ret < 0) | ||
462 | return; | ||
463 | |||
464 | /* Clear interrupt status if it is there */ | ||
465 | status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id)); | ||
466 | if (status) { | ||
467 | dev_dbg(chan2dev(chan), "%s(): clearing interrupt: 0x%08x\n", | ||
468 | __func__, status); | ||
469 | stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(chan->id), status); | ||
470 | } | ||
471 | |||
472 | chan->busy = false; | ||
473 | } | ||
474 | |||
475 | static void stm32_mdma_set_bus(struct stm32_mdma_device *dmadev, u32 *ctbr, | ||
476 | u32 ctbr_mask, u32 src_addr) | ||
477 | { | ||
478 | u32 mask; | ||
479 | int i; | ||
480 | |||
481 | /* Check if memory device is on AHB or AXI */ | ||
482 | *ctbr &= ~ctbr_mask; | ||
483 | mask = src_addr & 0xF0000000; | ||
484 | for (i = 0; i < dmadev->nr_ahb_addr_masks; i++) { | ||
485 | if (mask == dmadev->ahb_addr_masks[i]) { | ||
486 | *ctbr |= ctbr_mask; | ||
487 | break; | ||
488 | } | ||
489 | } | ||
490 | } | ||
491 | |||
492 | static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan, | ||
493 | enum dma_transfer_direction direction, | ||
494 | u32 *mdma_ccr, u32 *mdma_ctcr, | ||
495 | u32 *mdma_ctbr, dma_addr_t addr, | ||
496 | u32 buf_len) | ||
497 | { | ||
498 | struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); | ||
499 | struct stm32_mdma_chan_config *chan_config = &chan->chan_config; | ||
500 | enum dma_slave_buswidth src_addr_width, dst_addr_width; | ||
501 | phys_addr_t src_addr, dst_addr; | ||
502 | int src_bus_width, dst_bus_width; | ||
503 | u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst; | ||
504 | u32 ccr, ctcr, ctbr, tlen; | ||
505 | |||
506 | src_addr_width = chan->dma_config.src_addr_width; | ||
507 | dst_addr_width = chan->dma_config.dst_addr_width; | ||
508 | src_maxburst = chan->dma_config.src_maxburst; | ||
509 | dst_maxburst = chan->dma_config.dst_maxburst; | ||
510 | |||
511 | ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)); | ||
512 | ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id)); | ||
513 | ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id)); | ||
514 | |||
515 | /* Enable HW request mode */ | ||
516 | ctcr &= ~STM32_MDMA_CTCR_SWRM; | ||
517 | |||
518 | /* Set DINC, SINC, DINCOS, SINCOS, TRGM and TLEN retrieved from DT */ | ||
519 | ctcr &= ~STM32_MDMA_CTCR_CFG_MASK; | ||
520 | ctcr |= chan_config->transfer_config & STM32_MDMA_CTCR_CFG_MASK; | ||
521 | |||
522 | /* | ||
523 | * For buffer transfer length (TLEN) we have to set | ||
524 | * the number of bytes - 1 in CTCR register | ||
525 | */ | ||
526 | tlen = STM32_MDMA_CTCR_LEN2_GET(ctcr); | ||
527 | ctcr &= ~STM32_MDMA_CTCR_LEN2_MSK; | ||
528 | ctcr |= STM32_MDMA_CTCR_TLEN((tlen - 1)); | ||
529 | |||
530 | /* Disable Pack Enable */ | ||
531 | ctcr &= ~STM32_MDMA_CTCR_PKE; | ||
532 | |||
533 | /* Check burst size constraints */ | ||
534 | if (src_maxburst * src_addr_width > STM32_MDMA_MAX_BURST || | ||
535 | dst_maxburst * dst_addr_width > STM32_MDMA_MAX_BURST) { | ||
536 | dev_err(chan2dev(chan), | ||
537 | "burst size * bus width higher than %d bytes\n", | ||
538 | STM32_MDMA_MAX_BURST); | ||
539 | return -EINVAL; | ||
540 | } | ||
541 | |||
542 | if ((!is_power_of_2(src_maxburst) && src_maxburst > 0) || | ||
543 | (!is_power_of_2(dst_maxburst) && dst_maxburst > 0)) { | ||
544 | dev_err(chan2dev(chan), "burst size must be a power of 2\n"); | ||
545 | return -EINVAL; | ||
546 | } | ||
547 | |||
548 | /* | ||
549 | * Configure channel control: | ||
550 | * - Clear SW request as in this case this is a HW one | ||
551 | * - Clear WEX, HEX and BEX bits | ||
552 | * - Set priority level | ||
553 | */ | ||
554 | ccr &= ~(STM32_MDMA_CCR_SWRQ | STM32_MDMA_CCR_WEX | STM32_MDMA_CCR_HEX | | ||
555 | STM32_MDMA_CCR_BEX | STM32_MDMA_CCR_PL_MASK); | ||
556 | ccr |= STM32_MDMA_CCR_PL(chan_config->priority_level); | ||
557 | |||
558 | /* Configure Trigger selection */ | ||
559 | ctbr &= ~STM32_MDMA_CTBR_TSEL_MASK; | ||
560 | ctbr |= STM32_MDMA_CTBR_TSEL(chan_config->request); | ||
561 | |||
562 | switch (direction) { | ||
563 | case DMA_MEM_TO_DEV: | ||
564 | dst_addr = chan->dma_config.dst_addr; | ||
565 | |||
566 | /* Set device data size */ | ||
567 | dst_bus_width = stm32_mdma_get_width(chan, dst_addr_width); | ||
568 | if (dst_bus_width < 0) | ||
569 | return dst_bus_width; | ||
570 | ctcr &= ~STM32_MDMA_CTCR_DSIZE_MASK; | ||
571 | ctcr |= STM32_MDMA_CTCR_DSIZE(dst_bus_width); | ||
572 | |||
573 | /* Set device burst value */ | ||
574 | dst_best_burst = stm32_mdma_get_best_burst(buf_len, tlen, | ||
575 | dst_maxburst, | ||
576 | dst_addr_width); | ||
577 | chan->mem_burst = dst_best_burst; | ||
578 | ctcr &= ~STM32_MDMA_CTCR_DBURST_MASK; | ||
579 | ctcr |= STM32_MDMA_CTCR_DBURST((ilog2(dst_best_burst))); | ||
580 | |||
581 | /* Set memory data size */ | ||
582 | src_addr_width = stm32_mdma_get_max_width(addr, buf_len, tlen); | ||
583 | chan->mem_width = src_addr_width; | ||
584 | src_bus_width = stm32_mdma_get_width(chan, src_addr_width); | ||
585 | if (src_bus_width < 0) | ||
586 | return src_bus_width; | ||
587 | ctcr &= ~(STM32_MDMA_CTCR_SSIZE_MASK | | ||
588 | STM32_MDMA_CTCR_SINCOS_MASK); | ||
589 | ctcr |= STM32_MDMA_CTCR_SSIZE(src_bus_width) | | ||
590 | STM32_MDMA_CTCR_SINCOS(src_bus_width); | ||
591 | |||
592 | /* Set memory burst value */ | ||
593 | src_maxburst = STM32_MDMA_MAX_BUF_LEN / src_addr_width; | ||
594 | src_best_burst = stm32_mdma_get_best_burst(buf_len, tlen, | ||
595 | src_maxburst, | ||
596 | src_addr_width); | ||
597 | chan->mem_burst = src_best_burst; | ||
598 | ctcr &= ~STM32_MDMA_CTCR_SBURST_MASK; | ||
599 | ctcr |= STM32_MDMA_CTCR_SBURST((ilog2(src_best_burst))); | ||
600 | |||
601 | /* Select bus */ | ||
602 | stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS, | ||
603 | dst_addr); | ||
604 | |||
605 | if (dst_bus_width != src_bus_width) | ||
606 | ctcr |= STM32_MDMA_CTCR_PKE; | ||
607 | |||
608 | /* Set destination address */ | ||
609 | stm32_mdma_write(dmadev, STM32_MDMA_CDAR(chan->id), dst_addr); | ||
610 | break; | ||
611 | |||
612 | case DMA_DEV_TO_MEM: | ||
613 | src_addr = chan->dma_config.src_addr; | ||
614 | |||
615 | /* Set device data size */ | ||
616 | src_bus_width = stm32_mdma_get_width(chan, src_addr_width); | ||
617 | if (src_bus_width < 0) | ||
618 | return src_bus_width; | ||
619 | ctcr &= ~STM32_MDMA_CTCR_SSIZE_MASK; | ||
620 | ctcr |= STM32_MDMA_CTCR_SSIZE(src_bus_width); | ||
621 | |||
622 | /* Set device burst value */ | ||
623 | src_best_burst = stm32_mdma_get_best_burst(buf_len, tlen, | ||
624 | src_maxburst, | ||
625 | src_addr_width); | ||
626 | ctcr &= ~STM32_MDMA_CTCR_SBURST_MASK; | ||
627 | ctcr |= STM32_MDMA_CTCR_SBURST((ilog2(src_best_burst))); | ||
628 | |||
629 | /* Set memory data size */ | ||
630 | dst_addr_width = stm32_mdma_get_max_width(addr, buf_len, tlen); | ||
631 | chan->mem_width = dst_addr_width; | ||
632 | dst_bus_width = stm32_mdma_get_width(chan, dst_addr_width); | ||
633 | if (dst_bus_width < 0) | ||
634 | return dst_bus_width; | ||
635 | ctcr &= ~(STM32_MDMA_CTCR_DSIZE_MASK | | ||
636 | STM32_MDMA_CTCR_DINCOS_MASK); | ||
637 | ctcr |= STM32_MDMA_CTCR_DSIZE(dst_bus_width) | | ||
638 | STM32_MDMA_CTCR_DINCOS(dst_bus_width); | ||
639 | |||
640 | /* Set memory burst value */ | ||
641 | dst_maxburst = STM32_MDMA_MAX_BUF_LEN / dst_addr_width; | ||
642 | dst_best_burst = stm32_mdma_get_best_burst(buf_len, tlen, | ||
643 | dst_maxburst, | ||
644 | dst_addr_width); | ||
645 | ctcr &= ~STM32_MDMA_CTCR_DBURST_MASK; | ||
646 | ctcr |= STM32_MDMA_CTCR_DBURST((ilog2(dst_best_burst))); | ||
647 | |||
648 | /* Select bus */ | ||
649 | stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS, | ||
650 | src_addr); | ||
651 | |||
652 | if (dst_bus_width != src_bus_width) | ||
653 | ctcr |= STM32_MDMA_CTCR_PKE; | ||
654 | |||
655 | /* Set source address */ | ||
656 | stm32_mdma_write(dmadev, STM32_MDMA_CSAR(chan->id), src_addr); | ||
657 | break; | ||
658 | |||
659 | default: | ||
660 | dev_err(chan2dev(chan), "Dma direction is not supported\n"); | ||
661 | return -EINVAL; | ||
662 | } | ||
663 | |||
664 | *mdma_ccr = ccr; | ||
665 | *mdma_ctcr = ctcr; | ||
666 | *mdma_ctbr = ctbr; | ||
667 | |||
668 | return 0; | ||
669 | } | ||
670 | |||
671 | static void stm32_mdma_dump_hwdesc(struct stm32_mdma_chan *chan, | ||
672 | struct stm32_mdma_hwdesc *hwdesc) | ||
673 | { | ||
674 | dev_dbg(chan2dev(chan), "hwdesc: 0x%p\n", hwdesc); | ||
675 | dev_dbg(chan2dev(chan), "CTCR: 0x%08x\n", hwdesc->ctcr); | ||
676 | dev_dbg(chan2dev(chan), "CBNDTR: 0x%08x\n", hwdesc->cbndtr); | ||
677 | dev_dbg(chan2dev(chan), "CSAR: 0x%08x\n", hwdesc->csar); | ||
678 | dev_dbg(chan2dev(chan), "CDAR: 0x%08x\n", hwdesc->cdar); | ||
679 | dev_dbg(chan2dev(chan), "CBRUR: 0x%08x\n", hwdesc->cbrur); | ||
680 | dev_dbg(chan2dev(chan), "CLAR: 0x%08x\n", hwdesc->clar); | ||
681 | dev_dbg(chan2dev(chan), "CTBR: 0x%08x\n", hwdesc->ctbr); | ||
682 | dev_dbg(chan2dev(chan), "CMAR: 0x%08x\n", hwdesc->cmar); | ||
683 | dev_dbg(chan2dev(chan), "CMDR: 0x%08x\n\n", hwdesc->cmdr); | ||
684 | } | ||
685 | |||
686 | static void stm32_mdma_setup_hwdesc(struct stm32_mdma_chan *chan, | ||
687 | struct stm32_mdma_desc *desc, | ||
688 | enum dma_transfer_direction dir, u32 count, | ||
689 | dma_addr_t src_addr, dma_addr_t dst_addr, | ||
690 | u32 len, u32 ctcr, u32 ctbr, bool is_last, | ||
691 | bool is_first, bool is_cyclic) | ||
692 | { | ||
693 | struct stm32_mdma_chan_config *config = &chan->chan_config; | ||
694 | struct stm32_mdma_hwdesc *hwdesc; | ||
695 | u32 next = count + 1; | ||
696 | |||
697 | hwdesc = &desc->hwdesc[count]; | ||
698 | hwdesc->ctcr = ctcr; | ||
699 | hwdesc->cbndtr &= ~(STM32_MDMA_CBNDTR_BRC_MK | | ||
700 | STM32_MDMA_CBNDTR_BRDUM | | ||
701 | STM32_MDMA_CBNDTR_BRSUM | | ||
702 | STM32_MDMA_CBNDTR_BNDT_MASK); | ||
703 | hwdesc->cbndtr |= STM32_MDMA_CBNDTR_BNDT(len); | ||
704 | hwdesc->csar = src_addr; | ||
705 | hwdesc->cdar = dst_addr; | ||
706 | hwdesc->cbrur = 0; | ||
707 | hwdesc->clar = desc->hwdesc_phys + next * sizeof(*hwdesc); | ||
708 | hwdesc->ctbr = ctbr; | ||
709 | hwdesc->cmar = config->mask_addr; | ||
710 | hwdesc->cmdr = config->mask_data; | ||
711 | |||
712 | if (is_last) { | ||
713 | if (is_cyclic) | ||
714 | hwdesc->clar = desc->hwdesc_phys; | ||
715 | else | ||
716 | hwdesc->clar = 0; | ||
717 | } | ||
718 | |||
719 | stm32_mdma_dump_hwdesc(chan, hwdesc); | ||
720 | } | ||
721 | |||
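Editor's note: each hardware descriptor's CLAR field holds the bus address of the next descriptor, so a scatterlist becomes a chain inside the single dma_pool allocation: entry i links to hwdesc_phys + (i + 1) * sizeof(hwdesc), and the last entry links back to the first for cyclic transfers or to 0 to terminate the list. A stand-alone sketch of just that chaining follows; only the link field is modelled and the base "physical" address is made up.

/*
 * Editorial sketch of the CLAR linked-list chaining done by
 * stm32_mdma_setup_hwdesc().  The base address is an arbitrary example value;
 * the real descriptors are 64-byte aligned, hence HWDESC_SIZE.
 */
#include <stdio.h>
#include <stdint.h>

struct hwdesc_link {
	uint32_t clar;	/* bus address of the next descriptor, 0 = end */
};

#define HWDESC_SIZE	64U

static void chain(struct hwdesc_link *d, unsigned count, uint32_t phys,
		  int cyclic)
{
	unsigned i;

	for (i = 0; i < count; i++)
		d[i].clar = phys + (i + 1) * HWDESC_SIZE;

	/* last node: wrap back to the first descriptor or terminate */
	d[count - 1].clar = cyclic ? phys : 0;
}

int main(void)
{
	struct hwdesc_link d[4];
	unsigned i;

	chain(d, 4, 0xc0001000, 1);	/* cyclic chain of 4 descriptors */
	for (i = 0; i < 4; i++)
		printf("hwdesc[%u].clar = 0x%08x\n", i, d[i].clar);
	return 0;
}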
722 | static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan, | ||
723 | struct stm32_mdma_desc *desc, | ||
724 | struct scatterlist *sgl, u32 sg_len, | ||
725 | enum dma_transfer_direction direction) | ||
726 | { | ||
727 | struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); | ||
728 | struct dma_slave_config *dma_config = &chan->dma_config; | ||
729 | struct scatterlist *sg; | ||
730 | dma_addr_t src_addr, dst_addr; | ||
731 | u32 ccr, ctcr, ctbr; | ||
732 | int i, ret = 0; | ||
733 | |||
734 | for_each_sg(sgl, sg, sg_len, i) { | ||
735 | if (sg_dma_len(sg) > STM32_MDMA_MAX_BLOCK_LEN) { | ||
736 | dev_err(chan2dev(chan), "Invalid block len\n"); | ||
737 | return -EINVAL; | ||
738 | } | ||
739 | |||
740 | if (direction == DMA_MEM_TO_DEV) { | ||
741 | src_addr = sg_dma_address(sg); | ||
742 | dst_addr = dma_config->dst_addr; | ||
743 | ret = stm32_mdma_set_xfer_param(chan, direction, &ccr, | ||
744 | &ctcr, &ctbr, src_addr, | ||
745 | sg_dma_len(sg)); | ||
746 | stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS, | ||
747 | src_addr); | ||
748 | } else { | ||
749 | src_addr = dma_config->src_addr; | ||
750 | dst_addr = sg_dma_address(sg); | ||
751 | ret = stm32_mdma_set_xfer_param(chan, direction, &ccr, | ||
752 | &ctcr, &ctbr, dst_addr, | ||
753 | sg_dma_len(sg)); | ||
754 | stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS, | ||
755 | dst_addr); | ||
756 | } | ||
757 | |||
758 | if (ret < 0) | ||
759 | return ret; | ||
760 | |||
761 | stm32_mdma_setup_hwdesc(chan, desc, direction, i, src_addr, | ||
762 | dst_addr, sg_dma_len(sg), ctcr, ctbr, | ||
763 | i == sg_len - 1, i == 0, false); | ||
764 | } | ||
765 | |||
766 | /* Enable interrupts */ | ||
767 | ccr &= ~STM32_MDMA_CCR_IRQ_MASK; | ||
768 | ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE; | ||
769 | if (sg_len > 1) | ||
770 | ccr |= STM32_MDMA_CCR_BTIE; | ||
771 | desc->ccr = ccr; | ||
772 | |||
773 | return 0; | ||
774 | } | ||
775 | |||
776 | static struct dma_async_tx_descriptor * | ||
777 | stm32_mdma_prep_slave_sg(struct dma_chan *c, struct scatterlist *sgl, | ||
778 | u32 sg_len, enum dma_transfer_direction direction, | ||
779 | unsigned long flags, void *context) | ||
780 | { | ||
781 | struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); | ||
782 | struct stm32_mdma_desc *desc; | ||
783 | int ret; | ||
784 | |||
785 | /* | ||
786 | * Once the DMA channel has been set up in cyclic mode, we cannot | ||
787 | * assign this channel anymore. The DMA channel needs to be aborted | ||
788 | * or terminated to allow another request. | ||
789 | */ | ||
790 | if (chan->desc && chan->desc->cyclic) { | ||
791 | dev_err(chan2dev(chan), | ||
792 | "Request not allowed when dma in cyclic mode\n"); | ||
793 | return NULL; | ||
794 | } | ||
795 | |||
796 | desc = stm32_mdma_alloc_desc(chan, sg_len); | ||
797 | if (!desc) | ||
798 | return NULL; | ||
799 | |||
800 | ret = stm32_mdma_setup_xfer(chan, desc, sgl, sg_len, direction); | ||
801 | if (ret < 0) | ||
802 | goto xfer_setup_err; | ||
803 | |||
804 | desc->cyclic = false; | ||
805 | |||
806 | return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); | ||
807 | |||
808 | xfer_setup_err: | ||
809 | dma_pool_free(chan->desc_pool, desc->hwdesc, desc->hwdesc_phys); | ||
810 | kfree(desc); | ||
811 | return NULL; | ||
812 | } | ||
813 | |||
814 | static struct dma_async_tx_descriptor * | ||
815 | stm32_mdma_prep_dma_cyclic(struct dma_chan *c, dma_addr_t buf_addr, | ||
816 | size_t buf_len, size_t period_len, | ||
817 | enum dma_transfer_direction direction, | ||
818 | unsigned long flags) | ||
819 | { | ||
820 | struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); | ||
821 | struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); | ||
822 | struct dma_slave_config *dma_config = &chan->dma_config; | ||
823 | struct stm32_mdma_desc *desc; | ||
824 | dma_addr_t src_addr, dst_addr; | ||
825 | u32 ccr, ctcr, ctbr, count; | ||
826 | int i, ret; | ||
827 | |||
828 | /* | ||
829 | * Once the DMA channel has been set up in cyclic mode, we cannot | ||
830 | * assign this channel anymore. The DMA channel needs to be aborted | ||
831 | * or terminated to allow another request. | ||
832 | */ | ||
833 | if (chan->desc && chan->desc->cyclic) { | ||
834 | dev_err(chan2dev(chan), | ||
835 | "Request not allowed when dma in cyclic mode\n"); | ||
836 | return NULL; | ||
837 | } | ||
838 | |||
839 | if (!buf_len || !period_len || period_len > STM32_MDMA_MAX_BLOCK_LEN) { | ||
840 | dev_err(chan2dev(chan), "Invalid buffer/period len\n"); | ||
841 | return NULL; | ||
842 | } | ||
843 | |||
844 | if (buf_len % period_len) { | ||
845 | dev_err(chan2dev(chan), "buf_len not multiple of period_len\n"); | ||
846 | return NULL; | ||
847 | } | ||
848 | |||
849 | count = buf_len / period_len; | ||
850 | |||
851 | desc = stm32_mdma_alloc_desc(chan, count); | ||
852 | if (!desc) | ||
853 | return NULL; | ||
854 | |||
855 | /* Select bus */ | ||
856 | if (direction == DMA_MEM_TO_DEV) { | ||
857 | src_addr = buf_addr; | ||
858 | ret = stm32_mdma_set_xfer_param(chan, direction, &ccr, &ctcr, | ||
859 | &ctbr, src_addr, period_len); | ||
860 | stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS, | ||
861 | src_addr); | ||
862 | } else { | ||
863 | dst_addr = buf_addr; | ||
864 | ret = stm32_mdma_set_xfer_param(chan, direction, &ccr, &ctcr, | ||
865 | &ctbr, dst_addr, period_len); | ||
866 | stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS, | ||
867 | dst_addr); | ||
868 | } | ||
869 | |||
870 | if (ret < 0) | ||
871 | goto xfer_setup_err; | ||
872 | |||
873 | /* Enable interrupts */ | ||
874 | ccr &= ~STM32_MDMA_CCR_IRQ_MASK; | ||
875 | ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE | STM32_MDMA_CCR_BTIE; | ||
876 | desc->ccr = ccr; | ||
877 | |||
878 | /* Configure hwdesc list */ | ||
879 | for (i = 0; i < count; i++) { | ||
880 | if (direction == DMA_MEM_TO_DEV) { | ||
881 | src_addr = buf_addr + i * period_len; | ||
882 | dst_addr = dma_config->dst_addr; | ||
883 | } else { | ||
884 | src_addr = dma_config->src_addr; | ||
885 | dst_addr = buf_addr + i * period_len; | ||
886 | } | ||
887 | |||
888 | stm32_mdma_setup_hwdesc(chan, desc, direction, i, src_addr, | ||
889 | dst_addr, period_len, ctcr, ctbr, | ||
890 | i == count - 1, i == 0, true); | ||
891 | } | ||
892 | |||
893 | desc->cyclic = true; | ||
894 | |||
895 | return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); | ||
896 | |||
897 | xfer_setup_err: | ||
898 | dma_pool_free(chan->desc_pool, desc->hwdesc, desc->hwdesc_phys); | ||
899 | kfree(desc); | ||
900 | return NULL; | ||
901 | } | ||
902 | |||
903 | static struct dma_async_tx_descriptor * | ||
904 | stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src, | ||
905 | size_t len, unsigned long flags) | ||
906 | { | ||
907 | struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); | ||
908 | struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); | ||
909 | enum dma_slave_buswidth max_width; | ||
910 | struct stm32_mdma_desc *desc; | ||
911 | struct stm32_mdma_hwdesc *hwdesc; | ||
912 | u32 ccr, ctcr, ctbr, cbndtr, count, max_burst, mdma_burst; | ||
913 | u32 best_burst, tlen; | ||
914 | size_t xfer_count, offset; | ||
915 | int src_bus_width, dst_bus_width; | ||
916 | int i; | ||
917 | |||
918 | /* | ||
919 | * Once the DMA channel has been set up in cyclic mode, it cannot be | ||
920 | * reassigned. The channel must be aborted or terminated before | ||
921 | * another request can be accepted. | ||
922 | */ | ||
923 | if (chan->desc && chan->desc->cyclic) { | ||
924 | dev_err(chan2dev(chan), | ||
925 | "Request not allowed when dma in cyclic mode\n"); | ||
926 | return NULL; | ||
927 | } | ||
928 | |||
929 | count = DIV_ROUND_UP(len, STM32_MDMA_MAX_BLOCK_LEN); | ||
930 | desc = stm32_mdma_alloc_desc(chan, count); | ||
931 | if (!desc) | ||
932 | return NULL; | ||
933 | |||
934 | ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)); | ||
935 | ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id)); | ||
936 | ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id)); | ||
937 | cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id)); | ||
938 | |||
939 | /* Enable sw req, some interrupts and clear other bits */ | ||
940 | ccr &= ~(STM32_MDMA_CCR_WEX | STM32_MDMA_CCR_HEX | | ||
941 | STM32_MDMA_CCR_BEX | STM32_MDMA_CCR_PL_MASK | | ||
942 | STM32_MDMA_CCR_IRQ_MASK); | ||
943 | ccr |= STM32_MDMA_CCR_TEIE; | ||
944 | |||
945 | /* Enable SW request mode, dest/src inc and clear other bits */ | ||
946 | ctcr &= ~(STM32_MDMA_CTCR_BWM | STM32_MDMA_CTCR_TRGM_MSK | | ||
947 | STM32_MDMA_CTCR_PAM_MASK | STM32_MDMA_CTCR_PKE | | ||
948 | STM32_MDMA_CTCR_TLEN_MSK | STM32_MDMA_CTCR_DBURST_MASK | | ||
949 | STM32_MDMA_CTCR_SBURST_MASK | STM32_MDMA_CTCR_DINCOS_MASK | | ||
950 | STM32_MDMA_CTCR_SINCOS_MASK | STM32_MDMA_CTCR_DSIZE_MASK | | ||
951 | STM32_MDMA_CTCR_SSIZE_MASK | STM32_MDMA_CTCR_DINC_MASK | | ||
952 | STM32_MDMA_CTCR_SINC_MASK); | ||
953 | ctcr |= STM32_MDMA_CTCR_SWRM | STM32_MDMA_CTCR_SINC(STM32_MDMA_INC) | | ||
954 | STM32_MDMA_CTCR_DINC(STM32_MDMA_INC); | ||
955 | |||
956 | /* Reset HW request */ | ||
957 | ctbr &= ~STM32_MDMA_CTBR_TSEL_MASK; | ||
958 | |||
959 | /* Select bus */ | ||
960 | stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS, src); | ||
961 | stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS, dest); | ||
962 | |||
963 | /* Clear CBNDTR registers */ | ||
964 | cbndtr &= ~(STM32_MDMA_CBNDTR_BRC_MK | STM32_MDMA_CBNDTR_BRDUM | | ||
965 | STM32_MDMA_CBNDTR_BRSUM | STM32_MDMA_CBNDTR_BNDT_MASK); | ||
966 | |||
967 | if (len <= STM32_MDMA_MAX_BLOCK_LEN) { | ||
968 | cbndtr |= STM32_MDMA_CBNDTR_BNDT(len); | ||
969 | if (len <= STM32_MDMA_MAX_BUF_LEN) { | ||
970 | /* Setup a buffer transfer */ | ||
971 | ccr |= STM32_MDMA_CCR_TCIE | STM32_MDMA_CCR_CTCIE; | ||
972 | ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_BUFFER); | ||
973 | } else { | ||
974 | /* Setup a block transfer */ | ||
975 | ccr |= STM32_MDMA_CCR_BTIE | STM32_MDMA_CCR_CTCIE; | ||
976 | ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_BLOCK); | ||
977 | } | ||
978 | |||
979 | tlen = STM32_MDMA_MAX_BUF_LEN; | ||
980 | ctcr |= STM32_MDMA_CTCR_TLEN((tlen - 1)); | ||
981 | |||
982 | /* Set source best burst size */ | ||
983 | max_width = stm32_mdma_get_max_width(src, len, tlen); | ||
984 | src_bus_width = stm32_mdma_get_width(chan, max_width); | ||
985 | |||
986 | max_burst = tlen / max_width; | ||
987 | best_burst = stm32_mdma_get_best_burst(len, tlen, max_burst, | ||
988 | max_width); | ||
989 | mdma_burst = ilog2(best_burst); | ||
990 | |||
991 | ctcr |= STM32_MDMA_CTCR_SBURST(mdma_burst) | | ||
992 | STM32_MDMA_CTCR_SSIZE(src_bus_width) | | ||
993 | STM32_MDMA_CTCR_SINCOS(src_bus_width); | ||
994 | |||
995 | /* Set destination best burst size */ | ||
996 | max_width = stm32_mdma_get_max_width(dest, len, tlen); | ||
997 | dst_bus_width = stm32_mdma_get_width(chan, max_width); | ||
998 | |||
999 | max_burst = tlen / max_width; | ||
1000 | best_burst = stm32_mdma_get_best_burst(len, tlen, max_burst, | ||
1001 | max_width); | ||
1002 | mdma_burst = ilog2(best_burst); | ||
1003 | |||
1004 | ctcr |= STM32_MDMA_CTCR_DBURST(mdma_burst) | | ||
1005 | STM32_MDMA_CTCR_DSIZE(dst_bus_width) | | ||
1006 | STM32_MDMA_CTCR_DINCOS(dst_bus_width); | ||
1007 | |||
1008 | if (dst_bus_width != src_bus_width) | ||
1009 | ctcr |= STM32_MDMA_CTCR_PKE; | ||
1010 | |||
1011 | /* Prepare hardware descriptor */ | ||
1012 | hwdesc = desc->hwdesc; | ||
1013 | hwdesc->ctcr = ctcr; | ||
1014 | hwdesc->cbndtr = cbndtr; | ||
1015 | hwdesc->csar = src; | ||
1016 | hwdesc->cdar = dest; | ||
1017 | hwdesc->cbrur = 0; | ||
1018 | hwdesc->clar = 0; | ||
1019 | hwdesc->ctbr = ctbr; | ||
1020 | hwdesc->cmar = 0; | ||
1021 | hwdesc->cmdr = 0; | ||
1022 | |||
1023 | stm32_mdma_dump_hwdesc(chan, hwdesc); | ||
1024 | } else { | ||
1025 | /* Setup a LLI transfer */ | ||
1026 | ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_LINKED_LIST) | | ||
1027 | STM32_MDMA_CTCR_TLEN((STM32_MDMA_MAX_BUF_LEN - 1)); | ||
1028 | ccr |= STM32_MDMA_CCR_BTIE | STM32_MDMA_CCR_CTCIE; | ||
1029 | tlen = STM32_MDMA_MAX_BUF_LEN; | ||
1030 | |||
1031 | for (i = 0, offset = 0; offset < len; | ||
1032 | i++, offset += xfer_count) { | ||
1033 | xfer_count = min_t(size_t, len - offset, | ||
1034 | STM32_MDMA_MAX_BLOCK_LEN); | ||
1035 | |||
1036 | /* Set source best burst size */ | ||
1037 | max_width = stm32_mdma_get_max_width(src, len, tlen); | ||
1038 | src_bus_width = stm32_mdma_get_width(chan, max_width); | ||
1039 | |||
1040 | max_burst = tlen / max_width; | ||
1041 | best_burst = stm32_mdma_get_best_burst(len, tlen, | ||
1042 | max_burst, | ||
1043 | max_width); | ||
1044 | mdma_burst = ilog2(best_burst); | ||
1045 | |||
1046 | ctcr |= STM32_MDMA_CTCR_SBURST(mdma_burst) | | ||
1047 | STM32_MDMA_CTCR_SSIZE(src_bus_width) | | ||
1048 | STM32_MDMA_CTCR_SINCOS(src_bus_width); | ||
1049 | |||
1050 | /* Set destination best burst size */ | ||
1051 | max_width = stm32_mdma_get_max_width(dest, len, tlen); | ||
1052 | dst_bus_width = stm32_mdma_get_width(chan, max_width); | ||
1053 | |||
1054 | max_burst = tlen / max_width; | ||
1055 | best_burst = stm32_mdma_get_best_burst(len, tlen, | ||
1056 | max_burst, | ||
1057 | max_width); | ||
1058 | mdma_burst = ilog2(best_burst); | ||
1059 | |||
1060 | ctcr |= STM32_MDMA_CTCR_DBURST(mdma_burst) | | ||
1061 | STM32_MDMA_CTCR_DSIZE(dst_bus_width) | | ||
1062 | STM32_MDMA_CTCR_DINCOS(dst_bus_width); | ||
1063 | |||
1064 | if (dst_bus_width != src_bus_width) | ||
1065 | ctcr |= STM32_MDMA_CTCR_PKE; | ||
1066 | |||
1067 | /* Prepare hardware descriptor */ | ||
1068 | stm32_mdma_setup_hwdesc(chan, desc, DMA_MEM_TO_MEM, i, | ||
1069 | src + offset, dest + offset, | ||
1070 | xfer_count, ctcr, ctbr, | ||
1071 | i == count - 1, i == 0, false); | ||
1072 | } | ||
1073 | } | ||
1074 | |||
1075 | desc->ccr = ccr; | ||
1076 | |||
1077 | desc->cyclic = false; | ||
1078 | |||
1079 | return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); | ||
1080 | } | ||
1081 | |||
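The buffer/block/linked-list decision in the memcpy path above can be summarised by this illustrative helper (hypothetical, not driver code; the limit and trigger-mode macros are the driver's own):

    static u32 stm32_mdma_memcpy_trigger_mode(size_t len)
    {
            if (len > STM32_MDMA_MAX_BLOCK_LEN)
                    return STM32_MDMA_LINKED_LIST;  /* split across linked-list hwdescs */
            if (len > STM32_MDMA_MAX_BUF_LEN)
                    return STM32_MDMA_BLOCK;        /* single block transfer */
            return STM32_MDMA_BUFFER;               /* single buffer transfer */
    }
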
1082 | static void stm32_mdma_dump_reg(struct stm32_mdma_chan *chan) | ||
1083 | { | ||
1084 | struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); | ||
1085 | |||
1086 | dev_dbg(chan2dev(chan), "CCR: 0x%08x\n", | ||
1087 | stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id))); | ||
1088 | dev_dbg(chan2dev(chan), "CTCR: 0x%08x\n", | ||
1089 | stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id))); | ||
1090 | dev_dbg(chan2dev(chan), "CBNDTR: 0x%08x\n", | ||
1091 | stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id))); | ||
1092 | dev_dbg(chan2dev(chan), "CSAR: 0x%08x\n", | ||
1093 | stm32_mdma_read(dmadev, STM32_MDMA_CSAR(chan->id))); | ||
1094 | dev_dbg(chan2dev(chan), "CDAR: 0x%08x\n", | ||
1095 | stm32_mdma_read(dmadev, STM32_MDMA_CDAR(chan->id))); | ||
1096 | dev_dbg(chan2dev(chan), "CBRUR: 0x%08x\n", | ||
1097 | stm32_mdma_read(dmadev, STM32_MDMA_CBRUR(chan->id))); | ||
1098 | dev_dbg(chan2dev(chan), "CLAR: 0x%08x\n", | ||
1099 | stm32_mdma_read(dmadev, STM32_MDMA_CLAR(chan->id))); | ||
1100 | dev_dbg(chan2dev(chan), "CTBR: 0x%08x\n", | ||
1101 | stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id))); | ||
1102 | dev_dbg(chan2dev(chan), "CMAR: 0x%08x\n", | ||
1103 | stm32_mdma_read(dmadev, STM32_MDMA_CMAR(chan->id))); | ||
1104 | dev_dbg(chan2dev(chan), "CMDR: 0x%08x\n", | ||
1105 | stm32_mdma_read(dmadev, STM32_MDMA_CMDR(chan->id))); | ||
1106 | } | ||
1107 | |||
1108 | static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan) | ||
1109 | { | ||
1110 | struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); | ||
1111 | struct virt_dma_desc *vdesc; | ||
1112 | struct stm32_mdma_hwdesc *hwdesc; | ||
1113 | u32 id = chan->id; | ||
1114 | u32 status, reg; | ||
1115 | |||
1116 | vdesc = vchan_next_desc(&chan->vchan); | ||
1117 | if (!vdesc) { | ||
1118 | chan->desc = NULL; | ||
1119 | return; | ||
1120 | } | ||
1121 | |||
1122 | chan->desc = to_stm32_mdma_desc(vdesc); | ||
1123 | hwdesc = chan->desc->hwdesc; | ||
1124 | chan->curr_hwdesc = 0; | ||
1125 | |||
1126 | stm32_mdma_write(dmadev, STM32_MDMA_CCR(id), chan->desc->ccr); | ||
1127 | stm32_mdma_write(dmadev, STM32_MDMA_CTCR(id), hwdesc->ctcr); | ||
1128 | stm32_mdma_write(dmadev, STM32_MDMA_CBNDTR(id), hwdesc->cbndtr); | ||
1129 | stm32_mdma_write(dmadev, STM32_MDMA_CSAR(id), hwdesc->csar); | ||
1130 | stm32_mdma_write(dmadev, STM32_MDMA_CDAR(id), hwdesc->cdar); | ||
1131 | stm32_mdma_write(dmadev, STM32_MDMA_CBRUR(id), hwdesc->cbrur); | ||
1132 | stm32_mdma_write(dmadev, STM32_MDMA_CLAR(id), hwdesc->clar); | ||
1133 | stm32_mdma_write(dmadev, STM32_MDMA_CTBR(id), hwdesc->ctbr); | ||
1134 | stm32_mdma_write(dmadev, STM32_MDMA_CMAR(id), hwdesc->cmar); | ||
1135 | stm32_mdma_write(dmadev, STM32_MDMA_CMDR(id), hwdesc->cmdr); | ||
1136 | |||
1137 | /* Clear interrupt status if it is there */ | ||
1138 | status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(id)); | ||
1139 | if (status) | ||
1140 | stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(id), status); | ||
1141 | |||
1142 | stm32_mdma_dump_reg(chan); | ||
1143 | |||
1144 | /* Start DMA */ | ||
1145 | stm32_mdma_set_bits(dmadev, STM32_MDMA_CCR(id), STM32_MDMA_CCR_EN); | ||
1146 | |||
1147 | /* Set SW request in case of MEM2MEM transfer */ | ||
1148 | if (hwdesc->ctcr & STM32_MDMA_CTCR_SWRM) { | ||
1149 | reg = STM32_MDMA_CCR(id); | ||
1150 | stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_SWRQ); | ||
1151 | } | ||
1152 | |||
1153 | chan->busy = true; | ||
1154 | |||
1155 | dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan); | ||
1156 | } | ||
1157 | |||
1158 | static void stm32_mdma_issue_pending(struct dma_chan *c) | ||
1159 | { | ||
1160 | struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); | ||
1161 | unsigned long flags; | ||
1162 | |||
1163 | spin_lock_irqsave(&chan->vchan.lock, flags); | ||
1164 | |||
1165 | if (!vchan_issue_pending(&chan->vchan)) | ||
1166 | goto end; | ||
1167 | |||
1168 | dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan); | ||
1169 | |||
1170 | if (!chan->desc && !chan->busy) | ||
1171 | stm32_mdma_start_transfer(chan); | ||
1172 | |||
1173 | end: | ||
1174 | spin_unlock_irqrestore(&chan->vchan.lock, flags); | ||
1175 | } | ||
1176 | |||
1177 | static int stm32_mdma_pause(struct dma_chan *c) | ||
1178 | { | ||
1179 | struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); | ||
1180 | unsigned long flags; | ||
1181 | int ret; | ||
1182 | |||
1183 | spin_lock_irqsave(&chan->vchan.lock, flags); | ||
1184 | ret = stm32_mdma_disable_chan(chan); | ||
1185 | spin_unlock_irqrestore(&chan->vchan.lock, flags); | ||
1186 | |||
1187 | if (!ret) | ||
1188 | dev_dbg(chan2dev(chan), "vchan %p: pause\n", &chan->vchan); | ||
1189 | |||
1190 | return ret; | ||
1191 | } | ||
1192 | |||
1193 | static int stm32_mdma_resume(struct dma_chan *c) | ||
1194 | { | ||
1195 | struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); | ||
1196 | struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); | ||
1197 | struct stm32_mdma_hwdesc *hwdesc; | ||
1198 | unsigned long flags; | ||
1199 | u32 status, reg; | ||
1200 | |||
1201 | hwdesc = &chan->desc->hwdesc[chan->curr_hwdesc]; | ||
1202 | |||
1203 | spin_lock_irqsave(&chan->vchan.lock, flags); | ||
1204 | |||
1205 | /* Re-configure control register */ | ||
1206 | stm32_mdma_write(dmadev, STM32_MDMA_CCR(chan->id), chan->desc->ccr); | ||
1207 | |||
1208 | /* Clear interrupt status if it is there */ | ||
1209 | status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id)); | ||
1210 | if (status) | ||
1211 | stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(chan->id), status); | ||
1212 | |||
1213 | stm32_mdma_dump_reg(chan); | ||
1214 | |||
1215 | /* Re-start DMA */ | ||
1216 | reg = STM32_MDMA_CCR(chan->id); | ||
1217 | stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_EN); | ||
1218 | |||
1219 | /* Set SW request in case of MEM2MEM transfer */ | ||
1220 | if (hwdesc->ctcr & STM32_MDMA_CTCR_SWRM) | ||
1221 | stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_SWRQ); | ||
1222 | |||
1223 | spin_unlock_irqrestore(&chan->vchan.lock, flags); | ||
1224 | |||
1225 | dev_dbg(chan2dev(chan), "vchan %p: resume\n", &chan->vchan); | ||
1226 | |||
1227 | return 0; | ||
1228 | } | ||
1229 | |||
1230 | static int stm32_mdma_terminate_all(struct dma_chan *c) | ||
1231 | { | ||
1232 | struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); | ||
1233 | unsigned long flags; | ||
1234 | LIST_HEAD(head); | ||
1235 | |||
1236 | spin_lock_irqsave(&chan->vchan.lock, flags); | ||
1237 | if (chan->busy) { | ||
1238 | stm32_mdma_stop(chan); | ||
1239 | chan->desc = NULL; | ||
1240 | } | ||
1241 | vchan_get_all_descriptors(&chan->vchan, &head); | ||
1242 | spin_unlock_irqrestore(&chan->vchan.lock, flags); | ||
1243 | |||
1244 | vchan_dma_desc_free_list(&chan->vchan, &head); | ||
1245 | |||
1246 | return 0; | ||
1247 | } | ||
1248 | |||
1249 | static void stm32_mdma_synchronize(struct dma_chan *c) | ||
1250 | { | ||
1251 | struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); | ||
1252 | |||
1253 | vchan_synchronize(&chan->vchan); | ||
1254 | } | ||
1255 | |||
1256 | static int stm32_mdma_slave_config(struct dma_chan *c, | ||
1257 | struct dma_slave_config *config) | ||
1258 | { | ||
1259 | struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); | ||
1260 | |||
1261 | memcpy(&chan->dma_config, config, sizeof(*config)); | ||
1262 | |||
1263 | return 0; | ||
1264 | } | ||
1265 | |||
1266 | static size_t stm32_mdma_desc_residue(struct stm32_mdma_chan *chan, | ||
1267 | struct stm32_mdma_desc *desc, | ||
1268 | u32 curr_hwdesc) | ||
1269 | { | ||
1270 | struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); | ||
1271 | u32 cbndtr, residue, modulo, burst_size; | ||
1272 | int i; | ||
1273 | |||
1274 | residue = 0; | ||
1275 | for (i = curr_hwdesc + 1; i < desc->count; i++) { | ||
1276 | struct stm32_mdma_hwdesc *hwdesc = &desc->hwdesc[i]; | ||
1277 | |||
1278 | residue += STM32_MDMA_CBNDTR_BNDT(hwdesc->cbndtr); | ||
1279 | } | ||
1280 | cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id)); | ||
1281 | residue += cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK; | ||
1282 | |||
1283 | if (!chan->mem_burst) | ||
1284 | return residue; | ||
1285 | |||
1286 | burst_size = chan->mem_burst * chan->mem_width; | ||
1287 | modulo = residue % burst_size; | ||
1288 | if (modulo) | ||
1289 | residue = residue - modulo + burst_size; | ||
1290 | |||
1291 | return residue; | ||
1292 | } | ||
1293 | |||
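The burst-alignment rule used by the residue computation above, shown as a self-contained sketch (hypothetical helper; the values in the comment are only an example): remaining bytes are rounded up to the next memory-burst boundary so that DMA_RESIDUE_GRANULARITY_BURST is honoured.

    static size_t round_residue_to_burst(size_t residue, u32 mem_burst, u32 mem_width)
    {
            size_t burst_size = mem_burst * mem_width;      /* e.g. 8 beats * 4 bytes = 32 */
            size_t modulo = residue % burst_size;

            return modulo ? residue - modulo + burst_size : residue;
    }
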
1294 | static enum dma_status stm32_mdma_tx_status(struct dma_chan *c, | ||
1295 | dma_cookie_t cookie, | ||
1296 | struct dma_tx_state *state) | ||
1297 | { | ||
1298 | struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); | ||
1299 | struct virt_dma_desc *vdesc; | ||
1300 | enum dma_status status; | ||
1301 | unsigned long flags; | ||
1302 | u32 residue = 0; | ||
1303 | |||
1304 | status = dma_cookie_status(c, cookie, state); | ||
1305 | if ((status == DMA_COMPLETE) || (!state)) | ||
1306 | return status; | ||
1307 | |||
1308 | spin_lock_irqsave(&chan->vchan.lock, flags); | ||
1309 | |||
1310 | vdesc = vchan_find_desc(&chan->vchan, cookie); | ||
1311 | if (chan->desc && cookie == chan->desc->vdesc.tx.cookie) | ||
1312 | residue = stm32_mdma_desc_residue(chan, chan->desc, | ||
1313 | chan->curr_hwdesc); | ||
1314 | else if (vdesc) | ||
1315 | residue = stm32_mdma_desc_residue(chan, | ||
1316 | to_stm32_mdma_desc(vdesc), 0); | ||
1317 | dma_set_residue(state, residue); | ||
1318 | |||
1319 | spin_unlock_irqrestore(&chan->vchan.lock, flags); | ||
1320 | |||
1321 | return status; | ||
1322 | } | ||
1323 | |||
1324 | static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan) | ||
1325 | { | ||
1326 | list_del(&chan->desc->vdesc.node); | ||
1327 | vchan_cookie_complete(&chan->desc->vdesc); | ||
1328 | chan->desc = NULL; | ||
1329 | chan->busy = false; | ||
1330 | |||
1331 | /* Start the next transfer if another descriptor is queued on this channel */ | ||
1332 | stm32_mdma_start_transfer(chan); | ||
1333 | } | ||
1334 | |||
1335 | static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid) | ||
1336 | { | ||
1337 | struct stm32_mdma_device *dmadev = devid; | ||
1338 | struct stm32_mdma_chan *chan; | ||
1339 | u32 reg, id, ien, status, flag; | ||
1340 | |||
1341 | /* Find out which channel generates the interrupt */ | ||
1342 | status = readl_relaxed(dmadev->base + STM32_MDMA_GISR0); | ||
1343 | if (status) { | ||
1344 | id = __ffs(status); | ||
1345 | } else { | ||
1346 | status = readl_relaxed(dmadev->base + STM32_MDMA_GISR1); | ||
1347 | if (!status) { | ||
1348 | dev_dbg(mdma2dev(dmadev), "spurious it\n"); | ||
1349 | return IRQ_NONE; | ||
1350 | } | ||
1351 | id = __ffs(status); | ||
1352 | /* | ||
1353 | * GISR0 reports status for channels 0 to 31, while GISR1 | ||
1354 | * reports status for channels 32 to 62. | ||
1355 | */ | ||
1356 | id += 32; | ||
1357 | } | ||
1358 | |||
1359 | chan = &dmadev->chan[id]; | ||
1360 | if (!chan) { | ||
1361 | dev_err(chan2dev(chan), "MDMA channel not initialized\n"); | ||
1362 | goto exit; | ||
1363 | } | ||
1364 | |||
1365 | /* Handle interrupt for the channel */ | ||
1366 | spin_lock(&chan->vchan.lock); | ||
1367 | status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id)); | ||
1368 | ien = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)); | ||
1369 | ien &= STM32_MDMA_CCR_IRQ_MASK; | ||
1370 | ien >>= 1; | ||
1371 | |||
1372 | if (!(status & ien)) { | ||
1373 | spin_unlock(&chan->vchan.lock); | ||
1374 | dev_dbg(chan2dev(chan), | ||
1375 | "spurious it (status=0x%04x, ien=0x%04x)\n", | ||
1376 | status, ien); | ||
1377 | return IRQ_NONE; | ||
1378 | } | ||
1379 | |||
1380 | flag = __ffs(status & ien); | ||
1381 | reg = STM32_MDMA_CIFCR(chan->id); | ||
1382 | |||
1383 | switch (1 << flag) { | ||
1384 | case STM32_MDMA_CISR_TEIF: | ||
1385 | id = chan->id; | ||
1386 | status = readl_relaxed(dmadev->base + STM32_MDMA_CESR(id)); | ||
1387 | dev_err(chan2dev(chan), "Transfer Err: stat=0x%08x\n", status); | ||
1388 | stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CTEIF); | ||
1389 | break; | ||
1390 | |||
1391 | case STM32_MDMA_CISR_CTCIF: | ||
1392 | stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CCTCIF); | ||
1393 | stm32_mdma_xfer_end(chan); | ||
1394 | break; | ||
1395 | |||
1396 | case STM32_MDMA_CISR_BRTIF: | ||
1397 | stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CBRTIF); | ||
1398 | break; | ||
1399 | |||
1400 | case STM32_MDMA_CISR_BTIF: | ||
1401 | stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CBTIF); | ||
1402 | chan->curr_hwdesc++; | ||
1403 | if (chan->desc && chan->desc->cyclic) { | ||
1404 | if (chan->curr_hwdesc == chan->desc->count) | ||
1405 | chan->curr_hwdesc = 0; | ||
1406 | vchan_cyclic_callback(&chan->desc->vdesc); | ||
1407 | } | ||
1408 | break; | ||
1409 | |||
1410 | case STM32_MDMA_CISR_TCIF: | ||
1411 | stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CLTCIF); | ||
1412 | break; | ||
1413 | |||
1414 | default: | ||
1415 | dev_err(chan2dev(chan), "it %d unhandled (status=0x%04x)\n", | ||
1416 | 1 << flag, status); | ||
1417 | } | ||
1418 | |||
1419 | spin_unlock(&chan->vchan.lock); | ||
1420 | |||
1421 | exit: | ||
1422 | return IRQ_HANDLED; | ||
1423 | } | ||
1424 | |||
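The GISR0/GISR1 lookup at the top of the interrupt handler amounts to the following sketch (assumed helper, not part of the driver): GISR0 flags channels 0 to 31, GISR1 flags channels 32 to 62, and the channel id is the lowest set bit, offset by 32 when it comes from GISR1.

    static int stm32_mdma_irq_to_chan_id(u32 gisr0, u32 gisr1)
    {
            if (gisr0)
                    return __ffs(gisr0);
            if (gisr1)
                    return __ffs(gisr1) + 32;
            return -1;              /* spurious interrupt, no channel flagged */
    }
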
1425 | static int stm32_mdma_alloc_chan_resources(struct dma_chan *c) | ||
1426 | { | ||
1427 | struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); | ||
1428 | struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); | ||
1429 | int ret; | ||
1430 | |||
1431 | chan->desc_pool = dmam_pool_create(dev_name(&c->dev->device), | ||
1432 | c->device->dev, | ||
1433 | sizeof(struct stm32_mdma_hwdesc), | ||
1434 | __alignof__(struct stm32_mdma_hwdesc), | ||
1435 | 0); | ||
1436 | if (!chan->desc_pool) { | ||
1437 | dev_err(chan2dev(chan), "failed to allocate descriptor pool\n"); | ||
1438 | return -ENOMEM; | ||
1439 | } | ||
1440 | |||
1441 | ret = clk_prepare_enable(dmadev->clk); | ||
1442 | if (ret < 0) { | ||
1443 | dev_err(chan2dev(chan), "clk_prepare_enable failed: %d\n", ret); | ||
1444 | return ret; | ||
1445 | } | ||
1446 | |||
1447 | ret = stm32_mdma_disable_chan(chan); | ||
1448 | if (ret < 0) | ||
1449 | clk_disable_unprepare(dmadev->clk); | ||
1450 | |||
1451 | return ret; | ||
1452 | } | ||
1453 | |||
1454 | static void stm32_mdma_free_chan_resources(struct dma_chan *c) | ||
1455 | { | ||
1456 | struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); | ||
1457 | struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); | ||
1458 | unsigned long flags; | ||
1459 | |||
1460 | dev_dbg(chan2dev(chan), "Freeing channel %d\n", chan->id); | ||
1461 | |||
1462 | if (chan->busy) { | ||
1463 | spin_lock_irqsave(&chan->vchan.lock, flags); | ||
1464 | stm32_mdma_stop(chan); | ||
1465 | chan->desc = NULL; | ||
1466 | spin_unlock_irqrestore(&chan->vchan.lock, flags); | ||
1467 | } | ||
1468 | |||
1469 | clk_disable_unprepare(dmadev->clk); | ||
1470 | vchan_free_chan_resources(to_virt_chan(c)); | ||
1471 | dmam_pool_destroy(chan->desc_pool); | ||
1472 | chan->desc_pool = NULL; | ||
1473 | } | ||
1474 | |||
1475 | static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec, | ||
1476 | struct of_dma *ofdma) | ||
1477 | { | ||
1478 | struct stm32_mdma_device *dmadev = ofdma->of_dma_data; | ||
1479 | struct stm32_mdma_chan *chan; | ||
1480 | struct dma_chan *c; | ||
1481 | struct stm32_mdma_chan_config config; | ||
1482 | |||
1483 | if (dma_spec->args_count < 5) { | ||
1484 | dev_err(mdma2dev(dmadev), "Bad number of args\n"); | ||
1485 | return NULL; | ||
1486 | } | ||
1487 | |||
1488 | config.request = dma_spec->args[0]; | ||
1489 | config.priority_level = dma_spec->args[1]; | ||
1490 | config.transfer_config = dma_spec->args[2]; | ||
1491 | config.mask_addr = dma_spec->args[3]; | ||
1492 | config.mask_data = dma_spec->args[4]; | ||
1493 | |||
1494 | if (config.request >= dmadev->nr_requests) { | ||
1495 | dev_err(mdma2dev(dmadev), "Bad request line\n"); | ||
1496 | return NULL; | ||
1497 | } | ||
1498 | |||
1499 | if (config.priority_level > STM32_MDMA_VERY_HIGH_PRIORITY) { | ||
1500 | dev_err(mdma2dev(dmadev), "Priority level not supported\n"); | ||
1501 | return NULL; | ||
1502 | } | ||
1503 | |||
1504 | c = dma_get_any_slave_channel(&dmadev->ddev); | ||
1505 | if (!c) { | ||
1506 | dev_err(mdma2dev(dmadev), "No more channels available\n"); | ||
1507 | return NULL; | ||
1508 | } | ||
1509 | |||
1510 | chan = to_stm32_mdma_chan(c); | ||
1511 | chan->chan_config = config; | ||
1512 | |||
1513 | return c; | ||
1514 | } | ||
1515 | |||
1516 | static const struct of_device_id stm32_mdma_of_match[] = { | ||
1517 | { .compatible = "st,stm32h7-mdma", }, | ||
1518 | { /* sentinel */ }, | ||
1519 | }; | ||
1520 | MODULE_DEVICE_TABLE(of, stm32_mdma_of_match); | ||
1521 | |||
1522 | static int stm32_mdma_probe(struct platform_device *pdev) | ||
1523 | { | ||
1524 | struct stm32_mdma_chan *chan; | ||
1525 | struct stm32_mdma_device *dmadev; | ||
1526 | struct dma_device *dd; | ||
1527 | struct device_node *of_node; | ||
1528 | struct resource *res; | ||
1529 | u32 nr_channels, nr_requests; | ||
1530 | int i, count, ret; | ||
1531 | |||
1532 | of_node = pdev->dev.of_node; | ||
1533 | if (!of_node) | ||
1534 | return -ENODEV; | ||
1535 | |||
1536 | ret = device_property_read_u32(&pdev->dev, "dma-channels", | ||
1537 | &nr_channels); | ||
1538 | if (ret) { | ||
1539 | nr_channels = STM32_MDMA_MAX_CHANNELS; | ||
1540 | dev_warn(&pdev->dev, "MDMA defaulting to %i channels\n", | ||
1541 | nr_channels); | ||
1542 | } | ||
1543 | |||
1544 | ret = device_property_read_u32(&pdev->dev, "dma-requests", | ||
1545 | &nr_requests); | ||
1546 | if (ret) { | ||
1547 | nr_requests = STM32_MDMA_MAX_REQUESTS; | ||
1548 | dev_warn(&pdev->dev, "MDMA defaulting to %i request lines\n", | ||
1549 | nr_requests); | ||
1550 | } | ||
1551 | |||
1552 | count = device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks", | ||
1553 | NULL, 0); | ||
1554 | if (count < 0) | ||
1555 | count = 0; | ||
1556 | |||
1557 | dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev) + sizeof(u32) * count, | ||
1558 | GFP_KERNEL); | ||
1559 | if (!dmadev) | ||
1560 | return -ENOMEM; | ||
1561 | |||
1562 | dmadev->nr_channels = nr_channels; | ||
1563 | dmadev->nr_requests = nr_requests; | ||
1564 | device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks", | ||
1565 | dmadev->ahb_addr_masks, | ||
1566 | count); | ||
1567 | dmadev->nr_ahb_addr_masks = count; | ||
1568 | |||
1569 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1570 | dmadev->base = devm_ioremap_resource(&pdev->dev, res); | ||
1571 | if (IS_ERR(dmadev->base)) | ||
1572 | return PTR_ERR(dmadev->base); | ||
1573 | |||
1574 | dmadev->clk = devm_clk_get(&pdev->dev, NULL); | ||
1575 | if (IS_ERR(dmadev->clk)) { | ||
1576 | ret = PTR_ERR(dmadev->clk); | ||
1577 | if (ret == -EPROBE_DEFER) | ||
1578 | dev_info(&pdev->dev, "Missing controller clock\n"); | ||
1579 | return ret; | ||
1580 | } | ||
1581 | |||
1582 | dmadev->rst = devm_reset_control_get(&pdev->dev, NULL); | ||
1583 | if (!IS_ERR(dmadev->rst)) { | ||
1584 | reset_control_assert(dmadev->rst); | ||
1585 | udelay(2); | ||
1586 | reset_control_deassert(dmadev->rst); | ||
1587 | } | ||
1588 | |||
1589 | dd = &dmadev->ddev; | ||
1590 | dma_cap_set(DMA_SLAVE, dd->cap_mask); | ||
1591 | dma_cap_set(DMA_PRIVATE, dd->cap_mask); | ||
1592 | dma_cap_set(DMA_CYCLIC, dd->cap_mask); | ||
1593 | dma_cap_set(DMA_MEMCPY, dd->cap_mask); | ||
1594 | dd->device_alloc_chan_resources = stm32_mdma_alloc_chan_resources; | ||
1595 | dd->device_free_chan_resources = stm32_mdma_free_chan_resources; | ||
1596 | dd->device_tx_status = stm32_mdma_tx_status; | ||
1597 | dd->device_issue_pending = stm32_mdma_issue_pending; | ||
1598 | dd->device_prep_slave_sg = stm32_mdma_prep_slave_sg; | ||
1599 | dd->device_prep_dma_cyclic = stm32_mdma_prep_dma_cyclic; | ||
1600 | dd->device_prep_dma_memcpy = stm32_mdma_prep_dma_memcpy; | ||
1601 | dd->device_config = stm32_mdma_slave_config; | ||
1602 | dd->device_pause = stm32_mdma_pause; | ||
1603 | dd->device_resume = stm32_mdma_resume; | ||
1604 | dd->device_terminate_all = stm32_mdma_terminate_all; | ||
1605 | dd->device_synchronize = stm32_mdma_synchronize; | ||
1606 | dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | ||
1607 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | | ||
1608 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | | ||
1609 | BIT(DMA_SLAVE_BUSWIDTH_8_BYTES); | ||
1610 | dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | ||
1611 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | | ||
1612 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | | ||
1613 | BIT(DMA_SLAVE_BUSWIDTH_8_BYTES); | ||
1614 | dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) | | ||
1615 | BIT(DMA_MEM_TO_MEM); | ||
1616 | dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | ||
1617 | dd->max_burst = STM32_MDMA_MAX_BURST; | ||
1618 | dd->dev = &pdev->dev; | ||
1619 | INIT_LIST_HEAD(&dd->channels); | ||
1620 | |||
1621 | for (i = 0; i < dmadev->nr_channels; i++) { | ||
1622 | chan = &dmadev->chan[i]; | ||
1623 | chan->id = i; | ||
1624 | chan->vchan.desc_free = stm32_mdma_desc_free; | ||
1625 | vchan_init(&chan->vchan, dd); | ||
1626 | } | ||
1627 | |||
1628 | dmadev->irq = platform_get_irq(pdev, 0); | ||
1629 | if (dmadev->irq < 0) { | ||
1630 | dev_err(&pdev->dev, "failed to get IRQ\n"); | ||
1631 | return dmadev->irq; | ||
1632 | } | ||
1633 | |||
1634 | ret = devm_request_irq(&pdev->dev, dmadev->irq, stm32_mdma_irq_handler, | ||
1635 | 0, dev_name(&pdev->dev), dmadev); | ||
1636 | if (ret) { | ||
1637 | dev_err(&pdev->dev, "failed to request IRQ\n"); | ||
1638 | return ret; | ||
1639 | } | ||
1640 | |||
1641 | ret = dma_async_device_register(dd); | ||
1642 | if (ret) | ||
1643 | return ret; | ||
1644 | |||
1645 | ret = of_dma_controller_register(of_node, stm32_mdma_of_xlate, dmadev); | ||
1646 | if (ret < 0) { | ||
1647 | dev_err(&pdev->dev, | ||
1648 | "STM32 MDMA DMA OF registration failed %d\n", ret); | ||
1649 | goto err_unregister; | ||
1650 | } | ||
1651 | |||
1652 | platform_set_drvdata(pdev, dmadev); | ||
1653 | |||
1654 | dev_info(&pdev->dev, "STM32 MDMA driver registered\n"); | ||
1655 | |||
1656 | return 0; | ||
1657 | |||
1658 | err_unregister: | ||
1659 | dma_async_device_unregister(dd); | ||
1660 | |||
1661 | return ret; | ||
1662 | } | ||
1663 | |||
1664 | static struct platform_driver stm32_mdma_driver = { | ||
1665 | .probe = stm32_mdma_probe, | ||
1666 | .driver = { | ||
1667 | .name = "stm32-mdma", | ||
1668 | .of_match_table = stm32_mdma_of_match, | ||
1669 | }, | ||
1670 | }; | ||
1671 | |||
1672 | static int __init stm32_mdma_init(void) | ||
1673 | { | ||
1674 | return platform_driver_register(&stm32_mdma_driver); | ||
1675 | } | ||
1676 | |||
1677 | subsys_initcall(stm32_mdma_init); | ||
1678 | |||
1679 | MODULE_DESCRIPTION("Driver for STM32 MDMA controller"); | ||
1680 | MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>"); | ||
1681 | MODULE_AUTHOR("Pierre-Yves Mordret <pierre-yves.mordret@st.com>"); | ||
1682 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index bcd496edc70f..0cd13f17fc11 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c | |||
@@ -42,12 +42,18 @@ | |||
42 | 42 | ||
43 | #define DMA_STAT 0x30 | 43 | #define DMA_STAT 0x30 |
44 | 44 | ||
45 | /* Offset between DMA_IRQ_EN and DMA_IRQ_STAT limits number of channels */ | ||
46 | #define DMA_MAX_CHANNELS (DMA_IRQ_CHAN_NR * 0x10 / 4) | ||
47 | |||
45 | /* | 48 | /* |
46 | * sun8i specific registers | 49 | * sun8i specific registers |
47 | */ | 50 | */ |
48 | #define SUN8I_DMA_GATE 0x20 | 51 | #define SUN8I_DMA_GATE 0x20 |
49 | #define SUN8I_DMA_GATE_ENABLE 0x4 | 52 | #define SUN8I_DMA_GATE_ENABLE 0x4 |
50 | 53 | ||
54 | #define SUNXI_H3_SECURE_REG 0x20 | ||
55 | #define SUNXI_H3_DMA_GATE 0x28 | ||
56 | #define SUNXI_H3_DMA_GATE_ENABLE 0x4 | ||
51 | /* | 57 | /* |
52 | * Channels specific registers | 58 | * Channels specific registers |
53 | */ | 59 | */ |
@@ -62,16 +68,19 @@ | |||
62 | #define DMA_CHAN_LLI_ADDR 0x08 | 68 | #define DMA_CHAN_LLI_ADDR 0x08 |
63 | 69 | ||
64 | #define DMA_CHAN_CUR_CFG 0x0c | 70 | #define DMA_CHAN_CUR_CFG 0x0c |
65 | #define DMA_CHAN_CFG_SRC_DRQ(x) ((x) & 0x1f) | 71 | #define DMA_CHAN_MAX_DRQ 0x1f |
72 | #define DMA_CHAN_CFG_SRC_DRQ(x) ((x) & DMA_CHAN_MAX_DRQ) | ||
66 | #define DMA_CHAN_CFG_SRC_IO_MODE BIT(5) | 73 | #define DMA_CHAN_CFG_SRC_IO_MODE BIT(5) |
67 | #define DMA_CHAN_CFG_SRC_LINEAR_MODE (0 << 5) | 74 | #define DMA_CHAN_CFG_SRC_LINEAR_MODE (0 << 5) |
68 | #define DMA_CHAN_CFG_SRC_BURST(x) (((x) & 0x3) << 7) | 75 | #define DMA_CHAN_CFG_SRC_BURST_A31(x) (((x) & 0x3) << 7) |
76 | #define DMA_CHAN_CFG_SRC_BURST_H3(x) (((x) & 0x3) << 6) | ||
69 | #define DMA_CHAN_CFG_SRC_WIDTH(x) (((x) & 0x3) << 9) | 77 | #define DMA_CHAN_CFG_SRC_WIDTH(x) (((x) & 0x3) << 9) |
70 | 78 | ||
71 | #define DMA_CHAN_CFG_DST_DRQ(x) (DMA_CHAN_CFG_SRC_DRQ(x) << 16) | 79 | #define DMA_CHAN_CFG_DST_DRQ(x) (DMA_CHAN_CFG_SRC_DRQ(x) << 16) |
72 | #define DMA_CHAN_CFG_DST_IO_MODE (DMA_CHAN_CFG_SRC_IO_MODE << 16) | 80 | #define DMA_CHAN_CFG_DST_IO_MODE (DMA_CHAN_CFG_SRC_IO_MODE << 16) |
73 | #define DMA_CHAN_CFG_DST_LINEAR_MODE (DMA_CHAN_CFG_SRC_LINEAR_MODE << 16) | 81 | #define DMA_CHAN_CFG_DST_LINEAR_MODE (DMA_CHAN_CFG_SRC_LINEAR_MODE << 16) |
74 | #define DMA_CHAN_CFG_DST_BURST(x) (DMA_CHAN_CFG_SRC_BURST(x) << 16) | 82 | #define DMA_CHAN_CFG_DST_BURST_A31(x) (DMA_CHAN_CFG_SRC_BURST_A31(x) << 16) |
83 | #define DMA_CHAN_CFG_DST_BURST_H3(x) (DMA_CHAN_CFG_SRC_BURST_H3(x) << 16) | ||
75 | #define DMA_CHAN_CFG_DST_WIDTH(x) (DMA_CHAN_CFG_SRC_WIDTH(x) << 16) | 84 | #define DMA_CHAN_CFG_DST_WIDTH(x) (DMA_CHAN_CFG_SRC_WIDTH(x) << 16) |
76 | 85 | ||
77 | #define DMA_CHAN_CUR_SRC 0x10 | 86 | #define DMA_CHAN_CUR_SRC 0x10 |
@@ -90,6 +99,9 @@ | |||
90 | #define NORMAL_WAIT 8 | 99 | #define NORMAL_WAIT 8 |
91 | #define DRQ_SDRAM 1 | 100 | #define DRQ_SDRAM 1 |
92 | 101 | ||
102 | /* forward declaration */ | ||
103 | struct sun6i_dma_dev; | ||
104 | |||
93 | /* | 105 | /* |
94 | * Hardware channels / ports representation | 106 | * Hardware channels / ports representation |
95 | * | 107 | * |
@@ -111,7 +123,12 @@ struct sun6i_dma_config { | |||
111 | * however these SoCs really have and need this bit, as seen in the | 123 | * however these SoCs really have and need this bit, as seen in the |
112 | * BSP kernel source code. | 124 | * BSP kernel source code. |
113 | */ | 125 | */ |
114 | bool gate_needed; | 126 | void (*clock_autogate_enable)(struct sun6i_dma_dev *); |
127 | void (*set_burst_length)(u32 *p_cfg, s8 src_burst, s8 dst_burst); | ||
128 | u32 src_burst_lengths; | ||
129 | u32 dst_burst_lengths; | ||
130 | u32 src_addr_widths; | ||
131 | u32 dst_addr_widths; | ||
115 | }; | 132 | }; |
116 | 133 | ||
117 | /* | 134 | /* |
@@ -175,6 +192,9 @@ struct sun6i_dma_dev { | |||
175 | struct sun6i_pchan *pchans; | 192 | struct sun6i_pchan *pchans; |
176 | struct sun6i_vchan *vchans; | 193 | struct sun6i_vchan *vchans; |
177 | const struct sun6i_dma_config *cfg; | 194 | const struct sun6i_dma_config *cfg; |
195 | u32 num_pchans; | ||
196 | u32 num_vchans; | ||
197 | u32 max_request; | ||
178 | }; | 198 | }; |
179 | 199 | ||
180 | static struct device *chan2dev(struct dma_chan *chan) | 200 | static struct device *chan2dev(struct dma_chan *chan) |
@@ -251,8 +271,12 @@ static inline s8 convert_burst(u32 maxburst) | |||
251 | switch (maxburst) { | 271 | switch (maxburst) { |
252 | case 1: | 272 | case 1: |
253 | return 0; | 273 | return 0; |
274 | case 4: | ||
275 | return 1; | ||
254 | case 8: | 276 | case 8: |
255 | return 2; | 277 | return 2; |
278 | case 16: | ||
279 | return 3; | ||
256 | default: | 280 | default: |
257 | return -EINVAL; | 281 | return -EINVAL; |
258 | } | 282 | } |
@@ -260,11 +284,29 @@ static inline s8 convert_burst(u32 maxburst) | |||
260 | 284 | ||
261 | static inline s8 convert_buswidth(enum dma_slave_buswidth addr_width) | 285 | static inline s8 convert_buswidth(enum dma_slave_buswidth addr_width) |
262 | { | 286 | { |
263 | if ((addr_width < DMA_SLAVE_BUSWIDTH_1_BYTE) || | 287 | return ilog2(addr_width); |
264 | (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)) | 288 | } |
265 | return -EINVAL; | 289 | |
290 | static void sun6i_enable_clock_autogate_a23(struct sun6i_dma_dev *sdev) | ||
291 | { | ||
292 | writel(SUN8I_DMA_GATE_ENABLE, sdev->base + SUN8I_DMA_GATE); | ||
293 | } | ||
294 | |||
295 | static void sun6i_enable_clock_autogate_h3(struct sun6i_dma_dev *sdev) | ||
296 | { | ||
297 | writel(SUNXI_H3_DMA_GATE_ENABLE, sdev->base + SUNXI_H3_DMA_GATE); | ||
298 | } | ||
266 | 299 | ||
267 | return addr_width >> 1; | 300 | static void sun6i_set_burst_length_a31(u32 *p_cfg, s8 src_burst, s8 dst_burst) |
301 | { | ||
302 | *p_cfg |= DMA_CHAN_CFG_SRC_BURST_A31(src_burst) | | ||
303 | DMA_CHAN_CFG_DST_BURST_A31(dst_burst); | ||
304 | } | ||
305 | |||
306 | static void sun6i_set_burst_length_h3(u32 *p_cfg, s8 src_burst, s8 dst_burst) | ||
307 | { | ||
308 | *p_cfg |= DMA_CHAN_CFG_SRC_BURST_H3(src_burst) | | ||
309 | DMA_CHAN_CFG_DST_BURST_H3(dst_burst); | ||
268 | } | 310 | } |
269 | 311 | ||
270 | static size_t sun6i_get_chan_size(struct sun6i_pchan *pchan) | 312 | static size_t sun6i_get_chan_size(struct sun6i_pchan *pchan) |
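To illustrate the new encodings: convert_burst() now maps 1, 4, 8 and 16 beats to 0-3, and convert_buswidth() reduces to ilog2() of the width in bytes. Below is a hypothetical helper (not part of the patch; macro and function names are those defined in this file) composing the H3-style source configuration bits for a 16-beat burst of 4-byte words.

    static u32 example_src_cfg_h3(void)
    {
            s8 burst = convert_burst(16);                             /* -> 3 */
            s8 width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES);  /* ilog2(4) -> 2 */

            return DMA_CHAN_CFG_SRC_BURST_H3(burst) |
                   DMA_CHAN_CFG_SRC_WIDTH(width);
    }
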
@@ -399,7 +441,6 @@ static int sun6i_dma_start_desc(struct sun6i_vchan *vchan) | |||
399 | static void sun6i_dma_tasklet(unsigned long data) | 441 | static void sun6i_dma_tasklet(unsigned long data) |
400 | { | 442 | { |
401 | struct sun6i_dma_dev *sdev = (struct sun6i_dma_dev *)data; | 443 | struct sun6i_dma_dev *sdev = (struct sun6i_dma_dev *)data; |
402 | const struct sun6i_dma_config *cfg = sdev->cfg; | ||
403 | struct sun6i_vchan *vchan; | 444 | struct sun6i_vchan *vchan; |
404 | struct sun6i_pchan *pchan; | 445 | struct sun6i_pchan *pchan; |
405 | unsigned int pchan_alloc = 0; | 446 | unsigned int pchan_alloc = 0; |
@@ -427,7 +468,7 @@ static void sun6i_dma_tasklet(unsigned long data) | |||
427 | } | 468 | } |
428 | 469 | ||
429 | spin_lock_irq(&sdev->lock); | 470 | spin_lock_irq(&sdev->lock); |
430 | for (pchan_idx = 0; pchan_idx < cfg->nr_max_channels; pchan_idx++) { | 471 | for (pchan_idx = 0; pchan_idx < sdev->num_pchans; pchan_idx++) { |
431 | pchan = &sdev->pchans[pchan_idx]; | 472 | pchan = &sdev->pchans[pchan_idx]; |
432 | 473 | ||
433 | if (pchan->vchan || list_empty(&sdev->pending)) | 474 | if (pchan->vchan || list_empty(&sdev->pending)) |
@@ -448,7 +489,7 @@ static void sun6i_dma_tasklet(unsigned long data) | |||
448 | } | 489 | } |
449 | spin_unlock_irq(&sdev->lock); | 490 | spin_unlock_irq(&sdev->lock); |
450 | 491 | ||
451 | for (pchan_idx = 0; pchan_idx < cfg->nr_max_channels; pchan_idx++) { | 492 | for (pchan_idx = 0; pchan_idx < sdev->num_pchans; pchan_idx++) { |
452 | if (!(pchan_alloc & BIT(pchan_idx))) | 493 | if (!(pchan_alloc & BIT(pchan_idx))) |
453 | continue; | 494 | continue; |
454 | 495 | ||
@@ -470,7 +511,7 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id) | |||
470 | int i, j, ret = IRQ_NONE; | 511 | int i, j, ret = IRQ_NONE; |
471 | u32 status; | 512 | u32 status; |
472 | 513 | ||
473 | for (i = 0; i < sdev->cfg->nr_max_channels / DMA_IRQ_CHAN_NR; i++) { | 514 | for (i = 0; i < sdev->num_pchans / DMA_IRQ_CHAN_NR; i++) { |
474 | status = readl(sdev->base + DMA_IRQ_STAT(i)); | 515 | status = readl(sdev->base + DMA_IRQ_STAT(i)); |
475 | if (!status) | 516 | if (!status) |
476 | continue; | 517 | continue; |
@@ -510,47 +551,49 @@ static int set_config(struct sun6i_dma_dev *sdev, | |||
510 | enum dma_transfer_direction direction, | 551 | enum dma_transfer_direction direction, |
511 | u32 *p_cfg) | 552 | u32 *p_cfg) |
512 | { | 553 | { |
554 | enum dma_slave_buswidth src_addr_width, dst_addr_width; | ||
555 | u32 src_maxburst, dst_maxburst; | ||
513 | s8 src_width, dst_width, src_burst, dst_burst; | 556 | s8 src_width, dst_width, src_burst, dst_burst; |
514 | 557 | ||
558 | src_addr_width = sconfig->src_addr_width; | ||
559 | dst_addr_width = sconfig->dst_addr_width; | ||
560 | src_maxburst = sconfig->src_maxburst; | ||
561 | dst_maxburst = sconfig->dst_maxburst; | ||
562 | |||
515 | switch (direction) { | 563 | switch (direction) { |
516 | case DMA_MEM_TO_DEV: | 564 | case DMA_MEM_TO_DEV: |
517 | src_burst = convert_burst(sconfig->src_maxburst ? | 565 | if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) |
518 | sconfig->src_maxburst : 8); | 566 | src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; |
519 | src_width = convert_buswidth(sconfig->src_addr_width != | 567 | src_maxburst = src_maxburst ? src_maxburst : 8; |
520 | DMA_SLAVE_BUSWIDTH_UNDEFINED ? | ||
521 | sconfig->src_addr_width : | ||
522 | DMA_SLAVE_BUSWIDTH_4_BYTES); | ||
523 | dst_burst = convert_burst(sconfig->dst_maxburst); | ||
524 | dst_width = convert_buswidth(sconfig->dst_addr_width); | ||
525 | break; | 568 | break; |
526 | case DMA_DEV_TO_MEM: | 569 | case DMA_DEV_TO_MEM: |
527 | src_burst = convert_burst(sconfig->src_maxburst); | 570 | if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) |
528 | src_width = convert_buswidth(sconfig->src_addr_width); | 571 | dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; |
529 | dst_burst = convert_burst(sconfig->dst_maxburst ? | 572 | dst_maxburst = dst_maxburst ? dst_maxburst : 8; |
530 | sconfig->dst_maxburst : 8); | ||
531 | dst_width = convert_buswidth(sconfig->dst_addr_width != | ||
532 | DMA_SLAVE_BUSWIDTH_UNDEFINED ? | ||
533 | sconfig->dst_addr_width : | ||
534 | DMA_SLAVE_BUSWIDTH_4_BYTES); | ||
535 | break; | 573 | break; |
536 | default: | 574 | default: |
537 | return -EINVAL; | 575 | return -EINVAL; |
538 | } | 576 | } |
539 | 577 | ||
540 | if (src_burst < 0) | 578 | if (!(BIT(src_addr_width) & sdev->slave.src_addr_widths)) |
541 | return src_burst; | 579 | return -EINVAL; |
542 | if (src_width < 0) | 580 | if (!(BIT(dst_addr_width) & sdev->slave.dst_addr_widths)) |
543 | return src_width; | 581 | return -EINVAL; |
544 | if (dst_burst < 0) | 582 | if (!(BIT(src_maxburst) & sdev->cfg->src_burst_lengths)) |
545 | return dst_burst; | 583 | return -EINVAL; |
546 | if (dst_width < 0) | 584 | if (!(BIT(dst_maxburst) & sdev->cfg->dst_burst_lengths)) |
547 | return dst_width; | 585 | return -EINVAL; |
548 | 586 | ||
549 | *p_cfg = DMA_CHAN_CFG_SRC_BURST(src_burst) | | 587 | src_width = convert_buswidth(src_addr_width); |
550 | DMA_CHAN_CFG_SRC_WIDTH(src_width) | | 588 | dst_width = convert_buswidth(dst_addr_width); |
551 | DMA_CHAN_CFG_DST_BURST(dst_burst) | | 589 | dst_burst = convert_burst(dst_maxburst); |
590 | src_burst = convert_burst(src_maxburst); | ||
591 | |||
592 | *p_cfg = DMA_CHAN_CFG_SRC_WIDTH(src_width) | | ||
552 | DMA_CHAN_CFG_DST_WIDTH(dst_width); | 593 | DMA_CHAN_CFG_DST_WIDTH(dst_width); |
553 | 594 | ||
595 | sdev->cfg->set_burst_length(p_cfg, src_burst, dst_burst); | ||
596 | |||
554 | return 0; | 597 | return 0; |
555 | } | 598 | } |
556 | 599 | ||
@@ -593,11 +636,11 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( | |||
593 | DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | | 636 | DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | |
594 | DMA_CHAN_CFG_DST_LINEAR_MODE | | 637 | DMA_CHAN_CFG_DST_LINEAR_MODE | |
595 | DMA_CHAN_CFG_SRC_LINEAR_MODE | | 638 | DMA_CHAN_CFG_SRC_LINEAR_MODE | |
596 | DMA_CHAN_CFG_SRC_BURST(burst) | | ||
597 | DMA_CHAN_CFG_SRC_WIDTH(width) | | 639 | DMA_CHAN_CFG_SRC_WIDTH(width) | |
598 | DMA_CHAN_CFG_DST_BURST(burst) | | ||
599 | DMA_CHAN_CFG_DST_WIDTH(width); | 640 | DMA_CHAN_CFG_DST_WIDTH(width); |
600 | 641 | ||
642 | sdev->cfg->set_burst_length(&v_lli->cfg, burst, burst); | ||
643 | |||
601 | sun6i_dma_lli_add(NULL, v_lli, p_lli, txd); | 644 | sun6i_dma_lli_add(NULL, v_lli, p_lli, txd); |
602 | 645 | ||
603 | sun6i_dma_dump_lli(vchan, v_lli); | 646 | sun6i_dma_dump_lli(vchan, v_lli); |
@@ -948,7 +991,7 @@ static struct dma_chan *sun6i_dma_of_xlate(struct of_phandle_args *dma_spec, | |||
948 | struct dma_chan *chan; | 991 | struct dma_chan *chan; |
949 | u8 port = dma_spec->args[0]; | 992 | u8 port = dma_spec->args[0]; |
950 | 993 | ||
951 | if (port > sdev->cfg->nr_max_requests) | 994 | if (port > sdev->max_request) |
952 | return NULL; | 995 | return NULL; |
953 | 996 | ||
954 | chan = dma_get_any_slave_channel(&sdev->slave); | 997 | chan = dma_get_any_slave_channel(&sdev->slave); |
@@ -981,7 +1024,7 @@ static inline void sun6i_dma_free(struct sun6i_dma_dev *sdev) | |||
981 | { | 1024 | { |
982 | int i; | 1025 | int i; |
983 | 1026 | ||
984 | for (i = 0; i < sdev->cfg->nr_max_vchans; i++) { | 1027 | for (i = 0; i < sdev->num_vchans; i++) { |
985 | struct sun6i_vchan *vchan = &sdev->vchans[i]; | 1028 | struct sun6i_vchan *vchan = &sdev->vchans[i]; |
986 | 1029 | ||
987 | list_del(&vchan->vc.chan.device_node); | 1030 | list_del(&vchan->vc.chan.device_node); |
@@ -1009,6 +1052,15 @@ static struct sun6i_dma_config sun6i_a31_dma_cfg = { | |||
1009 | .nr_max_channels = 16, | 1052 | .nr_max_channels = 16, |
1010 | .nr_max_requests = 30, | 1053 | .nr_max_requests = 30, |
1011 | .nr_max_vchans = 53, | 1054 | .nr_max_vchans = 53, |
1055 | .set_burst_length = sun6i_set_burst_length_a31, | ||
1056 | .src_burst_lengths = BIT(1) | BIT(8), | ||
1057 | .dst_burst_lengths = BIT(1) | BIT(8), | ||
1058 | .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | ||
1059 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | | ||
1060 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES), | ||
1061 | .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | ||
1062 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | | ||
1063 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES), | ||
1012 | }; | 1064 | }; |
1013 | 1065 | ||
1014 | /* | 1066 | /* |
@@ -1020,24 +1072,76 @@ static struct sun6i_dma_config sun8i_a23_dma_cfg = { | |||
1020 | .nr_max_channels = 8, | 1072 | .nr_max_channels = 8, |
1021 | .nr_max_requests = 24, | 1073 | .nr_max_requests = 24, |
1022 | .nr_max_vchans = 37, | 1074 | .nr_max_vchans = 37, |
1023 | .gate_needed = true, | 1075 | .clock_autogate_enable = sun6i_enable_clock_autogate_a23, |
1076 | .set_burst_length = sun6i_set_burst_length_a31, | ||
1077 | .src_burst_lengths = BIT(1) | BIT(8), | ||
1078 | .dst_burst_lengths = BIT(1) | BIT(8), | ||
1079 | .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | ||
1080 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | | ||
1081 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES), | ||
1082 | .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | ||
1083 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | | ||
1084 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES), | ||
1024 | }; | 1085 | }; |
1025 | 1086 | ||
1026 | static struct sun6i_dma_config sun8i_a83t_dma_cfg = { | 1087 | static struct sun6i_dma_config sun8i_a83t_dma_cfg = { |
1027 | .nr_max_channels = 8, | 1088 | .nr_max_channels = 8, |
1028 | .nr_max_requests = 28, | 1089 | .nr_max_requests = 28, |
1029 | .nr_max_vchans = 39, | 1090 | .nr_max_vchans = 39, |
1091 | .clock_autogate_enable = sun6i_enable_clock_autogate_a23, | ||
1092 | .set_burst_length = sun6i_set_burst_length_a31, | ||
1093 | .src_burst_lengths = BIT(1) | BIT(8), | ||
1094 | .dst_burst_lengths = BIT(1) | BIT(8), | ||
1095 | .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | ||
1096 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | | ||
1097 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES), | ||
1098 | .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | ||
1099 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | | ||
1100 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES), | ||
1030 | }; | 1101 | }; |
1031 | 1102 | ||
1032 | /* | 1103 | /* |
1033 | * The H3 has 12 physical channels, a maximum DRQ port id of 27, | 1104 | * The H3 has 12 physical channels, a maximum DRQ port id of 27, |
1034 | * and a total of 34 usable source and destination endpoints. | 1105 | * and a total of 34 usable source and destination endpoints. |
1106 | * It also supports additional burst lengths and bus widths, | ||
1107 | * and the burst length fields have different offsets. | ||
1035 | */ | 1108 | */ |
1036 | 1109 | ||
1037 | static struct sun6i_dma_config sun8i_h3_dma_cfg = { | 1110 | static struct sun6i_dma_config sun8i_h3_dma_cfg = { |
1038 | .nr_max_channels = 12, | 1111 | .nr_max_channels = 12, |
1039 | .nr_max_requests = 27, | 1112 | .nr_max_requests = 27, |
1040 | .nr_max_vchans = 34, | 1113 | .nr_max_vchans = 34, |
1114 | .clock_autogate_enable = sun6i_enable_clock_autogate_h3, | ||
1115 | .set_burst_length = sun6i_set_burst_length_h3, | ||
1116 | .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), | ||
1117 | .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), | ||
1118 | .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | ||
1119 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | | ||
1120 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | | ||
1121 | BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), | ||
1122 | .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | ||
1123 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | | ||
1124 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | | ||
1125 | BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), | ||
1126 | }; | ||
1127 | |||
1128 | /* | ||
1129 | * The A64 binding uses the number of dma channels from the | ||
1130 | * device tree node. | ||
1131 | */ | ||
1132 | static struct sun6i_dma_config sun50i_a64_dma_cfg = { | ||
1133 | .clock_autogate_enable = sun6i_enable_clock_autogate_h3, | ||
1134 | .set_burst_length = sun6i_set_burst_length_h3, | ||
1135 | .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), | ||
1136 | .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16), | ||
1137 | .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | ||
1138 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | | ||
1139 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | | ||
1140 | BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), | ||
1141 | .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | ||
1142 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | | ||
1143 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | | ||
1144 | BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), | ||
1041 | }; | 1145 | }; |
1042 | 1146 | ||
1043 | /* | 1147 | /* |
@@ -1049,7 +1153,16 @@ static struct sun6i_dma_config sun8i_v3s_dma_cfg = { | |||
1049 | .nr_max_channels = 8, | 1153 | .nr_max_channels = 8, |
1050 | .nr_max_requests = 23, | 1154 | .nr_max_requests = 23, |
1051 | .nr_max_vchans = 24, | 1155 | .nr_max_vchans = 24, |
1052 | .gate_needed = true, | 1156 | .clock_autogate_enable = sun6i_enable_clock_autogate_a23, |
1157 | .set_burst_length = sun6i_set_burst_length_a31, | ||
1158 | .src_burst_lengths = BIT(1) | BIT(8), | ||
1159 | .dst_burst_lengths = BIT(1) | BIT(8), | ||
1160 | .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | ||
1161 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | | ||
1162 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES), | ||
1163 | .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | ||
1164 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | | ||
1165 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES), | ||
1053 | }; | 1166 | }; |
1054 | 1167 | ||
1055 | static const struct of_device_id sun6i_dma_match[] = { | 1168 | static const struct of_device_id sun6i_dma_match[] = { |
@@ -1058,13 +1171,14 @@ static const struct of_device_id sun6i_dma_match[] = { | |||
1058 | { .compatible = "allwinner,sun8i-a83t-dma", .data = &sun8i_a83t_dma_cfg }, | 1171 | { .compatible = "allwinner,sun8i-a83t-dma", .data = &sun8i_a83t_dma_cfg }, |
1059 | { .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg }, | 1172 | { .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg }, |
1060 | { .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg }, | 1173 | { .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg }, |
1174 | { .compatible = "allwinner,sun50i-a64-dma", .data = &sun50i_a64_dma_cfg }, | ||
1061 | { /* sentinel */ } | 1175 | { /* sentinel */ } |
1062 | }; | 1176 | }; |
1063 | MODULE_DEVICE_TABLE(of, sun6i_dma_match); | 1177 | MODULE_DEVICE_TABLE(of, sun6i_dma_match); |
1064 | 1178 | ||
1065 | static int sun6i_dma_probe(struct platform_device *pdev) | 1179 | static int sun6i_dma_probe(struct platform_device *pdev) |
1066 | { | 1180 | { |
1067 | const struct of_device_id *device; | 1181 | struct device_node *np = pdev->dev.of_node; |
1068 | struct sun6i_dma_dev *sdc; | 1182 | struct sun6i_dma_dev *sdc; |
1069 | struct resource *res; | 1183 | struct resource *res; |
1070 | int ret, i; | 1184 | int ret, i; |
@@ -1073,10 +1187,9 @@ static int sun6i_dma_probe(struct platform_device *pdev) | |||
1073 | if (!sdc) | 1187 | if (!sdc) |
1074 | return -ENOMEM; | 1188 | return -ENOMEM; |
1075 | 1189 | ||
1076 | device = of_match_device(sun6i_dma_match, &pdev->dev); | 1190 | sdc->cfg = of_device_get_match_data(&pdev->dev); |
1077 | if (!device) | 1191 | if (!sdc->cfg) |
1078 | return -ENODEV; | 1192 | return -ENODEV; |
1079 | sdc->cfg = device->data; | ||
1080 | 1193 | ||
1081 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1194 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1082 | sdc->base = devm_ioremap_resource(&pdev->dev, res); | 1195 | sdc->base = devm_ioremap_resource(&pdev->dev, res); |
@@ -1129,37 +1242,57 @@ static int sun6i_dma_probe(struct platform_device *pdev) | |||
1129 | sdc->slave.device_pause = sun6i_dma_pause; | 1242 | sdc->slave.device_pause = sun6i_dma_pause; |
1130 | sdc->slave.device_resume = sun6i_dma_resume; | 1243 | sdc->slave.device_resume = sun6i_dma_resume; |
1131 | sdc->slave.device_terminate_all = sun6i_dma_terminate_all; | 1244 | sdc->slave.device_terminate_all = sun6i_dma_terminate_all; |
1132 | sdc->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | 1245 | sdc->slave.src_addr_widths = sdc->cfg->src_addr_widths; |
1133 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | | 1246 | sdc->slave.dst_addr_widths = sdc->cfg->dst_addr_widths; |
1134 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); | ||
1135 | sdc->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | ||
1136 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | | ||
1137 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); | ||
1138 | sdc->slave.directions = BIT(DMA_DEV_TO_MEM) | | 1247 | sdc->slave.directions = BIT(DMA_DEV_TO_MEM) | |
1139 | BIT(DMA_MEM_TO_DEV); | 1248 | BIT(DMA_MEM_TO_DEV); |
1140 | sdc->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | 1249 | sdc->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; |
1141 | sdc->slave.dev = &pdev->dev; | 1250 | sdc->slave.dev = &pdev->dev; |
1142 | 1251 | ||
1143 | sdc->pchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_channels, | 1252 | sdc->num_pchans = sdc->cfg->nr_max_channels; |
1253 | sdc->num_vchans = sdc->cfg->nr_max_vchans; | ||
1254 | sdc->max_request = sdc->cfg->nr_max_requests; | ||
1255 | |||
1256 | ret = of_property_read_u32(np, "dma-channels", &sdc->num_pchans); | ||
1257 | if (ret && !sdc->num_pchans) { | ||
1258 | dev_err(&pdev->dev, "Can't get dma-channels.\n"); | ||
1259 | return ret; | ||
1260 | } | ||
1261 | |||
1262 | ret = of_property_read_u32(np, "dma-requests", &sdc->max_request); | ||
1263 | if (ret && !sdc->max_request) { | ||
1264 | dev_info(&pdev->dev, "Missing dma-requests, using %u.\n", | ||
1265 | DMA_CHAN_MAX_DRQ); | ||
1266 | sdc->max_request = DMA_CHAN_MAX_DRQ; | ||
1267 | } | ||
1268 | |||
1269 | /* | ||
1270 | * If the number of vchans is not specified, derive it from the | ||
1271 | * highest port number, at most one channel per port and direction. | ||
1272 | */ | ||
1273 | if (!sdc->num_vchans) | ||
1274 | sdc->num_vchans = 2 * (sdc->max_request + 1); | ||
1275 | |||
1276 | sdc->pchans = devm_kcalloc(&pdev->dev, sdc->num_pchans, | ||
1144 | sizeof(struct sun6i_pchan), GFP_KERNEL); | 1277 | sizeof(struct sun6i_pchan), GFP_KERNEL); |
1145 | if (!sdc->pchans) | 1278 | if (!sdc->pchans) |
1146 | return -ENOMEM; | 1279 | return -ENOMEM; |
1147 | 1280 | ||
1148 | sdc->vchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_vchans, | 1281 | sdc->vchans = devm_kcalloc(&pdev->dev, sdc->num_vchans, |
1149 | sizeof(struct sun6i_vchan), GFP_KERNEL); | 1282 | sizeof(struct sun6i_vchan), GFP_KERNEL); |
1150 | if (!sdc->vchans) | 1283 | if (!sdc->vchans) |
1151 | return -ENOMEM; | 1284 | return -ENOMEM; |
1152 | 1285 | ||
1153 | tasklet_init(&sdc->task, sun6i_dma_tasklet, (unsigned long)sdc); | 1286 | tasklet_init(&sdc->task, sun6i_dma_tasklet, (unsigned long)sdc); |
1154 | 1287 | ||
1155 | for (i = 0; i < sdc->cfg->nr_max_channels; i++) { | 1288 | for (i = 0; i < sdc->num_pchans; i++) { |
1156 | struct sun6i_pchan *pchan = &sdc->pchans[i]; | 1289 | struct sun6i_pchan *pchan = &sdc->pchans[i]; |
1157 | 1290 | ||
1158 | pchan->idx = i; | 1291 | pchan->idx = i; |
1159 | pchan->base = sdc->base + 0x100 + i * 0x40; | 1292 | pchan->base = sdc->base + 0x100 + i * 0x40; |
1160 | } | 1293 | } |
1161 | 1294 | ||
1162 | for (i = 0; i < sdc->cfg->nr_max_vchans; i++) { | 1295 | for (i = 0; i < sdc->num_vchans; i++) { |
1163 | struct sun6i_vchan *vchan = &sdc->vchans[i]; | 1296 | struct sun6i_vchan *vchan = &sdc->vchans[i]; |
1164 | 1297 | ||
1165 | INIT_LIST_HEAD(&vchan->node); | 1298 | INIT_LIST_HEAD(&vchan->node); |
@@ -1199,8 +1332,8 @@ static int sun6i_dma_probe(struct platform_device *pdev) | |||
1199 | goto err_dma_unregister; | 1332 | goto err_dma_unregister; |
1200 | } | 1333 | } |
1201 | 1334 | ||
1202 | if (sdc->cfg->gate_needed) | 1335 | if (sdc->cfg->clock_autogate_enable) |
1203 | writel(SUN8I_DMA_GATE_ENABLE, sdc->base + SUN8I_DMA_GATE); | 1336 | sdc->cfg->clock_autogate_enable(sdc); |
1204 | 1337 | ||
1205 | return 0; | 1338 | return 0; |
1206 | 1339 | ||
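
The probe hunk above pulls the channel/request counts, the bus-width masks and the clock-autogating hook from the per-compatible match data. A rough sketch of what such a config entry could look like follows; only the field names are visible in the hunk, while the struct name "sun6i_dma_config", the helper name and the numeric values are assumptions for illustration.

	/*
	 * Sketch only: struct name, helper name and numbers are illustrative;
	 * the field names match those referenced in the probe hunk above.
	 */
	static void sun6i_enable_clock_autogate_h3(struct sun6i_dma_dev *sdc)
	{
		writel(SUN8I_DMA_GATE_ENABLE, sdc->base + SUN8I_DMA_GATE);
	}

	static struct sun6i_dma_config sun8i_h3_dma_cfg = {
		.nr_max_channels = 12,			/* illustrative */
		.nr_max_requests = 27,			/* illustrative */
		.nr_max_vchans   = 34,			/* illustrative */
		.clock_autogate_enable = sun6i_enable_clock_autogate_h3,
		.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				   BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
				   BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
		.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				   BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
				   BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
	};

With such an entry, the "dma-channels" and "dma-requests" devicetree properties can still override the counts, and a config that leaves nr_max_vchans at zero makes the probe derive 2 * (max_request + 1) virtual channels, one per port and direction, as the hunk's comment describes.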
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c index f1d04b70ee67..7df910e7c348 100644 --- a/drivers/dma/ti-dma-crossbar.c +++ b/drivers/dma/ti-dma-crossbar.c | |||
@@ -49,12 +49,12 @@ struct ti_am335x_xbar_data { | |||
49 | 49 | ||
50 | struct ti_am335x_xbar_map { | 50 | struct ti_am335x_xbar_map { |
51 | u16 dma_line; | 51 | u16 dma_line; |
52 | u16 mux_val; | 52 | u8 mux_val; |
53 | }; | 53 | }; |
54 | 54 | ||
55 | static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u16 val) | 55 | static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val) |
56 | { | 56 | { |
57 | writeb_relaxed(val & 0x1f, iomem + event); | 57 | writeb_relaxed(val, iomem + event); |
58 | } | 58 | } |
59 | 59 | ||
60 | static void ti_am335x_xbar_free(struct device *dev, void *route_data) | 60 | static void ti_am335x_xbar_free(struct device *dev, void *route_data) |
@@ -105,7 +105,7 @@ static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec, | |||
105 | } | 105 | } |
106 | 106 | ||
107 | map->dma_line = (u16)dma_spec->args[0]; | 107 | map->dma_line = (u16)dma_spec->args[0]; |
108 | map->mux_val = (u16)dma_spec->args[2]; | 108 | map->mux_val = (u8)dma_spec->args[2]; |
109 | 109 | ||
110 | dma_spec->args[2] = 0; | 110 | dma_spec->args[2] = 0; |
111 | dma_spec->args_count = 2; | 111 | dma_spec->args_count = 2; |
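
The crossbar hunk narrows mux_val to u8 and drops the 0x1f mask, so the full 8-bit mux value now reaches the register. A small illustration of what the old mask would have done, using a made-up mux value; this is not driver code.

	/* Illustration only; 0x23 is a made-up mux/event number. */
	static unsigned char xbar_val_before(unsigned char mux_val)
	{
		return mux_val & 0x1f;	/* 0x23 -> 0x03: values above 31 were truncated */
	}

	static unsigned char xbar_val_after(unsigned char mux_val)
	{
		return mux_val;		/* 0x23 -> 0x23: the full 8-bit value is kept */
	}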
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 8722bcba489d..5eef13380ca8 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c | |||
@@ -366,6 +366,20 @@ struct xilinx_dma_chan { | |||
366 | u16 tdest; | 366 | u16 tdest; |
367 | }; | 367 | }; |
368 | 368 | ||
369 | /** | ||
370 | * enum xdma_ip_type: DMA IP type. | ||
371 | * | ||
372 | * XDMA_TYPE_AXIDMA: Axi dma ip. | ||
373 | * XDMA_TYPE_CDMA: Axi cdma ip. | ||
374 | * XDMA_TYPE_VDMA: Axi vdma ip. | ||
375 | * | ||
376 | */ | ||
377 | enum xdma_ip_type { | ||
378 | XDMA_TYPE_AXIDMA = 0, | ||
379 | XDMA_TYPE_CDMA, | ||
380 | XDMA_TYPE_VDMA, | ||
381 | }; | ||
382 | |||
369 | struct xilinx_dma_config { | 383 | struct xilinx_dma_config { |
370 | enum xdma_ip_type dmatype; | 384 | enum xdma_ip_type dmatype; |
371 | int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk, | 385 | int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk, |
diff --git a/include/linux/dma/xilinx_dma.h b/include/linux/dma/xilinx_dma.h index 3ae300052553..34b98f276ed0 100644 --- a/include/linux/dma/xilinx_dma.h +++ b/include/linux/dma/xilinx_dma.h | |||
@@ -41,20 +41,6 @@ struct xilinx_vdma_config { | |||
41 | int ext_fsync; | 41 | int ext_fsync; |
42 | }; | 42 | }; |
43 | 43 | ||
44 | /** | ||
45 | * enum xdma_ip_type: DMA IP type. | ||
46 | * | ||
47 | * XDMA_TYPE_AXIDMA: Axi dma ip. | ||
48 | * XDMA_TYPE_CDMA: Axi cdma ip. | ||
49 | * XDMA_TYPE_VDMA: Axi vdma ip. | ||
50 | * | ||
51 | */ | ||
52 | enum xdma_ip_type { | ||
53 | XDMA_TYPE_AXIDMA = 0, | ||
54 | XDMA_TYPE_CDMA, | ||
55 | XDMA_TYPE_VDMA, | ||
56 | }; | ||
57 | |||
58 | int xilinx_vdma_channel_set_config(struct dma_chan *dchan, | 44 | int xilinx_vdma_channel_set_config(struct dma_chan *dchan, |
59 | struct xilinx_vdma_config *cfg); | 45 | struct xilinx_vdma_config *cfg); |
60 | 46 | ||
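
The enum xdma_ip_type definition moves from the client-facing header into the driver source, keeping a purely driver-internal type out of include/linux/dma/xilinx_dma.h. A hedged sketch of that internal use is below; only struct xilinx_dma_config and the enum come from the diff, the table and helper names are illustrative.

	/* Sketch of driver-internal use of the now-private enum. */
	static const struct xilinx_dma_config example_axidma_config = {
		.dmatype = XDMA_TYPE_AXIDMA,
		/* .clk_init = ..., */
	};

	static bool example_is_vdma(const struct xilinx_dma_config *cfg)
	{
		return cfg->dmatype == XDMA_TYPE_VDMA;
	}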
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 8319101170fc..f838764993eb 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -329,7 +329,7 @@ enum dma_slave_buswidth { | |||
329 | * @src_addr_width: this is the width in bytes of the source (RX) | 329 | * @src_addr_width: this is the width in bytes of the source (RX) |
330 | * register where DMA data shall be read. If the source | 330 | * register where DMA data shall be read. If the source |
331 | * is memory this may be ignored depending on architecture. | 331 | * is memory this may be ignored depending on architecture. |
332 | * Legal values: 1, 2, 4, 8. | 332 | * Legal values: 1, 2, 3, 4, 8, 16, 32, 64. |
333 | * @dst_addr_width: same as src_addr_width but for destination | 333 | * @dst_addr_width: same as src_addr_width but for destination |
334 | * target (TX) mutatis mutandis. | 334 | * target (TX) mutatis mutandis. |
335 | * @src_maxburst: the maximum number of words (note: words, as in | 335 | * @src_maxburst: the maximum number of words (note: words, as in |
@@ -404,14 +404,16 @@ enum dma_residue_granularity { | |||
404 | DMA_RESIDUE_GRANULARITY_BURST = 2, | 404 | DMA_RESIDUE_GRANULARITY_BURST = 2, |
405 | }; | 405 | }; |
406 | 406 | ||
407 | /* struct dma_slave_caps - expose capabilities of a slave channel only | 407 | /** |
408 | * | 408 | * struct dma_slave_caps - expose capabilities of a slave channel only |
409 | * @src_addr_widths: bit mask of src addr widths the channel supports | 409 | * @src_addr_widths: bit mask of src addr widths the channel supports. |
410 | * @dst_addr_widths: bit mask of dstn addr widths the channel supports | 410 | * Width is specified in bytes, e.g. for a channel supporting |
411 | * @directions: bit mask of slave direction the channel supported | 411 | * a width of 4 the mask should have BIT(4) set. |
412 | * since the enum dma_transfer_direction is not defined as bits for each | 412 | * @dst_addr_widths: bit mask of dst addr widths the channel supports |
413 | * type of direction, the dma controller should fill (1 << <TYPE>) and same | 413 | * @directions: bit mask of slave directions the channel supports. |
414 | * should be checked by controller as well | 414 | * Since the enum dma_transfer_direction is not defined as bit flag for |
415 | * each type, the dma controller should set BIT(<TYPE>) and same | ||
416 | * should be checked by controller as well | ||
415 | * @max_burst: max burst capability per-transfer | 417 | * @max_burst: max burst capability per-transfer |
416 | * @cmd_pause: true, if pause and thereby resume is supported | 418 | * @cmd_pause: true, if pause and thereby resume is supported |
417 | * @cmd_terminate: true, if terminate cmd is supported | 419 | * @cmd_terminate: true, if terminate cmd is supported |
@@ -678,11 +680,13 @@ struct dma_filter { | |||
678 | * @dev_id: unique device ID | 680 | * @dev_id: unique device ID |
679 | * @dev: struct device reference for dma mapping api | 681 | * @dev: struct device reference for dma mapping api |
680 | * @src_addr_widths: bit mask of src addr widths the device supports | 682 | * @src_addr_widths: bit mask of src addr widths the device supports |
683 | * Width is specified in bytes, e.g. for a device supporting | ||
684 | * a width of 4 the mask should have BIT(4) set. | ||
681 | * @dst_addr_widths: bit mask of dst addr widths the device supports | 685 | * @dst_addr_widths: bit mask of dst addr widths the device supports |
682 | * @directions: bit mask of slave direction the device supports since | 686 | * @directions: bit mask of slave directions the device supports. |
683 | * the enum dma_transfer_direction is not defined as bits for | 687 | * Since the enum dma_transfer_direction is not defined as bit flag for |
684 | * each type of direction, the dma controller should fill (1 << | 688 | * each type, the dma controller should set BIT(<TYPE>) and same |
685 | * <TYPE>) and same should be checked by controller as well | 689 | * should be checked by controller as well |
686 | * @max_burst: max burst capability per-transfer | 690 | * @max_burst: max burst capability per-transfer |
687 | * @residue_granularity: granularity of the transfer residue reported | 691 | * @residue_granularity: granularity of the transfer residue reported |
688 | * by tx_status | 692 | * by tx_status |
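
The reworked kernel-doc spells out the same mask convention for both struct dma_slave_caps and struct dma_device: widths are encoded as BIT(<width in bytes>) and directions as BIT(<enum dma_transfer_direction value>). A minimal sketch of a driver filling these fields under that convention; "my_dev_fill_caps" is a placeholder name, the field and enum names are the ones documented above.

	/* Hedged example of the documented mask convention. */
	static void my_dev_fill_caps(struct dma_device *dma)
	{
		/* Supported register widths, one bit per width in bytes. */
		dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				       BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
				       BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
		dma->dst_addr_widths = dma->src_addr_widths;

		/* Directions are likewise a bit mask over enum dma_transfer_direction. */
		dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	}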