-rw-r--r--  Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt |   15
-rw-r--r--  Documentation/devicetree/bindings/dma/ti-edma.txt         |  117
-rw-r--r--  arch/arm/Kconfig                                           |    1
-rw-r--r--  arch/arm/boot/dts/am335x-evm.dts                           |    9
-rw-r--r--  arch/arm/boot/dts/am335x-pepper.dts                        |   11
-rw-r--r--  arch/arm/boot/dts/am33xx.dtsi                              |   96
-rw-r--r--  arch/arm/boot/dts/am4372.dtsi                              |   82
-rw-r--r--  arch/arm/boot/dts/am437x-gp-evm.dts                        |    9
-rw-r--r--  arch/arm/common/Kconfig                                    |    3
-rw-r--r--  arch/arm/common/Makefile                                   |    1
-rw-r--r--  arch/arm/common/edma.c                                     | 1876
-rw-r--r--  arch/arm/mach-davinci/devices-da8xx.c                      |  122
-rw-r--r--  arch/arm/mach-davinci/dm355.c                              |   40
-rw-r--r--  arch/arm/mach-davinci/dm365.c                              |   25
-rw-r--r--  arch/arm/mach-davinci/dm644x.c                             |   40
-rw-r--r--  arch/arm/mach-davinci/dm646x.c                             |   44
-rw-r--r--  arch/arm/mach-omap2/Kconfig                                |    1
-rw-r--r--  drivers/dma/Kconfig                                        |    2
-rw-r--r--  drivers/dma/edma.c                                         | 1839
-rw-r--r--  drivers/dma/ti-dma-crossbar.c                              |  251
-rw-r--r--  include/linux/platform_data/edma.h                         |  104
21 files changed, 2197 insertions(+), 2491 deletions(-)
diff --git a/Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt b/Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt
index 63a48928f3a8..b152a75dceae 100644
--- a/Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt
+++ b/Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt
@@ -2,9 +2,10 @@ Texas Instruments DMA Crossbar (DMA request router)
 
 Required properties:
 - compatible:	"ti,dra7-dma-crossbar" for DRA7xx DMA crossbar
+		"ti,am335x-edma-crossbar" for AM335x and AM437x
 - reg:		Memory map for accessing module
-- #dma-cells:	Should be set to <1>.
-		Clients should use the crossbar request number (input)
+- #dma-cells:	Should be set to match the DMA controller's #dma-cells
+		for ti,dra7-dma-crossbar and <3> for ti,am335x-edma-crossbar.
 - dma-requests:	Number of DMA requests the crossbar can receive
 - dma-masters:	phandle pointing to the DMA controller
 
@@ -14,6 +15,15 @@ The DMA controller node needs to have the following properties:
 Optional properties:
 - ti,dma-safe-map: Safe routing value for unused request lines
 
+Notes:
+When requesting a channel via ti,dra7-dma-crossbar, the DMA client must request
+the DMA event number as the crossbar ID (input to the DMA crossbar).
+
+For ti,am335x-edma-crossbar the meaning of the dmas parameters for clients is:
+dmas = <&edma_xbar 12 0 1>; where <12> is the DMA request number, <0> is the TC
+the event should be assigned to and <1> is the mux selection in the crossbar.
+When mux 0 is used, the DMA channel can be requested directly from the edma node.
+
 Example:
 
 /* DMA controller */
@@ -47,6 +57,7 @@ uart1: serial@4806a000 {
 	ti,hwmods = "uart1";
 	clock-frequency = <48000000>;
 	status = "disabled";
+	/* Requesting crossbar input 49 and 50 */
 	dmas = <&sdma_xbar 49>, <&sdma_xbar 50>;
 	dma-names = "tx", "rx";
 };
diff --git a/Documentation/devicetree/bindings/dma/ti-edma.txt b/Documentation/devicetree/bindings/dma/ti-edma.txt
index 5ba525a10035..d3d0a4fb1c73 100644
--- a/Documentation/devicetree/bindings/dma/ti-edma.txt
+++ b/Documentation/devicetree/bindings/dma/ti-edma.txt
@@ -1,4 +1,119 @@
-TI EDMA
+Texas Instruments eDMA
+
+The eDMA3 consists of two components: Channel controller (CC) and Transfer
+Controller(s) (TC). The CC is the main entry point for DMA users, since it is
+responsible for the DMA channel handling, while the TCs are responsible for
+executing the actual DMA transfers.
+
+------------------------------------------------------------------------------
+eDMA3 Channel Controller
+
+Required properties:
+- compatible: "ti,edma3-tpcc" for the channel controller(s)
+- #dma-cells: Should be set to <2>. The first number is the DMA request
+  number and the second is the TC the channel is serviced on.
+- reg: Memory map of eDMA CC
+- reg-names: "edma3_cc"
+- interrupts: Interrupt lines for CCINT, MPERR and CCERRINT.
+- interrupt-names: "edma3_ccint", "emda3_mperr" and "edma3_ccerrint"
+- ti,tptcs: List of TPTCs associated with the eDMA in the following form:
+  <&tptc_phandle TC_priority_number>. The highest priority is 0.
+
+Optional properties:
+- ti,hwmods: Name of the hwmods associated to the eDMA CC
+- ti,edma-memcpy-channels: List of channels allocated to be used for memcpy,
+  i.e. these channels will be SW triggered channels. The list must contain
+  16-bit numbers, see example.
+- ti,edma-reserved-slot-ranges: PaRAM slot ranges which should not be used by
+  the driver; they are allocated to be used by, for example, the DSP.
+  See example.
+
+------------------------------------------------------------------------------
+eDMA3 Transfer Controller
+
+Required properties:
+- compatible: "ti,edma3-tptc" for the transfer controller(s)
+- reg: Memory map of eDMA TC
+- interrupts: Interrupt number for TCerrint.
+
+Optional properties:
+- ti,hwmods: Name of the hwmods associated to the given eDMA TC
+- interrupt-names: "edma3_tcerrint"
+
+------------------------------------------------------------------------------
+Example:
+
+edma: edma@49000000 {
+	compatible = "ti,edma3-tpcc";
+	ti,hwmods = "tpcc";
+	reg = <0x49000000 0x10000>;
+	reg-names = "edma3_cc";
+	interrupts = <12 13 14>;
+	interrupt-names = "edma3_ccint", "emda3_mperr", "edma3_ccerrint";
+	dma-requests = <64>;
+	#dma-cells = <2>;
+
+	ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 7>, <&edma_tptc2 0>;
+
+	/* Channels 20 and 21 are allocated for memcpy */
+	ti,edma-memcpy-channels = /bits/ 16 <20 21>;
+	/* The following PaRAM slots are reserved: 35-44 and 100-109 */
+	ti,edma-reserved-slot-ranges = /bits/ 16 <35 10>,
+				       /bits/ 16 <100 10>;
+};
+
+edma_tptc0: tptc@49800000 {
+	compatible = "ti,edma3-tptc";
+	ti,hwmods = "tptc0";
+	reg = <0x49800000 0x100000>;
+	interrupts = <112>;
+	interrupt-names = "edma3_tcerrint";
+};
+
+edma_tptc1: tptc@49900000 {
+	compatible = "ti,edma3-tptc";
+	ti,hwmods = "tptc1";
+	reg = <0x49900000 0x100000>;
+	interrupts = <113>;
+	interrupt-names = "edma3_tcerrint";
+};
+
+edma_tptc2: tptc@49a00000 {
+	compatible = "ti,edma3-tptc";
+	ti,hwmods = "tptc2";
+	reg = <0x49a00000 0x100000>;
+	interrupts = <114>;
+	interrupt-names = "edma3_tcerrint";
+};
+
+sham: sham@53100000 {
+	compatible = "ti,omap4-sham";
+	ti,hwmods = "sham";
+	reg = <0x53100000 0x200>;
+	interrupts = <109>;
+	/* DMA channel 36 executed on eDMA TC0 - low priority queue */
+	dmas = <&edma 36 0>;
+	dma-names = "rx";
+};
+
+mcasp0: mcasp@48038000 {
+	compatible = "ti,am33xx-mcasp-audio";
+	ti,hwmods = "mcasp0";
+	reg = <0x48038000 0x2000>,
+	      <0x46000000 0x400000>;
+	reg-names = "mpu", "dat";
+	interrupts = <80>, <81>;
+	interrupt-names = "tx", "rx";
+	status = "disabled";
+	/* DMA channels 8 and 9 executed on eDMA TC2 - high priority queue */
+	dmas = <&edma 8 2>,
+	       <&edma 9 2>;
+	dma-names = "tx", "rx";
+};
+
+------------------------------------------------------------------------------
+DEPRECATED binding, new DTS files must use the ti,edma3-tpcc/ti,edma3-tptc
+binding.
 
 Required properties:
 - compatible : "ti,edma3"
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 72ad724c67ae..513e38701418 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -736,7 +736,6 @@ config ARCH_DAVINCI
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_IRQ_CHIP
 	select HAVE_IDE
-	select TI_PRIV_EDMA
 	select USE_OF
 	select ZONE_DMA
 	help
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index 1942a5c8132d..507980672c32 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -743,8 +743,8 @@
 &mmc3 {
 	/* these are on the crossbar and are outlined in the
 	   xbar-event-map element */
-	dmas = <&edma 12
-		&edma 13>;
+	dmas = <&edma_xbar 12 0 1
+		&edma_xbar 13 0 2>;
 	dma-names = "tx", "rx";
 	status = "okay";
 	vmmc-supply = <&wlan_en_reg>;
@@ -766,11 +766,6 @@
 	};
 };
 
-&edma {
-	ti,edma-xbar-event-map = /bits/ 16 <1 12
-					    2 13>;
-};
-
 &sham {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/am335x-pepper.dts b/arch/arm/boot/dts/am335x-pepper.dts
index 7106114c7464..39073b921664 100644
--- a/arch/arm/boot/dts/am335x-pepper.dts
+++ b/arch/arm/boot/dts/am335x-pepper.dts
@@ -339,13 +339,6 @@
 	ti,non-removable;
 };
 
-&edma {
-	/* Map eDMA MMC2 Events from Crossbar */
-	ti,edma-xbar-event-map = /bits/ 16 <1 12
-					    2 13>;
-};
-
-
 &mmc3 {
 	/* Wifi & Bluetooth on MMC #3 */
 	status = "okay";
@@ -354,8 +347,8 @@
 	vmmmc-supply = <&v3v3c_reg>;
 	bus-width = <4>;
 	ti,non-removable;
-	dmas = <&edma 12
-		&edma 13>;
+	dmas = <&edma_xbar 12 0 1
+		&edma_xbar 13 0 2>;
 	dma-names = "tx", "rx";
 };
 
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index d23e2524d694..6053e75c6e99 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -174,12 +174,54 @@
 		};
 
 		edma: edma@49000000 {
-			compatible = "ti,edma3";
-			ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2";
-			reg = <0x49000000 0x10000>,
-			      <0x44e10f90 0x40>;
+			compatible = "ti,edma3-tpcc";
+			ti,hwmods = "tpcc";
+			reg = <0x49000000 0x10000>;
+			reg-names = "edma3_cc";
 			interrupts = <12 13 14>;
-			#dma-cells = <1>;
+			interrupt-names = "edma3_ccint", "emda3_mperr",
+					  "edma3_ccerrint";
+			dma-requests = <64>;
+			#dma-cells = <2>;
+
+			ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 5>,
+				   <&edma_tptc2 0>;
+
+			ti,edma-memcpy-channels = /bits/ 16 <20 21>;
+		};
+
+		edma_tptc0: tptc@49800000 {
+			compatible = "ti,edma3-tptc";
+			ti,hwmods = "tptc0";
+			reg = <0x49800000 0x100000>;
+			interrupts = <112>;
+			interrupt-names = "edma3_tcerrint";
+		};
+
+		edma_tptc1: tptc@49900000 {
+			compatible = "ti,edma3-tptc";
+			ti,hwmods = "tptc1";
+			reg = <0x49900000 0x100000>;
+			interrupts = <113>;
+			interrupt-names = "edma3_tcerrint";
+		};
+
+		edma_tptc2: tptc@49a00000 {
+			compatible = "ti,edma3-tptc";
+			ti,hwmods = "tptc2";
+			reg = <0x49a00000 0x100000>;
+			interrupts = <114>;
+			interrupt-names = "edma3_tcerrint";
+		};
+
+		edma_xbar: dma-router@44e10f90 {
+			compatible = "ti,am335x-edma-crossbar";
+			reg = <0x44e10f90 0x40>;
+
+			#dma-cells = <3>;
+			dma-requests = <32>;
+
+			dma-masters = <&edma>;
 		};
 
 		gpio0: gpio@44e07000 {
@@ -233,7 +275,7 @@
 			reg = <0x44e09000 0x2000>;
 			interrupts = <72>;
 			status = "disabled";
-			dmas = <&edma 26>, <&edma 27>;
+			dmas = <&edma 26 0>, <&edma 27 0>;
 			dma-names = "tx", "rx";
 		};
 
@@ -244,7 +286,7 @@
 			reg = <0x48022000 0x2000>;
 			interrupts = <73>;
 			status = "disabled";
-			dmas = <&edma 28>, <&edma 29>;
+			dmas = <&edma 28 0>, <&edma 29 0>;
 			dma-names = "tx", "rx";
 		};
 
@@ -255,7 +297,7 @@
 			reg = <0x48024000 0x2000>;
 			interrupts = <74>;
 			status = "disabled";
-			dmas = <&edma 30>, <&edma 31>;
+			dmas = <&edma 30 0>, <&edma 31 0>;
 			dma-names = "tx", "rx";
 		};
 
@@ -322,8 +364,8 @@
 			ti,dual-volt;
 			ti,needs-special-reset;
 			ti,needs-special-hs-handling;
-			dmas = <&edma 24
-				&edma 25>;
+			dmas = <&edma_xbar 24 0 0
+				&edma_xbar 25 0 0>;
 			dma-names = "tx", "rx";
 			interrupts = <64>;
 			interrupt-parent = <&intc>;
@@ -335,8 +377,8 @@
 			compatible = "ti,omap4-hsmmc";
 			ti,hwmods = "mmc2";
 			ti,needs-special-reset;
-			dmas = <&edma 2
-				&edma 3>;
+			dmas = <&edma 2 0
+				&edma 3 0>;
 			dma-names = "tx", "rx";
 			interrupts = <28>;
 			interrupt-parent = <&intc>;
@@ -474,10 +516,10 @@
 			interrupts = <65>;
 			ti,spi-num-cs = <2>;
 			ti,hwmods = "spi0";
-			dmas = <&edma 16
-				&edma 17
-				&edma 18
-				&edma 19>;
+			dmas = <&edma 16 0
+				&edma 17 0
+				&edma 18 0
+				&edma 19 0>;
 			dma-names = "tx0", "rx0", "tx1", "rx1";
 			status = "disabled";
 		};
@@ -490,10 +532,10 @@
 			interrupts = <125>;
 			ti,spi-num-cs = <2>;
 			ti,hwmods = "spi1";
-			dmas = <&edma 42
-				&edma 43
-				&edma 44
-				&edma 45>;
+			dmas = <&edma 42 0
+				&edma 43 0
+				&edma 44 0
+				&edma 45 0>;
 			dma-names = "tx0", "rx0", "tx1", "rx1";
 			status = "disabled";
 		};
@@ -831,7 +873,7 @@
 			ti,hwmods = "sham";
 			reg = <0x53100000 0x200>;
 			interrupts = <109>;
-			dmas = <&edma 36>;
+			dmas = <&edma 36 0>;
 			dma-names = "rx";
 		};
 
@@ -840,8 +882,8 @@
 			ti,hwmods = "aes";
 			reg = <0x53500000 0xa0>;
 			interrupts = <103>;
-			dmas = <&edma 6>,
-			       <&edma 5>;
+			dmas = <&edma 6 0>,
+			       <&edma 5 0>;
 			dma-names = "tx", "rx";
 		};
 
@@ -854,8 +896,8 @@
 			interrupts = <80>, <81>;
 			interrupt-names = "tx", "rx";
 			status = "disabled";
-			dmas = <&edma 8>,
-			       <&edma 9>;
+			dmas = <&edma 8 2>,
+			       <&edma 9 2>;
 			dma-names = "tx", "rx";
 		};
 
@@ -868,8 +910,8 @@
 			interrupts = <82>, <83>;
 			interrupt-names = "tx", "rx";
 			status = "disabled";
-			dmas = <&edma 10>,
-			       <&edma 11>;
+			dmas = <&edma 10 2>,
+			       <&edma 11 2>;
 			dma-names = "tx", "rx";
 		};
 
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index 0447c04a40cc..461548ed69fd 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -183,14 +183,56 @@
 		};
 
 		edma: edma@49000000 {
-			compatible = "ti,edma3";
-			ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2";
-			reg = <0x49000000 0x10000>,
-			      <0x44e10f90 0x10>;
+			compatible = "ti,edma3-tpcc";
+			ti,hwmods = "tpcc";
+			reg = <0x49000000 0x10000>;
+			reg-names = "edma3_cc";
 			interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
-			#dma-cells = <1>;
+			interrupt-names = "edma3_ccint", "emda3_mperr",
+					  "edma3_ccerrint";
+			dma-requests = <64>;
+			#dma-cells = <2>;
+
+			ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 5>,
+				   <&edma_tptc2 0>;
+
+			ti,edma-memcpy-channels = /bits/ 16 <32 33>;
+		};
+
+		edma_tptc0: tptc@49800000 {
+			compatible = "ti,edma3-tptc";
+			ti,hwmods = "tptc0";
+			reg = <0x49800000 0x100000>;
+			interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "edma3_tcerrint";
+		};
+
+		edma_tptc1: tptc@49900000 {
+			compatible = "ti,edma3-tptc";
+			ti,hwmods = "tptc1";
+			reg = <0x49900000 0x100000>;
+			interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "edma3_tcerrint";
+		};
+
+		edma_tptc2: tptc@49a00000 {
+			compatible = "ti,edma3-tptc";
+			ti,hwmods = "tptc2";
+			reg = <0x49a00000 0x100000>;
+			interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "edma3_tcerrint";
+		};
+
+		edma_xbar: dma-router@44e10f90 {
+			compatible = "ti,am335x-edma-crossbar";
+			reg = <0x44e10f90 0x40>;
+
+			#dma-cells = <3>;
+			dma-requests = <64>;
+
+			dma-masters = <&edma>;
 		};
 
 		uart0: serial@44e09000 {
@@ -495,8 +537,8 @@
 			ti,hwmods = "mmc1";
 			ti,dual-volt;
 			ti,needs-special-reset;
-			dmas = <&edma 24
-				&edma 25>;
+			dmas = <&edma 24 0>,
+			       <&edma 25 0>;
 			dma-names = "tx", "rx";
 			interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
 			status = "disabled";
@@ -507,8 +549,8 @@
 			reg = <0x481d8000 0x1000>;
 			ti,hwmods = "mmc2";
 			ti,needs-special-reset;
-			dmas = <&edma 2
-				&edma 3>;
+			dmas = <&edma 2 0>,
+			       <&edma 3 0>;
 			dma-names = "tx", "rx";
 			interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
 			status = "disabled";
@@ -775,7 +817,7 @@
 			compatible = "ti,omap5-sham";
 			ti,hwmods = "sham";
 			reg = <0x53100000 0x300>;
-			dmas = <&edma 36>;
+			dmas = <&edma 36 0>;
 			dma-names = "rx";
 			interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
 		};
@@ -785,8 +827,8 @@
 			ti,hwmods = "aes";
 			reg = <0x53501000 0xa0>;
 			interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
-			dmas = <&edma 6
-				&edma 5>;
+			dmas = <&edma 6 0>,
+			       <&edma 5 0>;
 			dma-names = "tx", "rx";
 		};
 
@@ -795,8 +837,8 @@
 			ti,hwmods = "des";
 			reg = <0x53701000 0xa0>;
 			interrupts = <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>;
-			dmas = <&edma 34
-				&edma 33>;
+			dmas = <&edma 34 0>,
+			       <&edma 33 0>;
 			dma-names = "tx", "rx";
 		};
 
@@ -809,8 +851,8 @@
 			interrupts = <80>, <81>;
 			interrupt-names = "tx", "rx";
 			status = "disabled";
-			dmas = <&edma 8>,
-			       <&edma 9>;
+			dmas = <&edma 8 2>,
+			       <&edma 9 2>;
 			dma-names = "tx", "rx";
 		};
 
@@ -823,8 +865,8 @@
 			interrupts = <82>, <83>;
 			interrupt-names = "tx", "rx";
 			status = "disabled";
-			dmas = <&edma 10>,
-			       <&edma 11>;
+			dmas = <&edma 10 2>,
+			       <&edma 11 2>;
 			dma-names = "tx", "rx";
 		};
 
diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts
index 22038f21f228..28e3b252c08c 100644
--- a/arch/arm/boot/dts/am437x-gp-evm.dts
+++ b/arch/arm/boot/dts/am437x-gp-evm.dts
@@ -711,8 +711,8 @@
 	status = "okay";
 	/* these are on the crossbar and are outlined in the
 	   xbar-event-map element */
-	dmas = <&edma 30
-		&edma 31>;
+	dmas = <&edma_xbar 30 0 1>,
+	       <&edma_xbar 31 0 2>;
 	dma-names = "tx", "rx";
 	vmmc-supply = <&vmmcwl_fixed>;
 	bus-width = <4>;
@@ -733,11 +733,6 @@
 	};
 };
 
-&edma {
-	ti,edma-xbar-event-map = /bits/ 16 <1 30
-					    2 31>;
-};
-
 &uart3 {
 	status = "okay";
 	pinctrl-names = "default";
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index c3a4e9ceba34..9353184d730d 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -17,6 +17,3 @@ config SHARP_PARAM
 
 config SHARP_SCOOP
 	bool
-
-config TI_PRIV_EDMA
-	bool
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 6ee5959a813b..27f23b15b1ea 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -15,6 +15,5 @@ obj-$(CONFIG_MCPM)		+= mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
 CFLAGS_REMOVE_mcpm_entry.o	= -pg
 AFLAGS_mcpm_head.o		:= -march=armv7-a
 AFLAGS_vlock.o			:= -march=armv7-a
-obj-$(CONFIG_TI_PRIV_EDMA)	+= edma.o
 obj-$(CONFIG_BL_SWITCHER)	+= bL_switcher.o
 obj-$(CONFIG_BL_SWITCHER_DUMMY_IF) += bL_switcher_dummy_if.o
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
deleted file mode 100644
index 873dbfcc7dc9..000000000000
--- a/arch/arm/common/edma.c
+++ /dev/null
@@ -1,1876 +0,0 @@
-/*
- * EDMA3 support for DaVinci
- *
- * Copyright (C) 2006-2009 Texas Instruments.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-#include <linux/err.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/edma.h>
-#include <linux/dma-mapping.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_dma.h>
-#include <linux/of_irq.h>
-#include <linux/pm_runtime.h>
-
-#include <linux/platform_data/edma.h>
-
-/* Offsets matching "struct edmacc_param" */
-#define PARM_OPT		0x00
-#define PARM_SRC		0x04
-#define PARM_A_B_CNT		0x08
-#define PARM_DST		0x0c
-#define PARM_SRC_DST_BIDX	0x10
-#define PARM_LINK_BCNTRLD	0x14
-#define PARM_SRC_DST_CIDX	0x18
-#define PARM_CCNT		0x1c
-
-#define PARM_SIZE		0x20
-
-/* Offsets for EDMA CC global channel registers and their shadows */
-#define SH_ER		0x00	/* 64 bits */
-#define SH_ECR		0x08	/* 64 bits */
-#define SH_ESR		0x10	/* 64 bits */
-#define SH_CER		0x18	/* 64 bits */
-#define SH_EER		0x20	/* 64 bits */
-#define SH_EECR		0x28	/* 64 bits */
-#define SH_EESR		0x30	/* 64 bits */
-#define SH_SER		0x38	/* 64 bits */
-#define SH_SECR		0x40	/* 64 bits */
-#define SH_IER		0x50	/* 64 bits */
-#define SH_IECR		0x58	/* 64 bits */
-#define SH_IESR		0x60	/* 64 bits */
-#define SH_IPR		0x68	/* 64 bits */
-#define SH_ICR		0x70	/* 64 bits */
-#define SH_IEVAL	0x78
-#define SH_QER		0x80
-#define SH_QEER		0x84
-#define SH_QEECR	0x88
-#define SH_QEESR	0x8c
-#define SH_QSER		0x90
-#define SH_QSECR	0x94
-#define SH_SIZE		0x200
-
-/* Offsets for EDMA CC global registers */
-#define EDMA_REV	0x0000
-#define EDMA_CCCFG	0x0004
-#define EDMA_QCHMAP	0x0200	/* 8 registers */
-#define EDMA_DMAQNUM	0x0240	/* 8 registers (4 on OMAP-L1xx) */
-#define EDMA_QDMAQNUM	0x0260
-#define EDMA_QUETCMAP	0x0280
-#define EDMA_QUEPRI	0x0284
-#define EDMA_EMR	0x0300	/* 64 bits */
-#define EDMA_EMCR	0x0308	/* 64 bits */
-#define EDMA_QEMR	0x0310
-#define EDMA_QEMCR	0x0314
-#define EDMA_CCERR	0x0318
-#define EDMA_CCERRCLR	0x031c
-#define EDMA_EEVAL	0x0320
-#define EDMA_DRAE	0x0340	/* 4 x 64 bits*/
-#define EDMA_QRAE	0x0380	/* 4 registers */
-#define EDMA_QUEEVTENTRY	0x0400	/* 2 x 16 registers */
-#define EDMA_QSTAT	0x0600	/* 2 registers */
-#define EDMA_QWMTHRA	0x0620
-#define EDMA_QWMTHRB	0x0624
-#define EDMA_CCSTAT	0x0640
-
-#define EDMA_M		0x1000	/* global channel registers */
-#define EDMA_ECR	0x1008
-#define EDMA_ECRH	0x100C
-#define EDMA_SHADOW0	0x2000	/* 4 regions shadowing global channels */
-#define EDMA_PARM	0x4000	/* 128 param entries */
-
-#define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))
-
-#define EDMA_DCHMAP	0x0100	/* 64 registers */
-
-/* CCCFG register */
-#define GET_NUM_DMACH(x)	(x & 0x7)		/* bits 0-2 */
-#define GET_NUM_PAENTRY(x)	((x & 0x7000) >> 12)	/* bits 12-14 */
-#define GET_NUM_EVQUE(x)	((x & 0x70000) >> 16)	/* bits 16-18 */
-#define GET_NUM_REGN(x)		((x & 0x300000) >> 20)	/* bits 20-21 */
-#define CHMAP_EXIST		BIT(24)
-
-#define EDMA_MAX_DMACH		64
-#define EDMA_MAX_PARAMENTRY	512
-
-/*****************************************************************************/
-
-static void __iomem *edmacc_regs_base[EDMA_MAX_CC];
-
-static inline unsigned int edma_read(unsigned ctlr, int offset)
-{
-	return (unsigned int)__raw_readl(edmacc_regs_base[ctlr] + offset);
-}
-
-static inline void edma_write(unsigned ctlr, int offset, int val)
-{
-	__raw_writel(val, edmacc_regs_base[ctlr] + offset);
-}
-static inline void edma_modify(unsigned ctlr, int offset, unsigned and,
-		unsigned or)
-{
-	unsigned val = edma_read(ctlr, offset);
-	val &= and;
-	val |= or;
-	edma_write(ctlr, offset, val);
-}
-static inline void edma_and(unsigned ctlr, int offset, unsigned and)
-{
-	unsigned val = edma_read(ctlr, offset);
-	val &= and;
-	edma_write(ctlr, offset, val);
-}
-static inline void edma_or(unsigned ctlr, int offset, unsigned or)
-{
-	unsigned val = edma_read(ctlr, offset);
-	val |= or;
-	edma_write(ctlr, offset, val);
-}
-static inline unsigned int edma_read_array(unsigned ctlr, int offset, int i)
-{
-	return edma_read(ctlr, offset + (i << 2));
-}
-static inline void edma_write_array(unsigned ctlr, int offset, int i,
-		unsigned val)
-{
-	edma_write(ctlr, offset + (i << 2), val);
-}
-static inline void edma_modify_array(unsigned ctlr, int offset, int i,
-		unsigned and, unsigned or)
-{
-	edma_modify(ctlr, offset + (i << 2), and, or);
-}
-static inline void edma_or_array(unsigned ctlr, int offset, int i, unsigned or)
-{
-	edma_or(ctlr, offset + (i << 2), or);
-}
-static inline void edma_or_array2(unsigned ctlr, int offset, int i, int j,
-		unsigned or)
-{
-	edma_or(ctlr, offset + ((i*2 + j) << 2), or);
-}
-static inline void edma_write_array2(unsigned ctlr, int offset, int i, int j,
-		unsigned val)
-{
-	edma_write(ctlr, offset + ((i*2 + j) << 2), val);
-}
-static inline unsigned int edma_shadow0_read(unsigned ctlr, int offset)
-{
-	return edma_read(ctlr, EDMA_SHADOW0 + offset);
-}
-static inline unsigned int edma_shadow0_read_array(unsigned ctlr, int offset,
-		int i)
-{
-	return edma_read(ctlr, EDMA_SHADOW0 + offset + (i << 2));
-}
-static inline void edma_shadow0_write(unsigned ctlr, int offset, unsigned val)
-{
-	edma_write(ctlr, EDMA_SHADOW0 + offset, val);
-}
-static inline void edma_shadow0_write_array(unsigned ctlr, int offset, int i,
-		unsigned val)
-{
-	edma_write(ctlr, EDMA_SHADOW0 + offset + (i << 2), val);
-}
-static inline unsigned int edma_parm_read(unsigned ctlr, int offset,
-		int param_no)
-{
-	return edma_read(ctlr, EDMA_PARM + offset + (param_no << 5));
-}
-static inline void edma_parm_write(unsigned ctlr, int offset, int param_no,
-		unsigned val)
-{
-	edma_write(ctlr, EDMA_PARM + offset + (param_no << 5), val);
-}
-static inline void edma_parm_modify(unsigned ctlr, int offset, int param_no,
-		unsigned and, unsigned or)
-{
-	edma_modify(ctlr, EDMA_PARM + offset + (param_no << 5), and, or);
-}
-static inline void edma_parm_and(unsigned ctlr, int offset, int param_no,
-		unsigned and)
-{
-	edma_and(ctlr, EDMA_PARM + offset + (param_no << 5), and);
-}
-static inline void edma_parm_or(unsigned ctlr, int offset, int param_no,
-		unsigned or)
-{
-	edma_or(ctlr, EDMA_PARM + offset + (param_no << 5), or);
-}
-
-static inline void set_bits(int offset, int len, unsigned long *p)
-{
-	for (; len > 0; len--)
-		set_bit(offset + (len - 1), p);
-}
-
-static inline void clear_bits(int offset, int len, unsigned long *p)
-{
-	for (; len > 0; len--)
-		clear_bit(offset + (len - 1), p);
-}
-
-/*****************************************************************************/
-
-/* actual number of DMA channels and slots on this silicon */
-struct edma {
-	/* how many dma resources of each type */
-	unsigned	num_channels;
-	unsigned	num_region;
-	unsigned	num_slots;
-	unsigned	num_tc;
-	enum dma_event_q	default_queue;
-
-	/* list of channels with no event trigger; terminated by "-1" */
-	const s8	*noevent;
-
-	struct edma_soc_info *info;
-
-	/* The edma_inuse bit for each PaRAM slot is clear unless the
-	 * channel is in use ... by ARM or DSP, for QDMA, or whatever.
-	 */
-	DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY);
-
-	/* The edma_unused bit for each channel is clear unless
-	 * it is not being used on this platform. It uses a bit
-	 * of SOC-specific initialization code.
-	 */
-	DECLARE_BITMAP(edma_unused, EDMA_MAX_DMACH);
-
-	unsigned	irq_res_start;
-	unsigned	irq_res_end;
-
-	struct dma_interrupt_data {
-		void (*callback)(unsigned channel, unsigned short ch_status,
-				 void *data);
-		void *data;
-	} intr_data[EDMA_MAX_DMACH];
-};
-
-static struct edma *edma_cc[EDMA_MAX_CC];
-static int arch_num_cc;
-
-/* dummy param set used to (re)initialize parameter RAM slots */
-static const struct edmacc_param dummy_paramset = {
-	.link_bcntrld = 0xffff,
-	.ccnt = 1,
-};
-
-static const struct of_device_id edma_of_ids[] = {
-	{ .compatible = "ti,edma3", },
-	{}
-};
-
-/*****************************************************************************/
-
-static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
-		enum dma_event_q queue_no)
-{
-	int bit = (ch_no & 0x7) * 4;
-
-	/* default to low priority queue */
-	if (queue_no == EVENTQ_DEFAULT)
-		queue_no = edma_cc[ctlr]->default_queue;
-
-	queue_no &= 7;
-	edma_modify_array(ctlr, EDMA_DMAQNUM, (ch_no >> 3),
-			~(0x7 << bit), queue_no << bit);
-}
-
-static void assign_priority_to_queue(unsigned ctlr, int queue_no,
-		int priority)
-{
-	int bit = queue_no * 4;
-	edma_modify(ctlr, EDMA_QUEPRI, ~(0x7 << bit),
-			((priority & 0x7) << bit));
-}
-
-/**
- * map_dmach_param - Maps channel number to param entry number
- *
- * This maps the dma channel number to param entry number. In
- * other words using the DMA channel mapping registers a param entry
- * can be mapped to any channel
- *
- * Callers are responsible for ensuring the channel mapping logic is
- * included in that particular EDMA variant (e.g. dm646x)
- *
- */
-static void map_dmach_param(unsigned ctlr)
-{
-	int i;
-	for (i = 0; i < EDMA_MAX_DMACH; i++)
-		edma_write_array(ctlr, EDMA_DCHMAP, i, (i << 5));
-}
-
-static inline void
-setup_dma_interrupt(unsigned lch,
-	void (*callback)(unsigned channel, u16 ch_status, void *data),
-	void *data)
-{
-	unsigned ctlr;
-
-	ctlr = EDMA_CTLR(lch);
-	lch = EDMA_CHAN_SLOT(lch);
-
-	if (!callback)
-		edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5,
-				BIT(lch & 0x1f));
-
-	edma_cc[ctlr]->intr_data[lch].callback = callback;
-	edma_cc[ctlr]->intr_data[lch].data = data;
-
-	if (callback) {
-		edma_shadow0_write_array(ctlr, SH_ICR, lch >> 5,
-				BIT(lch & 0x1f));
-		edma_shadow0_write_array(ctlr, SH_IESR, lch >> 5,
-				BIT(lch & 0x1f));
-	}
-}
-
-static int irq2ctlr(int irq)
-{
-	if (irq >= edma_cc[0]->irq_res_start && irq <= edma_cc[0]->irq_res_end)
-		return 0;
-	else if (irq >= edma_cc[1]->irq_res_start &&
-		 irq <= edma_cc[1]->irq_res_end)
-		return 1;
-
-	return -1;
-}
-
-/******************************************************************************
- *
- * DMA interrupt handler
- *
- *****************************************************************************/
-static irqreturn_t dma_irq_handler(int irq, void *data)
-{
-	int ctlr;
-	u32 sh_ier;
-	u32 sh_ipr;
-	u32 bank;
-
-	ctlr = irq2ctlr(irq);
-	if (ctlr < 0)
-		return IRQ_NONE;
-
-	dev_dbg(data, "dma_irq_handler\n");
-
-	sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 0);
-	if (!sh_ipr) {
-		sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 1);
-		if (!sh_ipr)
-			return IRQ_NONE;
-		sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 1);
-		bank = 1;
-	} else {
-		sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 0);
-		bank = 0;
-	}
-
-	do {
-		u32 slot;
-		u32 channel;
-
-		dev_dbg(data, "IPR%d %08x\n", bank, sh_ipr);
-
-		slot = __ffs(sh_ipr);
-		sh_ipr &= ~(BIT(slot));
-
-		if (sh_ier & BIT(slot)) {
-			channel = (bank << 5) | slot;
-			/* Clear the corresponding IPR bits */
-			edma_shadow0_write_array(ctlr, SH_ICR, bank,
-					BIT(slot));
-			if (edma_cc[ctlr]->intr_data[channel].callback)
-				edma_cc[ctlr]->intr_data[channel].callback(
-					channel, EDMA_DMA_COMPLETE,
-					edma_cc[ctlr]->intr_data[channel].data);
-		}
-	} while (sh_ipr);
-
-	edma_shadow0_write(ctlr, SH_IEVAL, 1);
-	return IRQ_HANDLED;
-}
-
-/******************************************************************************
- *
- * DMA error interrupt handler
- *
- *****************************************************************************/
-static irqreturn_t dma_ccerr_handler(int irq, void *data)
-{
-	int i;
-	int ctlr;
-	unsigned int cnt = 0;
-
-	ctlr = irq2ctlr(irq);
-	if (ctlr < 0)
-		return IRQ_NONE;
-
-	dev_dbg(data, "dma_ccerr_handler\n");
-
-	if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
-	    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
-	    (edma_read(ctlr, EDMA_QEMR) == 0) &&
-	    (edma_read(ctlr, EDMA_CCERR) == 0))
-		return IRQ_NONE;
-
-	while (1) {
-		int j = -1;
-		if (edma_read_array(ctlr, EDMA_EMR, 0))
-			j = 0;
-		else if (edma_read_array(ctlr, EDMA_EMR, 1))
-			j = 1;
-		if (j >= 0) {
-			dev_dbg(data, "EMR%d %08x\n", j,
-				edma_read_array(ctlr, EDMA_EMR, j));
-			for (i = 0; i < 32; i++) {
-				int k = (j << 5) + i;
-				if (edma_read_array(ctlr, EDMA_EMR, j) &
-							BIT(i)) {
-					/* Clear the corresponding EMR bits */
-					edma_write_array(ctlr, EDMA_EMCR, j,
-							BIT(i));
-					/* Clear any SER */
-					edma_shadow0_write_array(ctlr, SH_SECR,
-							j, BIT(i));
-					if (edma_cc[ctlr]->intr_data[k].callback) {
-						edma_cc[ctlr]->intr_data[k].callback(
-							k, EDMA_DMA_CC_ERROR,
-							edma_cc[ctlr]->intr_data[k].data);
-					}
-				}
-			}
-		} else if (edma_read(ctlr, EDMA_QEMR)) {
-			dev_dbg(data, "QEMR %02x\n",
-				edma_read(ctlr, EDMA_QEMR));
-			for (i = 0; i < 8; i++) {
-				if (edma_read(ctlr, EDMA_QEMR) & BIT(i)) {
-					/* Clear the corresponding IPR bits */
-					edma_write(ctlr, EDMA_QEMCR, BIT(i));
-					edma_shadow0_write(ctlr, SH_QSECR,
-							BIT(i));
-
-					/* NOTE:  not reported!! */
-				}
-			}
-		} else if (edma_read(ctlr, EDMA_CCERR)) {
-			dev_dbg(data, "CCERR %08x\n",
-				edma_read(ctlr, EDMA_CCERR));
-			/* FIXME:  CCERR.BIT(16) ignored!  much better
-			 * to just write CCERRCLR with CCERR value...
-			 */
-			for (i = 0; i < 8; i++) {
-				if (edma_read(ctlr, EDMA_CCERR) & BIT(i)) {
-					/* Clear the corresponding IPR bits */
-					edma_write(ctlr, EDMA_CCERRCLR, BIT(i));
-
-					/* NOTE:  not reported!! */
-				}
-			}
-		}
-		if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
-		    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
-		    (edma_read(ctlr, EDMA_QEMR) == 0) &&
-		    (edma_read(ctlr, EDMA_CCERR) == 0))
-			break;
-		cnt++;
-		if (cnt > 10)
-			break;
-	}
-	edma_write(ctlr, EDMA_EEVAL, 1);
-	return IRQ_HANDLED;
-}
-
-static int reserve_contiguous_slots(int ctlr, unsigned int id,
-				    unsigned int num_slots,
-				    unsigned int start_slot)
-{
-	int i, j;
-	unsigned int count = num_slots;
-	int stop_slot = start_slot;
-	DECLARE_BITMAP(tmp_inuse, EDMA_MAX_PARAMENTRY);
-
-	for (i = start_slot; i < edma_cc[ctlr]->num_slots; ++i) {
-		j = EDMA_CHAN_SLOT(i);
-		if (!test_and_set_bit(j, edma_cc[ctlr]->edma_inuse)) {
-			/* Record our current beginning slot */
-			if (count == num_slots)
-				stop_slot = i;
-
-			count--;
-			set_bit(j, tmp_inuse);
-
-			if (count == 0)
-				break;
-		} else {
-			clear_bit(j, tmp_inuse);
-
-			if (id == EDMA_CONT_PARAMS_FIXED_EXACT) {
-				stop_slot = i;
-				break;
-			} else {
-				count = num_slots;
-			}
-		}
-	}
-
-	/*
-	 * We have to clear any bits that we set
-	 * if we run out of parameter RAM slots, i.e. we do find a set
-	 * of contiguous parameter RAM slots but do not find the exact number
-	 * requested as we may reach the total number of parameter RAM slots
-	 */
-	if (i == edma_cc[ctlr]->num_slots)
-		stop_slot = i;
-
-	j = start_slot;
-	for_each_set_bit_from(j, tmp_inuse, stop_slot)
-		clear_bit(j, edma_cc[ctlr]->edma_inuse);
-
-	if (count)
-		return -EBUSY;
-
-	for (j = i - num_slots + 1; j <= i; ++j)
-		memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(j),
-			&dummy_paramset, PARM_SIZE);
-
-	return EDMA_CTLR_CHAN(ctlr, i - num_slots + 1);
-}
-
-static int prepare_unused_channel_list(struct device *dev, void *data)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	int i, count, ctlr;
-	struct of_phandle_args  dma_spec;
-
-	if (dev->of_node) {
-		count = of_property_count_strings(dev->of_node, "dma-names");
-		if (count < 0)
-			return 0;
-		for (i = 0; i < count; i++) {
-			if (of_parse_phandle_with_args(dev->of_node, "dmas",
-						       "#dma-cells", i,
-						       &dma_spec))
-				continue;
-
-			if (!of_match_node(edma_of_ids, dma_spec.np)) {
-				of_node_put(dma_spec.np);
-				continue;
-			}
-
-			clear_bit(EDMA_CHAN_SLOT(dma_spec.args[0]),
-				  edma_cc[0]->edma_unused);
-			of_node_put(dma_spec.np);
-		}
-		return 0;
-	}
-
-	/* For non-OF case */
-	for (i = 0; i < pdev->num_resources; i++) {
-		if ((pdev->resource[i].flags & IORESOURCE_DMA) &&
-				(int)pdev->resource[i].start >= 0) {
-			ctlr = EDMA_CTLR(pdev->resource[i].start);
-			clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
-				  edma_cc[ctlr]->edma_unused);
-		}
-	}
-
-	return 0;
-}
-
-/*-----------------------------------------------------------------------*/
-
-static bool unused_chan_list_done;
-
-/* Resource alloc/free:  dma channels, parameter RAM slots */
-
-/**
- * edma_alloc_channel - allocate DMA channel and paired parameter RAM
- * @channel: specific channel to allocate; negative for "any unmapped channel"
- * @callback: optional; to be issued on DMA completion or errors
- * @data: passed to callback
- * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
- *	Controller (TC) executes requests using this channel.  Use
- *	EVENTQ_DEFAULT unless you really need a high priority queue.
- *
- * This allocates a DMA channel and its associated parameter RAM slot.
- * The parameter RAM is initialized to hold a dummy transfer.
- *
- * Normal use is to pass a specific channel number as @channel, to make
- * use of hardware events mapped to that channel.  When the channel will
- * be used only for software triggering or event chaining, channels not
- * mapped to hardware events (or mapped to unused events) are preferable.
- *
- * DMA transfers start from a channel using edma_start(), or by
- * chaining.  When the transfer described in that channel's parameter RAM
- * slot completes, that slot's data may be reloaded through a link.
- *
- * DMA errors are only reported to the @callback associated with the
- * channel driving that transfer, but transfer completion callbacks can
- * be sent to another channel under control of the TCC field in
- * the option word of the transfer's parameter RAM set.  Drivers must not
- * use DMA transfer completion callbacks for channels they did not allocate.
- * (The same applies to TCC codes used in transfer chaining.)
- *
- * Returns the number of the channel, else negative errno.
- */
-int edma_alloc_channel(int channel,
-		void (*callback)(unsigned channel, u16 ch_status, void *data),
-		void *data,
-		enum dma_event_q eventq_no)
-{
-	unsigned i, done = 0, ctlr = 0;
-	int ret = 0;
-
-	if (!unused_chan_list_done) {
-		/*
-		 * Scan all the platform devices to find out the EDMA channels
-		 * used and clear them in the unused list, making the rest
-		 * available for ARM usage.
-		 */
-		ret = bus_for_each_dev(&platform_bus_type, NULL, NULL,
-				prepare_unused_channel_list);
-		if (ret < 0)
-			return ret;
-
-		unused_chan_list_done = true;
-	}
-
-	if (channel >= 0) {
-		ctlr = EDMA_CTLR(channel);
-		channel = EDMA_CHAN_SLOT(channel);
-	}
-
-	if (channel < 0) {
-		for (i = 0; i < arch_num_cc; i++) {
-			channel = 0;
-			for (;;) {
-				channel = find_next_bit(edma_cc[i]->edma_unused,
-						edma_cc[i]->num_channels,
-						channel);
-				if (channel == edma_cc[i]->num_channels)
-					break;
-				if (!test_and_set_bit(channel,
-						edma_cc[i]->edma_inuse)) {
-					done = 1;
-					ctlr = i;
-					break;
-				}
-				channel++;
-			}
-			if (done)
-				break;
-		}
-		if (!done)
-			return -ENOMEM;
-	} else if (channel >= edma_cc[ctlr]->num_channels) {
-		return -EINVAL;
-	} else if (test_and_set_bit(channel, edma_cc[ctlr]->edma_inuse)) {
-		return -EBUSY;
-	}
-
-	/* ensure access through shadow region 0 */
-	edma_or_array2(ctlr, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));
-
-	/* ensure no events are pending */
-	edma_stop(EDMA_CTLR_CHAN(ctlr, channel));
-	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
-			&dummy_paramset, PARM_SIZE);
-
-	if (callback)
-		setup_dma_interrupt(EDMA_CTLR_CHAN(ctlr, channel),
-					callback, data);
-
-	map_dmach_queue(ctlr, channel, eventq_no);
-
-	return EDMA_CTLR_CHAN(ctlr, channel);
-}
-EXPORT_SYMBOL(edma_alloc_channel);
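The kerneldoc above is the only substantial documentation this legacy API had,
so a short usage sketch may help while reading the removal; this is
hypothetical caller code (my_edma_cb/my_edma_setup are invented names), not
part of the patch:

static void my_edma_cb(unsigned channel, u16 ch_status, void *data)
{
	/* ch_status is EDMA_DMA_COMPLETE or EDMA_DMA_CC_ERROR */
	if (ch_status != EDMA_DMA_COMPLETE)
		pr_err("edma: channel %u reported error %u\n",
		       channel, ch_status);
}

static int my_edma_setup(void)
{
	/* negative @channel = "any unmapped channel", default event queue */
	int ch = edma_alloc_channel(-1, my_edma_cb, NULL, EVENTQ_DEFAULT);

	if (ch < 0)
		return ch;	/* -EINVAL, -EBUSY or -ENOMEM */

	/* ... program the channel's PaRAM slot, then edma_start(ch) ... */

	edma_stop(ch);
	edma_free_channel(ch);
	return 0;
}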
-
-
-/**
- * edma_free_channel - deallocate DMA channel
- * @channel: dma channel returned from edma_alloc_channel()
- *
- * This deallocates the DMA channel and associated parameter RAM slot
- * allocated by edma_alloc_channel().
- *
- * Callers are responsible for ensuring the channel is inactive, and
- * will not be reactivated by linking, chaining, or software calls to
- * edma_start().
- */
-void edma_free_channel(unsigned channel)
-{
-	unsigned ctlr;
-
-	ctlr = EDMA_CTLR(channel);
-	channel = EDMA_CHAN_SLOT(channel);
-
-	if (channel >= edma_cc[ctlr]->num_channels)
-		return;
-
-	setup_dma_interrupt(channel, NULL, NULL);
-	/* REVISIT should probably take out of shadow region 0 */
-
-	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
-			&dummy_paramset, PARM_SIZE);
-	clear_bit(channel, edma_cc[ctlr]->edma_inuse);
-}
-EXPORT_SYMBOL(edma_free_channel);
-
-/**
- * edma_alloc_slot - allocate DMA parameter RAM
- * @slot: specific slot to allocate; negative for "any unused slot"
- *
- * This allocates a parameter RAM slot, initializing it to hold a
- * dummy transfer.  Slots allocated using this routine have not been
- * mapped to a hardware DMA channel, and will normally be used by
- * linking to them from a slot associated with a DMA channel.
- *
- * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
- * slots may be allocated on behalf of DSP firmware.
- *
- * Returns the number of the slot, else negative errno.
- */
-int edma_alloc_slot(unsigned ctlr, int slot)
-{
-	if (!edma_cc[ctlr])
-		return -EINVAL;
-
-	if (slot >= 0)
-		slot = EDMA_CHAN_SLOT(slot);
-
-	if (slot < 0) {
-		slot = edma_cc[ctlr]->num_channels;
-		for (;;) {
-			slot = find_next_zero_bit(edma_cc[ctlr]->edma_inuse,
-					edma_cc[ctlr]->num_slots, slot);
-			if (slot == edma_cc[ctlr]->num_slots)
-				return -ENOMEM;
-			if (!test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse))
-				break;
-		}
-	} else if (slot < edma_cc[ctlr]->num_channels ||
-			slot >= edma_cc[ctlr]->num_slots) {
-		return -EINVAL;
-	} else if (test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse)) {
-		return -EBUSY;
-	}
-
-	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
-			&dummy_paramset, PARM_SIZE);
-
-	return EDMA_CTLR_CHAN(ctlr, slot);
-}
-EXPORT_SYMBOL(edma_alloc_slot);
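A matching sketch for the slot API documented above (again hypothetical caller
code; EDMA_SLOT_ANY is the "any unused slot" value named in the kerneldoc):

static int my_slot_setup(void)
{
	/* reserve a PaRAM slot on CC0 that is not bound to any channel */
	int slot = edma_alloc_slot(0, EDMA_SLOT_ANY);

	if (slot < 0)
		return slot;

	/* ... fill the slot and link a channel's PaRAM set to it ... */

	edma_free_slot(slot);
	return 0;
}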
793 | |||
794 | /** | ||
795 | * edma_free_slot - deallocate DMA parameter RAM | ||
796 | * @slot: parameter RAM slot returned from edma_alloc_slot() | ||
797 | * | ||
798 | * This deallocates the parameter RAM slot allocated by edma_alloc_slot(). | ||
799 | * Callers are responsible for ensuring the slot is inactive, and will | ||
800 | * not be activated. | ||
801 | */ | ||
802 | void edma_free_slot(unsigned slot) | ||
803 | { | ||
804 | unsigned ctlr; | ||
805 | |||
806 | ctlr = EDMA_CTLR(slot); | ||
807 | slot = EDMA_CHAN_SLOT(slot); | ||
808 | |||
809 | if (slot < edma_cc[ctlr]->num_channels || | ||
810 | slot >= edma_cc[ctlr]->num_slots) | ||
811 | return; | ||
812 | |||
813 | memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), | ||
814 | &dummy_paramset, PARM_SIZE); | ||
815 | clear_bit(slot, edma_cc[ctlr]->edma_inuse); | ||
816 | } | ||
817 | EXPORT_SYMBOL(edma_free_slot); | ||
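
A short usage sketch for the pair above, assuming controller 0: EDMA_SLOT_ANY requests any unused slot beyond the channel range, and the return value is already EDMA_CTLR_CHAN()-encoded.

	static int example_scratch_slot(void)
	{
		/* allocate any free PaRAM slot on controller 0 */
		int slot = edma_alloc_slot(0, EDMA_SLOT_ANY);

		if (slot < 0)
			return slot;
		/* ... use it e.g. as a link target for a channel's slot ... */
		edma_free_slot(slot);
		return 0;
	}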
818 | |||
819 | |||
820 | /** | ||
821 | * edma_alloc_cont_slots - allocate contiguous parameter RAM slots | ||
822 | * The API returns the starting slot of the set of contiguous | ||
823 | * parameter RAM slots that has been requested | ||
824 | * | ||
825 | * @id: can only be EDMA_CONT_PARAMS_ANY or EDMA_CONT_PARAMS_FIXED_EXACT | ||
826 | * or EDMA_CONT_PARAMS_FIXED_NOT_EXACT | ||
827 | * @count: number of contiguous Parameter RAM slots | ||
828 | * @slot: the start value of Parameter RAM slot that should be passed if id | ||
829 | * is EDMA_CONT_PARAMS_FIXED_EXACT or EDMA_CONT_PARAMS_FIXED_NOT_EXACT | ||
830 | * | ||
831 | * If id is EDMA_CONT_PARAMS_ANY then the API starts looking for a set of | ||
832 | * contiguous Parameter RAM slots from parameter RAM slot 64 in the case of | ||
833 | * DaVinci SOCs and 32 in the case of DA8xx SOCs. | ||
834 | * | ||
835 | * If id is EDMA_CONT_PARAMS_FIXED_EXACT then the API starts looking for a | ||
836 | * set of contiguous parameter RAM slots from the "slot" that is passed as an | ||
837 | * argument to the API. | ||
838 | * | ||
839 | * If id is EDMA_CONT_PARAMS_FIXED_NOT_EXACT then the API initially | ||
840 | * starts looking for a set of contiguous parameter RAM slots from the "slot" | ||
841 | * that is passed as an argument to the API. On failure, the API will try to | ||
842 | * find a set of contiguous Parameter RAM slots from the remaining Parameter | ||
843 | * RAM slots. | ||
844 | */ | ||
845 | int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count) | ||
846 | { | ||
847 | /* | ||
848 | * The start slot requested should be greater than | ||
849 | * the number of channels and less than the total number | ||
850 | * of slots | ||
851 | */ | ||
852 | if ((id != EDMA_CONT_PARAMS_ANY) && | ||
853 | (slot < edma_cc[ctlr]->num_channels || | ||
854 | slot >= edma_cc[ctlr]->num_slots)) | ||
855 | return -EINVAL; | ||
856 | |||
857 | /* | ||
858 | * The number of parameter RAM slots requested cannot be less than 1 | ||
859 | * and cannot be more than the number of slots minus the number of | ||
860 | * channels | ||
861 | */ | ||
862 | if (count < 1 || count > | ||
863 | (edma_cc[ctlr]->num_slots - edma_cc[ctlr]->num_channels)) | ||
864 | return -EINVAL; | ||
865 | |||
866 | switch (id) { | ||
867 | case EDMA_CONT_PARAMS_ANY: | ||
868 | return reserve_contiguous_slots(ctlr, id, count, | ||
869 | edma_cc[ctlr]->num_channels); | ||
870 | case EDMA_CONT_PARAMS_FIXED_EXACT: | ||
871 | case EDMA_CONT_PARAMS_FIXED_NOT_EXACT: | ||
872 | return reserve_contiguous_slots(ctlr, id, count, slot); | ||
873 | default: | ||
874 | return -EINVAL; | ||
875 | } | ||
876 | |||
877 | } | ||
878 | EXPORT_SYMBOL(edma_alloc_cont_slots); | ||
879 | |||
880 | /** | ||
881 | * edma_free_cont_slots - deallocate DMA parameter RAM slots | ||
882 | * @slot: first parameter RAM slot of the set of slots to be freed | ||
883 | * @count: the number of contiguous parameter RAM slots to be freed | ||
884 | * | ||
885 | * This deallocates the parameter RAM slots allocated by | ||
886 | * edma_alloc_cont_slots. | ||
887 | * Callers/applications need to keep track of sets of contiguous | ||
888 | * parameter RAM slots that have been allocated using the edma_alloc_cont_slots | ||
889 | * API. | ||
890 | * Callers are responsible for ensuring the slots are inactive, and will | ||
891 | * not be activated. | ||
892 | */ | ||
893 | int edma_free_cont_slots(unsigned slot, int count) | ||
894 | { | ||
895 | unsigned ctlr, slot_to_free; | ||
896 | int i; | ||
897 | |||
898 | ctlr = EDMA_CTLR(slot); | ||
899 | slot = EDMA_CHAN_SLOT(slot); | ||
900 | |||
901 | if (slot < edma_cc[ctlr]->num_channels || | ||
902 | slot >= edma_cc[ctlr]->num_slots || | ||
903 | count < 1) | ||
904 | return -EINVAL; | ||
905 | |||
906 | for (i = slot; i < slot + count; ++i) { | ||
907 | ctlr = EDMA_CTLR(i); | ||
908 | slot_to_free = EDMA_CHAN_SLOT(i); | ||
909 | |||
910 | memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot_to_free), | ||
911 | &dummy_paramset, PARM_SIZE); | ||
912 | clear_bit(slot_to_free, edma_cc[ctlr]->edma_inuse); | ||
913 | } | ||
914 | |||
915 | return 0; | ||
916 | } | ||
917 | EXPORT_SYMBOL(edma_free_cont_slots); | ||
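
As a sketch of the contiguous-slot API, under the assumption that controller 0 has enough free slots; the slot count of 8 is illustrative:

	static int example_sg_slots(void)
	{
		/* any 8 adjacent PaRAM slots, wherever they fit */
		int first = edma_alloc_cont_slots(0, EDMA_CONT_PARAMS_ANY, 0, 8);

		if (first < 0)
			return first;
		/* ... program the 8 slots as a linked scatter list ... */
		return edma_free_cont_slots(first, 8);
	}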
918 | |||
919 | /*-----------------------------------------------------------------------*/ | ||
920 | |||
921 | /* Parameter RAM operations (i) -- read/write partial slots */ | ||
922 | |||
923 | /** | ||
924 | * edma_set_src - set initial DMA source address in parameter RAM slot | ||
925 | * @slot: parameter RAM slot being configured | ||
926 | * @src_port: physical address of source (memory, controller FIFO, etc) | ||
927 | * @mode: INCR, except in very rare cases | ||
928 | * @width: ignored unless @mode is FIFO, in which case it specifies the | ||
929 | * width to use when addressing the fifo (e.g. W8BIT, W32BIT) | ||
930 | * | ||
931 | * Note that the source address is modified during the DMA transfer | ||
932 | * according to edma_set_src_index(). | ||
933 | */ | ||
934 | void edma_set_src(unsigned slot, dma_addr_t src_port, | ||
935 | enum address_mode mode, enum fifo_width width) | ||
936 | { | ||
937 | unsigned ctlr; | ||
938 | |||
939 | ctlr = EDMA_CTLR(slot); | ||
940 | slot = EDMA_CHAN_SLOT(slot); | ||
941 | |||
942 | if (slot < edma_cc[ctlr]->num_slots) { | ||
943 | unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot); | ||
944 | |||
945 | if (mode) { | ||
946 | /* set SAM and program FWID */ | ||
947 | i = (i & ~(EDMA_FWID)) | (SAM | ((width & 0x7) << 8)); | ||
948 | } else { | ||
949 | /* clear SAM */ | ||
950 | i &= ~SAM; | ||
951 | } | ||
952 | edma_parm_write(ctlr, PARM_OPT, slot, i); | ||
953 | |||
954 | /* set the source port address in the | ||
955 | * source register of the param structure */ | ||
956 | edma_parm_write(ctlr, PARM_SRC, slot, src_port); | ||
957 | } | ||
958 | } | ||
959 | EXPORT_SYMBOL(edma_set_src); | ||
960 | |||
961 | /** | ||
962 | * edma_set_dest - set initial DMA destination address in parameter RAM slot | ||
963 | * @slot: parameter RAM slot being configured | ||
964 | * @dest_port: physical address of destination (memory, controller FIFO, etc) | ||
965 | * @mode: INCR, except in very rare cases | ||
966 | * @width: ignored unless @mode is FIFO, in which case it specifies the | ||
967 | * width to use when addressing the fifo (e.g. W8BIT, W32BIT) | ||
968 | * | ||
969 | * Note that the destination address is modified during the DMA transfer | ||
970 | * according to edma_set_dest_index(). | ||
971 | */ | ||
972 | void edma_set_dest(unsigned slot, dma_addr_t dest_port, | ||
973 | enum address_mode mode, enum fifo_width width) | ||
974 | { | ||
975 | unsigned ctlr; | ||
976 | |||
977 | ctlr = EDMA_CTLR(slot); | ||
978 | slot = EDMA_CHAN_SLOT(slot); | ||
979 | |||
980 | if (slot < edma_cc[ctlr]->num_slots) { | ||
981 | unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot); | ||
982 | |||
983 | if (mode) { | ||
984 | /* set DAM and program FWID */ | ||
985 | i = (i & ~(EDMA_FWID)) | (DAM | ((width & 0x7) << 8)); | ||
986 | } else { | ||
987 | /* clear DAM */ | ||
988 | i &= ~DAM; | ||
989 | } | ||
990 | edma_parm_write(ctlr, PARM_OPT, slot, i); | ||
991 | /* set the destination port address in the | ||
992 | * dest register of the param structure */ | ||
993 | edma_parm_write(ctlr, PARM_DST, slot, dest_port); | ||
994 | } | ||
995 | } | ||
996 | EXPORT_SYMBOL(edma_set_dest); | ||
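
A hedged sketch tying the two calls together for a memory-to-peripheral slot; buf_dma and fifo_pa are illustrative addresses supplied by the caller:

	static void example_addresses(unsigned slot, dma_addr_t buf_dma,
				      dma_addr_t fifo_pa)
	{
		/* memory side: normal incrementing addresses */
		edma_set_src(slot, buf_dma, INCR, W8BIT);
		/* device side: constant-address 32-bit FIFO port */
		edma_set_dest(slot, fifo_pa, FIFO, W32BIT);
	}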
997 | |||
998 | /** | ||
999 | * edma_get_position - returns the current transfer point | ||
1000 | * @slot: parameter RAM slot being examined | ||
1001 | * @dst: true selects the dest position, false the source | ||
1002 | * | ||
1003 | * Returns the position of the currently active slot | ||
1004 | */ | ||
1005 | dma_addr_t edma_get_position(unsigned slot, bool dst) | ||
1006 | { | ||
1007 | u32 offs, ctlr = EDMA_CTLR(slot); | ||
1008 | |||
1009 | slot = EDMA_CHAN_SLOT(slot); | ||
1010 | |||
1011 | offs = PARM_OFFSET(slot); | ||
1012 | offs += dst ? PARM_DST : PARM_SRC; | ||
1013 | |||
1014 | return edma_read(ctlr, offs); | ||
1015 | } | ||
1016 | |||
1017 | /** | ||
1018 | * edma_set_src_index - configure DMA source address indexing | ||
1019 | * @slot: parameter RAM slot being configured | ||
1020 | * @src_bidx: byte offset between source arrays in a frame | ||
1021 | * @src_cidx: byte offset between source frames in a block | ||
1022 | * | ||
1023 | * Offsets are specified to support either contiguous or discontiguous | ||
1024 | * memory transfers, or repeated access to a hardware register, as needed. | ||
1025 | * When accessing hardware registers, both offsets are normally zero. | ||
1026 | */ | ||
1027 | void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx) | ||
1028 | { | ||
1029 | unsigned ctlr; | ||
1030 | |||
1031 | ctlr = EDMA_CTLR(slot); | ||
1032 | slot = EDMA_CHAN_SLOT(slot); | ||
1033 | |||
1034 | if (slot < edma_cc[ctlr]->num_slots) { | ||
1035 | edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot, | ||
1036 | 0xffff0000, src_bidx); | ||
1037 | edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot, | ||
1038 | 0xffff0000, src_cidx); | ||
1039 | } | ||
1040 | } | ||
1041 | EXPORT_SYMBOL(edma_set_src_index); | ||
1042 | |||
1043 | /** | ||
1044 | * edma_set_dest_index - configure DMA destination address indexing | ||
1045 | * @slot: parameter RAM slot being configured | ||
1046 | * @dest_bidx: byte offset between destination arrays in a frame | ||
1047 | * @dest_cidx: byte offset between destination frames in a block | ||
1048 | * | ||
1049 | * Offsets are specified to support either contiguous or discontiguous | ||
1050 | * memory transfers, or repeated access to a hardware register, as needed. | ||
1051 | * When accessing hardware registers, both offsets are normally zero. | ||
1052 | */ | ||
1053 | void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx) | ||
1054 | { | ||
1055 | unsigned ctlr; | ||
1056 | |||
1057 | ctlr = EDMA_CTLR(slot); | ||
1058 | slot = EDMA_CHAN_SLOT(slot); | ||
1059 | |||
1060 | if (slot < edma_cc[ctlr]->num_slots) { | ||
1061 | edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot, | ||
1062 | 0x0000ffff, dest_bidx << 16); | ||
1063 | edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot, | ||
1064 | 0x0000ffff, dest_cidx << 16); | ||
1065 | } | ||
1066 | } | ||
1067 | EXPORT_SYMBOL(edma_set_dest_index); | ||
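
For example (a sketch; src_pitch is a caller-supplied row pitch, not part of this API): a 2D copy that gathers rows spaced src_pitch bytes apart into a packed destination:

	static void example_indexing(unsigned slot, u16 acnt, u16 bcnt,
				     s16 src_pitch)
	{
		/* source: rows are src_pitch apart, frames follow the rows */
		edma_set_src_index(slot, src_pitch, src_pitch * bcnt);
		/* destination: fully packed rows and frames */
		edma_set_dest_index(slot, acnt, acnt * bcnt);
	}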
1068 | |||
1069 | /** | ||
1070 | * edma_set_transfer_params - configure DMA transfer parameters | ||
1071 | * @slot: parameter RAM slot being configured | ||
1072 | * @acnt: how many bytes per array (at least one) | ||
1073 | * @bcnt: how many arrays per frame (at least one) | ||
1074 | * @ccnt: how many frames per block (at least one) | ||
1075 | * @bcnt_rld: used only for A-Synchronized transfers; this specifies | ||
1076 | * the value to reload into bcnt when it decrements to zero | ||
1077 | * @sync_mode: ASYNC or ABSYNC | ||
1078 | * | ||
1079 | * See the EDMA3 documentation to understand how to configure and link | ||
1080 | * transfers using the fields in PaRAM slots. If you are not doing it | ||
1081 | * all at once with edma_write_slot(), you will use this routine | ||
1082 | * plus two calls each for source and destination, setting the initial | ||
1083 | * address and saying how to index that address. | ||
1084 | * | ||
1085 | * An example of an A-Synchronized transfer is a serial link using a | ||
1086 | * single word shift register. In that case, @acnt would be equal to | ||
1087 | * that word size; the serial controller issues a DMA synchronization | ||
1088 | * event to transfer each word, and memory access by the DMA transfer | ||
1089 | * controller will be word-at-a-time. | ||
1090 | * | ||
1091 | * An example of an AB-Synchronized transfer is a device using a FIFO. | ||
1092 | * In that case, @acnt equals the FIFO width and @bcnt equals its depth. | ||
1093 | * The controller with the FIFO issues DMA synchronization events when | ||
1094 | * the FIFO threshold is reached, and the DMA transfer controller will | ||
1095 | * transfer one frame to (or from) the FIFO. It will probably use | ||
1096 | * efficient burst modes to access memory. | ||
1097 | */ | ||
1098 | void edma_set_transfer_params(unsigned slot, | ||
1099 | u16 acnt, u16 bcnt, u16 ccnt, | ||
1100 | u16 bcnt_rld, enum sync_dimension sync_mode) | ||
1101 | { | ||
1102 | unsigned ctlr; | ||
1103 | |||
1104 | ctlr = EDMA_CTLR(slot); | ||
1105 | slot = EDMA_CHAN_SLOT(slot); | ||
1106 | |||
1107 | if (slot < edma_cc[ctlr]->num_slots) { | ||
1108 | edma_parm_modify(ctlr, PARM_LINK_BCNTRLD, slot, | ||
1109 | 0x0000ffff, bcnt_rld << 16); | ||
1110 | if (sync_mode == ASYNC) | ||
1111 | edma_parm_and(ctlr, PARM_OPT, slot, ~SYNCDIM); | ||
1112 | else | ||
1113 | edma_parm_or(ctlr, PARM_OPT, slot, SYNCDIM); | ||
1114 | /* Set the acount, bcount, ccount registers */ | ||
1115 | edma_parm_write(ctlr, PARM_A_B_CNT, slot, (bcnt << 16) | acnt); | ||
1116 | edma_parm_write(ctlr, PARM_CCNT, slot, ccnt); | ||
1117 | } | ||
1118 | } | ||
1119 | EXPORT_SYMBOL(edma_set_transfer_params); | ||
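
Continuing the FIFO example from the comment above as a sketch: a 32-bit-wide FIFO that raises one event per 8-word burst could be programmed roughly like this (len is the total transfer size in bytes, assumed to be a multiple of the burst size):

	static void example_absync(unsigned slot, unsigned len)
	{
		u16 acnt = 4;			/* one 32-bit FIFO word */
		u16 bcnt = 8;			/* 8 words move per event */
		u16 ccnt = len / (acnt * bcnt);	/* bursts in the whole block */

		edma_set_transfer_params(slot, acnt, bcnt, ccnt, bcnt, ABSYNC);
	}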
1120 | |||
1121 | /** | ||
1122 | * edma_link - link one parameter RAM slot to another | ||
1123 | * @from: parameter RAM slot originating the link | ||
1124 | * @to: parameter RAM slot which is the link target | ||
1125 | * | ||
1126 | * The originating slot should not be part of any active DMA transfer. | ||
1127 | */ | ||
1128 | void edma_link(unsigned from, unsigned to) | ||
1129 | { | ||
1130 | unsigned ctlr_from, ctlr_to; | ||
1131 | |||
1132 | ctlr_from = EDMA_CTLR(from); | ||
1133 | from = EDMA_CHAN_SLOT(from); | ||
1134 | ctlr_to = EDMA_CTLR(to); | ||
1135 | to = EDMA_CHAN_SLOT(to); | ||
1136 | |||
1137 | if (from >= edma_cc[ctlr_from]->num_slots) | ||
1138 | return; | ||
1139 | if (to >= edma_cc[ctlr_to]->num_slots) | ||
1140 | return; | ||
1141 | edma_parm_modify(ctlr_from, PARM_LINK_BCNTRLD, from, 0xffff0000, | ||
1142 | PARM_OFFSET(to)); | ||
1143 | } | ||
1144 | EXPORT_SYMBOL(edma_link); | ||
1145 | |||
1146 | /** | ||
1147 | * edma_unlink - cut link from one parameter RAM slot | ||
1148 | * @from: parameter RAM slot originating the link | ||
1149 | * | ||
1150 | * The originating slot should not be part of any active DMA transfer. | ||
1151 | * Its link is set to 0xffff. | ||
1152 | */ | ||
1153 | void edma_unlink(unsigned from) | ||
1154 | { | ||
1155 | unsigned ctlr; | ||
1156 | |||
1157 | ctlr = EDMA_CTLR(from); | ||
1158 | from = EDMA_CHAN_SLOT(from); | ||
1159 | |||
1160 | if (from >= edma_cc[ctlr]->num_slots) | ||
1161 | return; | ||
1162 | edma_parm_or(ctlr, PARM_LINK_BCNTRLD, from, 0xffff); | ||
1163 | } | ||
1164 | EXPORT_SYMBOL(edma_unlink); | ||
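
A sketch of the classic ping-pong arrangement these two calls enable; chan_slot is the slot hardware-mapped to the channel, and ping/pong are scratch slots from edma_alloc_slot():

	static void example_pingpong(unsigned chan_slot, unsigned ping,
				     unsigned pong)
	{
		edma_link(chan_slot, ping);	/* first reload comes from ping */
		edma_link(ping, pong);		/* ping reloads pong ... */
		edma_link(pong, ping);		/* ... and pong reloads ping */
	}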
1165 | |||
1166 | /*-----------------------------------------------------------------------*/ | ||
1167 | |||
1168 | /* Parameter RAM operations (ii) -- read/write whole parameter sets */ | ||
1169 | |||
1170 | /** | ||
1171 | * edma_write_slot - write parameter RAM data for slot | ||
1172 | * @slot: number of parameter RAM slot being modified | ||
1173 | * @param: data to be written into parameter RAM slot | ||
1174 | * | ||
1175 | * Use this to assign all parameters of a transfer at once. This | ||
1176 | * allows more efficient setup of transfers than issuing multiple | ||
1177 | * calls to set up those parameters in small pieces, and provides | ||
1178 | * complete control over all transfer options. | ||
1179 | */ | ||
1180 | void edma_write_slot(unsigned slot, const struct edmacc_param *param) | ||
1181 | { | ||
1182 | unsigned ctlr; | ||
1183 | |||
1184 | ctlr = EDMA_CTLR(slot); | ||
1185 | slot = EDMA_CHAN_SLOT(slot); | ||
1186 | |||
1187 | if (slot >= edma_cc[ctlr]->num_slots) | ||
1188 | return; | ||
1189 | memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), param, | ||
1190 | PARM_SIZE); | ||
1191 | } | ||
1192 | EXPORT_SYMBOL(edma_write_slot); | ||
1193 | |||
1194 | /** | ||
1195 | * edma_read_slot - read parameter RAM data from slot | ||
1196 | * @slot: number of parameter RAM slot being copied | ||
1197 | * @param: where to store copy of parameter RAM data | ||
1198 | * | ||
1199 | * Use this to read data from a parameter RAM slot, perhaps to | ||
1200 | * save it as a template for later reuse. | ||
1201 | */ | ||
1202 | void edma_read_slot(unsigned slot, struct edmacc_param *param) | ||
1203 | { | ||
1204 | unsigned ctlr; | ||
1205 | |||
1206 | ctlr = EDMA_CTLR(slot); | ||
1207 | slot = EDMA_CHAN_SLOT(slot); | ||
1208 | |||
1209 | if (slot >= edma_cc[ctlr]->num_slots) | ||
1210 | return; | ||
1211 | memcpy_fromio(param, edmacc_regs_base[ctlr] + PARM_OFFSET(slot), | ||
1212 | PARM_SIZE); | ||
1213 | } | ||
1214 | EXPORT_SYMBOL(edma_read_slot); | ||
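
As a sketch of the whole-set routines: cloning one slot while retargeting its destination. The .dst field name follows struct edmacc_param from the platform header:

	static void example_clone_slot(unsigned from, unsigned to,
				       dma_addr_t new_dst)
	{
		struct edmacc_param p;

		edma_read_slot(from, &p);	/* snapshot the full PaRAM set */
		p.dst = new_dst;		/* retarget the destination */
		edma_write_slot(to, &p);	/* program the copy in one go */
	}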
1215 | |||
1216 | /*-----------------------------------------------------------------------*/ | ||
1217 | |||
1218 | /* Various EDMA channel control operations */ | ||
1219 | |||
1220 | /** | ||
1221 | * edma_pause - pause dma on a channel | ||
1222 | * @channel: on which edma_start() has been called | ||
1223 | * | ||
1224 | * This temporarily disables EDMA hardware events on the specified channel, | ||
1225 | * preventing them from triggering new transfers on its behalf. | ||
1226 | */ | ||
1227 | void edma_pause(unsigned channel) | ||
1228 | { | ||
1229 | unsigned ctlr; | ||
1230 | |||
1231 | ctlr = EDMA_CTLR(channel); | ||
1232 | channel = EDMA_CHAN_SLOT(channel); | ||
1233 | |||
1234 | if (channel < edma_cc[ctlr]->num_channels) { | ||
1235 | unsigned int mask = BIT(channel & 0x1f); | ||
1236 | |||
1237 | edma_shadow0_write_array(ctlr, SH_EECR, channel >> 5, mask); | ||
1238 | } | ||
1239 | } | ||
1240 | EXPORT_SYMBOL(edma_pause); | ||
1241 | |||
1242 | /** | ||
1243 | * edma_resume - resumes dma on a paused channel | ||
1244 | * @channel: on which edma_pause() has been called | ||
1245 | * | ||
1246 | * This re-enables EDMA hardware events on the specified channel. | ||
1247 | */ | ||
1248 | void edma_resume(unsigned channel) | ||
1249 | { | ||
1250 | unsigned ctlr; | ||
1251 | |||
1252 | ctlr = EDMA_CTLR(channel); | ||
1253 | channel = EDMA_CHAN_SLOT(channel); | ||
1254 | |||
1255 | if (channel < edma_cc[ctlr]->num_channels) { | ||
1256 | unsigned int mask = BIT(channel & 0x1f); | ||
1257 | |||
1258 | edma_shadow0_write_array(ctlr, SH_EESR, channel >> 5, mask); | ||
1259 | } | ||
1260 | } | ||
1261 | EXPORT_SYMBOL(edma_resume); | ||
1262 | |||
1263 | int edma_trigger_channel(unsigned channel) | ||
1264 | { | ||
1265 | unsigned ctlr; | ||
1266 | unsigned int mask; | ||
1267 | |||
1268 | ctlr = EDMA_CTLR(channel); | ||
1269 | channel = EDMA_CHAN_SLOT(channel); | ||
1270 | mask = BIT(channel & 0x1f); | ||
1271 | |||
1272 | edma_shadow0_write_array(ctlr, SH_ESR, (channel >> 5), mask); | ||
1273 | |||
1274 | pr_debug("EDMA: ESR%d %08x\n", (channel >> 5), | ||
1275 | edma_shadow0_read_array(ctlr, SH_ESR, (channel >> 5))); | ||
1276 | return 0; | ||
1277 | } | ||
1278 | EXPORT_SYMBOL(edma_trigger_channel); | ||
1279 | |||
1280 | /** | ||
1281 | * edma_start - start dma on a channel | ||
1282 | * @channel: channel being activated | ||
1283 | * | ||
1284 | * Channels with event associations will be triggered by their hardware | ||
1285 | * events, and channels without such associations will be triggered by | ||
1286 | * software. (At this writing there is no interface for using software | ||
1287 | * triggers except with channels that don't support hardware triggers.) | ||
1288 | * | ||
1289 | * Returns zero on success, else negative errno. | ||
1290 | */ | ||
1291 | int edma_start(unsigned channel) | ||
1292 | { | ||
1293 | unsigned ctlr; | ||
1294 | |||
1295 | ctlr = EDMA_CTLR(channel); | ||
1296 | channel = EDMA_CHAN_SLOT(channel); | ||
1297 | |||
1298 | if (channel < edma_cc[ctlr]->num_channels) { | ||
1299 | int j = channel >> 5; | ||
1300 | unsigned int mask = BIT(channel & 0x1f); | ||
1301 | |||
1302 | /* EDMA channels without event association */ | ||
1303 | if (test_bit(channel, edma_cc[ctlr]->edma_unused)) { | ||
1304 | pr_debug("EDMA: ESR%d %08x\n", j, | ||
1305 | edma_shadow0_read_array(ctlr, SH_ESR, j)); | ||
1306 | edma_shadow0_write_array(ctlr, SH_ESR, j, mask); | ||
1307 | return 0; | ||
1308 | } | ||
1309 | |||
1310 | /* EDMA channel with event association */ | ||
1311 | pr_debug("EDMA: ER%d %08x\n", j, | ||
1312 | edma_shadow0_read_array(ctlr, SH_ER, j)); | ||
1313 | /* Clear any pending event or error */ | ||
1314 | edma_write_array(ctlr, EDMA_ECR, j, mask); | ||
1315 | edma_write_array(ctlr, EDMA_EMCR, j, mask); | ||
1316 | /* Clear any SER */ | ||
1317 | edma_shadow0_write_array(ctlr, SH_SECR, j, mask); | ||
1318 | edma_shadow0_write_array(ctlr, SH_EESR, j, mask); | ||
1319 | pr_debug("EDMA: EER%d %08x\n", j, | ||
1320 | edma_shadow0_read_array(ctlr, SH_EER, j)); | ||
1321 | return 0; | ||
1322 | } | ||
1323 | |||
1324 | return -EINVAL; | ||
1325 | } | ||
1326 | EXPORT_SYMBOL(edma_start); | ||
1327 | |||
1328 | /** | ||
1329 | * edma_stop - stops dma on the channel passed | ||
1330 | * @channel: channel being deactivated | ||
1331 | * | ||
1332 | * Any active transfer on @channel is paused and | ||
1333 | * all pending hardware events are cleared. The current transfer | ||
1334 | * may not be resumed, and the channel's Parameter RAM should be | ||
1335 | * reinitialized before being reused. | ||
1336 | */ | ||
1337 | void edma_stop(unsigned channel) | ||
1338 | { | ||
1339 | unsigned ctlr; | ||
1340 | |||
1341 | ctlr = EDMA_CTLR(channel); | ||
1342 | channel = EDMA_CHAN_SLOT(channel); | ||
1343 | |||
1344 | if (channel < edma_cc[ctlr]->num_channels) { | ||
1345 | int j = channel >> 5; | ||
1346 | unsigned int mask = BIT(channel & 0x1f); | ||
1347 | |||
1348 | edma_shadow0_write_array(ctlr, SH_EECR, j, mask); | ||
1349 | edma_shadow0_write_array(ctlr, SH_ECR, j, mask); | ||
1350 | edma_shadow0_write_array(ctlr, SH_SECR, j, mask); | ||
1351 | edma_write_array(ctlr, EDMA_EMCR, j, mask); | ||
1352 | |||
1353 | /* clear possibly pending completion interrupt */ | ||
1354 | edma_shadow0_write_array(ctlr, SH_ICR, j, mask); | ||
1355 | |||
1356 | pr_debug("EDMA: EER%d %08x\n", j, | ||
1357 | edma_shadow0_read_array(ctlr, SH_EER, j)); | ||
1358 | |||
1359 | /* REVISIT: consider guarding against inappropriate event | ||
1360 | * chaining by overwriting with dummy_paramset. | ||
1361 | */ | ||
1362 | } | ||
1363 | } | ||
1364 | EXPORT_SYMBOL(edma_stop); | ||
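
A sketch contrasting the two trigger styles described in edma_start()'s comment; has_hw_event is a caller-supplied flag, not part of this API:

	static int example_run(unsigned channel, bool has_hw_event)
	{
		int ret = edma_start(channel);	/* arms events, or fires once */

		if (!ret && !has_hw_event)
			edma_trigger_channel(channel);	/* kick later blocks */
		/* ... on completion or error: */
		edma_stop(channel);
		return ret;
	}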
1365 | |||
1366 | /****************************************************************************** | ||
1367 | * | ||
1368 | * It cleans the ParamEntry and brings EDMA back to its initial state if media | ||
1369 | * has been removed before EDMA has finished. It is useful for removable media. | ||
1370 | * Arguments: | ||
1371 | * channel - channel number | ||
1372 | * | ||
1373 | * Return: none | ||
1374 | * | ||
1375 | * FIXME this should not be needed ... edma_stop() should suffice. | ||
1376 | * | ||
1377 | *****************************************************************************/ | ||
1378 | |||
1379 | void edma_clean_channel(unsigned channel) | ||
1380 | { | ||
1381 | unsigned ctlr; | ||
1382 | |||
1383 | ctlr = EDMA_CTLR(channel); | ||
1384 | channel = EDMA_CHAN_SLOT(channel); | ||
1385 | |||
1386 | if (channel < edma_cc[ctlr]->num_channels) { | ||
1387 | int j = (channel >> 5); | ||
1388 | unsigned int mask = BIT(channel & 0x1f); | ||
1389 | |||
1390 | pr_debug("EDMA: EMR%d %08x\n", j, | ||
1391 | edma_read_array(ctlr, EDMA_EMR, j)); | ||
1392 | edma_shadow0_write_array(ctlr, SH_ECR, j, mask); | ||
1393 | /* Clear the corresponding EMR bits */ | ||
1394 | edma_write_array(ctlr, EDMA_EMCR, j, mask); | ||
1395 | /* Clear any SER */ | ||
1396 | edma_shadow0_write_array(ctlr, SH_SECR, j, mask); | ||
1397 | edma_write(ctlr, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0)); | ||
1398 | } | ||
1399 | } | ||
1400 | EXPORT_SYMBOL(edma_clean_channel); | ||
1401 | |||
1402 | /* | ||
1403 | * edma_clear_event - clear an outstanding event on the DMA channel | ||
1404 | * Arguments: | ||
1405 | * channel - channel number | ||
1406 | */ | ||
1407 | void edma_clear_event(unsigned channel) | ||
1408 | { | ||
1409 | unsigned ctlr; | ||
1410 | |||
1411 | ctlr = EDMA_CTLR(channel); | ||
1412 | channel = EDMA_CHAN_SLOT(channel); | ||
1413 | |||
1414 | if (channel >= edma_cc[ctlr]->num_channels) | ||
1415 | return; | ||
1416 | if (channel < 32) | ||
1417 | edma_write(ctlr, EDMA_ECR, BIT(channel)); | ||
1418 | else | ||
1419 | edma_write(ctlr, EDMA_ECRH, BIT(channel - 32)); | ||
1420 | } | ||
1421 | EXPORT_SYMBOL(edma_clear_event); | ||
1422 | |||
1423 | /* | ||
1424 | * edma_assign_channel_eventq - move given channel to desired eventq | ||
1425 | * Arguments: | ||
1426 | * channel - channel number | ||
1427 | * eventq_no - queue to move the channel to | ||
1428 | * | ||
1429 | * Can be used to move a channel to a selected event queue. | ||
1430 | */ | ||
1431 | void edma_assign_channel_eventq(unsigned channel, enum dma_event_q eventq_no) | ||
1432 | { | ||
1433 | unsigned ctlr; | ||
1434 | |||
1435 | ctlr = EDMA_CTLR(channel); | ||
1436 | channel = EDMA_CHAN_SLOT(channel); | ||
1437 | |||
1438 | if (channel >= edma_cc[ctlr]->num_channels) | ||
1439 | return; | ||
1440 | |||
1441 | /* default to low priority queue */ | ||
1442 | if (eventq_no == EVENTQ_DEFAULT) | ||
1443 | eventq_no = edma_cc[ctlr]->default_queue; | ||
1444 | if (eventq_no >= edma_cc[ctlr]->num_tc) | ||
1445 | return; | ||
1446 | |||
1447 | map_dmach_queue(ctlr, channel, eventq_no); | ||
1448 | } | ||
1449 | EXPORT_SYMBOL(edma_assign_channel_eventq); | ||
1450 | |||
1451 | static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata, | ||
1452 | struct edma *edma_cc, int cc_id) | ||
1453 | { | ||
1454 | int i; | ||
1455 | u32 value, cccfg; | ||
1456 | s8 (*queue_priority_map)[2]; | ||
1457 | |||
1458 | /* Decode the eDMA3 configuration from CCCFG register */ | ||
1459 | cccfg = edma_read(cc_id, EDMA_CCCFG); | ||
1460 | |||
1461 | value = GET_NUM_REGN(cccfg); | ||
1462 | edma_cc->num_region = BIT(value); | ||
1463 | |||
1464 | value = GET_NUM_DMACH(cccfg); | ||
1465 | edma_cc->num_channels = BIT(value + 1); | ||
1466 | |||
1467 | value = GET_NUM_PAENTRY(cccfg); | ||
1468 | edma_cc->num_slots = BIT(value + 4); | ||
1469 | |||
1470 | value = GET_NUM_EVQUE(cccfg); | ||
1471 | edma_cc->num_tc = value + 1; | ||
1472 | |||
1473 | dev_dbg(dev, "eDMA3 CC%d HW configuration (cccfg: 0x%08x):\n", cc_id, | ||
1474 | cccfg); | ||
1475 | dev_dbg(dev, "num_region: %u\n", edma_cc->num_region); | ||
1476 | dev_dbg(dev, "num_channel: %u\n", edma_cc->num_channels); | ||
1477 | dev_dbg(dev, "num_slot: %u\n", edma_cc->num_slots); | ||
1478 | dev_dbg(dev, "num_tc: %u\n", edma_cc->num_tc); | ||
1479 | |||
1480 | /* Nothing needs to be done if the queue priority mapping is provided */ | ||
1481 | if (pdata->queue_priority_mapping) | ||
1482 | return 0; | ||
1483 | |||
1484 | /* | ||
1485 | * Configure TC/queue priority as follows: | ||
1486 | * Q0 - priority 0 | ||
1487 | * Q1 - priority 1 | ||
1488 | * Q2 - priority 2 | ||
1489 | * ... | ||
1490 | * The meaning of priority numbers: 0 highest priority, 7 lowest | ||
1491 | * priority. So Q0 is the highest priority queue and the last queue has | ||
1492 | * the lowest priority. | ||
1493 | */ | ||
1494 | queue_priority_map = devm_kzalloc(dev, | ||
1495 | (edma_cc->num_tc + 1) * sizeof(queue_priority_map[0]), | ||
1496 | GFP_KERNEL); | ||
1497 | if (!queue_priority_map) | ||
1498 | return -ENOMEM; | ||
1499 | |||
1500 | for (i = 0; i < edma_cc->num_tc; i++) { | ||
1501 | queue_priority_map[i][0] = i; | ||
1502 | queue_priority_map[i][1] = i; | ||
1503 | } | ||
1504 | queue_priority_map[i][0] = -1; | ||
1505 | queue_priority_map[i][1] = -1; | ||
1506 | |||
1507 | pdata->queue_priority_mapping = queue_priority_map; | ||
1508 | /* Default queue has the lowest priority */ | ||
1509 | pdata->default_queue = i - 1; | ||
1510 | |||
1511 | return 0; | ||
1512 | } | ||
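
To make the CCCFG decode concrete, a worked example with hypothetical field values (no particular SoC implied):

	/*
	 * If GET_NUM_DMACH(cccfg) = 4, GET_NUM_PAENTRY(cccfg) = 5 and
	 * GET_NUM_EVQUE(cccfg) = 2, the code above yields:
	 *
	 *	num_channels = BIT(4 + 1) = 32
	 *	num_slots    = BIT(5 + 4) = 512
	 *	num_tc       = 2 + 1      = 3
	 *
	 * i.e. CCCFG stores log2-encoded sizes, not raw counts.
	 */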
1513 | |||
1514 | #if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DMADEVICES) | ||
1515 | |||
1516 | static int edma_xbar_event_map(struct device *dev, struct device_node *node, | ||
1517 | struct edma_soc_info *pdata, size_t sz) | ||
1518 | { | ||
1519 | const char pname[] = "ti,edma-xbar-event-map"; | ||
1520 | struct resource res; | ||
1521 | void __iomem *xbar; | ||
1522 | s16 (*xbar_chans)[2]; | ||
1523 | size_t nelm = sz / sizeof(s16); | ||
1524 | u32 shift, offset, mux; | ||
1525 | int ret, i; | ||
1526 | |||
1527 | xbar_chans = devm_kzalloc(dev, (nelm + 2) * sizeof(s16), GFP_KERNEL); | ||
1528 | if (!xbar_chans) | ||
1529 | return -ENOMEM; | ||
1530 | |||
1531 | ret = of_address_to_resource(node, 1, &res); | ||
1532 | if (ret) | ||
1533 | return -ENOMEM; | ||
1534 | |||
1535 | xbar = devm_ioremap(dev, res.start, resource_size(&res)); | ||
1536 | if (!xbar) | ||
1537 | return -ENOMEM; | ||
1538 | |||
1539 | ret = of_property_read_u16_array(node, pname, (u16 *)xbar_chans, nelm); | ||
1540 | if (ret) | ||
1541 | return -EIO; | ||
1542 | |||
1543 | /* Invalidate last entry for the other user of this mess */ | ||
1544 | nelm >>= 1; | ||
1545 | xbar_chans[nelm][0] = xbar_chans[nelm][1] = -1; | ||
1546 | |||
1547 | for (i = 0; i < nelm; i++) { | ||
1548 | shift = (xbar_chans[i][1] & 0x03) << 3; | ||
1549 | offset = xbar_chans[i][1] & 0xfffffffc; | ||
1550 | mux = readl(xbar + offset); | ||
1551 | mux &= ~(0xff << shift); | ||
1552 | mux |= xbar_chans[i][0] << shift; | ||
1553 | writel(mux, (xbar + offset)); | ||
1554 | } | ||
1555 | |||
1556 | pdata->xbar_chans = (const s16 (*)[2]) xbar_chans; | ||
1557 | return 0; | ||
1558 | } | ||
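
To make the crossbar register math concrete, a worked example for one hypothetical mapping pair:

	/*
	 * For a ti,edma-xbar-event-map pair {12, 5} (crossbar input 12 to
	 * eDMA event 5), the loop above computes:
	 *
	 *	shift  = (5 & 0x03) << 3 = 8	-- byte lane 1
	 *	offset = 5 & ~0x03       = 4	-- second 32-bit mux register
	 *
	 * so byte 1 of the register at xbar + 4 is rewritten to 12.
	 */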
1559 | |||
1560 | static int edma_of_parse_dt(struct device *dev, | ||
1561 | struct device_node *node, | ||
1562 | struct edma_soc_info *pdata) | ||
1563 | { | ||
1564 | int ret = 0; | ||
1565 | struct property *prop; | ||
1566 | size_t sz; | ||
1567 | struct edma_rsv_info *rsv_info; | ||
1568 | |||
1569 | rsv_info = devm_kzalloc(dev, sizeof(struct edma_rsv_info), GFP_KERNEL); | ||
1570 | if (!rsv_info) | ||
1571 | return -ENOMEM; | ||
1572 | pdata->rsv = rsv_info; | ||
1573 | |||
1574 | prop = of_find_property(node, "ti,edma-xbar-event-map", &sz); | ||
1575 | if (prop) | ||
1576 | ret = edma_xbar_event_map(dev, node, pdata, sz); | ||
1577 | |||
1578 | return ret; | ||
1579 | } | ||
1580 | |||
1581 | static struct of_dma_filter_info edma_filter_info = { | ||
1582 | .filter_fn = edma_filter_fn, | ||
1583 | }; | ||
1584 | |||
1585 | static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev, | ||
1586 | struct device_node *node) | ||
1587 | { | ||
1588 | struct edma_soc_info *info; | ||
1589 | int ret; | ||
1590 | |||
1591 | info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL); | ||
1592 | if (!info) | ||
1593 | return ERR_PTR(-ENOMEM); | ||
1594 | |||
1595 | ret = edma_of_parse_dt(dev, node, info); | ||
1596 | if (ret) | ||
1597 | return ERR_PTR(ret); | ||
1598 | |||
1599 | dma_cap_set(DMA_SLAVE, edma_filter_info.dma_cap); | ||
1600 | dma_cap_set(DMA_CYCLIC, edma_filter_info.dma_cap); | ||
1601 | of_dma_controller_register(dev->of_node, of_dma_simple_xlate, | ||
1602 | &edma_filter_info); | ||
1603 | |||
1604 | return info; | ||
1605 | } | ||
1606 | #else | ||
1607 | static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev, | ||
1608 | struct device_node *node) | ||
1609 | { | ||
1610 | return ERR_PTR(-ENOSYS); | ||
1611 | } | ||
1612 | #endif | ||
1613 | |||
1614 | static int edma_probe(struct platform_device *pdev) | ||
1615 | { | ||
1616 | struct edma_soc_info **info = pdev->dev.platform_data; | ||
1617 | struct edma_soc_info *ninfo[EDMA_MAX_CC] = {NULL}; | ||
1618 | s8 (*queue_priority_mapping)[2]; | ||
1619 | int i, j, off, ln, found = 0; | ||
1620 | int status = -1; | ||
1621 | const s16 (*rsv_chans)[2]; | ||
1622 | const s16 (*rsv_slots)[2]; | ||
1623 | const s16 (*xbar_chans)[2]; | ||
1624 | int irq[EDMA_MAX_CC] = {0, 0}; | ||
1625 | int err_irq[EDMA_MAX_CC] = {0, 0}; | ||
1626 | struct resource *r[EDMA_MAX_CC] = {NULL}; | ||
1627 | struct resource res[EDMA_MAX_CC]; | ||
1628 | char res_name[10]; | ||
1629 | struct device_node *node = pdev->dev.of_node; | ||
1630 | struct device *dev = &pdev->dev; | ||
1631 | int ret; | ||
1632 | struct platform_device_info edma_dev_info = { | ||
1633 | .name = "edma-dma-engine", | ||
1634 | .dma_mask = DMA_BIT_MASK(32), | ||
1635 | .parent = &pdev->dev, | ||
1636 | }; | ||
1637 | |||
1638 | if (node) { | ||
1639 | /* Check if this is a second instance registered */ | ||
1640 | if (arch_num_cc) { | ||
1641 | dev_err(dev, "only one EDMA instance is supported via DT\n"); | ||
1642 | return -ENODEV; | ||
1643 | } | ||
1644 | |||
1645 | ninfo[0] = edma_setup_info_from_dt(dev, node); | ||
1646 | if (IS_ERR(ninfo[0])) { | ||
1647 | dev_err(dev, "failed to get DT data\n"); | ||
1648 | return PTR_ERR(ninfo[0]); | ||
1649 | } | ||
1650 | |||
1651 | info = ninfo; | ||
1652 | } | ||
1653 | |||
1654 | if (!info) | ||
1655 | return -ENODEV; | ||
1656 | |||
1657 | pm_runtime_enable(dev); | ||
1658 | ret = pm_runtime_get_sync(dev); | ||
1659 | if (ret < 0) { | ||
1660 | dev_err(dev, "pm_runtime_get_sync() failed\n"); | ||
1661 | return ret; | ||
1662 | } | ||
1663 | |||
1664 | for (j = 0; j < EDMA_MAX_CC; j++) { | ||
1665 | if (!info[j]) { | ||
1666 | if (!found) | ||
1667 | return -ENODEV; | ||
1668 | break; | ||
1669 | } | ||
1670 | if (node) { | ||
1671 | ret = of_address_to_resource(node, j, &res[j]); | ||
1672 | if (!ret) | ||
1673 | r[j] = &res[j]; | ||
1674 | } else { | ||
1675 | sprintf(res_name, "edma_cc%d", j); | ||
1676 | r[j] = platform_get_resource_byname(pdev, | ||
1677 | IORESOURCE_MEM, | ||
1678 | res_name); | ||
1679 | } | ||
1680 | if (!r[j]) { | ||
1681 | if (found) | ||
1682 | break; | ||
1683 | else | ||
1684 | return -ENODEV; | ||
1685 | } else { | ||
1686 | found = 1; | ||
1687 | } | ||
1688 | |||
1689 | edmacc_regs_base[j] = devm_ioremap_resource(&pdev->dev, r[j]); | ||
1690 | if (IS_ERR(edmacc_regs_base[j])) | ||
1691 | return PTR_ERR(edmacc_regs_base[j]); | ||
1692 | |||
1693 | edma_cc[j] = devm_kzalloc(&pdev->dev, sizeof(struct edma), | ||
1694 | GFP_KERNEL); | ||
1695 | if (!edma_cc[j]) | ||
1696 | return -ENOMEM; | ||
1697 | |||
1698 | /* Get eDMA3 configuration from IP */ | ||
1699 | ret = edma_setup_from_hw(dev, info[j], edma_cc[j], j); | ||
1700 | if (ret) | ||
1701 | return ret; | ||
1702 | |||
1703 | edma_cc[j]->default_queue = info[j]->default_queue; | ||
1704 | |||
1705 | dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n", | ||
1706 | edmacc_regs_base[j]); | ||
1707 | |||
1708 | for (i = 0; i < edma_cc[j]->num_slots; i++) | ||
1709 | memcpy_toio(edmacc_regs_base[j] + PARM_OFFSET(i), | ||
1710 | &dummy_paramset, PARM_SIZE); | ||
1711 | |||
1712 | /* Mark all channels as unused */ | ||
1713 | memset(edma_cc[j]->edma_unused, 0xff, | ||
1714 | sizeof(edma_cc[j]->edma_unused)); | ||
1715 | |||
1716 | if (info[j]->rsv) { | ||
1717 | |||
1718 | /* Clear the reserved channels in unused list */ | ||
1719 | rsv_chans = info[j]->rsv->rsv_chans; | ||
1720 | if (rsv_chans) { | ||
1721 | for (i = 0; rsv_chans[i][0] != -1; i++) { | ||
1722 | off = rsv_chans[i][0]; | ||
1723 | ln = rsv_chans[i][1]; | ||
1724 | clear_bits(off, ln, | ||
1725 | edma_cc[j]->edma_unused); | ||
1726 | } | ||
1727 | } | ||
1728 | |||
1729 | /* Set the reserved slots in inuse list */ | ||
1730 | rsv_slots = info[j]->rsv->rsv_slots; | ||
1731 | if (rsv_slots) { | ||
1732 | for (i = 0; rsv_slots[i][0] != -1; i++) { | ||
1733 | off = rsv_slots[i][0]; | ||
1734 | ln = rsv_slots[i][1]; | ||
1735 | set_bits(off, ln, | ||
1736 | edma_cc[j]->edma_inuse); | ||
1737 | } | ||
1738 | } | ||
1739 | } | ||
1740 | |||
1741 | /* Clear the xbar mapped channels in unused list */ | ||
1742 | xbar_chans = info[j]->xbar_chans; | ||
1743 | if (xbar_chans) { | ||
1744 | for (i = 0; xbar_chans[i][1] != -1; i++) { | ||
1745 | off = xbar_chans[i][1]; | ||
1746 | clear_bits(off, 1, | ||
1747 | edma_cc[j]->edma_unused); | ||
1748 | } | ||
1749 | } | ||
1750 | |||
1751 | if (node) { | ||
1752 | irq[j] = irq_of_parse_and_map(node, 0); | ||
1753 | err_irq[j] = irq_of_parse_and_map(node, 2); | ||
1754 | } else { | ||
1755 | char irq_name[10]; | ||
1756 | |||
1757 | sprintf(irq_name, "edma%d", j); | ||
1758 | irq[j] = platform_get_irq_byname(pdev, irq_name); | ||
1759 | |||
1760 | sprintf(irq_name, "edma%d_err", j); | ||
1761 | err_irq[j] = platform_get_irq_byname(pdev, irq_name); | ||
1762 | } | ||
1763 | edma_cc[j]->irq_res_start = irq[j]; | ||
1764 | edma_cc[j]->irq_res_end = err_irq[j]; | ||
1765 | |||
1766 | status = devm_request_irq(dev, irq[j], dma_irq_handler, 0, | ||
1767 | "edma", dev); | ||
1768 | if (status < 0) { | ||
1769 | dev_dbg(&pdev->dev, | ||
1770 | "devm_request_irq %d failed --> %d\n", | ||
1771 | irq[j], status); | ||
1772 | return status; | ||
1773 | } | ||
1774 | |||
1775 | status = devm_request_irq(dev, err_irq[j], dma_ccerr_handler, 0, | ||
1776 | "edma_error", dev); | ||
1777 | if (status < 0) { | ||
1778 | dev_dbg(&pdev->dev, | ||
1779 | "devm_request_irq %d failed --> %d\n", | ||
1780 | err_irq[j], status); | ||
1781 | return status; | ||
1782 | } | ||
1783 | |||
1784 | for (i = 0; i < edma_cc[j]->num_channels; i++) | ||
1785 | map_dmach_queue(j, i, info[j]->default_queue); | ||
1786 | |||
1787 | queue_priority_mapping = info[j]->queue_priority_mapping; | ||
1788 | |||
1789 | /* Event queue priority mapping */ | ||
1790 | for (i = 0; queue_priority_mapping[i][0] != -1; i++) | ||
1791 | assign_priority_to_queue(j, | ||
1792 | queue_priority_mapping[i][0], | ||
1793 | queue_priority_mapping[i][1]); | ||
1794 | |||
1795 | /* Map the channel to param entry if channel mapping logic | ||
1796 | * exists | ||
1797 | */ | ||
1798 | if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST) | ||
1799 | map_dmach_param(j); | ||
1800 | |||
1801 | for (i = 0; i < edma_cc[j]->num_region; i++) { | ||
1802 | edma_write_array2(j, EDMA_DRAE, i, 0, 0x0); | ||
1803 | edma_write_array2(j, EDMA_DRAE, i, 1, 0x0); | ||
1804 | edma_write_array(j, EDMA_QRAE, i, 0x0); | ||
1805 | } | ||
1806 | edma_cc[j]->info = info[j]; | ||
1807 | arch_num_cc++; | ||
1808 | |||
1809 | edma_dev_info.id = j; | ||
1810 | platform_device_register_full(&edma_dev_info); | ||
1811 | } | ||
1812 | |||
1813 | return 0; | ||
1814 | } | ||
1815 | |||
1816 | #ifdef CONFIG_PM_SLEEP | ||
1817 | static int edma_pm_resume(struct device *dev) | ||
1818 | { | ||
1819 | int i, j; | ||
1820 | |||
1821 | for (j = 0; j < arch_num_cc; j++) { | ||
1822 | struct edma *cc = edma_cc[j]; | ||
1823 | |||
1824 | s8 (*queue_priority_mapping)[2]; | ||
1825 | |||
1826 | queue_priority_mapping = cc->info->queue_priority_mapping; | ||
1827 | |||
1828 | /* Event queue priority mapping */ | ||
1829 | for (i = 0; queue_priority_mapping[i][0] != -1; i++) | ||
1830 | assign_priority_to_queue(j, | ||
1831 | queue_priority_mapping[i][0], | ||
1832 | queue_priority_mapping[i][1]); | ||
1833 | |||
1834 | /* | ||
1835 | * Map the channel to param entry if channel mapping logic | ||
1836 | * exists | ||
1837 | */ | ||
1838 | if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST) | ||
1839 | map_dmach_param(j); | ||
1840 | |||
1841 | for (i = 0; i < cc->num_channels; i++) { | ||
1842 | if (test_bit(i, cc->edma_inuse)) { | ||
1843 | /* ensure access through shadow region 0 */ | ||
1844 | edma_or_array2(j, EDMA_DRAE, 0, i >> 5, | ||
1845 | BIT(i & 0x1f)); | ||
1846 | |||
1847 | setup_dma_interrupt(i, | ||
1848 | cc->intr_data[i].callback, | ||
1849 | cc->intr_data[i].data); | ||
1850 | } | ||
1851 | } | ||
1852 | } | ||
1853 | |||
1854 | return 0; | ||
1855 | } | ||
1856 | #endif | ||
1857 | |||
1858 | static const struct dev_pm_ops edma_pm_ops = { | ||
1859 | SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, edma_pm_resume) | ||
1860 | }; | ||
1861 | |||
1862 | static struct platform_driver edma_driver = { | ||
1863 | .driver = { | ||
1864 | .name = "edma", | ||
1865 | .pm = &edma_pm_ops, | ||
1866 | .of_match_table = edma_of_ids, | ||
1867 | }, | ||
1868 | .probe = edma_probe, | ||
1869 | }; | ||
1870 | |||
1871 | static int __init edma_init(void) | ||
1872 | { | ||
1873 | return platform_driver_probe(&edma_driver, edma_probe); | ||
1874 | } | ||
1875 | arch_initcall(edma_init); | ||
1876 | |||
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c index 29e08aac8294..28c90bc372bd 100644 --- a/arch/arm/mach-davinci/devices-da8xx.c +++ b/arch/arm/mach-davinci/devices-da8xx.c | |||
@@ -147,150 +147,118 @@ static s8 da850_queue_priority_mapping[][2] = { | |||
147 | {-1, -1} | 147 | {-1, -1} |
148 | }; | 148 | }; |
149 | 149 | ||
150 | static struct edma_soc_info da830_edma_cc0_info = { | 150 | static struct edma_soc_info da8xx_edma0_pdata = { |
151 | .queue_priority_mapping = da8xx_queue_priority_mapping, | 151 | .queue_priority_mapping = da8xx_queue_priority_mapping, |
152 | .default_queue = EVENTQ_1, | 152 | .default_queue = EVENTQ_1, |
153 | }; | 153 | }; |
154 | 154 | ||
155 | static struct edma_soc_info *da830_edma_info[EDMA_MAX_CC] = { | 155 | static struct edma_soc_info da850_edma1_pdata = { |
156 | &da830_edma_cc0_info, | 156 | .queue_priority_mapping = da850_queue_priority_mapping, |
157 | .default_queue = EVENTQ_0, | ||
157 | }; | 158 | }; |
158 | 159 | ||
159 | static struct edma_soc_info da850_edma_cc_info[] = { | 160 | static struct resource da8xx_edma0_resources[] = { |
160 | { | 161 | { |
161 | .queue_priority_mapping = da8xx_queue_priority_mapping, | 162 | .name = "edma3_cc", |
162 | .default_queue = EVENTQ_1, | ||
163 | }, | ||
164 | { | ||
165 | .queue_priority_mapping = da850_queue_priority_mapping, | ||
166 | .default_queue = EVENTQ_0, | ||
167 | }, | ||
168 | }; | ||
169 | |||
170 | static struct edma_soc_info *da850_edma_info[EDMA_MAX_CC] = { | ||
171 | &da850_edma_cc_info[0], | ||
172 | &da850_edma_cc_info[1], | ||
173 | }; | ||
174 | |||
175 | static struct resource da830_edma_resources[] = { | ||
176 | { | ||
177 | .name = "edma_cc0", | ||
178 | .start = DA8XX_TPCC_BASE, | 163 | .start = DA8XX_TPCC_BASE, |
179 | .end = DA8XX_TPCC_BASE + SZ_32K - 1, | 164 | .end = DA8XX_TPCC_BASE + SZ_32K - 1, |
180 | .flags = IORESOURCE_MEM, | 165 | .flags = IORESOURCE_MEM, |
181 | }, | 166 | }, |
182 | { | 167 | { |
183 | .name = "edma_tc0", | 168 | .name = "edma3_tc0", |
184 | .start = DA8XX_TPTC0_BASE, | 169 | .start = DA8XX_TPTC0_BASE, |
185 | .end = DA8XX_TPTC0_BASE + SZ_1K - 1, | 170 | .end = DA8XX_TPTC0_BASE + SZ_1K - 1, |
186 | .flags = IORESOURCE_MEM, | 171 | .flags = IORESOURCE_MEM, |
187 | }, | 172 | }, |
188 | { | 173 | { |
189 | .name = "edma_tc1", | 174 | .name = "edma3_tc1", |
190 | .start = DA8XX_TPTC1_BASE, | 175 | .start = DA8XX_TPTC1_BASE, |
191 | .end = DA8XX_TPTC1_BASE + SZ_1K - 1, | 176 | .end = DA8XX_TPTC1_BASE + SZ_1K - 1, |
192 | .flags = IORESOURCE_MEM, | 177 | .flags = IORESOURCE_MEM, |
193 | }, | 178 | }, |
194 | { | 179 | { |
195 | .name = "edma0", | 180 | .name = "edma3_ccint", |
196 | .start = IRQ_DA8XX_CCINT0, | 181 | .start = IRQ_DA8XX_CCINT0, |
197 | .flags = IORESOURCE_IRQ, | 182 | .flags = IORESOURCE_IRQ, |
198 | }, | 183 | }, |
199 | { | 184 | { |
200 | .name = "edma0_err", | 185 | .name = "edma3_ccerrint", |
201 | .start = IRQ_DA8XX_CCERRINT, | 186 | .start = IRQ_DA8XX_CCERRINT, |
202 | .flags = IORESOURCE_IRQ, | 187 | .flags = IORESOURCE_IRQ, |
203 | }, | 188 | }, |
204 | }; | 189 | }; |
205 | 190 | ||
206 | static struct resource da850_edma_resources[] = { | 191 | static struct resource da850_edma1_resources[] = { |
207 | { | ||
208 | .name = "edma_cc0", | ||
209 | .start = DA8XX_TPCC_BASE, | ||
210 | .end = DA8XX_TPCC_BASE + SZ_32K - 1, | ||
211 | .flags = IORESOURCE_MEM, | ||
212 | }, | ||
213 | { | ||
214 | .name = "edma_tc0", | ||
215 | .start = DA8XX_TPTC0_BASE, | ||
216 | .end = DA8XX_TPTC0_BASE + SZ_1K - 1, | ||
217 | .flags = IORESOURCE_MEM, | ||
218 | }, | ||
219 | { | ||
220 | .name = "edma_tc1", | ||
221 | .start = DA8XX_TPTC1_BASE, | ||
222 | .end = DA8XX_TPTC1_BASE + SZ_1K - 1, | ||
223 | .flags = IORESOURCE_MEM, | ||
224 | }, | ||
225 | { | 192 | { |
226 | .name = "edma_cc1", | 193 | .name = "edma3_cc", |
227 | .start = DA850_TPCC1_BASE, | 194 | .start = DA850_TPCC1_BASE, |
228 | .end = DA850_TPCC1_BASE + SZ_32K - 1, | 195 | .end = DA850_TPCC1_BASE + SZ_32K - 1, |
229 | .flags = IORESOURCE_MEM, | 196 | .flags = IORESOURCE_MEM, |
230 | }, | 197 | }, |
231 | { | 198 | { |
232 | .name = "edma_tc2", | 199 | .name = "edma3_tc0", |
233 | .start = DA850_TPTC2_BASE, | 200 | .start = DA850_TPTC2_BASE, |
234 | .end = DA850_TPTC2_BASE + SZ_1K - 1, | 201 | .end = DA850_TPTC2_BASE + SZ_1K - 1, |
235 | .flags = IORESOURCE_MEM, | 202 | .flags = IORESOURCE_MEM, |
236 | }, | 203 | }, |
237 | { | 204 | { |
238 | .name = "edma0", | 205 | .name = "edma3_ccint", |
239 | .start = IRQ_DA8XX_CCINT0, | ||
240 | .flags = IORESOURCE_IRQ, | ||
241 | }, | ||
242 | { | ||
243 | .name = "edma0_err", | ||
244 | .start = IRQ_DA8XX_CCERRINT, | ||
245 | .flags = IORESOURCE_IRQ, | ||
246 | }, | ||
247 | { | ||
248 | .name = "edma1", | ||
249 | .start = IRQ_DA850_CCINT1, | 206 | .start = IRQ_DA850_CCINT1, |
250 | .flags = IORESOURCE_IRQ, | 207 | .flags = IORESOURCE_IRQ, |
251 | }, | 208 | }, |
252 | { | 209 | { |
253 | .name = "edma1_err", | 210 | .name = "edma3_ccerrint", |
254 | .start = IRQ_DA850_CCERRINT1, | 211 | .start = IRQ_DA850_CCERRINT1, |
255 | .flags = IORESOURCE_IRQ, | 212 | .flags = IORESOURCE_IRQ, |
256 | }, | 213 | }, |
257 | }; | 214 | }; |
258 | 215 | ||
259 | static struct platform_device da830_edma_device = { | 216 | static const struct platform_device_info da8xx_edma0_device __initconst = { |
260 | .name = "edma", | 217 | .name = "edma", |
261 | .id = -1, | 218 | .id = 0, |
262 | .dev = { | 219 | .dma_mask = DMA_BIT_MASK(32), |
263 | .platform_data = da830_edma_info, | 220 | .res = da8xx_edma0_resources, |
264 | }, | 221 | .num_res = ARRAY_SIZE(da8xx_edma0_resources), |
265 | .num_resources = ARRAY_SIZE(da830_edma_resources), | 222 | .data = &da8xx_edma0_pdata, |
266 | .resource = da830_edma_resources, | 223 | .size_data = sizeof(da8xx_edma0_pdata), |
267 | }; | 224 | }; |
268 | 225 | ||
269 | static struct platform_device da850_edma_device = { | 226 | static const struct platform_device_info da850_edma1_device __initconst = { |
270 | .name = "edma", | 227 | .name = "edma", |
271 | .id = -1, | 228 | .id = 1, |
272 | .dev = { | 229 | .dma_mask = DMA_BIT_MASK(32), |
273 | .platform_data = da850_edma_info, | 230 | .res = da850_edma1_resources, |
274 | }, | 231 | .num_res = ARRAY_SIZE(da850_edma1_resources), |
275 | .num_resources = ARRAY_SIZE(da850_edma_resources), | 232 | .data = &da850_edma1_pdata, |
276 | .resource = da850_edma_resources, | 233 | .size_data = sizeof(da850_edma1_pdata), |
277 | }; | 234 | }; |
278 | 235 | ||
279 | int __init da830_register_edma(struct edma_rsv_info *rsv) | 236 | int __init da830_register_edma(struct edma_rsv_info *rsv) |
280 | { | 237 | { |
281 | da830_edma_cc0_info.rsv = rsv; | 238 | struct platform_device *edma_pdev; |
239 | |||
240 | da8xx_edma0_pdata.rsv = rsv; | ||
282 | 241 | ||
283 | return platform_device_register(&da830_edma_device); | 242 | edma_pdev = platform_device_register_full(&da8xx_edma0_device); |
243 | return IS_ERR(edma_pdev) ? PTR_ERR(edma_pdev) : 0; | ||
284 | } | 244 | } |
285 | 245 | ||
286 | int __init da850_register_edma(struct edma_rsv_info *rsv[2]) | 246 | int __init da850_register_edma(struct edma_rsv_info *rsv[2]) |
287 | { | 247 | { |
248 | struct platform_device *edma_pdev; | ||
249 | |||
288 | if (rsv) { | 250 | if (rsv) { |
289 | da850_edma_cc_info[0].rsv = rsv[0]; | 251 | da8xx_edma0_pdata.rsv = rsv[0]; |
290 | da850_edma_cc_info[1].rsv = rsv[1]; | 252 | da850_edma1_pdata.rsv = rsv[1]; |
291 | } | 253 | } |
292 | 254 | ||
293 | return platform_device_register(&da850_edma_device); | 255 | edma_pdev = platform_device_register_full(&da8xx_edma0_device); |
256 | if (IS_ERR(edma_pdev)) { | ||
257 | pr_warn("%s: Failed to register eDMA0\n", __func__); | ||
258 | return PTR_ERR(edma_pdev); | ||
259 | } | ||
260 | edma_pdev = platform_device_register_full(&da850_edma1_device); | ||
261 | return IS_ERR(edma_pdev) ? PTR_ERR(edma_pdev) : 0; | ||
294 | } | 262 | } |
295 | 263 | ||
296 | static struct resource da8xx_i2c_resources0[] = { | 264 | static struct resource da8xx_i2c_resources0[] = { |
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c index 567dc56fe8cd..609950b8c191 100644 --- a/arch/arm/mach-davinci/dm355.c +++ b/arch/arm/mach-davinci/dm355.c | |||
@@ -569,61 +569,58 @@ static u8 dm355_default_priorities[DAVINCI_N_AINTC_IRQ] = { | |||
569 | 569 | ||
570 | /*----------------------------------------------------------------------*/ | 570 | /*----------------------------------------------------------------------*/ |
571 | 571 | ||
572 | static s8 | 572 | static s8 queue_priority_mapping[][2] = { |
573 | queue_priority_mapping[][2] = { | ||
574 | /* {event queue no, Priority} */ | 573 | /* {event queue no, Priority} */ |
575 | {0, 3}, | 574 | {0, 3}, |
576 | {1, 7}, | 575 | {1, 7}, |
577 | {-1, -1}, | 576 | {-1, -1}, |
578 | }; | 577 | }; |
579 | 578 | ||
580 | static struct edma_soc_info edma_cc0_info = { | 579 | static struct edma_soc_info dm355_edma_pdata = { |
581 | .queue_priority_mapping = queue_priority_mapping, | 580 | .queue_priority_mapping = queue_priority_mapping, |
582 | .default_queue = EVENTQ_1, | 581 | .default_queue = EVENTQ_1, |
583 | }; | 582 | }; |
584 | 583 | ||
585 | static struct edma_soc_info *dm355_edma_info[EDMA_MAX_CC] = { | ||
586 | &edma_cc0_info, | ||
587 | }; | ||
588 | |||
589 | static struct resource edma_resources[] = { | 584 | static struct resource edma_resources[] = { |
590 | { | 585 | { |
591 | .name = "edma_cc0", | 586 | .name = "edma3_cc", |
592 | .start = 0x01c00000, | 587 | .start = 0x01c00000, |
593 | .end = 0x01c00000 + SZ_64K - 1, | 588 | .end = 0x01c00000 + SZ_64K - 1, |
594 | .flags = IORESOURCE_MEM, | 589 | .flags = IORESOURCE_MEM, |
595 | }, | 590 | }, |
596 | { | 591 | { |
597 | .name = "edma_tc0", | 592 | .name = "edma3_tc0", |
598 | .start = 0x01c10000, | 593 | .start = 0x01c10000, |
599 | .end = 0x01c10000 + SZ_1K - 1, | 594 | .end = 0x01c10000 + SZ_1K - 1, |
600 | .flags = IORESOURCE_MEM, | 595 | .flags = IORESOURCE_MEM, |
601 | }, | 596 | }, |
602 | { | 597 | { |
603 | .name = "edma_tc1", | 598 | .name = "edma3_tc1", |
604 | .start = 0x01c10400, | 599 | .start = 0x01c10400, |
605 | .end = 0x01c10400 + SZ_1K - 1, | 600 | .end = 0x01c10400 + SZ_1K - 1, |
606 | .flags = IORESOURCE_MEM, | 601 | .flags = IORESOURCE_MEM, |
607 | }, | 602 | }, |
608 | { | 603 | { |
609 | .name = "edma0", | 604 | .name = "edma3_ccint", |
610 | .start = IRQ_CCINT0, | 605 | .start = IRQ_CCINT0, |
611 | .flags = IORESOURCE_IRQ, | 606 | .flags = IORESOURCE_IRQ, |
612 | }, | 607 | }, |
613 | { | 608 | { |
614 | .name = "edma0_err", | 609 | .name = "edma3_ccerrint", |
615 | .start = IRQ_CCERRINT, | 610 | .start = IRQ_CCERRINT, |
616 | .flags = IORESOURCE_IRQ, | 611 | .flags = IORESOURCE_IRQ, |
617 | }, | 612 | }, |
618 | /* not using (or muxing) TC*_ERR */ | 613 | /* not using (or muxing) TC*_ERR */ |
619 | }; | 614 | }; |
620 | 615 | ||
621 | static struct platform_device dm355_edma_device = { | 616 | static const struct platform_device_info dm355_edma_device __initconst = { |
622 | .name = "edma", | 617 | .name = "edma", |
623 | .id = 0, | 618 | .id = 0, |
624 | .dev.platform_data = dm355_edma_info, | 619 | .dma_mask = DMA_BIT_MASK(32), |
625 | .num_resources = ARRAY_SIZE(edma_resources), | 620 | .res = edma_resources, |
626 | .resource = edma_resources, | 621 | .num_res = ARRAY_SIZE(edma_resources), |
622 | .data = &dm355_edma_pdata, | ||
623 | .size_data = sizeof(dm355_edma_pdata), | ||
627 | }; | 624 | }; |
628 | 625 | ||
629 | static struct resource dm355_asp1_resources[] = { | 626 | static struct resource dm355_asp1_resources[] = { |
@@ -1062,13 +1059,18 @@ int __init dm355_init_video(struct vpfe_config *vpfe_cfg, | |||
1062 | 1059 | ||
1063 | static int __init dm355_init_devices(void) | 1060 | static int __init dm355_init_devices(void) |
1064 | { | 1061 | { |
1062 | struct platform_device *edma_pdev; | ||
1065 | int ret = 0; | 1063 | int ret = 0; |
1066 | 1064 | ||
1067 | if (!cpu_is_davinci_dm355()) | 1065 | if (!cpu_is_davinci_dm355()) |
1068 | return 0; | 1066 | return 0; |
1069 | 1067 | ||
1070 | davinci_cfg_reg(DM355_INT_EDMA_CC); | 1068 | davinci_cfg_reg(DM355_INT_EDMA_CC); |
1071 | platform_device_register(&dm355_edma_device); | 1069 | edma_pdev = platform_device_register_full(&dm355_edma_device); |
1070 | if (IS_ERR(edma_pdev)) { | ||
1071 | pr_warn("%s: Failed to register eDMA\n", __func__); | ||
1072 | return PTR_ERR(edma_pdev); | ||
1073 | } | ||
1072 | 1074 | ||
1073 | ret = davinci_init_wdt(); | 1075 | ret = davinci_init_wdt(); |
1074 | if (ret) | 1076 | if (ret) |
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c index 6a890a8486d0..2068cbeaeb03 100644 --- a/arch/arm/mach-davinci/dm365.c +++ b/arch/arm/mach-davinci/dm365.c | |||
@@ -853,8 +853,7 @@ static u8 dm365_default_priorities[DAVINCI_N_AINTC_IRQ] = { | |||
853 | }; | 853 | }; |
854 | 854 | ||
855 | /* Four Transfer Controllers on DM365 */ | 855 | /* Four Transfer Controllers on DM365 */ |
856 | static s8 | 856 | static s8 dm365_queue_priority_mapping[][2] = { |
857 | dm365_queue_priority_mapping[][2] = { | ||
858 | /* {event queue no, Priority} */ | 857 | /* {event queue no, Priority} */ |
859 | {0, 7}, | 858 | {0, 7}, |
860 | {1, 7}, | 859 | {1, 7}, |
@@ -863,53 +862,49 @@ dm365_queue_priority_mapping[][2] = { | |||
863 | {-1, -1}, | 862 | {-1, -1}, |
864 | }; | 863 | }; |
865 | 864 | ||
866 | static struct edma_soc_info edma_cc0_info = { | 865 | static struct edma_soc_info dm365_edma_pdata = { |
867 | .queue_priority_mapping = dm365_queue_priority_mapping, | 866 | .queue_priority_mapping = dm365_queue_priority_mapping, |
868 | .default_queue = EVENTQ_3, | 867 | .default_queue = EVENTQ_3, |
869 | }; | 868 | }; |
870 | 869 | ||
871 | static struct edma_soc_info *dm365_edma_info[EDMA_MAX_CC] = { | ||
872 | &edma_cc0_info, | ||
873 | }; | ||
874 | |||
875 | static struct resource edma_resources[] = { | 870 | static struct resource edma_resources[] = { |
876 | { | 871 | { |
877 | .name = "edma_cc0", | 872 | .name = "edma3_cc", |
878 | .start = 0x01c00000, | 873 | .start = 0x01c00000, |
879 | .end = 0x01c00000 + SZ_64K - 1, | 874 | .end = 0x01c00000 + SZ_64K - 1, |
880 | .flags = IORESOURCE_MEM, | 875 | .flags = IORESOURCE_MEM, |
881 | }, | 876 | }, |
882 | { | 877 | { |
883 | .name = "edma_tc0", | 878 | .name = "edma3_tc0", |
884 | .start = 0x01c10000, | 879 | .start = 0x01c10000, |
885 | .end = 0x01c10000 + SZ_1K - 1, | 880 | .end = 0x01c10000 + SZ_1K - 1, |
886 | .flags = IORESOURCE_MEM, | 881 | .flags = IORESOURCE_MEM, |
887 | }, | 882 | }, |
888 | { | 883 | { |
889 | .name = "edma_tc1", | 884 | .name = "edma3_tc1", |
890 | .start = 0x01c10400, | 885 | .start = 0x01c10400, |
891 | .end = 0x01c10400 + SZ_1K - 1, | 886 | .end = 0x01c10400 + SZ_1K - 1, |
892 | .flags = IORESOURCE_MEM, | 887 | .flags = IORESOURCE_MEM, |
893 | }, | 888 | }, |
894 | { | 889 | { |
895 | .name = "edma_tc2", | 890 | .name = "edma3_tc2", |
896 | .start = 0x01c10800, | 891 | .start = 0x01c10800, |
897 | .end = 0x01c10800 + SZ_1K - 1, | 892 | .end = 0x01c10800 + SZ_1K - 1, |
898 | .flags = IORESOURCE_MEM, | 893 | .flags = IORESOURCE_MEM, |
899 | }, | 894 | }, |
900 | { | 895 | { |
901 | .name = "edma_tc3", | 896 | .name = "edma3_tc3", |
902 | .start = 0x01c10c00, | 897 | .start = 0x01c10c00, |
903 | .end = 0x01c10c00 + SZ_1K - 1, | 898 | .end = 0x01c10c00 + SZ_1K - 1, |
904 | .flags = IORESOURCE_MEM, | 899 | .flags = IORESOURCE_MEM, |
905 | }, | 900 | }, |
906 | { | 901 | { |
907 | .name = "edma0", | 902 | .name = "edma3_ccint", |
908 | .start = IRQ_CCINT0, | 903 | .start = IRQ_CCINT0, |
909 | .flags = IORESOURCE_IRQ, | 904 | .flags = IORESOURCE_IRQ, |
910 | }, | 905 | }, |
911 | { | 906 | { |
912 | .name = "edma0_err", | 907 | .name = "edma3_ccerrint", |
913 | .start = IRQ_CCERRINT, | 908 | .start = IRQ_CCERRINT, |
914 | .flags = IORESOURCE_IRQ, | 909 | .flags = IORESOURCE_IRQ, |
915 | }, | 910 | }, |
@@ -919,7 +914,7 @@ static struct resource edma_resources[] = { | |||
919 | static struct platform_device dm365_edma_device = { | 914 | static struct platform_device dm365_edma_device = { |
920 | .name = "edma", | 915 | .name = "edma", |
921 | .id = 0, | 916 | .id = 0, |
922 | .dev.platform_data = dm365_edma_info, | 917 | .dev.platform_data = &dm365_edma_pdata, |
923 | .num_resources = ARRAY_SIZE(edma_resources), | 918 | .num_resources = ARRAY_SIZE(edma_resources), |
924 | .resource = edma_resources, | 919 | .resource = edma_resources, |
925 | }; | 920 | }; |
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c index dc52657909c4..d38f5049d56e 100644 --- a/arch/arm/mach-davinci/dm644x.c +++ b/arch/arm/mach-davinci/dm644x.c | |||
@@ -498,61 +498,58 @@ static u8 dm644x_default_priorities[DAVINCI_N_AINTC_IRQ] = { | |||
498 | 498 | ||
499 | /*----------------------------------------------------------------------*/ | 499 | /*----------------------------------------------------------------------*/ |
500 | 500 | ||
501 | static s8 | 501 | static s8 queue_priority_mapping[][2] = { |
502 | queue_priority_mapping[][2] = { | ||
503 | /* {event queue no, Priority} */ | 502 | /* {event queue no, Priority} */ |
504 | {0, 3}, | 503 | {0, 3}, |
505 | {1, 7}, | 504 | {1, 7}, |
506 | {-1, -1}, | 505 | {-1, -1}, |
507 | }; | 506 | }; |
508 | 507 | ||
509 | static struct edma_soc_info edma_cc0_info = { | 508 | static struct edma_soc_info dm644x_edma_pdata = { |
510 | .queue_priority_mapping = queue_priority_mapping, | 509 | .queue_priority_mapping = queue_priority_mapping, |
511 | .default_queue = EVENTQ_1, | 510 | .default_queue = EVENTQ_1, |
512 | }; | 511 | }; |
513 | 512 | ||
514 | static struct edma_soc_info *dm644x_edma_info[EDMA_MAX_CC] = { | ||
515 | &edma_cc0_info, | ||
516 | }; | ||
517 | |||
518 | static struct resource edma_resources[] = { | 513 | static struct resource edma_resources[] = { |
519 | { | 514 | { |
520 | .name = "edma_cc0", | 515 | .name = "edma3_cc", |
521 | .start = 0x01c00000, | 516 | .start = 0x01c00000, |
522 | .end = 0x01c00000 + SZ_64K - 1, | 517 | .end = 0x01c00000 + SZ_64K - 1, |
523 | .flags = IORESOURCE_MEM, | 518 | .flags = IORESOURCE_MEM, |
524 | }, | 519 | }, |
525 | { | 520 | { |
526 | .name = "edma_tc0", | 521 | .name = "edma3_tc0", |
527 | .start = 0x01c10000, | 522 | .start = 0x01c10000, |
528 | .end = 0x01c10000 + SZ_1K - 1, | 523 | .end = 0x01c10000 + SZ_1K - 1, |
529 | .flags = IORESOURCE_MEM, | 524 | .flags = IORESOURCE_MEM, |
530 | }, | 525 | }, |
531 | { | 526 | { |
532 | .name = "edma_tc1", | 527 | .name = "edma3_tc1", |
533 | .start = 0x01c10400, | 528 | .start = 0x01c10400, |
534 | .end = 0x01c10400 + SZ_1K - 1, | 529 | .end = 0x01c10400 + SZ_1K - 1, |
535 | .flags = IORESOURCE_MEM, | 530 | .flags = IORESOURCE_MEM, |
536 | }, | 531 | }, |
537 | { | 532 | { |
538 | .name = "edma0", | 533 | .name = "edma3_ccint", |
539 | .start = IRQ_CCINT0, | 534 | .start = IRQ_CCINT0, |
540 | .flags = IORESOURCE_IRQ, | 535 | .flags = IORESOURCE_IRQ, |
541 | }, | 536 | }, |
542 | { | 537 | { |
543 | .name = "edma0_err", | 538 | .name = "edma3_ccerrint", |
544 | .start = IRQ_CCERRINT, | 539 | .start = IRQ_CCERRINT, |
545 | .flags = IORESOURCE_IRQ, | 540 | .flags = IORESOURCE_IRQ, |
546 | }, | 541 | }, |
547 | /* not using TC*_ERR */ | 542 | /* not using TC*_ERR */ |
548 | }; | 543 | }; |
549 | 544 | ||
550 | static struct platform_device dm644x_edma_device = { | 545 | static const struct platform_device_info dm644x_edma_device __initconst = { |
551 | .name = "edma", | 546 | .name = "edma", |
552 | .id = 0, | 547 | .id = 0, |
553 | .dev.platform_data = dm644x_edma_info, | 548 | .dma_mask = DMA_BIT_MASK(32), |
554 | .num_resources = ARRAY_SIZE(edma_resources), | 549 | .res = edma_resources, |
555 | .resource = edma_resources, | 550 | .num_res = ARRAY_SIZE(edma_resources), |
551 | .data = &dm644x_edma_pdata, | ||
552 | .size_data = sizeof(dm644x_edma_pdata), | ||
556 | }; | 553 | }; |
557 | 554 | ||
558 | /* DM6446 EVM uses ASP0; line-out is a pair of RCA jacks */ | 555 | /* DM6446 EVM uses ASP0; line-out is a pair of RCA jacks */ |
@@ -950,12 +947,17 @@ int __init dm644x_init_video(struct vpfe_config *vpfe_cfg, | |||
950 | 947 | ||
951 | static int __init dm644x_init_devices(void) | 948 | static int __init dm644x_init_devices(void) |
952 | { | 949 | { |
950 | struct platform_device *edma_pdev; | ||
953 | int ret = 0; | 951 | int ret = 0; |
954 | 952 | ||
955 | if (!cpu_is_davinci_dm644x()) | 953 | if (!cpu_is_davinci_dm644x()) |
956 | return 0; | 954 | return 0; |
957 | 955 | ||
958 | platform_device_register(&dm644x_edma_device); | 956 | edma_pdev = platform_device_register_full(&dm644x_edma_device); |
957 | if (IS_ERR(edma_pdev)) { | ||
958 | pr_warn("%s: Failed to register eDMA\n", __func__); | ||
959 | return PTR_ERR(edma_pdev); | ||
960 | } | ||
959 | 961 | ||
960 | platform_device_register(&dm644x_mdio_device); | 962 | platform_device_register(&dm644x_mdio_device); |
961 | platform_device_register(&dm644x_emac_device); | 963 | platform_device_register(&dm644x_emac_device); |
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c index 3f842bb266d6..70eb42725eec 100644 --- a/arch/arm/mach-davinci/dm646x.c +++ b/arch/arm/mach-davinci/dm646x.c | |||
@@ -531,8 +531,7 @@ static u8 dm646x_default_priorities[DAVINCI_N_AINTC_IRQ] = { | |||
531 | /*----------------------------------------------------------------------*/ | 531 | /*----------------------------------------------------------------------*/ |
532 | 532 | ||
533 | /* Four Transfer Controllers on DM646x */ | 533 | /* Four Transfer Controllers on DM646x */ |
534 | static s8 | 534 | static s8 dm646x_queue_priority_mapping[][2] = { |
535 | dm646x_queue_priority_mapping[][2] = { | ||
536 | /* {event queue no, Priority} */ | 535 | /* {event queue no, Priority} */ |
537 | {0, 4}, | 536 | {0, 4}, |
538 | {1, 0}, | 537 | {1, 0}, |
@@ -541,65 +540,63 @@ dm646x_queue_priority_mapping[][2] = { | |||
541 | {-1, -1}, | 540 | {-1, -1}, |
542 | }; | 541 | }; |
543 | 542 | ||
544 | static struct edma_soc_info edma_cc0_info = { | 543 | static struct edma_soc_info dm646x_edma_pdata = { |
545 | .queue_priority_mapping = dm646x_queue_priority_mapping, | 544 | .queue_priority_mapping = dm646x_queue_priority_mapping, |
546 | .default_queue = EVENTQ_1, | 545 | .default_queue = EVENTQ_1, |
547 | }; | 546 | }; |
548 | 547 | ||
549 | static struct edma_soc_info *dm646x_edma_info[EDMA_MAX_CC] = { | ||
550 | &edma_cc0_info, | ||
551 | }; | ||
552 | |||
553 | static struct resource edma_resources[] = { | 548 | static struct resource edma_resources[] = { |
554 | { | 549 | { |
555 | .name = "edma_cc0", | 550 | .name = "edma3_cc", |
556 | .start = 0x01c00000, | 551 | .start = 0x01c00000, |
557 | .end = 0x01c00000 + SZ_64K - 1, | 552 | .end = 0x01c00000 + SZ_64K - 1, |
558 | .flags = IORESOURCE_MEM, | 553 | .flags = IORESOURCE_MEM, |
559 | }, | 554 | }, |
560 | { | 555 | { |
561 | .name = "edma_tc0", | 556 | .name = "edma3_tc0", |
562 | .start = 0x01c10000, | 557 | .start = 0x01c10000, |
563 | .end = 0x01c10000 + SZ_1K - 1, | 558 | .end = 0x01c10000 + SZ_1K - 1, |
564 | .flags = IORESOURCE_MEM, | 559 | .flags = IORESOURCE_MEM, |
565 | }, | 560 | }, |
566 | { | 561 | { |
567 | .name = "edma_tc1", | 562 | .name = "edma3_tc1", |
568 | .start = 0x01c10400, | 563 | .start = 0x01c10400, |
569 | .end = 0x01c10400 + SZ_1K - 1, | 564 | .end = 0x01c10400 + SZ_1K - 1, |
570 | .flags = IORESOURCE_MEM, | 565 | .flags = IORESOURCE_MEM, |
571 | }, | 566 | }, |
572 | { | 567 | { |
573 | .name = "edma_tc2", | 568 | .name = "edma3_tc2", |
574 | .start = 0x01c10800, | 569 | .start = 0x01c10800, |
575 | .end = 0x01c10800 + SZ_1K - 1, | 570 | .end = 0x01c10800 + SZ_1K - 1, |
576 | .flags = IORESOURCE_MEM, | 571 | .flags = IORESOURCE_MEM, |
577 | }, | 572 | }, |
578 | { | 573 | { |
579 | .name = "edma_tc3", | 574 | .name = "edma3_tc3", |
580 | .start = 0x01c10c00, | 575 | .start = 0x01c10c00, |
581 | .end = 0x01c10c00 + SZ_1K - 1, | 576 | .end = 0x01c10c00 + SZ_1K - 1, |
582 | .flags = IORESOURCE_MEM, | 577 | .flags = IORESOURCE_MEM, |
583 | }, | 578 | }, |
584 | { | 579 | { |
585 | .name = "edma0", | 580 | .name = "edma3_ccint", |
586 | .start = IRQ_CCINT0, | 581 | .start = IRQ_CCINT0, |
587 | .flags = IORESOURCE_IRQ, | 582 | .flags = IORESOURCE_IRQ, |
588 | }, | 583 | }, |
589 | { | 584 | { |
590 | .name = "edma0_err", | 585 | .name = "edma3_ccerrint", |
591 | .start = IRQ_CCERRINT, | 586 | .start = IRQ_CCERRINT, |
592 | .flags = IORESOURCE_IRQ, | 587 | .flags = IORESOURCE_IRQ, |
593 | }, | 588 | }, |
594 | /* not using TC*_ERR */ | 589 | /* not using TC*_ERR */ |
595 | }; | 590 | }; |
596 | 591 | ||
597 | static struct platform_device dm646x_edma_device = { | 592 | static const struct platform_device_info dm646x_edma_device __initconst = { |
598 | .name = "edma", | 593 | .name = "edma", |
599 | .id = 0, | 594 | .id = 0, |
600 | .dev.platform_data = dm646x_edma_info, | 595 | .dma_mask = DMA_BIT_MASK(32), |
601 | .num_resources = ARRAY_SIZE(edma_resources), | 596 | .res = edma_resources, |
602 | .resource = edma_resources, | 597 | .num_res = ARRAY_SIZE(edma_resources), |
598 | .data = &dm646x_edma_pdata, | ||
599 | .size_data = sizeof(dm646x_edma_pdata), | ||
603 | }; | 600 | }; |
604 | 601 | ||
605 | static struct resource dm646x_mcasp0_resources[] = { | 602 | static struct resource dm646x_mcasp0_resources[] = { |
@@ -936,9 +933,12 @@ void dm646x_setup_vpif(struct vpif_display_config *display_config, | |||
936 | 933 | ||
937 | int __init dm646x_init_edma(struct edma_rsv_info *rsv) | 934 | int __init dm646x_init_edma(struct edma_rsv_info *rsv) |
938 | { | 935 | { |
939 | edma_cc0_info.rsv = rsv; | 936 | struct platform_device *edma_pdev; |
937 | |||
938 | dm646x_edma_pdata.rsv = rsv; | ||
940 | 939 | ||
941 | return platform_device_register(&dm646x_edma_device); | 940 | edma_pdev = platform_device_register_full(&dm646x_edma_device); |
941 | return IS_ERR(edma_pdev) ? PTR_ERR(edma_pdev) : 0; | ||
942 | } | 942 | } |
943 | 943 | ||
944 | void __init dm646x_init(void) | 944 | void __init dm646x_init(void) |
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 07d2e100caab..e0b6736db984 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig | |||
@@ -90,7 +90,6 @@ config ARCH_OMAP2PLUS | |||
90 | select OMAP_GPMC | 90 | select OMAP_GPMC |
91 | select PINCTRL | 91 | select PINCTRL |
92 | select SOC_BUS | 92 | select SOC_BUS |
93 | select TI_PRIV_EDMA | ||
94 | select OMAP_IRQCHIP | 93 | select OMAP_IRQCHIP |
95 | help | 94 | help |
96 | Systems based on OMAP2, OMAP3, OMAP4 or OMAP5 | 95 | Systems based on OMAP2, OMAP3, OMAP4 or OMAP5 |
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index b4584757dae0..6a388a7c6429 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
@@ -486,7 +486,7 @@ config TI_EDMA | |||
486 | depends on ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE | 486 | depends on ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE |
487 | select DMA_ENGINE | 487 | select DMA_ENGINE |
488 | select DMA_VIRTUAL_CHANNELS | 488 | select DMA_VIRTUAL_CHANNELS |
489 | select TI_PRIV_EDMA | 489 | select TI_DMA_CROSSBAR if ARCH_OMAP |
490 | default n | 490 | default n |
491 | help | 491 | help |
492 | Enable support for the TI EDMA controller. This DMA | 492 | Enable support for the TI EDMA controller. This DMA |
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 558b0b4e7536..31722d436a42 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c | |||
@@ -25,28 +25,93 @@ | |||
25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
26 | #include <linux/spinlock.h> | 26 | #include <linux/spinlock.h> |
27 | #include <linux/of.h> | 27 | #include <linux/of.h> |
28 | #include <linux/of_dma.h> | ||
29 | #include <linux/of_irq.h> | ||
30 | #include <linux/of_address.h> | ||
31 | #include <linux/of_device.h> | ||
32 | #include <linux/pm_runtime.h> | ||
28 | 33 | ||
29 | #include <linux/platform_data/edma.h> | 34 | #include <linux/platform_data/edma.h> |
30 | 35 | ||
31 | #include "dmaengine.h" | 36 | #include "dmaengine.h" |
32 | #include "virt-dma.h" | 37 | #include "virt-dma.h" |
33 | 38 | ||
34 | /* | 39 | /* Offsets matching "struct edmacc_param" */ |
35 | * This will go away when the private EDMA API is folded | 40 | #define PARM_OPT 0x00 |
36 | * into this driver and the platform device(s) are | 41 | #define PARM_SRC 0x04 |
37 | * instantiated in the arch code. We can only get away | 42 | #define PARM_A_B_CNT 0x08 |
38 | * with this simplification because DA8XX may not be built | 43 | #define PARM_DST 0x0c |
39 | * in the same kernel image with other DaVinci parts. This | 44 | #define PARM_SRC_DST_BIDX 0x10 |
40 | * avoids having to sprinkle dmaengine driver platform devices | 45 | #define PARM_LINK_BCNTRLD 0x14 |
41 | * and data throughout all the existing board files. | 46 | #define PARM_SRC_DST_CIDX 0x18 |
42 | */ | 47 | #define PARM_CCNT 0x1c |
43 | #ifdef CONFIG_ARCH_DAVINCI_DA8XX | 48 | |
44 | #define EDMA_CTLRS 2 | 49 | #define PARM_SIZE 0x20 |
45 | #define EDMA_CHANS 32 | 50 | |
46 | #else | 51 | /* Offsets for EDMA CC global channel registers and their shadows */ |
47 | #define EDMA_CTLRS 1 | 52 | #define SH_ER 0x00 /* 64 bits */ |
48 | #define EDMA_CHANS 64 | 53 | #define SH_ECR 0x08 /* 64 bits */ |
49 | #endif /* CONFIG_ARCH_DAVINCI_DA8XX */ | 54 | #define SH_ESR 0x10 /* 64 bits */ |
55 | #define SH_CER 0x18 /* 64 bits */ | ||
56 | #define SH_EER 0x20 /* 64 bits */ | ||
57 | #define SH_EECR 0x28 /* 64 bits */ | ||
58 | #define SH_EESR 0x30 /* 64 bits */ | ||
59 | #define SH_SER 0x38 /* 64 bits */ | ||
60 | #define SH_SECR 0x40 /* 64 bits */ | ||
61 | #define SH_IER 0x50 /* 64 bits */ | ||
62 | #define SH_IECR 0x58 /* 64 bits */ | ||
63 | #define SH_IESR 0x60 /* 64 bits */ | ||
64 | #define SH_IPR 0x68 /* 64 bits */ | ||
65 | #define SH_ICR 0x70 /* 64 bits */ | ||
66 | #define SH_IEVAL 0x78 | ||
67 | #define SH_QER 0x80 | ||
68 | #define SH_QEER 0x84 | ||
69 | #define SH_QEECR 0x88 | ||
70 | #define SH_QEESR 0x8c | ||
71 | #define SH_QSER 0x90 | ||
72 | #define SH_QSECR 0x94 | ||
73 | #define SH_SIZE 0x200 | ||
74 | |||
75 | /* Offsets for EDMA CC global registers */ | ||
76 | #define EDMA_REV 0x0000 | ||
77 | #define EDMA_CCCFG 0x0004 | ||
78 | #define EDMA_QCHMAP 0x0200 /* 8 registers */ | ||
79 | #define EDMA_DMAQNUM 0x0240 /* 8 registers (4 on OMAP-L1xx) */ | ||
80 | #define EDMA_QDMAQNUM 0x0260 | ||
81 | #define EDMA_QUETCMAP 0x0280 | ||
82 | #define EDMA_QUEPRI 0x0284 | ||
83 | #define EDMA_EMR 0x0300 /* 64 bits */ | ||
84 | #define EDMA_EMCR 0x0308 /* 64 bits */ | ||
85 | #define EDMA_QEMR 0x0310 | ||
86 | #define EDMA_QEMCR 0x0314 | ||
87 | #define EDMA_CCERR 0x0318 | ||
88 | #define EDMA_CCERRCLR 0x031c | ||
89 | #define EDMA_EEVAL 0x0320 | ||
90 | #define EDMA_DRAE 0x0340 /* 4 x 64 bits */ | ||
91 | #define EDMA_QRAE 0x0380 /* 4 registers */ | ||
92 | #define EDMA_QUEEVTENTRY 0x0400 /* 2 x 16 registers */ | ||
93 | #define EDMA_QSTAT 0x0600 /* 2 registers */ | ||
94 | #define EDMA_QWMTHRA 0x0620 | ||
95 | #define EDMA_QWMTHRB 0x0624 | ||
96 | #define EDMA_CCSTAT 0x0640 | ||
97 | |||
98 | #define EDMA_M 0x1000 /* global channel registers */ | ||
99 | #define EDMA_ECR 0x1008 | ||
100 | #define EDMA_ECRH 0x100C | ||
101 | #define EDMA_SHADOW0 0x2000 /* 4 shadow regions */ | ||
102 | #define EDMA_PARM 0x4000 /* PaRAM entries */ | ||
103 | |||
104 | #define PARM_OFFSET(param_no) (EDMA_PARM + ((param_no) << 5)) | ||
105 | |||
106 | #define EDMA_DCHMAP 0x0100 /* 64 registers */ | ||
107 | |||
108 | /* CCCFG register */ | ||
109 | #define GET_NUM_DMACH(x) (x & 0x7) /* bits 0-2 */ | ||
110 | #define GET_NUM_QDMACH(x) ((x & 0x70) >> 4) /* bits 4-6 */ | ||
111 | #define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12) /* bits 12-14 */ | ||
112 | #define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16) /* bits 16-18 */ | ||
113 | #define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */ | ||
114 | #define CHMAP_EXIST BIT(24) | ||
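These CCCFG fields are log2-style encodings of the instance's resources. A hedged decode sketch: edma_decode_cccfg() is an illustrative name, the members match the edma_cc struct introduced later in this diff, and the BIT(x + 1)/BIT(x + 4) expansions follow the eDMA3 TRM encoding (e.g. a NUM_DMACH field of 5 denotes 2^6 = 64 channels). Note the decode relies on GET_NUM_QDMACH() masking before shifting.

	static void edma_decode_cccfg(struct edma_cc *ecc)
	{
		u32 cccfg = edma_read(ecc, EDMA_CCCFG);

		ecc->num_channels = BIT(GET_NUM_DMACH(cccfg) + 1);
		ecc->num_slots = BIT(GET_NUM_PAENTRY(cccfg) + 4);
		ecc->num_tc = GET_NUM_EVQUE(cccfg) + 1; /* one TC per queue */
		ecc->chmap_exist = !!(cccfg & CHMAP_EXIST);
	}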
50 | 115 | ||
51 | /* | 116 | /* |
52 | * Max of 20 segments per channel to conserve PaRAM slots | 117 | * Max of 20 segments per channel to conserve PaRAM slots |
@@ -59,6 +124,37 @@ | |||
59 | #define EDMA_MAX_SLOTS MAX_NR_SG | 124 | #define EDMA_MAX_SLOTS MAX_NR_SG |
60 | #define EDMA_DESCRIPTORS 16 | 125 | #define EDMA_DESCRIPTORS 16 |
61 | 126 | ||
127 | #define EDMA_CHANNEL_ANY -1 /* for edma_alloc_channel() */ | ||
128 | #define EDMA_SLOT_ANY -1 /* for edma_alloc_slot() */ | ||
129 | #define EDMA_CONT_PARAMS_ANY 1001 | ||
130 | #define EDMA_CONT_PARAMS_FIXED_EXACT 1002 | ||
131 | #define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003 | ||
132 | |||
133 | /* PaRAM slots are laid out like this */ | ||
134 | struct edmacc_param { | ||
135 | u32 opt; | ||
136 | u32 src; | ||
137 | u32 a_b_cnt; | ||
138 | u32 dst; | ||
139 | u32 src_dst_bidx; | ||
140 | u32 link_bcntrld; | ||
141 | u32 src_dst_cidx; | ||
142 | u32 ccnt; | ||
143 | } __packed; | ||
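Because edma_write_slot()/edma_read_slot() below copy whole slots with memcpy_toio()/memcpy_fromio(), this layout must line up with the PARM_* byte offsets defined earlier; a compile-time cross-check sketch (edma_check_param_layout() is illustrative, not part of the patch):

	static inline void edma_check_param_layout(void)
	{
		BUILD_BUG_ON(offsetof(struct edmacc_param, opt) != PARM_OPT);
		BUILD_BUG_ON(offsetof(struct edmacc_param, src_dst_bidx) !=
			     PARM_SRC_DST_BIDX);
		BUILD_BUG_ON(offsetof(struct edmacc_param, ccnt) != PARM_CCNT);
		BUILD_BUG_ON(sizeof(struct edmacc_param) != PARM_SIZE);
	}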
144 | |||
145 | /* fields in edmacc_param.opt */ | ||
146 | #define SAM BIT(0) | ||
147 | #define DAM BIT(1) | ||
148 | #define SYNCDIM BIT(2) | ||
149 | #define STATIC BIT(3) | ||
150 | #define EDMA_FWID (0x07 << 8) | ||
151 | #define TCCMODE BIT(11) | ||
152 | #define EDMA_TCC(t) ((t) << 12) | ||
153 | #define TCINTEN BIT(20) | ||
154 | #define ITCINTEN BIT(21) | ||
155 | #define TCCHEN BIT(22) | ||
156 | #define ITCCHEN BIT(23) | ||
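A hedged sketch of how these OPT bits combine in practice (edma_mk_opt() is an illustrative helper; the prep routines below set the same bits field by field):

	static inline u32 edma_mk_opt(int tcc, bool final_irq, bool chain)
	{
		u32 opt = EDMA_TCC(tcc);	/* completion code reported via IPR */

		if (final_irq)
			opt |= TCINTEN;		/* interrupt on final completion */
		if (chain)			/* chain-trigger the next set on */
			opt |= TCCHEN | ITCCHEN; /* (intermediate) completion */
		return opt;
	}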
157 | |||
62 | struct edma_pset { | 158 | struct edma_pset { |
63 | u32 len; | 159 | u32 len; |
64 | dma_addr_t addr; | 160 | dma_addr_t addr; |
@@ -105,26 +201,519 @@ struct edma_desc { | |||
105 | 201 | ||
106 | struct edma_cc; | 202 | struct edma_cc; |
107 | 203 | ||
204 | struct edma_tc { | ||
205 | struct device_node *node; | ||
206 | u16 id; | ||
207 | }; | ||
208 | |||
108 | struct edma_chan { | 209 | struct edma_chan { |
109 | struct virt_dma_chan vchan; | 210 | struct virt_dma_chan vchan; |
110 | struct list_head node; | 211 | struct list_head node; |
111 | struct edma_desc *edesc; | 212 | struct edma_desc *edesc; |
112 | struct edma_cc *ecc; | 213 | struct edma_cc *ecc; |
214 | struct edma_tc *tc; | ||
113 | int ch_num; | 215 | int ch_num; |
114 | bool alloced; | 216 | bool alloced; |
217 | bool hw_triggered; | ||
115 | int slot[EDMA_MAX_SLOTS]; | 218 | int slot[EDMA_MAX_SLOTS]; |
116 | int missed; | 219 | int missed; |
117 | struct dma_slave_config cfg; | 220 | struct dma_slave_config cfg; |
118 | }; | 221 | }; |
119 | 222 | ||
120 | struct edma_cc { | 223 | struct edma_cc { |
121 | int ctlr; | 224 | struct device *dev; |
225 | struct edma_soc_info *info; | ||
226 | void __iomem *base; | ||
227 | int id; | ||
228 | bool legacy_mode; | ||
229 | |||
230 | /* eDMA3 resource information */ | ||
231 | unsigned num_channels; | ||
232 | unsigned num_qchannels; | ||
233 | unsigned num_region; | ||
234 | unsigned num_slots; | ||
235 | unsigned num_tc; | ||
236 | bool chmap_exist; | ||
237 | enum dma_event_q default_queue; | ||
238 | |||
239 | /* | ||
240 | * The slot_inuse bit for each PaRAM slot is clear unless the slot is | ||
241 | * in use by Linux or if it is allocated to be used by DSP. | ||
242 | */ | ||
243 | unsigned long *slot_inuse; | ||
244 | |||
122 | struct dma_device dma_slave; | 245 | struct dma_device dma_slave; |
123 | struct edma_chan slave_chans[EDMA_CHANS]; | 246 | struct dma_device *dma_memcpy; |
124 | int num_slave_chans; | 247 | struct edma_chan *slave_chans; |
248 | struct edma_tc *tc_list; | ||
125 | int dummy_slot; | 249 | int dummy_slot; |
126 | }; | 250 | }; |
127 | 251 | ||
252 | /* dummy param set used to (re)initialize parameter RAM slots */ | ||
253 | static const struct edmacc_param dummy_paramset = { | ||
254 | .link_bcntrld = 0xffff, | ||
255 | .ccnt = 1, | ||
256 | }; | ||
257 | |||
258 | #define EDMA_BINDING_LEGACY 0 | ||
259 | #define EDMA_BINDING_TPCC 1 | ||
260 | static const struct of_device_id edma_of_ids[] = { | ||
261 | { | ||
262 | .compatible = "ti,edma3", | ||
263 | .data = (void *)EDMA_BINDING_LEGACY, | ||
264 | }, | ||
265 | { | ||
266 | .compatible = "ti,edma3-tpcc", | ||
267 | .data = (void *)EDMA_BINDING_TPCC, | ||
268 | }, | ||
269 | {} | ||
270 | }; | ||
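At probe time the match data selects the binding flavour, which is what drives the legacy_mode flag above; a sketch assuming the usual of_match_device() pattern (edma_binding_is_tpcc() is an illustrative name):

	static bool edma_binding_is_tpcc(struct device *dev)
	{
		const struct of_device_id *match =
			of_match_device(edma_of_ids, dev);

		return match && (uintptr_t)match->data == EDMA_BINDING_TPCC;
	}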
271 | |||
272 | static inline unsigned int edma_read(struct edma_cc *ecc, int offset) | ||
273 | { | ||
274 | return (unsigned int)__raw_readl(ecc->base + offset); | ||
275 | } | ||
276 | |||
277 | static inline void edma_write(struct edma_cc *ecc, int offset, int val) | ||
278 | { | ||
279 | __raw_writel(val, ecc->base + offset); | ||
280 | } | ||
281 | |||
282 | static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and, | ||
283 | unsigned or) | ||
284 | { | ||
285 | unsigned val = edma_read(ecc, offset); | ||
286 | |||
287 | val &= and; | ||
288 | val |= or; | ||
289 | edma_write(ecc, offset, val); | ||
290 | } | ||
291 | |||
292 | static inline void edma_and(struct edma_cc *ecc, int offset, unsigned and) | ||
293 | { | ||
294 | unsigned val = edma_read(ecc, offset); | ||
295 | |||
296 | val &= and; | ||
297 | edma_write(ecc, offset, val); | ||
298 | } | ||
299 | |||
300 | static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or) | ||
301 | { | ||
302 | unsigned val = edma_read(ecc, offset); | ||
303 | |||
304 | val |= or; | ||
305 | edma_write(ecc, offset, val); | ||
306 | } | ||
307 | |||
308 | static inline unsigned int edma_read_array(struct edma_cc *ecc, int offset, | ||
309 | int i) | ||
310 | { | ||
311 | return edma_read(ecc, offset + (i << 2)); | ||
312 | } | ||
313 | |||
314 | static inline void edma_write_array(struct edma_cc *ecc, int offset, int i, | ||
315 | unsigned val) | ||
316 | { | ||
317 | edma_write(ecc, offset + (i << 2), val); | ||
318 | } | ||
319 | |||
320 | static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i, | ||
321 | unsigned and, unsigned or) | ||
322 | { | ||
323 | edma_modify(ecc, offset + (i << 2), and, or); | ||
324 | } | ||
325 | |||
326 | static inline void edma_or_array(struct edma_cc *ecc, int offset, int i, | ||
327 | unsigned or) | ||
328 | { | ||
329 | edma_or(ecc, offset + (i << 2), or); | ||
330 | } | ||
331 | |||
332 | static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j, | ||
333 | unsigned or) | ||
334 | { | ||
335 | edma_or(ecc, offset + ((i * 2 + j) << 2), or); | ||
336 | } | ||
337 | |||
338 | static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i, | ||
339 | int j, unsigned val) | ||
340 | { | ||
341 | edma_write(ecc, offset + ((i * 2 + j) << 2), val); | ||
342 | } | ||
343 | |||
344 | static inline unsigned int edma_shadow0_read(struct edma_cc *ecc, int offset) | ||
345 | { | ||
346 | return edma_read(ecc, EDMA_SHADOW0 + offset); | ||
347 | } | ||
348 | |||
349 | static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc, | ||
350 | int offset, int i) | ||
351 | { | ||
352 | return edma_read(ecc, EDMA_SHADOW0 + offset + (i << 2)); | ||
353 | } | ||
354 | |||
355 | static inline void edma_shadow0_write(struct edma_cc *ecc, int offset, | ||
356 | unsigned val) | ||
357 | { | ||
358 | edma_write(ecc, EDMA_SHADOW0 + offset, val); | ||
359 | } | ||
360 | |||
361 | static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset, | ||
362 | int i, unsigned val) | ||
363 | { | ||
364 | edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val); | ||
365 | } | ||
366 | |||
367 | static inline unsigned int edma_param_read(struct edma_cc *ecc, int offset, | ||
368 | int param_no) | ||
369 | { | ||
370 | return edma_read(ecc, EDMA_PARM + offset + (param_no << 5)); | ||
371 | } | ||
372 | |||
373 | static inline void edma_param_write(struct edma_cc *ecc, int offset, | ||
374 | int param_no, unsigned val) | ||
375 | { | ||
376 | edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val); | ||
377 | } | ||
378 | |||
379 | static inline void edma_param_modify(struct edma_cc *ecc, int offset, | ||
380 | int param_no, unsigned and, unsigned or) | ||
381 | { | ||
382 | edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or); | ||
383 | } | ||
384 | |||
385 | static inline void edma_param_and(struct edma_cc *ecc, int offset, int param_no, | ||
386 | unsigned and) | ||
387 | { | ||
388 | edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and); | ||
389 | } | ||
390 | |||
391 | static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no, | ||
392 | unsigned or) | ||
393 | { | ||
394 | edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or); | ||
395 | } | ||
396 | |||
397 | static inline void set_bits(int offset, int len, unsigned long *p) | ||
398 | { | ||
399 | for (; len > 0; len--) | ||
400 | set_bit(offset + (len - 1), p); | ||
401 | } | ||
402 | |||
403 | static inline void clear_bits(int offset, int len, unsigned long *p) | ||
404 | { | ||
405 | for (; len > 0; len--) | ||
406 | clear_bit(offset + (len - 1), p); | ||
407 | } | ||
408 | |||
409 | static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no, | ||
410 | int priority) | ||
411 | { | ||
412 | int bit = queue_no * 4; | ||
413 | |||
414 | edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit)); | ||
415 | } | ||
416 | |||
417 | static void edma_set_chmap(struct edma_chan *echan, int slot) | ||
418 | { | ||
419 | struct edma_cc *ecc = echan->ecc; | ||
420 | int channel = EDMA_CHAN_SLOT(echan->ch_num); | ||
421 | |||
422 | if (ecc->chmap_exist) { | ||
423 | slot = EDMA_CHAN_SLOT(slot); | ||
424 | edma_write_array(ecc, EDMA_DCHMAP, channel, (slot << 5)); | ||
425 | } | ||
426 | } | ||
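The slot << 5 shift places the slot number in the PAENTRY field of DCHMAPn (bits 13:5 per the eDMA3 TRM); a worked example as a comment:

	/* e.g. mapping channel 12 to PaRAM slot 64 stores 64 << 5 = 0x800
	 * in DCHMAP[12]; PAENTRY occupies bits 13:5 of each DCHMAPn. */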
427 | |||
428 | static void edma_setup_interrupt(struct edma_chan *echan, bool enable) | ||
429 | { | ||
430 | struct edma_cc *ecc = echan->ecc; | ||
431 | int channel = EDMA_CHAN_SLOT(echan->ch_num); | ||
432 | |||
433 | if (enable) { | ||
434 | edma_shadow0_write_array(ecc, SH_ICR, channel >> 5, | ||
435 | BIT(channel & 0x1f)); | ||
436 | edma_shadow0_write_array(ecc, SH_IESR, channel >> 5, | ||
437 | BIT(channel & 0x1f)); | ||
438 | } else { | ||
439 | edma_shadow0_write_array(ecc, SH_IECR, channel >> 5, | ||
440 | BIT(channel & 0x1f)); | ||
441 | } | ||
442 | } | ||
443 | |||
444 | /* | ||
445 | * paRAM slot management functions | ||
446 | */ | ||
447 | static void edma_write_slot(struct edma_cc *ecc, unsigned slot, | ||
448 | const struct edmacc_param *param) | ||
449 | { | ||
450 | slot = EDMA_CHAN_SLOT(slot); | ||
451 | if (slot >= ecc->num_slots) | ||
452 | return; | ||
453 | memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE); | ||
454 | } | ||
455 | |||
456 | static void edma_read_slot(struct edma_cc *ecc, unsigned slot, | ||
457 | struct edmacc_param *param) | ||
458 | { | ||
459 | slot = EDMA_CHAN_SLOT(slot); | ||
460 | if (slot >= ecc->num_slots) | ||
461 | return; | ||
462 | memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE); | ||
463 | } | ||
464 | |||
465 | /** | ||
466 | * edma_alloc_slot - allocate DMA parameter RAM | ||
467 | * @ecc: pointer to edma_cc struct | ||
468 | * @slot: specific slot to allocate; negative for "any unused slot" | ||
469 | * | ||
470 | * This allocates a parameter RAM slot, initializing it to hold a | ||
471 | * dummy transfer. Slots allocated using this routine have not been | ||
472 | * mapped to a hardware DMA channel, and will normally be used by | ||
473 | * linking to them from a slot associated with a DMA channel. | ||
474 | * | ||
475 | * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific | ||
476 | * slots may be allocated on behalf of DSP firmware. | ||
477 | * | ||
478 | * Returns the number of the slot, else negative errno. | ||
479 | */ | ||
480 | static int edma_alloc_slot(struct edma_cc *ecc, int slot) | ||
481 | { | ||
482 | if (slot > 0) { | ||
483 | slot = EDMA_CHAN_SLOT(slot); | ||
484 | /* Requesting entry paRAM slot for a HW triggered channel. */ | ||
485 | if (ecc->chmap_exist && slot < ecc->num_channels) | ||
486 | slot = EDMA_SLOT_ANY; | ||
487 | } | ||
488 | |||
489 | if (slot < 0) { | ||
490 | if (ecc->chmap_exist) | ||
491 | slot = 0; | ||
492 | else | ||
493 | slot = ecc->num_channels; | ||
494 | for (;;) { | ||
495 | slot = find_next_zero_bit(ecc->slot_inuse, | ||
496 | ecc->num_slots, | ||
497 | slot); | ||
498 | if (slot == ecc->num_slots) | ||
499 | return -ENOMEM; | ||
500 | if (!test_and_set_bit(slot, ecc->slot_inuse)) | ||
501 | break; | ||
502 | } | ||
503 | } else if (slot >= ecc->num_slots) { | ||
504 | return -EINVAL; | ||
505 | } else if (test_and_set_bit(slot, ecc->slot_inuse)) { | ||
506 | return -EBUSY; | ||
507 | } | ||
508 | |||
509 | edma_write_slot(ecc, slot, &dummy_paramset); | ||
510 | |||
511 | return EDMA_CTLR_CHAN(ecc->id, slot); | ||
512 | } | ||
513 | |||
514 | static void edma_free_slot(struct edma_cc *ecc, unsigned slot) | ||
515 | { | ||
516 | slot = EDMA_CHAN_SLOT(slot); | ||
517 | if (slot >= ecc->num_slots) | ||
518 | return; | ||
519 | |||
520 | edma_write_slot(ecc, slot, &dummy_paramset); | ||
521 | clear_bit(slot, ecc->slot_inuse); | ||
522 | } | ||
523 | |||
524 | /** | ||
525 | * edma_link - link one parameter RAM slot to another | ||
526 | * @ecc: pointer to edma_cc struct | ||
527 | * @from: parameter RAM slot originating the link | ||
528 | * @to: parameter RAM slot which is the link target | ||
529 | * | ||
530 | * The originating slot should not be part of any active DMA transfer. | ||
531 | */ | ||
532 | static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to) | ||
533 | { | ||
534 | if (unlikely(EDMA_CTLR(from) != EDMA_CTLR(to))) | ||
535 | dev_warn(ecc->dev, "Ignoring eDMA instance for linking\n"); | ||
536 | |||
537 | from = EDMA_CHAN_SLOT(from); | ||
538 | to = EDMA_CHAN_SLOT(to); | ||
539 | if (from >= ecc->num_slots || to >= ecc->num_slots) | ||
540 | return; | ||
541 | |||
542 | edma_param_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000, | ||
543 | PARM_OFFSET(to)); | ||
544 | } | ||
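The 0xffff0000 mask above keeps the reload count and replaces only the low 16 bits, which hold the PaRAM address of the linked set; a worked example (edma_link_example() is illustrative only):

	static void edma_link_example(struct edma_cc *ecc)
	{
		/*
		 * Link slot 5 to slot 6: slot 5's LINK field becomes
		 * PARM_OFFSET(6) = 0x4000 + (6 << 5) = 0x40c0. A LINK
		 * value of 0xffff (as in dummy_paramset above) is the
		 * NULL link that terminates a chain.
		 */
		edma_link(ecc, 5, 6);
	}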
545 | |||
546 | /** | ||
547 | * edma_get_position - returns the current transfer point | ||
548 | * @ecc: pointer to edma_cc struct | ||
549 | * @slot: parameter RAM slot being examined | ||
550 | * @dst: true selects the dest position, false the source | ||
551 | * | ||
552 | * Returns the position of the current active slot | ||
553 | */ | ||
554 | static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot, | ||
555 | bool dst) | ||
556 | { | ||
557 | u32 offs; | ||
558 | |||
559 | slot = EDMA_CHAN_SLOT(slot); | ||
560 | offs = PARM_OFFSET(slot); | ||
561 | offs += dst ? PARM_DST : PARM_SRC; | ||
562 | |||
563 | return edma_read(ecc, offs); | ||
564 | } | ||
565 | |||
566 | /* | ||
567 | * Channels with event associations will be triggered by their hardware | ||
568 | * events, and channels without such associations will be triggered by | ||
569 | * software. (At this writing there is no interface for using software | ||
570 | * triggers except with channels that don't support hardware triggers.) | ||
571 | */ | ||
572 | static void edma_start(struct edma_chan *echan) | ||
573 | { | ||
574 | struct edma_cc *ecc = echan->ecc; | ||
575 | int channel = EDMA_CHAN_SLOT(echan->ch_num); | ||
576 | int j = (channel >> 5); | ||
577 | unsigned int mask = BIT(channel & 0x1f); | ||
578 | |||
579 | if (!echan->hw_triggered) { | ||
580 | /* EDMA channels without event association */ | ||
581 | dev_dbg(ecc->dev, "ESR%d %08x\n", j, | ||
582 | edma_shadow0_read_array(ecc, SH_ESR, j)); | ||
583 | edma_shadow0_write_array(ecc, SH_ESR, j, mask); | ||
584 | } else { | ||
585 | /* EDMA channel with event association */ | ||
586 | dev_dbg(ecc->dev, "ER%d %08x\n", j, | ||
587 | edma_shadow0_read_array(ecc, SH_ER, j)); | ||
588 | /* Clear any pending event or error */ | ||
589 | edma_write_array(ecc, EDMA_ECR, j, mask); | ||
590 | edma_write_array(ecc, EDMA_EMCR, j, mask); | ||
591 | /* Clear any SER */ | ||
592 | edma_shadow0_write_array(ecc, SH_SECR, j, mask); | ||
593 | edma_shadow0_write_array(ecc, SH_EESR, j, mask); | ||
594 | dev_dbg(ecc->dev, "EER%d %08x\n", j, | ||
595 | edma_shadow0_read_array(ecc, SH_EER, j)); | ||
596 | } | ||
597 | } | ||
598 | |||
599 | static void edma_stop(struct edma_chan *echan) | ||
600 | { | ||
601 | struct edma_cc *ecc = echan->ecc; | ||
602 | int channel = EDMA_CHAN_SLOT(echan->ch_num); | ||
603 | int j = (channel >> 5); | ||
604 | unsigned int mask = BIT(channel & 0x1f); | ||
605 | |||
606 | edma_shadow0_write_array(ecc, SH_EECR, j, mask); | ||
607 | edma_shadow0_write_array(ecc, SH_ECR, j, mask); | ||
608 | edma_shadow0_write_array(ecc, SH_SECR, j, mask); | ||
609 | edma_write_array(ecc, EDMA_EMCR, j, mask); | ||
610 | |||
611 | /* clear possibly pending completion interrupt */ | ||
612 | edma_shadow0_write_array(ecc, SH_ICR, j, mask); | ||
613 | |||
614 | dev_dbg(ecc->dev, "EER%d %08x\n", j, | ||
615 | edma_shadow0_read_array(ecc, SH_EER, j)); | ||
616 | |||
617 | /* REVISIT: consider guarding against inappropriate event | ||
618 | * chaining by overwriting with dummy_paramset. | ||
619 | */ | ||
620 | } | ||
621 | |||
622 | /* | ||
623 | * Temporarily disable EDMA hardware events on the specified channel, | ||
624 | * preventing them from triggering new transfers | ||
625 | */ | ||
626 | static void edma_pause(struct edma_chan *echan) | ||
627 | { | ||
628 | int channel = EDMA_CHAN_SLOT(echan->ch_num); | ||
629 | unsigned int mask = BIT(channel & 0x1f); | ||
630 | |||
631 | edma_shadow0_write_array(echan->ecc, SH_EECR, channel >> 5, mask); | ||
632 | } | ||
633 | |||
634 | /* Re-enable EDMA hardware events on the specified channel. */ | ||
635 | static void edma_resume(struct edma_chan *echan) | ||
636 | { | ||
637 | int channel = EDMA_CHAN_SLOT(echan->ch_num); | ||
638 | unsigned int mask = BIT(channel & 0x1f); | ||
639 | |||
640 | edma_shadow0_write_array(echan->ecc, SH_EESR, channel >> 5, mask); | ||
641 | } | ||
642 | |||
643 | static void edma_trigger_channel(struct edma_chan *echan) | ||
644 | { | ||
645 | struct edma_cc *ecc = echan->ecc; | ||
646 | int channel = EDMA_CHAN_SLOT(echan->ch_num); | ||
647 | unsigned int mask = BIT(channel & 0x1f); | ||
648 | |||
649 | edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask); | ||
650 | |||
651 | dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5), | ||
652 | edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5))); | ||
653 | } | ||
654 | |||
655 | static void edma_clean_channel(struct edma_chan *echan) | ||
656 | { | ||
657 | struct edma_cc *ecc = echan->ecc; | ||
658 | int channel = EDMA_CHAN_SLOT(echan->ch_num); | ||
659 | int j = (channel >> 5); | ||
660 | unsigned int mask = BIT(channel & 0x1f); | ||
661 | |||
662 | dev_dbg(ecc->dev, "EMR%d %08x\n", j, edma_read_array(ecc, EDMA_EMR, j)); | ||
663 | edma_shadow0_write_array(ecc, SH_ECR, j, mask); | ||
664 | /* Clear the corresponding EMR bits */ | ||
665 | edma_write_array(ecc, EDMA_EMCR, j, mask); | ||
666 | /* Clear any SER */ | ||
667 | edma_shadow0_write_array(ecc, SH_SECR, j, mask); | ||
668 | edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0)); | ||
669 | } | ||
670 | |||
671 | /* Move channel to a specific event queue */ | ||
672 | static void edma_assign_channel_eventq(struct edma_chan *echan, | ||
673 | enum dma_event_q eventq_no) | ||
674 | { | ||
675 | struct edma_cc *ecc = echan->ecc; | ||
676 | int channel = EDMA_CHAN_SLOT(echan->ch_num); | ||
677 | int bit = (channel & 0x7) * 4; | ||
678 | |||
679 | /* default to low priority queue */ | ||
680 | if (eventq_no == EVENTQ_DEFAULT) | ||
681 | eventq_no = ecc->default_queue; | ||
682 | if (eventq_no >= ecc->num_tc) | ||
683 | return; | ||
684 | |||
685 | eventq_no &= 7; | ||
686 | edma_modify_array(ecc, EDMA_DMAQNUM, (channel >> 3), ~(0x7 << bit), | ||
687 | eventq_no << bit); | ||
688 | } | ||
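Each DMAQNUMn register carries 4-bit queue fields for eight channels, which is what the (channel >> 3) and (channel & 0x7) * 4 arithmetic above indexes; a worked example as a comment:

	/* e.g. channel 10: register DMAQNUM1 (10 >> 3 = 1), field at bit 8
	 * ((10 & 7) * 4), so its 3-bit queue number lands in bits 10:8. */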
689 | |||
690 | static int edma_alloc_channel(struct edma_chan *echan, | ||
691 | enum dma_event_q eventq_no) | ||
692 | { | ||
693 | struct edma_cc *ecc = echan->ecc; | ||
694 | int channel = EDMA_CHAN_SLOT(echan->ch_num); | ||
695 | |||
696 | /* ensure access through shadow region 0 */ | ||
697 | edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f)); | ||
698 | |||
699 | /* ensure no events are pending */ | ||
700 | edma_stop(echan); | ||
701 | |||
702 | edma_setup_interrupt(echan, true); | ||
703 | |||
704 | edma_assign_channel_eventq(echan, eventq_no); | ||
705 | |||
706 | return 0; | ||
707 | } | ||
708 | |||
709 | static void edma_free_channel(struct edma_chan *echan) | ||
710 | { | ||
711 | /* ensure no events are pending */ | ||
712 | edma_stop(echan); | ||
713 | /* REVISIT should probably take out of shadow region 0 */ | ||
714 | edma_setup_interrupt(echan, false); | ||
715 | } | ||
716 | |||
128 | static inline struct edma_cc *to_edma_cc(struct dma_device *d) | 717 | static inline struct edma_cc *to_edma_cc(struct dma_device *d) |
129 | { | 718 | { |
130 | return container_of(d, struct edma_cc, dma_slave); | 719 | return container_of(d, struct edma_cc, dma_slave); |
@@ -135,8 +724,7 @@ static inline struct edma_chan *to_edma_chan(struct dma_chan *c) | |||
135 | return container_of(c, struct edma_chan, vchan.chan); | 724 | return container_of(c, struct edma_chan, vchan.chan); |
136 | } | 725 | } |
137 | 726 | ||
138 | static inline struct edma_desc | 727 | static inline struct edma_desc *to_edma_desc(struct dma_async_tx_descriptor *tx) |
139 | *to_edma_desc(struct dma_async_tx_descriptor *tx) | ||
140 | { | 728 | { |
141 | return container_of(tx, struct edma_desc, vdesc.tx); | 729 | return container_of(tx, struct edma_desc, vdesc.tx); |
142 | } | 730 | } |
@@ -149,20 +737,17 @@ static void edma_desc_free(struct virt_dma_desc *vdesc) | |||
149 | /* Dispatch a queued descriptor to the controller (caller holds lock) */ | 737 | /* Dispatch a queued descriptor to the controller (caller holds lock) */ |
150 | static void edma_execute(struct edma_chan *echan) | 738 | static void edma_execute(struct edma_chan *echan) |
151 | { | 739 | { |
740 | struct edma_cc *ecc = echan->ecc; | ||
152 | struct virt_dma_desc *vdesc; | 741 | struct virt_dma_desc *vdesc; |
153 | struct edma_desc *edesc; | 742 | struct edma_desc *edesc; |
154 | struct device *dev = echan->vchan.chan.device->dev; | 743 | struct device *dev = echan->vchan.chan.device->dev; |
155 | int i, j, left, nslots; | 744 | int i, j, left, nslots; |
156 | 745 | ||
157 | /* If either we processed all psets or we're still not started */ | 746 | if (!echan->edesc) { |
158 | if (!echan->edesc || | 747 | /* Setup is needed for the first transfer */ |
159 | echan->edesc->pset_nr == echan->edesc->processed) { | ||
160 | /* Get next vdesc */ | ||
161 | vdesc = vchan_next_desc(&echan->vchan); | 748 | vdesc = vchan_next_desc(&echan->vchan); |
162 | if (!vdesc) { | 749 | if (!vdesc) |
163 | echan->edesc = NULL; | ||
164 | return; | 750 | return; |
165 | } | ||
166 | list_del(&vdesc->node); | 751 | list_del(&vdesc->node); |
167 | echan->edesc = to_edma_desc(&vdesc->tx); | 752 | echan->edesc = to_edma_desc(&vdesc->tx); |
168 | } | 753 | } |
@@ -177,32 +762,32 @@ static void edma_execute(struct edma_chan *echan) | |||
177 | /* Write descriptor PaRAM set(s) */ | 762 | /* Write descriptor PaRAM set(s) */ |
178 | for (i = 0; i < nslots; i++) { | 763 | for (i = 0; i < nslots; i++) { |
179 | j = i + edesc->processed; | 764 | j = i + edesc->processed; |
180 | edma_write_slot(echan->slot[i], &edesc->pset[j].param); | 765 | edma_write_slot(ecc, echan->slot[i], &edesc->pset[j].param); |
181 | edesc->sg_len += edesc->pset[j].len; | 766 | edesc->sg_len += edesc->pset[j].len; |
182 | dev_vdbg(echan->vchan.chan.device->dev, | 767 | dev_vdbg(dev, |
183 | "\n pset[%d]:\n" | 768 | "\n pset[%d]:\n" |
184 | " chnum\t%d\n" | 769 | " chnum\t%d\n" |
185 | " slot\t%d\n" | 770 | " slot\t%d\n" |
186 | " opt\t%08x\n" | 771 | " opt\t%08x\n" |
187 | " src\t%08x\n" | 772 | " src\t%08x\n" |
188 | " dst\t%08x\n" | 773 | " dst\t%08x\n" |
189 | " abcnt\t%08x\n" | 774 | " abcnt\t%08x\n" |
190 | " ccnt\t%08x\n" | 775 | " ccnt\t%08x\n" |
191 | " bidx\t%08x\n" | 776 | " bidx\t%08x\n" |
192 | " cidx\t%08x\n" | 777 | " cidx\t%08x\n" |
193 | " lkrld\t%08x\n", | 778 | " lkrld\t%08x\n", |
194 | j, echan->ch_num, echan->slot[i], | 779 | j, echan->ch_num, echan->slot[i], |
195 | edesc->pset[j].param.opt, | 780 | edesc->pset[j].param.opt, |
196 | edesc->pset[j].param.src, | 781 | edesc->pset[j].param.src, |
197 | edesc->pset[j].param.dst, | 782 | edesc->pset[j].param.dst, |
198 | edesc->pset[j].param.a_b_cnt, | 783 | edesc->pset[j].param.a_b_cnt, |
199 | edesc->pset[j].param.ccnt, | 784 | edesc->pset[j].param.ccnt, |
200 | edesc->pset[j].param.src_dst_bidx, | 785 | edesc->pset[j].param.src_dst_bidx, |
201 | edesc->pset[j].param.src_dst_cidx, | 786 | edesc->pset[j].param.src_dst_cidx, |
202 | edesc->pset[j].param.link_bcntrld); | 787 | edesc->pset[j].param.link_bcntrld); |
203 | /* Link to the previous slot if not the last set */ | 788 | /* Link to the previous slot if not the last set */ |
204 | if (i != (nslots - 1)) | 789 | if (i != (nslots - 1)) |
205 | edma_link(echan->slot[i], echan->slot[i+1]); | 790 | edma_link(ecc, echan->slot[i], echan->slot[i + 1]); |
206 | } | 791 | } |
207 | 792 | ||
208 | edesc->processed += nslots; | 793 | edesc->processed += nslots; |
@@ -214,34 +799,32 @@ static void edma_execute(struct edma_chan *echan) | |||
214 | */ | 799 | */ |
215 | if (edesc->processed == edesc->pset_nr) { | 800 | if (edesc->processed == edesc->pset_nr) { |
216 | if (edesc->cyclic) | 801 | if (edesc->cyclic) |
217 | edma_link(echan->slot[nslots-1], echan->slot[1]); | 802 | edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]); |
218 | else | 803 | else |
219 | edma_link(echan->slot[nslots-1], | 804 | edma_link(ecc, echan->slot[nslots - 1], |
220 | echan->ecc->dummy_slot); | 805 | echan->ecc->dummy_slot); |
221 | } | 806 | } |
222 | 807 | ||
223 | if (edesc->processed <= MAX_NR_SG) { | 808 | if (echan->missed) { |
809 | /* | ||
810 | * This happens due to setup times between intermediate | ||
811 | * transfers in long SG lists which have to be broken up into | ||
812 | * transfers of MAX_NR_SG | ||
813 | */ | ||
814 | dev_dbg(dev, "missed event on channel %d\n", echan->ch_num); | ||
815 | edma_clean_channel(echan); | ||
816 | edma_stop(echan); | ||
817 | edma_start(echan); | ||
818 | edma_trigger_channel(echan); | ||
819 | echan->missed = 0; | ||
820 | } else if (edesc->processed <= MAX_NR_SG) { | ||
224 | dev_dbg(dev, "first transfer starting on channel %d\n", | 821 | dev_dbg(dev, "first transfer starting on channel %d\n", |
225 | echan->ch_num); | 822 | echan->ch_num); |
226 | edma_start(echan->ch_num); | 823 | edma_start(echan); |
227 | } else { | 824 | } else { |
228 | dev_dbg(dev, "chan: %d: completed %d elements, resuming\n", | 825 | dev_dbg(dev, "chan: %d: completed %d elements, resuming\n", |
229 | echan->ch_num, edesc->processed); | 826 | echan->ch_num, edesc->processed); |
230 | edma_resume(echan->ch_num); | 827 | edma_resume(echan); |
231 | } | ||
232 | |||
233 | /* | ||
234 | * This happens due to setup times between intermediate transfers | ||
235 | * in long SG lists which have to be broken up into transfers of | ||
236 | * MAX_NR_SG | ||
237 | */ | ||
238 | if (echan->missed) { | ||
239 | dev_dbg(dev, "missed event on channel %d\n", echan->ch_num); | ||
240 | edma_clean_channel(echan->ch_num); | ||
241 | edma_stop(echan->ch_num); | ||
242 | edma_start(echan->ch_num); | ||
243 | edma_trigger_channel(echan->ch_num); | ||
244 | echan->missed = 0; | ||
245 | } | 828 | } |
246 | } | 829 | } |
247 | 830 | ||
@@ -259,20 +842,16 @@ static int edma_terminate_all(struct dma_chan *chan) | |||
259 | * echan->edesc is NULL and exit.) | 842 | * echan->edesc is NULL and exit.) |
260 | */ | 843 | */ |
261 | if (echan->edesc) { | 844 | if (echan->edesc) { |
262 | int cyclic = echan->edesc->cyclic; | 845 | edma_stop(echan); |
263 | 846 | /* Move the cyclic channel back to default queue */ | |
847 | if (!echan->tc && echan->edesc->cyclic) | ||
848 | edma_assign_channel_eventq(echan, EVENTQ_DEFAULT); | ||
264 | /* | 849 | /* |
265 | * free the running request descriptor | 850 | * free the running request descriptor |
266 | * since it is not in any of the vdesc lists | 851 | * since it is not in any of the vdesc lists |
267 | */ | 852 | */ |
268 | edma_desc_free(&echan->edesc->vdesc); | 853 | edma_desc_free(&echan->edesc->vdesc); |
269 | |||
270 | echan->edesc = NULL; | 854 | echan->edesc = NULL; |
271 | edma_stop(echan->ch_num); | ||
272 | /* Move the cyclic channel back to default queue */ | ||
273 | if (cyclic) | ||
274 | edma_assign_channel_eventq(echan->ch_num, | ||
275 | EVENTQ_DEFAULT); | ||
276 | } | 855 | } |
277 | 856 | ||
278 | vchan_get_all_descriptors(&echan->vchan, &head); | 857 | vchan_get_all_descriptors(&echan->vchan, &head); |
@@ -303,7 +882,7 @@ static int edma_dma_pause(struct dma_chan *chan) | |||
303 | if (!echan->edesc) | 882 | if (!echan->edesc) |
304 | return -EINVAL; | 883 | return -EINVAL; |
305 | 884 | ||
306 | edma_pause(echan->ch_num); | 885 | edma_pause(echan); |
307 | return 0; | 886 | return 0; |
308 | } | 887 | } |
309 | 888 | ||
@@ -311,7 +890,7 @@ static int edma_dma_resume(struct dma_chan *chan) | |||
311 | { | 890 | { |
312 | struct edma_chan *echan = to_edma_chan(chan); | 891 | struct edma_chan *echan = to_edma_chan(chan); |
313 | 892 | ||
314 | edma_resume(echan->ch_num); | 893 | edma_resume(echan); |
315 | return 0; | 894 | return 0; |
316 | } | 895 | } |
317 | 896 | ||
@@ -327,19 +906,17 @@ static int edma_dma_resume(struct dma_chan *chan) | |||
327 | * @direction: Direction of the transfer | 906 | * @direction: Direction of the transfer |
328 | */ | 907 | */ |
329 | static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset, | 908 | static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset, |
330 | dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst, | 909 | dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst, |
331 | enum dma_slave_buswidth dev_width, unsigned int dma_length, | 910 | unsigned int acnt, unsigned int dma_length, |
332 | enum dma_transfer_direction direction) | 911 | enum dma_transfer_direction direction) |
333 | { | 912 | { |
334 | struct edma_chan *echan = to_edma_chan(chan); | 913 | struct edma_chan *echan = to_edma_chan(chan); |
335 | struct device *dev = chan->device->dev; | 914 | struct device *dev = chan->device->dev; |
336 | struct edmacc_param *param = &epset->param; | 915 | struct edmacc_param *param = &epset->param; |
337 | int acnt, bcnt, ccnt, cidx; | 916 | int bcnt, ccnt, cidx; |
338 | int src_bidx, dst_bidx, src_cidx, dst_cidx; | 917 | int src_bidx, dst_bidx, src_cidx, dst_cidx; |
339 | int absync; | 918 | int absync; |
340 | 919 | ||
341 | acnt = dev_width; | ||
342 | |||
343 | /* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */ | 920 | /* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */ |
344 | if (!burst) | 921 | if (!burst) |
345 | burst = 1; | 922 | burst = 1; |
@@ -475,8 +1052,8 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
475 | return NULL; | 1052 | return NULL; |
476 | } | 1053 | } |
477 | 1054 | ||
478 | edesc = kzalloc(sizeof(*edesc) + sg_len * | 1055 | edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]), |
479 | sizeof(edesc->pset[0]), GFP_ATOMIC); | 1056 | GFP_ATOMIC); |
480 | if (!edesc) { | 1057 | if (!edesc) { |
481 | dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__); | 1058 | dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__); |
482 | return NULL; | 1059 | return NULL; |
@@ -493,8 +1070,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( | |||
493 | for (i = 0; i < nslots; i++) { | 1070 | for (i = 0; i < nslots; i++) { |
494 | if (echan->slot[i] < 0) { | 1071 | if (echan->slot[i] < 0) { |
495 | echan->slot[i] = | 1072 | echan->slot[i] = |
496 | edma_alloc_slot(EDMA_CTLR(echan->ch_num), | 1073 | edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY); |
497 | EDMA_SLOT_ANY); | ||
498 | if (echan->slot[i] < 0) { | 1074 | if (echan->slot[i] < 0) { |
499 | kfree(edesc); | 1075 | kfree(edesc); |
500 | dev_err(dev, "%s: Failed to allocate slot\n", | 1076 | dev_err(dev, "%s: Failed to allocate slot\n", |
@@ -541,36 +1117,98 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy( | |||
541 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | 1117 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, |
542 | size_t len, unsigned long tx_flags) | 1118 | size_t len, unsigned long tx_flags) |
543 | { | 1119 | { |
544 | int ret; | 1120 | int ret, nslots; |
545 | struct edma_desc *edesc; | 1121 | struct edma_desc *edesc; |
546 | struct device *dev = chan->device->dev; | 1122 | struct device *dev = chan->device->dev; |
547 | struct edma_chan *echan = to_edma_chan(chan); | 1123 | struct edma_chan *echan = to_edma_chan(chan); |
1124 | unsigned int width, pset_len; | ||
548 | 1125 | ||
549 | if (unlikely(!echan || !len)) | 1126 | if (unlikely(!echan || !len)) |
550 | return NULL; | 1127 | return NULL; |
551 | 1128 | ||
552 | edesc = kzalloc(sizeof(*edesc) + sizeof(edesc->pset[0]), GFP_ATOMIC); | 1129 | if (len < SZ_64K) { |
1130 | /* | ||
1131 | * Transfer size less than 64K can be handled with one paRAM | ||
1132 | * slot and with one burst. | ||
1133 | * ACNT = length | ||
1134 | */ | ||
1135 | width = len; | ||
1136 | pset_len = len; | ||
1137 | nslots = 1; | ||
1138 | } else { | ||
1139 | /* | ||
1140 | * Transfer size bigger than 64K will be handled with maximum of | ||
1141 | * two paRAM slots. | ||
1142 | * slot1: (full_length / 32767) bursts of 32767 bytes each. | ||
1143 | * ACNT = 32767, length1: (full_length / 32767) * 32767 | ||
1144 | * slot2: the remaining amount of data after slot1. | ||
1145 | * ACNT = full_length - length1, length2 = ACNT | ||
1146 | * | ||
1147 | * When the full_length is a multiple of 32767, one slot can be | ||
1148 | * used to complete the transfer. | ||
1149 | */ | ||
1150 | width = SZ_32K - 1; | ||
1151 | pset_len = rounddown(len, width); | ||
1152 | /* One slot is enough for lengths multiple of (SZ_32K -1) */ | ||
1153 | if (unlikely(pset_len == len)) | ||
1154 | nslots = 1; | ||
1155 | else | ||
1156 | nslots = 2; | ||
1157 | } | ||
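	/*
	 * Worked example: len = 70000. width = 32767 and
	 * pset_len = rounddown(70000, 32767) = 65534, so slot 0 moves two
	 * 32767-byte bursts and a second slot (set up further below) moves
	 * the remaining 70000 - 65534 = 4466 bytes; nslots = 2.
	 */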
1158 | |||
1159 | edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]), | ||
1160 | GFP_ATOMIC); | ||
553 | if (!edesc) { | 1161 | if (!edesc) { |
554 | dev_dbg(dev, "Failed to allocate a descriptor\n"); | 1162 | dev_dbg(dev, "Failed to allocate a descriptor\n"); |
555 | return NULL; | 1163 | return NULL; |
556 | } | 1164 | } |
557 | 1165 | ||
558 | edesc->pset_nr = 1; | 1166 | edesc->pset_nr = nslots; |
1167 | edesc->residue = edesc->residue_stat = len; | ||
1168 | edesc->direction = DMA_MEM_TO_MEM; | ||
1169 | edesc->echan = echan; | ||
559 | 1170 | ||
560 | ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1, | 1171 | ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1, |
561 | DMA_SLAVE_BUSWIDTH_4_BYTES, len, DMA_MEM_TO_MEM); | 1172 | width, pset_len, DMA_MEM_TO_MEM); |
562 | if (ret < 0) | 1173 | if (ret < 0) { |
1174 | kfree(edesc); | ||
563 | return NULL; | 1175 | return NULL; |
1176 | } | ||
564 | 1177 | ||
565 | edesc->absync = ret; | 1178 | edesc->absync = ret; |
566 | 1179 | ||
567 | /* | ||
568 | * Enable intermediate transfer chaining to re-trigger channel | ||
569 | * on completion of every TR, and enable transfer-completion | ||
570 | * interrupt on completion of the whole transfer. | ||
571 | */ | ||
572 | edesc->pset[0].param.opt |= ITCCHEN; | 1180 | edesc->pset[0].param.opt |= ITCCHEN; |
573 | edesc->pset[0].param.opt |= TCINTEN; | 1181 | if (nslots == 1) { |
1182 | /* Enable transfer complete interrupt */ | ||
1183 | edesc->pset[0].param.opt |= TCINTEN; | ||
1184 | } else { | ||
1185 | /* Enable transfer complete chaining for the first slot */ | ||
1186 | edesc->pset[0].param.opt |= TCCHEN; | ||
1187 | |||
1188 | if (echan->slot[1] < 0) { | ||
1189 | echan->slot[1] = edma_alloc_slot(echan->ecc, | ||
1190 | EDMA_SLOT_ANY); | ||
1191 | if (echan->slot[1] < 0) { | ||
1192 | kfree(edesc); | ||
1193 | dev_err(dev, "%s: Failed to allocate slot\n", | ||
1194 | __func__); | ||
1195 | return NULL; | ||
1196 | } | ||
1197 | } | ||
1198 | dest += pset_len; | ||
1199 | src += pset_len; | ||
1200 | pset_len = width = len % (SZ_32K - 1); | ||
1201 | |||
1202 | ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1, | ||
1203 | width, pset_len, DMA_MEM_TO_MEM); | ||
1204 | if (ret < 0) { | ||
1205 | kfree(edesc); | ||
1206 | return NULL; | ||
1207 | } | ||
1208 | |||
1209 | edesc->pset[1].param.opt |= ITCCHEN; | ||
1210 | edesc->pset[1].param.opt |= TCINTEN; | ||
1211 | } | ||
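	/*
	 * Net effect of the OPT bits in the two-slot case: slot 0 chains to
	 * slot 1 on completion (TCCHEN) instead of interrupting, both slots
	 * re-trigger the channel after each intermediate set (ITCCHEN), and
	 * only slot 1 raises the completion interrupt (TCINTEN).
	 */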
574 | 1212 | ||
575 | return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); | 1213 | return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); |
576 | } | 1214 | } |
@@ -629,8 +1267,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( | |||
629 | if (nslots > MAX_NR_SG) | 1267 | if (nslots > MAX_NR_SG) |
630 | return NULL; | 1268 | return NULL; |
631 | 1269 | ||
632 | edesc = kzalloc(sizeof(*edesc) + nslots * | 1270 | edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]), |
633 | sizeof(edesc->pset[0]), GFP_ATOMIC); | 1271 | GFP_ATOMIC); |
634 | if (!edesc) { | 1272 | if (!edesc) { |
635 | dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__); | 1273 | dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__); |
636 | return NULL; | 1274 | return NULL; |
@@ -649,8 +1287,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( | |||
649 | /* Allocate a PaRAM slot, if needed */ | 1287 | /* Allocate a PaRAM slot, if needed */ |
650 | if (echan->slot[i] < 0) { | 1288 | if (echan->slot[i] < 0) { |
651 | echan->slot[i] = | 1289 | echan->slot[i] = |
652 | edma_alloc_slot(EDMA_CTLR(echan->ch_num), | 1290 | edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY); |
653 | EDMA_SLOT_ANY); | ||
654 | if (echan->slot[i] < 0) { | 1291 | if (echan->slot[i] < 0) { |
655 | kfree(edesc); | 1292 | kfree(edesc); |
656 | dev_err(dev, "%s: Failed to allocate slot\n", | 1293 | dev_err(dev, "%s: Failed to allocate slot\n", |
@@ -711,128 +1348,281 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( | |||
711 | } | 1348 | } |
712 | 1349 | ||
713 | /* Place the cyclic channel on the highest priority queue */ | 1350 | /* Place the cyclic channel on the highest priority queue */ |
714 | edma_assign_channel_eventq(echan->ch_num, EVENTQ_0); | 1351 | if (!echan->tc) |
1352 | edma_assign_channel_eventq(echan, EVENTQ_0); | ||
715 | 1353 | ||
716 | return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); | 1354 | return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); |
717 | } | 1355 | } |
718 | 1356 | ||
719 | static void edma_callback(unsigned ch_num, u16 ch_status, void *data) | 1357 | static void edma_completion_handler(struct edma_chan *echan) |
720 | { | 1358 | { |
721 | struct edma_chan *echan = data; | ||
722 | struct device *dev = echan->vchan.chan.device->dev; | 1359 | struct device *dev = echan->vchan.chan.device->dev; |
723 | struct edma_desc *edesc; | 1360 | struct edma_desc *edesc = echan->edesc; |
724 | struct edmacc_param p; | ||
725 | 1361 | ||
726 | edesc = echan->edesc; | 1362 | if (!edesc) |
1363 | return; | ||
727 | 1364 | ||
728 | /* Pause the channel for non-cyclic */ | 1365 | spin_lock(&echan->vchan.lock); |
729 | if (!edesc || !edesc->cyclic) | 1366 | if (edesc->cyclic) { |
730 | edma_pause(echan->ch_num); | 1367 | vchan_cyclic_callback(&edesc->vdesc); |
731 | 1368 | spin_unlock(&echan->vchan.lock); | |
732 | switch (ch_status) { | 1369 | return; |
733 | case EDMA_DMA_COMPLETE: | 1370 | } else if (edesc->processed == edesc->pset_nr) { |
734 | spin_lock(&echan->vchan.lock); | 1371 | edesc->residue = 0; |
735 | 1372 | edma_stop(echan); | |
736 | if (edesc) { | 1373 | vchan_cookie_complete(&edesc->vdesc); |
737 | if (edesc->cyclic) { | 1374 | echan->edesc = NULL; |
738 | vchan_cyclic_callback(&edesc->vdesc); | 1375 | |
739 | } else if (edesc->processed == edesc->pset_nr) { | 1376 | dev_dbg(dev, "Transfer completed on channel %d\n", |
740 | dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num); | 1377 | echan->ch_num); |
741 | edesc->residue = 0; | 1378 | } else { |
742 | edma_stop(echan->ch_num); | 1379 | dev_dbg(dev, "Sub transfer completed on channel %d\n", |
743 | vchan_cookie_complete(&edesc->vdesc); | 1380 | echan->ch_num); |
744 | edma_execute(echan); | 1381 | |
745 | } else { | 1382 | edma_pause(echan); |
746 | dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num); | 1383 | |
747 | 1384 | /* Update statistics for tx_status */ | |
748 | /* Update statistics for tx_status */ | 1385 | edesc->residue -= edesc->sg_len; |
749 | edesc->residue -= edesc->sg_len; | 1386 | edesc->residue_stat = edesc->residue; |
750 | edesc->residue_stat = edesc->residue; | 1387 | edesc->processed_stat = edesc->processed; |
751 | edesc->processed_stat = edesc->processed; | 1388 | } |
752 | 1389 | edma_execute(echan); | |
753 | edma_execute(echan); | 1390 | |
754 | } | 1391 | spin_unlock(&echan->vchan.lock); |
1392 | } | ||
1393 | |||
1394 | /* eDMA interrupt handler */ | ||
1395 | static irqreturn_t dma_irq_handler(int irq, void *data) | ||
1396 | { | ||
1397 | struct edma_cc *ecc = data; | ||
1398 | int ctlr; | ||
1399 | u32 sh_ier; | ||
1400 | u32 sh_ipr; | ||
1401 | u32 bank; | ||
1402 | |||
1403 | ctlr = ecc->id; | ||
1404 | if (ctlr < 0) | ||
1405 | return IRQ_NONE; | ||
1406 | |||
1407 | dev_vdbg(ecc->dev, "dma_irq_handler\n"); | ||
1408 | |||
1409 | sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0); | ||
1410 | if (!sh_ipr) { | ||
1411 | sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1); | ||
1412 | if (!sh_ipr) | ||
1413 | return IRQ_NONE; | ||
1414 | sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1); | ||
1415 | bank = 1; | ||
1416 | } else { | ||
1417 | sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0); | ||
1418 | bank = 0; | ||
1419 | } | ||
1420 | |||
1421 | do { | ||
1422 | u32 slot; | ||
1423 | u32 channel; | ||
1424 | |||
1425 | slot = __ffs(sh_ipr); | ||
1426 | sh_ipr &= ~(BIT(slot)); | ||
1427 | |||
1428 | if (sh_ier & BIT(slot)) { | ||
1429 | channel = (bank << 5) | slot; | ||
1430 | /* Clear the corresponding IPR bits */ | ||
1431 | edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot)); | ||
1432 | edma_completion_handler(&ecc->slave_chans[channel]); | ||
755 | } | 1433 | } |
1434 | } while (sh_ipr); | ||
756 | 1435 | ||
757 | spin_unlock(&echan->vchan.lock); | 1436 | edma_shadow0_write(ecc, SH_IEVAL, 1); |
1437 | return IRQ_HANDLED; | ||
1438 | } | ||
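
The completion IRQ path above recovers a channel number from the two 32-bit shadow-region pending registers: the register bank supplies bit 5 and the bit position the low five bits, i.e. (bank << 5) | slot. A minimal stand-alone sketch of that decode, assuming the same two-register layout (illustrative, not part of the patch):

    #include <stdint.h>
    #include <strings.h>        /* ffs() */

    /* Mirrors the '(bank << 5) | slot' decode in dma_irq_handler. */
    static int next_pending_channel(uint32_t ipr[2])
    {
            int bank;

            for (bank = 0; bank < 2; bank++) {
                    if (ipr[bank]) {
                            int slot = ffs(ipr[bank]) - 1;  /* lowest set bit */

                            ipr[bank] &= ~(1u << slot);     /* ack it */
                            return (bank << 5) | slot;      /* channel 0..63 */
                    }
            }
            return -1;  /* nothing pending */
    }
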
1439 | |||
1440 | static void edma_error_handler(struct edma_chan *echan) | ||
1441 | { | ||
1442 | struct edma_cc *ecc = echan->ecc; | ||
1443 | struct device *dev = echan->vchan.chan.device->dev; | ||
1444 | struct edmacc_param p; | ||
758 | 1445 | ||
759 | break; | 1446 | if (!echan->edesc) |
760 | case EDMA_DMA_CC_ERROR: | 1447 | return; |
761 | spin_lock(&echan->vchan.lock); | ||
762 | 1448 | ||
763 | edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p); | 1449 | spin_lock(&echan->vchan.lock); |
764 | 1450 | ||
1451 | edma_read_slot(ecc, echan->slot[0], &p); | ||
1452 | /* | ||
1453 | * Issue later based on missed flag which will be sure | ||
1454 | * to happen as: | ||
1455 | * (1) we finished transmitting an intermediate slot and | ||
1456 | * edma_execute is coming up. | ||
1457 | * (2) or we finished current transfer and issue will | ||
1458 | * call edma_execute. | ||
1459 | * | ||
1460 | * Important note: issuing can be dangerous here and | ||
1461 | * lead to some nasty recursion when we are in a NULL | ||
1462 | * slot. So we avoid doing so and set the missed flag. | ||
1463 | */ | ||
1464 | if (p.a_b_cnt == 0 && p.ccnt == 0) { | ||
1465 | dev_dbg(dev, "Error on null slot, setting miss\n"); | ||
1466 | echan->missed = 1; | ||
1467 | } else { | ||
765 | /* | 1468 | /* |
766 | * Issue later based on missed flag which will be sure | 1469 | * The slot is already programmed but the event got |
767 | * to happen as: | 1470 | * missed, so it's safe to issue it here.
768 | * (1) we finished transmitting an intermediate slot and | ||
769 | * edma_execute is coming up. | ||
770 | * (2) or we finished current transfer and issue will | ||
771 | * call edma_execute. | ||
772 | * | ||
773 | * Important note: issuing can be dangerous here and | ||
774 | * lead to some nasty recursion when we are in a NULL | ||
775 | * slot. So we avoid doing so and set the missed flag. | ||
776 | */ | 1471 | */ |
777 | if (p.a_b_cnt == 0 && p.ccnt == 0) { | 1472 | dev_dbg(dev, "Missed event, TRIGGERING\n"); |
778 | dev_dbg(dev, "Error occurred, looks like slot is null, just setting miss\n"); | 1473 | edma_clean_channel(echan); |
779 | echan->missed = 1; | 1474 | edma_stop(echan); |
780 | } else { | 1475 | edma_start(echan); |
781 | /* | 1476 | edma_trigger_channel(echan); |
782 | * The slot is already programmed but the event got | 1477 | } |
783 | * missed, so its safe to issue it here. | 1478 | spin_unlock(&echan->vchan.lock); |
784 | */ | 1479 | } |
785 | dev_dbg(dev, "Error occurred but slot is non-null, TRIGGERING\n"); | 1480 | |
786 | edma_clean_channel(echan->ch_num); | 1481 | static inline bool edma_error_pending(struct edma_cc *ecc) |
787 | edma_stop(echan->ch_num); | 1482 | { |
788 | edma_start(echan->ch_num); | 1483 | if (edma_read_array(ecc, EDMA_EMR, 0) || |
789 | edma_trigger_channel(echan->ch_num); | 1484 | edma_read_array(ecc, EDMA_EMR, 1) || |
1485 | edma_read(ecc, EDMA_QEMR) || edma_read(ecc, EDMA_CCERR)) | ||
1486 | return true; | ||
1487 | |||
1488 | return false; | ||
1489 | } | ||
1490 | |||
1491 | /* eDMA error interrupt handler */ | ||
1492 | static irqreturn_t dma_ccerr_handler(int irq, void *data) | ||
1493 | { | ||
1494 | struct edma_cc *ecc = data; | ||
1495 | int i, j; | ||
1496 | int ctlr; | ||
1497 | unsigned int cnt = 0; | ||
1498 | unsigned int val; | ||
1499 | |||
1500 | ctlr = ecc->id; | ||
1501 | if (ctlr < 0) | ||
1502 | return IRQ_NONE; | ||
1503 | |||
1504 | dev_vdbg(ecc->dev, "dma_ccerr_handler\n"); | ||
1505 | |||
1506 | if (!edma_error_pending(ecc)) | ||
1507 | return IRQ_NONE; | ||
1508 | |||
1509 | while (1) { | ||
1510 | /* Event missed register(s) */ | ||
1511 | for (j = 0; j < 2; j++) { | ||
1512 | unsigned long emr; | ||
1513 | |||
1514 | val = edma_read_array(ecc, EDMA_EMR, j); | ||
1515 | if (!val) | ||
1516 | continue; | ||
1517 | |||
1518 | dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val); | ||
1519 | emr = val; | ||
1520 | for (i = find_next_bit(&emr, 32, 0); i < 32; | ||
1521 | i = find_next_bit(&emr, 32, i + 1)) { | ||
1522 | int k = (j << 5) + i; | ||
1523 | |||
1524 | /* Clear the corresponding EMR bits */ | ||
1525 | edma_write_array(ecc, EDMA_EMCR, j, BIT(i)); | ||
1526 | /* Clear any SER */ | ||
1527 | edma_shadow0_write_array(ecc, SH_SECR, j, | ||
1528 | BIT(i)); | ||
1529 | edma_error_handler(&ecc->slave_chans[k]); | ||
1530 | } | ||
790 | } | 1531 | } |
791 | 1532 | ||
792 | spin_unlock(&echan->vchan.lock); | 1533 | val = edma_read(ecc, EDMA_QEMR); |
1534 | if (val) { | ||
1535 | dev_dbg(ecc->dev, "QEMR 0x%02x\n", val); | ||
1536 | /* Not reported, just clear the interrupt reason. */ | ||
1537 | edma_write(ecc, EDMA_QEMCR, val); | ||
1538 | edma_shadow0_write(ecc, SH_QSECR, val); | ||
1539 | } | ||
1540 | |||
1541 | val = edma_read(ecc, EDMA_CCERR); | ||
1542 | if (val) { | ||
1543 | dev_warn(ecc->dev, "CCERR 0x%08x\n", val); | ||
1544 | /* Not reported, just clear the interrupt reason. */ | ||
1545 | edma_write(ecc, EDMA_CCERRCLR, val); | ||
1546 | } | ||
1547 | |||
1548 | if (!edma_error_pending(ecc)) | ||
1549 | break; | ||
1550 | cnt++; | ||
1551 | if (cnt > 10) | ||
1552 | break; | ||
1553 | } | ||
1554 | edma_write(ecc, EDMA_EEVAL, 1); | ||
1555 | return IRQ_HANDLED; | ||
1556 | } | ||
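
The error handler drains EMR/QEMR/CCERR in a loop but caps itself at ten passes, so a stuck error source cannot wedge the CPU in hard-IRQ context. The pattern, reduced to a hedged stand-alone sketch:

    /* Bounded drain loop as used by dma_ccerr_handler: service while
     * sources are pending, but give up after a fixed number of passes;
     * the interrupt will simply fire again if anything is left.
     */
    static void drain_bounded(int (*pending)(void *), void (*service)(void *),
                              void *ctx)
    {
            unsigned int cnt = 0;

            while (pending(ctx)) {
                    service(ctx);
                    if (++cnt > 10)
                            break;
            }
    }
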
1557 | |||
1558 | static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable) | ||
1559 | { | ||
1560 | struct platform_device *tc_pdev; | ||
1561 | int ret; | ||
793 | 1562 | ||
794 | break; | 1563 | if (!tc) |
795 | default: | 1564 | return; |
796 | break; | 1565 | |
1566 | tc_pdev = of_find_device_by_node(tc->node); | ||
1567 | if (!tc_pdev) { | ||
1568 | pr_err("%s: TPTC device is not found\n", __func__); | ||
1569 | return; | ||
797 | } | 1570 | } |
1571 | if (!pm_runtime_enabled(&tc_pdev->dev)) | ||
1572 | pm_runtime_enable(&tc_pdev->dev); | ||
1573 | |||
1574 | if (enable) | ||
1575 | ret = pm_runtime_get_sync(&tc_pdev->dev); | ||
1576 | else | ||
1577 | ret = pm_runtime_put_sync(&tc_pdev->dev); | ||
1578 | |||
1579 | if (ret < 0) | ||
1580 | pr_err("%s: pm_runtime_%s_sync() failed for %s\n", __func__, | ||
1581 | enable ? "get" : "put", dev_name(&tc_pdev->dev)); | ||
798 | } | 1582 | } |
799 | 1583 | ||
800 | /* Alloc channel resources */ | 1584 | /* Alloc channel resources */ |
801 | static int edma_alloc_chan_resources(struct dma_chan *chan) | 1585 | static int edma_alloc_chan_resources(struct dma_chan *chan) |
802 | { | 1586 | { |
803 | struct edma_chan *echan = to_edma_chan(chan); | 1587 | struct edma_chan *echan = to_edma_chan(chan); |
804 | struct device *dev = chan->device->dev; | 1588 | struct edma_cc *ecc = echan->ecc; |
1589 | struct device *dev = ecc->dev; | ||
1590 | enum dma_event_q eventq_no = EVENTQ_DEFAULT; | ||
805 | int ret; | 1591 | int ret; |
806 | int a_ch_num; | ||
807 | LIST_HEAD(descs); | ||
808 | |||
809 | a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback, | ||
810 | echan, EVENTQ_DEFAULT); | ||
811 | 1592 | ||
812 | if (a_ch_num < 0) { | 1593 | if (echan->tc) { |
813 | ret = -ENODEV; | 1594 | eventq_no = echan->tc->id; |
814 | goto err_no_chan; | 1595 | } else if (ecc->tc_list) { |
1596 | /* memcpy channel */ | ||
1597 | echan->tc = &ecc->tc_list[ecc->info->default_queue]; | ||
1598 | eventq_no = echan->tc->id; | ||
815 | } | 1599 | } |
816 | 1600 | ||
817 | if (a_ch_num != echan->ch_num) { | 1601 | ret = edma_alloc_channel(echan, eventq_no); |
818 | dev_err(dev, "failed to allocate requested channel %u:%u\n", | 1602 | if (ret) |
819 | EDMA_CTLR(echan->ch_num), | 1603 | return ret; |
1604 | |||
1605 | echan->slot[0] = edma_alloc_slot(ecc, echan->ch_num); | ||
1606 | if (echan->slot[0] < 0) { | ||
1607 | dev_err(dev, "Entry slot allocation failed for channel %u\n", | ||
820 | EDMA_CHAN_SLOT(echan->ch_num)); | 1608 | EDMA_CHAN_SLOT(echan->ch_num)); |
821 | ret = -ENODEV; | 1609 | goto err_slot; |
822 | goto err_wrong_chan; | ||
823 | } | 1610 | } |
824 | 1611 | ||
1612 | /* Set up channel -> slot mapping for the entry slot */ | ||
1613 | edma_set_chmap(echan, echan->slot[0]); | ||
825 | echan->alloced = true; | 1614 | echan->alloced = true; |
826 | echan->slot[0] = echan->ch_num; | ||
827 | 1615 | ||
828 | dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num, | 1616 | dev_dbg(dev, "Got eDMA channel %d for virt channel %d (%s trigger)\n", |
829 | EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num)); | 1617 | EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id, |
1618 | echan->hw_triggered ? "HW" : "SW"); | ||
1619 | |||
1620 | edma_tc_set_pm_state(echan->tc, true); | ||
830 | 1621 | ||
831 | return 0; | 1622 | return 0; |
832 | 1623 | ||
833 | err_wrong_chan: | 1624 | err_slot: |
834 | edma_free_channel(a_ch_num); | 1625 | edma_free_channel(echan); |
835 | err_no_chan: | ||
836 | return ret; | 1626 | return ret; |
837 | } | 1627 | } |
838 | 1628 | ||
@@ -840,29 +1630,37 @@ err_no_chan: | |||
840 | static void edma_free_chan_resources(struct dma_chan *chan) | 1630 | static void edma_free_chan_resources(struct dma_chan *chan) |
841 | { | 1631 | { |
842 | struct edma_chan *echan = to_edma_chan(chan); | 1632 | struct edma_chan *echan = to_edma_chan(chan); |
843 | struct device *dev = chan->device->dev; | 1633 | struct device *dev = echan->ecc->dev; |
844 | int i; | 1634 | int i; |
845 | 1635 | ||
846 | /* Terminate transfers */ | 1636 | /* Terminate transfers */ |
847 | edma_stop(echan->ch_num); | 1637 | edma_stop(echan); |
848 | 1638 | ||
849 | vchan_free_chan_resources(&echan->vchan); | 1639 | vchan_free_chan_resources(&echan->vchan); |
850 | 1640 | ||
851 | /* Free EDMA PaRAM slots */ | 1641 | /* Free EDMA PaRAM slots */ |
852 | for (i = 1; i < EDMA_MAX_SLOTS; i++) { | 1642 | for (i = 0; i < EDMA_MAX_SLOTS; i++) { |
853 | if (echan->slot[i] >= 0) { | 1643 | if (echan->slot[i] >= 0) { |
854 | edma_free_slot(echan->slot[i]); | 1644 | edma_free_slot(echan->ecc, echan->slot[i]); |
855 | echan->slot[i] = -1; | 1645 | echan->slot[i] = -1; |
856 | } | 1646 | } |
857 | } | 1647 | } |
858 | 1648 | ||
1649 | /* Set entry slot to the dummy slot */ | ||
1650 | edma_set_chmap(echan, echan->ecc->dummy_slot); | ||
1651 | |||
859 | /* Free EDMA channel */ | 1652 | /* Free EDMA channel */ |
860 | if (echan->alloced) { | 1653 | if (echan->alloced) { |
861 | edma_free_channel(echan->ch_num); | 1654 | edma_free_channel(echan); |
862 | echan->alloced = false; | 1655 | echan->alloced = false; |
863 | } | 1656 | } |
864 | 1657 | ||
865 | dev_dbg(dev, "freeing channel for %u\n", echan->ch_num); | 1658 | edma_tc_set_pm_state(echan->tc, false); |
1659 | echan->tc = NULL; | ||
1660 | echan->hw_triggered = false; | ||
1661 | |||
1662 | dev_dbg(dev, "Free eDMA channel %d for virt channel %d\n", | ||
1663 | EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id); | ||
866 | } | 1664 | } |
867 | 1665 | ||
868 | /* Send pending descriptor to hardware */ | 1666 | /* Send pending descriptor to hardware */ |
@@ -888,7 +1686,7 @@ static u32 edma_residue(struct edma_desc *edesc) | |||
888 | * We always read the dst/src position from the first RamPar | 1686 | * We always read the dst/src position from the first RamPar |
889 | * pset. That's the one which is active now. | 1687 | * pset. That's the one which is active now. |
890 | */ | 1688 | */ |
891 | pos = edma_get_position(edesc->echan->slot[0], dst); | 1689 | pos = edma_get_position(edesc->echan->ecc, edesc->echan->slot[0], dst); |
892 | 1690 | ||
893 | /* | 1691 | /* |
894 | * Cyclic is simple. Just subtract pset[0].addr from pos. | 1692 | * Cyclic is simple. Just subtract pset[0].addr from pos. |
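
For the cyclic case the comment above boils down to one subtraction. A sketch of the arithmetic, assuming pset[0].addr holds the ring-buffer base and edesc->residue the full buffer length (field names follow this driver, but treat the helper as illustrative):

    /* Remaining bytes in a cyclic transfer; 'pos' is the src/dst
     * position just read from the active PaRAM slot.
     */
    static u32 cyclic_residue(struct edma_desc *edesc, dma_addr_t pos)
    {
            u32 done = pos - edesc->pset[0].addr;   /* bytes consumed */

            return edesc->residue - done;
    }
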
@@ -949,19 +1747,101 @@ static enum dma_status edma_tx_status(struct dma_chan *chan, | |||
949 | return ret; | 1747 | return ret; |
950 | } | 1748 | } |
951 | 1749 | ||
952 | static void __init edma_chan_init(struct edma_cc *ecc, | 1750 | static bool edma_is_memcpy_channel(int ch_num, s16 *memcpy_channels)
953 | struct dma_device *dma, | ||
954 | struct edma_chan *echans) | ||
955 | { | 1751 | { |
1752 | s16 *memcpy_ch = memcpy_channels; | ||
1753 | |||
1754 | if (!memcpy_channels) | ||
1755 | return false; | ||
1756 | while (*memcpy_ch != -1) { | ||
1757 | if (*memcpy_ch == ch_num) | ||
1758 | return true; | ||
1759 | memcpy_ch++; | ||
1760 | } | ||
1761 | return false; | ||
1762 | } | ||
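
edma_is_memcpy_channel() walks a -1 terminated s16 list; the list itself is built further down in edma_setup_info_from_dt() from the "ti,edma-memcpy-channels" property. A usage sketch with made-up channel numbers:

    /* Illustrative only: pretend channels 20 and 21 are set aside
     * for memcpy on some board.
     */
    static s16 demo_memcpy_channels[] = { 20, 21, -1 };

    static bool demo_is_memcpy(int ch)
    {
            return edma_is_memcpy_channel(ch, demo_memcpy_channels);
    }
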
1763 | |||
1764 | #define EDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ | ||
1765 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ | ||
1766 | BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ | ||
1767 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) | ||
1768 | |||
1769 | static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode) | ||
1770 | { | ||
1771 | struct dma_device *s_ddev = &ecc->dma_slave; | ||
1772 | struct dma_device *m_ddev = NULL; | ||
1773 | s16 *memcpy_channels = ecc->info->memcpy_channels; | ||
956 | int i, j; | 1774 | int i, j; |
957 | 1775 | ||
958 | for (i = 0; i < EDMA_CHANS; i++) { | 1776 | dma_cap_zero(s_ddev->cap_mask); |
959 | struct edma_chan *echan = &echans[i]; | 1777 | dma_cap_set(DMA_SLAVE, s_ddev->cap_mask); |
960 | echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i); | 1778 | dma_cap_set(DMA_CYCLIC, s_ddev->cap_mask); |
1779 | if (ecc->legacy_mode && !memcpy_channels) { | ||
1780 | dev_warn(ecc->dev, | ||
1781 | "Legacy memcpy is enabled, things might not work\n"); | ||
1782 | |||
1783 | dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask); | ||
1784 | s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy; | ||
1785 | s_ddev->directions = BIT(DMA_MEM_TO_MEM); | ||
1786 | } | ||
1787 | |||
1788 | s_ddev->device_prep_slave_sg = edma_prep_slave_sg; | ||
1789 | s_ddev->device_prep_dma_cyclic = edma_prep_dma_cyclic; | ||
1790 | s_ddev->device_alloc_chan_resources = edma_alloc_chan_resources; | ||
1791 | s_ddev->device_free_chan_resources = edma_free_chan_resources; | ||
1792 | s_ddev->device_issue_pending = edma_issue_pending; | ||
1793 | s_ddev->device_tx_status = edma_tx_status; | ||
1794 | s_ddev->device_config = edma_slave_config; | ||
1795 | s_ddev->device_pause = edma_dma_pause; | ||
1796 | s_ddev->device_resume = edma_dma_resume; | ||
1797 | s_ddev->device_terminate_all = edma_terminate_all; | ||
1798 | |||
1799 | s_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS; | ||
1800 | s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS; | ||
1801 | s_ddev->directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV)); | ||
1802 | s_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | ||
1803 | |||
1804 | s_ddev->dev = ecc->dev; | ||
1805 | INIT_LIST_HEAD(&s_ddev->channels); | ||
1806 | |||
1807 | if (memcpy_channels) { | ||
1808 | m_ddev = devm_kzalloc(ecc->dev, sizeof(*m_ddev), GFP_KERNEL); | ||
1809 | ecc->dma_memcpy = m_ddev; | ||
1810 | |||
1811 | dma_cap_zero(m_ddev->cap_mask); | ||
1812 | dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask); | ||
1813 | |||
1814 | m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy; | ||
1815 | m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources; | ||
1816 | m_ddev->device_free_chan_resources = edma_free_chan_resources; | ||
1817 | m_ddev->device_issue_pending = edma_issue_pending; | ||
1818 | m_ddev->device_tx_status = edma_tx_status; | ||
1819 | m_ddev->device_config = edma_slave_config; | ||
1820 | m_ddev->device_pause = edma_dma_pause; | ||
1821 | m_ddev->device_resume = edma_dma_resume; | ||
1822 | m_ddev->device_terminate_all = edma_terminate_all; | ||
1823 | |||
1824 | m_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS; | ||
1825 | m_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS; | ||
1826 | m_ddev->directions = BIT(DMA_MEM_TO_MEM); | ||
1827 | m_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | ||
1828 | |||
1829 | m_ddev->dev = ecc->dev; | ||
1830 | INIT_LIST_HEAD(&m_ddev->channels); | ||
1831 | } else if (!ecc->legacy_mode) { | ||
1832 | dev_info(ecc->dev, "memcpy is disabled\n"); | ||
1833 | } | ||
1834 | |||
1835 | for (i = 0; i < ecc->num_channels; i++) { | ||
1836 | struct edma_chan *echan = &ecc->slave_chans[i]; | ||
1837 | echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i); | ||
961 | echan->ecc = ecc; | 1838 | echan->ecc = ecc; |
962 | echan->vchan.desc_free = edma_desc_free; | 1839 | echan->vchan.desc_free = edma_desc_free; |
963 | 1840 | ||
964 | vchan_init(&echan->vchan, dma); | 1841 | if (m_ddev && edma_is_memcpy_channel(i, memcpy_channels)) |
1842 | vchan_init(&echan->vchan, m_ddev); | ||
1843 | else | ||
1844 | vchan_init(&echan->vchan, s_ddev); | ||
965 | 1845 | ||
966 | INIT_LIST_HEAD(&echan->node); | 1846 | INIT_LIST_HEAD(&echan->node); |
967 | for (j = 0; j < EDMA_MAX_SLOTS; j++) | 1847 | for (j = 0; j < EDMA_MAX_SLOTS; j++) |
@@ -969,85 +1849,474 @@ static void __init edma_chan_init(struct edma_cc *ecc, | |||
969 | } | 1849 | } |
970 | } | 1850 | } |
971 | 1851 | ||
972 | #define EDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ | 1852 | static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata, |
973 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ | 1853 | struct edma_cc *ecc) |
974 | BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ | ||
975 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) | ||
976 | |||
977 | static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, | ||
978 | struct device *dev) | ||
979 | { | 1854 | { |
980 | dma->device_prep_slave_sg = edma_prep_slave_sg; | 1855 | int i; |
981 | dma->device_prep_dma_cyclic = edma_prep_dma_cyclic; | 1856 | u32 value, cccfg; |
982 | dma->device_prep_dma_memcpy = edma_prep_dma_memcpy; | 1857 | s8 (*queue_priority_map)[2]; |
983 | dma->device_alloc_chan_resources = edma_alloc_chan_resources; | 1858 | |
984 | dma->device_free_chan_resources = edma_free_chan_resources; | 1859 | /* Decode the eDMA3 configuration from CCCFG register */ |
985 | dma->device_issue_pending = edma_issue_pending; | 1860 | cccfg = edma_read(ecc, EDMA_CCCFG); |
986 | dma->device_tx_status = edma_tx_status; | 1861 | |
987 | dma->device_config = edma_slave_config; | 1862 | value = GET_NUM_REGN(cccfg); |
988 | dma->device_pause = edma_dma_pause; | 1863 | ecc->num_region = BIT(value); |
989 | dma->device_resume = edma_dma_resume; | 1864 | |
990 | dma->device_terminate_all = edma_terminate_all; | 1865 | value = GET_NUM_DMACH(cccfg); |
1866 | ecc->num_channels = BIT(value + 1); | ||
1867 | |||
1868 | value = GET_NUM_QDMACH(cccfg); | ||
1869 | ecc->num_qchannels = value * 2; | ||
991 | 1870 | ||
992 | dma->src_addr_widths = EDMA_DMA_BUSWIDTHS; | 1871 | value = GET_NUM_PAENTRY(cccfg); |
993 | dma->dst_addr_widths = EDMA_DMA_BUSWIDTHS; | 1872 | ecc->num_slots = BIT(value + 4); |
994 | dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
995 | dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | ||
996 | 1873 | ||
997 | dma->dev = dev; | 1874 | value = GET_NUM_EVQUE(cccfg); |
1875 | ecc->num_tc = value + 1; | ||
1876 | |||
1877 | ecc->chmap_exist = (cccfg & CHMAP_EXIST) ? true : false; | ||
1878 | |||
1879 | dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg); | ||
1880 | dev_dbg(dev, "num_region: %u\n", ecc->num_region); | ||
1881 | dev_dbg(dev, "num_channels: %u\n", ecc->num_channels); | ||
1882 | dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels); | ||
1883 | dev_dbg(dev, "num_slots: %u\n", ecc->num_slots); | ||
1884 | dev_dbg(dev, "num_tc: %u\n", ecc->num_tc); | ||
1885 | dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no"); | ||
1886 | |||
1887 | /* Nothing needs to be done if queue priority is provided */ | ||
1888 | if (pdata->queue_priority_mapping) | ||
1889 | return 0; | ||
998 | 1890 | ||
999 | /* | 1891 | /* |
1000 | * code using dma memcpy must make sure alignment of | 1892 | * Configure TC/queue priority as follows: |
1001 | * length is at dma->copy_align boundary. | 1893 | * Q0 - priority 0 |
1894 | * Q1 - priority 1 | ||
1895 | * Q2 - priority 2 | ||
1896 | * ... | ||
1897 | * The meaning of priority numbers: 0 highest priority, 7 lowest | ||
1898 | * priority. So Q0 is the highest priority queue and the last queue has | ||
1899 | * the lowest priority. | ||
1002 | */ | 1900 | */ |
1003 | dma->copy_align = DMAENGINE_ALIGN_4_BYTES; | 1901 | queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(*queue_priority_map),
1902 | GFP_KERNEL); | ||
1903 | if (!queue_priority_map) | ||
1904 | return -ENOMEM; | ||
1905 | |||
1906 | for (i = 0; i < ecc->num_tc; i++) { | ||
1907 | queue_priority_map[i][0] = i; | ||
1908 | queue_priority_map[i][1] = i; | ||
1909 | } | ||
1910 | queue_priority_map[i][0] = -1; | ||
1911 | queue_priority_map[i][1] = -1; | ||
1912 | |||
1913 | pdata->queue_priority_mapping = queue_priority_map; | ||
1914 | /* Default queue has the lowest priority */ | ||
1915 | pdata->default_queue = i - 1; | ||
1916 | |||
1917 | return 0; | ||
1918 | } | ||
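
edma_setup_from_hw() derives the controller geometry entirely from the CCCFG register; the GET_NUM_* accessors live earlier in the file and are not part of this hunk. A stand-alone sketch of the decode: the field positions below are assumptions consistent with the eDMA3 documentation, and the register value is made up, not taken from any real SoC.

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed CCCFG field positions; illustrative only. */
    #define GET_NUM_DMACH(x)    ((x) & 0x7)          /* bits 0-2   */
    #define GET_NUM_QDMACH(x)   (((x) >> 4) & 0x7)   /* bits 4-6   */
    #define GET_NUM_PAENTRY(x)  (((x) >> 12) & 0x7)  /* bits 12-14 */
    #define GET_NUM_EVQUE(x)    (((x) >> 16) & 0x7)  /* bits 16-18 */
    #define GET_NUM_REGN(x)     (((x) >> 20) & 0x3)  /* bits 20-21 */

    int main(void)
    {
            uint32_t cccfg = 0x01324045;    /* made-up example value */

            printf("regions:   %u\n", 1u << GET_NUM_REGN(cccfg));
            printf("channels:  %u\n", 1u << (GET_NUM_DMACH(cccfg) + 1));
            printf("qchannels: %u\n", GET_NUM_QDMACH(cccfg) * 2);
            printf("slots:     %u\n", 1u << (GET_NUM_PAENTRY(cccfg) + 4));
            printf("tc/queues: %u\n", GET_NUM_EVQUE(cccfg) + 1);
            return 0;
    }
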
1919 | |||
1920 | #if IS_ENABLED(CONFIG_OF) | ||
1921 | static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata, | ||
1922 | size_t sz) | ||
1923 | { | ||
1924 | const char pname[] = "ti,edma-xbar-event-map"; | ||
1925 | struct resource res; | ||
1926 | void __iomem *xbar; | ||
1927 | s16 (*xbar_chans)[2]; | ||
1928 | size_t nelm = sz / sizeof(s16); | ||
1929 | u32 shift, offset, mux; | ||
1930 | int ret, i; | ||
1931 | |||
1932 | xbar_chans = devm_kcalloc(dev, nelm + 2, sizeof(s16), GFP_KERNEL); | ||
1933 | if (!xbar_chans) | ||
1934 | return -ENOMEM; | ||
1935 | |||
1936 | ret = of_address_to_resource(dev->of_node, 1, &res); | ||
1937 | if (ret) | ||
1938 | return -ENOMEM; | ||
1939 | |||
1940 | xbar = devm_ioremap(dev, res.start, resource_size(&res)); | ||
1941 | if (!xbar) | ||
1942 | return -ENOMEM; | ||
1943 | |||
1944 | ret = of_property_read_u16_array(dev->of_node, pname, (u16 *)xbar_chans, | ||
1945 | nelm); | ||
1946 | if (ret) | ||
1947 | return -EIO; | ||
1948 | |||
1949 | /* Invalidate last entry for the other user of this mess */ | ||
1950 | nelm >>= 1; | ||
1951 | xbar_chans[nelm][0] = -1; | ||
1952 | xbar_chans[nelm][1] = -1; | ||
1953 | |||
1954 | for (i = 0; i < nelm; i++) { | ||
1955 | shift = (xbar_chans[i][1] & 0x03) << 3; | ||
1956 | offset = xbar_chans[i][1] & 0xfffffffc; | ||
1957 | mux = readl(xbar + offset); | ||
1958 | mux &= ~(0xff << shift); | ||
1959 | mux |= xbar_chans[i][0] << shift; | ||
1960 | writel(mux, (xbar + offset)); | ||
1961 | } | ||
1962 | |||
1963 | pdata->xbar_chans = (const s16 (*)[2]) xbar_chans; | ||
1964 | return 0; | ||
1965 | } | ||
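
The legacy crossbar map above packs one byte-wide mux field per DMA event into 32-bit registers: event & 3 selects the byte lane (times 8 gives the shift) and event & ~3 the word. A hedged sketch of the same read-modify-write, outside of the ioremap plumbing:

    #include <stdint.h>

    /* One 8-bit mux field per event, four fields per 32-bit word. */
    static void xbar_map_sketch(volatile uint32_t *xbar, unsigned int event,
                                uint8_t dma_req)
    {
            unsigned int shift = (event & 0x03) << 3;   /* 0, 8, 16, 24 */
            unsigned int word  = (event & ~0x03) >> 2;  /* register index */
            uint32_t mux = xbar[word];

            mux &= ~(0xffu << shift);
            mux |= (uint32_t)dma_req << shift;
            xbar[word] = mux;
    }
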
1966 | |||
1967 | static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev, | ||
1968 | bool legacy_mode) | ||
1969 | { | ||
1970 | struct edma_soc_info *info; | ||
1971 | struct property *prop; | ||
1972 | size_t sz; | ||
1973 | int ret; | ||
1974 | |||
1975 | info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL); | ||
1976 | if (!info) | ||
1977 | return ERR_PTR(-ENOMEM); | ||
1978 | |||
1979 | if (legacy_mode) { | ||
1980 | prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map", | ||
1981 | &sz); | ||
1982 | if (prop) { | ||
1983 | ret = edma_xbar_event_map(dev, info, sz); | ||
1984 | if (ret) | ||
1985 | return ERR_PTR(ret); | ||
1986 | } | ||
1987 | return info; | ||
1988 | } | ||
1989 | |||
1990 | /* Get the list of channels allocated to be used for memcpy */ | ||
1991 | prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz); | ||
1992 | if (prop) { | ||
1993 | const char pname[] = "ti,edma-memcpy-channels"; | ||
1994 | size_t nelm = sz / sizeof(s16); | ||
1995 | s16 *memcpy_ch; | ||
1996 | |||
1997 | memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s16), | ||
1998 | GFP_KERNEL); | ||
1999 | if (!memcpy_ch) | ||
2000 | return ERR_PTR(-ENOMEM); | ||
2001 | |||
2002 | ret = of_property_read_u16_array(dev->of_node, pname, | ||
2003 | (u16 *)memcpy_ch, nelm); | ||
2004 | if (ret) | ||
2005 | return ERR_PTR(ret); | ||
2006 | |||
2007 | memcpy_ch[nelm] = -1; | ||
2008 | info->memcpy_channels = memcpy_ch; | ||
2009 | } | ||
2010 | |||
2011 | prop = of_find_property(dev->of_node, "ti,edma-reserved-slot-ranges", | ||
2012 | &sz); | ||
2013 | if (prop) { | ||
2014 | const char pname[] = "ti,edma-reserved-slot-ranges"; | ||
2015 | s16 (*rsv_slots)[2]; | ||
2016 | size_t nelm = sz / sizeof(*rsv_slots); | ||
2017 | struct edma_rsv_info *rsv_info; | ||
2018 | |||
2019 | if (!nelm) | ||
2020 | return info; | ||
2021 | |||
2022 | rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL); | ||
2023 | if (!rsv_info) | ||
2024 | return ERR_PTR(-ENOMEM); | ||
2025 | |||
2026 | rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots), | ||
2027 | GFP_KERNEL); | ||
2028 | if (!rsv_slots) | ||
2029 | return ERR_PTR(-ENOMEM); | ||
2030 | |||
2031 | ret = of_property_read_u16_array(dev->of_node, pname, | ||
2032 | (u16 *)rsv_slots, nelm * 2); | ||
2033 | if (ret) | ||
2034 | return ERR_PTR(ret); | ||
2035 | |||
2036 | rsv_slots[nelm][0] = -1; | ||
2037 | rsv_slots[nelm][1] = -1; | ||
2038 | info->rsv = rsv_info; | ||
2039 | info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots; | ||
2040 | } | ||
2041 | |||
2042 | return info; | ||
2043 | } | ||
2044 | |||
2045 | static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec, | ||
2046 | struct of_dma *ofdma) | ||
2047 | { | ||
2048 | struct edma_cc *ecc = ofdma->of_dma_data; | ||
2049 | struct dma_chan *chan = NULL; | ||
2050 | struct edma_chan *echan; | ||
2051 | int i; | ||
2052 | |||
2053 | if (!ecc || dma_spec->args_count < 1) | ||
2054 | return NULL; | ||
2055 | |||
2056 | for (i = 0; i < ecc->num_channels; i++) { | ||
2057 | echan = &ecc->slave_chans[i]; | ||
2058 | if (echan->ch_num == dma_spec->args[0]) { | ||
2059 | chan = &echan->vchan.chan; | ||
2060 | break; | ||
2061 | } | ||
2062 | } | ||
1004 | 2063 | ||
1005 | INIT_LIST_HEAD(&dma->channels); | 2064 | if (!chan) |
2065 | return NULL; | ||
2066 | |||
2067 | if (echan->ecc->legacy_mode && dma_spec->args_count == 1) | ||
2068 | goto out; | ||
2069 | |||
2070 | if (!echan->ecc->legacy_mode && dma_spec->args_count == 2 && | ||
2071 | dma_spec->args[1] < echan->ecc->num_tc) { | ||
2072 | echan->tc = &echan->ecc->tc_list[dma_spec->args[1]]; | ||
2073 | goto out; | ||
2074 | } | ||
2075 | |||
2076 | return NULL; | ||
2077 | out: | ||
2078 | /* The channel is going to be used as HW synchronized */ | ||
2079 | echan->hw_triggered = true; | ||
2080 | return dma_get_slave_channel(chan); | ||
2081 | } | ||
2082 | #else | ||
2083 | static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev, | ||
2084 | bool legacy_mode) | ||
2085 | { | ||
2086 | return ERR_PTR(-EINVAL); | ||
1006 | } | 2087 | } |
1007 | 2088 | ||
2089 | static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec, | ||
2090 | struct of_dma *ofdma) | ||
2091 | { | ||
2092 | return NULL; | ||
2093 | } | ||
2094 | #endif | ||
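
With the new TPCC binding, of_edma_xlate() accepts a second cell that pins the channel to a specific transfer controller, while single-cell (legacy) specs keep working. Nothing changes on the client side; a minimal sketch of a consumer requesting such a channel, where the device and the "rx" name are hypothetical:

    #include <linux/device.h>
    #include <linux/dmaengine.h>

    /* "rx" must match a dma-names entry whose dmas cells follow the
     * <&edma channel tc> form handled by of_edma_xlate above.
     */
    static int demo_request(struct device *dev, struct dma_chan **out)
    {
            *out = dma_request_slave_channel(dev, "rx");
            return *out ? 0 : -EPROBE_DEFER;
    }
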
2095 | |||
1008 | static int edma_probe(struct platform_device *pdev) | 2096 | static int edma_probe(struct platform_device *pdev) |
1009 | { | 2097 | { |
1010 | struct edma_cc *ecc; | 2098 | struct edma_soc_info *info = pdev->dev.platform_data; |
2099 | s8 (*queue_priority_mapping)[2]; | ||
2100 | int i, off, ln; | ||
2101 | const s16 (*rsv_slots)[2]; | ||
2102 | const s16 (*xbar_chans)[2]; | ||
2103 | int irq; | ||
2104 | char *irq_name; | ||
2105 | struct resource *mem; | ||
2106 | struct device_node *node = pdev->dev.of_node; | ||
2107 | struct device *dev = &pdev->dev; | ||
2108 | struct edma_cc *ecc; | ||
2109 | bool legacy_mode = true; | ||
1011 | int ret; | 2110 | int ret; |
1012 | 2111 | ||
1013 | ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); | 2112 | if (node) { |
2113 | const struct of_device_id *match; | ||
2114 | |||
2115 | match = of_match_node(edma_of_ids, node); | ||
2116 | if (match && (u32)match->data == EDMA_BINDING_TPCC) | ||
2117 | legacy_mode = false; | ||
2118 | |||
2119 | info = edma_setup_info_from_dt(dev, legacy_mode); | ||
2120 | if (IS_ERR(info)) { | ||
2121 | dev_err(dev, "failed to get DT data\n"); | ||
2122 | return PTR_ERR(info); | ||
2123 | } | ||
2124 | } | ||
2125 | |||
2126 | if (!info) | ||
2127 | return -ENODEV; | ||
2128 | |||
2129 | pm_runtime_enable(dev); | ||
2130 | ret = pm_runtime_get_sync(dev); | ||
2131 | if (ret < 0) { | ||
2132 | dev_err(dev, "pm_runtime_get_sync() failed\n"); | ||
2133 | return ret; | ||
2134 | } | ||
2135 | |||
2136 | ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); | ||
1014 | if (ret) | 2137 | if (ret) |
1015 | return ret; | 2138 | return ret; |
1016 | 2139 | ||
1017 | ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL); | 2140 | ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL); |
1018 | if (!ecc) { | 2141 | if (!ecc) { |
1019 | dev_err(&pdev->dev, "Can't allocate controller\n"); | 2142 | dev_err(dev, "Can't allocate controller\n"); |
2143 | return -ENOMEM; | ||
2144 | } | ||
2145 | |||
2146 | ecc->dev = dev; | ||
2147 | ecc->id = pdev->id; | ||
2148 | ecc->legacy_mode = legacy_mode; | ||
2149 | /* When booting with DT the pdev->id is -1 */ | ||
2150 | if (ecc->id < 0) | ||
2151 | ecc->id = 0; | ||
2152 | |||
2153 | mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc"); | ||
2154 | if (!mem) { | ||
2155 | dev_dbg(dev, "mem resource not found, using index 0\n"); | ||
2156 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
2157 | if (!mem) { | ||
2158 | dev_err(dev, "no mem resource?\n"); | ||
2159 | return -ENODEV; | ||
2160 | } | ||
2161 | } | ||
2162 | ecc->base = devm_ioremap_resource(dev, mem); | ||
2163 | if (IS_ERR(ecc->base)) | ||
2164 | return PTR_ERR(ecc->base); | ||
2165 | |||
2166 | platform_set_drvdata(pdev, ecc); | ||
2167 | |||
2168 | /* Get eDMA3 configuration from IP */ | ||
2169 | ret = edma_setup_from_hw(dev, info, ecc); | ||
2170 | if (ret) | ||
2171 | return ret; | ||
2172 | |||
2173 | /* Allocate memory based on the information we got from the IP */ | ||
2174 | ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels, | ||
2175 | sizeof(*ecc->slave_chans), GFP_KERNEL); | ||
2176 | if (!ecc->slave_chans) | ||
2177 | return -ENOMEM; | ||
2178 | |||
2179 | ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots), | ||
2180 | sizeof(unsigned long), GFP_KERNEL); | ||
2181 | if (!ecc->slot_inuse) | ||
1020 | return -ENOMEM; | 2182 | return -ENOMEM; |
2183 | |||
2184 | ecc->default_queue = info->default_queue; | ||
2185 | |||
2186 | for (i = 0; i < ecc->num_slots; i++) | ||
2187 | edma_write_slot(ecc, i, &dummy_paramset); | ||
2188 | |||
2189 | if (info->rsv) { | ||
2190 | /* Set the reserved slots in inuse list */ | ||
2191 | rsv_slots = info->rsv->rsv_slots; | ||
2192 | if (rsv_slots) { | ||
2193 | for (i = 0; rsv_slots[i][0] != -1; i++) { | ||
2194 | off = rsv_slots[i][0]; | ||
2195 | ln = rsv_slots[i][1]; | ||
2196 | set_bits(off, ln, ecc->slot_inuse); | ||
2197 | } | ||
2198 | } | ||
2199 | } | ||
2200 | |||
2201 | /* Clear the xbar mapped channels in unused list */ | ||
2202 | xbar_chans = info->xbar_chans; | ||
2203 | if (xbar_chans) { | ||
2204 | for (i = 0; xbar_chans[i][1] != -1; i++) { | ||
2205 | off = xbar_chans[i][1]; | ||
2206 | } | ||
2207 | } | ||
2208 | |||
2209 | irq = platform_get_irq_byname(pdev, "edma3_ccint"); | ||
2210 | if (irq < 0 && node) | ||
2211 | irq = irq_of_parse_and_map(node, 0); | ||
2212 | |||
2213 | if (irq >= 0) { | ||
2214 | irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint", | ||
2215 | dev_name(dev)); | ||
2216 | ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name, | ||
2217 | ecc); | ||
2218 | if (ret) { | ||
2219 | dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret); | ||
2220 | return ret; | ||
2221 | } | ||
2222 | } | ||
2223 | |||
2224 | irq = platform_get_irq_byname(pdev, "edma3_ccerrint"); | ||
2225 | if (irq < 0 && node) | ||
2226 | irq = irq_of_parse_and_map(node, 2); | ||
2227 | |||
2228 | if (irq >= 0) { | ||
2229 | irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint", | ||
2230 | dev_name(dev)); | ||
2231 | ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name, | ||
2232 | ecc); | ||
2233 | if (ret) { | ||
2234 | dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret); | ||
2235 | return ret; | ||
2236 | } | ||
1021 | } | 2237 | } |
1022 | 2238 | ||
1023 | ecc->ctlr = pdev->id; | 2239 | ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY); |
1024 | ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY); | ||
1025 | if (ecc->dummy_slot < 0) { | 2240 | if (ecc->dummy_slot < 0) { |
1026 | dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n"); | 2241 | dev_err(dev, "Can't allocate PaRAM dummy slot\n"); |
1027 | return ecc->dummy_slot; | 2242 | return ecc->dummy_slot; |
1028 | } | 2243 | } |
1029 | 2244 | ||
1030 | dma_cap_zero(ecc->dma_slave.cap_mask); | 2245 | queue_priority_mapping = info->queue_priority_mapping; |
1031 | dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask); | 2246 | |
1032 | dma_cap_set(DMA_CYCLIC, ecc->dma_slave.cap_mask); | 2247 | if (!ecc->legacy_mode) { |
1033 | dma_cap_set(DMA_MEMCPY, ecc->dma_slave.cap_mask); | 2248 | int lowest_priority = 0; |
2249 | struct of_phandle_args tc_args; | ||
2250 | |||
2251 | ecc->tc_list = devm_kcalloc(dev, ecc->num_tc, | ||
2252 | sizeof(*ecc->tc_list), GFP_KERNEL); | ||
2253 | if (!ecc->tc_list) | ||
2254 | return -ENOMEM; | ||
2255 | |||
2256 | for (i = 0;; i++) { | ||
2257 | ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs", | ||
2258 | 1, i, &tc_args); | ||
2259 | if (ret || i == ecc->num_tc) | ||
2260 | break; | ||
2261 | |||
2262 | ecc->tc_list[i].node = tc_args.np; | ||
2263 | ecc->tc_list[i].id = i; | ||
2264 | queue_priority_mapping[i][1] = tc_args.args[0]; | ||
2265 | if (queue_priority_mapping[i][1] > lowest_priority) { | ||
2266 | lowest_priority = queue_priority_mapping[i][1]; | ||
2267 | info->default_queue = i; | ||
2268 | } | ||
2269 | } | ||
2270 | } | ||
2271 | |||
2272 | /* Event queue priority mapping */ | ||
2273 | for (i = 0; queue_priority_mapping[i][0] != -1; i++) | ||
2274 | edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0], | ||
2275 | queue_priority_mapping[i][1]); | ||
1034 | 2276 | ||
1035 | edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev); | 2277 | for (i = 0; i < ecc->num_region; i++) { |
2278 | edma_write_array2(ecc, EDMA_DRAE, i, 0, 0x0); | ||
2279 | edma_write_array2(ecc, EDMA_DRAE, i, 1, 0x0); | ||
2280 | edma_write_array(ecc, EDMA_QRAE, i, 0x0); | ||
2281 | } | ||
2282 | ecc->info = info; | ||
1036 | 2283 | ||
1037 | edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans); | 2284 | /* Init the dma device and channels */ |
2285 | edma_dma_init(ecc, legacy_mode); | ||
2286 | |||
2287 | for (i = 0; i < ecc->num_channels; i++) { | ||
2288 | /* Assign all channels to the default queue */ | ||
2289 | edma_assign_channel_eventq(&ecc->slave_chans[i], | ||
2290 | info->default_queue); | ||
2291 | /* Set entry slot to the dummy slot */ | ||
2292 | edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot); | ||
2293 | } | ||
1038 | 2294 | ||
1039 | ret = dma_async_device_register(&ecc->dma_slave); | 2295 | ret = dma_async_device_register(&ecc->dma_slave); |
1040 | if (ret) | 2296 | if (ret) { |
2297 | dev_err(dev, "slave ddev registration failed (%d)\n", ret); | ||
1041 | goto err_reg1; | 2298 | goto err_reg1; |
2299 | } | ||
1042 | 2300 | ||
1043 | platform_set_drvdata(pdev, ecc); | 2301 | if (ecc->dma_memcpy) { |
2302 | ret = dma_async_device_register(ecc->dma_memcpy); | ||
2303 | if (ret) { | ||
2304 | dev_err(dev, "memcpy ddev registration failed (%d)\n", | ||
2305 | ret); | ||
2306 | dma_async_device_unregister(&ecc->dma_slave); | ||
2307 | goto err_reg1; | ||
2308 | } | ||
2309 | } | ||
2310 | |||
2311 | if (node) | ||
2312 | of_dma_controller_register(node, of_edma_xlate, ecc); | ||
1044 | 2313 | ||
1045 | dev_info(&pdev->dev, "TI EDMA DMA engine driver\n"); | 2314 | dev_info(dev, "TI EDMA DMA engine driver\n"); |
1046 | 2315 | ||
1047 | return 0; | 2316 | return 0; |
1048 | 2317 | ||
1049 | err_reg1: | 2318 | err_reg1: |
1050 | edma_free_slot(ecc->dummy_slot); | 2319 | edma_free_slot(ecc, ecc->dummy_slot); |
1051 | return ret; | 2320 | return ret; |
1052 | } | 2321 | } |
1053 | 2322 | ||
@@ -1056,28 +2325,94 @@ static int edma_remove(struct platform_device *pdev) | |||
1056 | struct device *dev = &pdev->dev; | 2325 | struct device *dev = &pdev->dev; |
1057 | struct edma_cc *ecc = dev_get_drvdata(dev); | 2326 | struct edma_cc *ecc = dev_get_drvdata(dev); |
1058 | 2327 | ||
2328 | if (dev->of_node) | ||
2329 | of_dma_controller_free(dev->of_node); | ||
1059 | dma_async_device_unregister(&ecc->dma_slave); | 2330 | dma_async_device_unregister(&ecc->dma_slave); |
1060 | edma_free_slot(ecc->dummy_slot); | 2331 | if (ecc->dma_memcpy) |
2332 | dma_async_device_unregister(ecc->dma_memcpy); | ||
2333 | edma_free_slot(ecc, ecc->dummy_slot); | ||
1061 | 2334 | ||
1062 | return 0; | 2335 | return 0; |
1063 | } | 2336 | } |
1064 | 2337 | ||
2338 | #ifdef CONFIG_PM_SLEEP | ||
2339 | static int edma_pm_suspend(struct device *dev) | ||
2340 | { | ||
2341 | struct edma_cc *ecc = dev_get_drvdata(dev); | ||
2342 | struct edma_chan *echan = ecc->slave_chans; | ||
2343 | int i; | ||
2344 | |||
2345 | for (i = 0; i < ecc->num_channels; i++) { | ||
2346 | if (echan[i].alloced) { | ||
2347 | edma_setup_interrupt(&echan[i], false); | ||
2348 | edma_tc_set_pm_state(echan[i].tc, false); | ||
2349 | } | ||
2350 | } | ||
2351 | |||
2352 | return 0; | ||
2353 | } | ||
2354 | |||
2355 | static int edma_pm_resume(struct device *dev) | ||
2356 | { | ||
2357 | struct edma_cc *ecc = dev_get_drvdata(dev); | ||
2358 | struct edma_chan *echan = ecc->slave_chans; | ||
2359 | int i; | ||
2360 | s8 (*queue_priority_mapping)[2]; | ||
2361 | |||
2362 | queue_priority_mapping = ecc->info->queue_priority_mapping; | ||
2363 | |||
2364 | /* Event queue priority mapping */ | ||
2365 | for (i = 0; queue_priority_mapping[i][0] != -1; i++) | ||
2366 | edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0], | ||
2367 | queue_priority_mapping[i][1]); | ||
2368 | |||
2369 | for (i = 0; i < ecc->num_channels; i++) { | ||
2370 | if (echan[i].alloced) { | ||
2371 | /* ensure access through shadow region 0 */ | ||
2372 | edma_or_array2(ecc, EDMA_DRAE, 0, i >> 5, | ||
2373 | BIT(i & 0x1f)); | ||
2374 | |||
2375 | edma_setup_interrupt(&echan[i], true); | ||
2376 | |||
2377 | /* Set up channel -> slot mapping for the entry slot */ | ||
2378 | edma_set_chmap(&echan[i], echan[i].slot[0]); | ||
2379 | |||
2380 | edma_tc_set_pm_state(echan[i].tc, true); | ||
2381 | } | ||
2382 | } | ||
2383 | |||
2384 | return 0; | ||
2385 | } | ||
2386 | #endif | ||
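
edma_pm_resume() re-grants shadow-region 0 access per allocated channel; channel i lands in DRAE register i >> 5 at bit i & 0x1f. The index math as a tiny sketch:

    /* Channel number -> (32-bit DRAE register, bit mask); 64 channels
     * split across two registers.
     */
    static inline void drae_coords(unsigned int ch, unsigned int *reg,
                                   u32 *mask)
    {
            *reg  = ch >> 5;            /* 32 channels per register */
            *mask = BIT(ch & 0x1f);     /* bit within that register */
    }
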
2387 | |||
2388 | static const struct dev_pm_ops edma_pm_ops = { | ||
2389 | SET_LATE_SYSTEM_SLEEP_PM_OPS(edma_pm_suspend, edma_pm_resume) | ||
2390 | }; | ||
2391 | |||
1065 | static struct platform_driver edma_driver = { | 2392 | static struct platform_driver edma_driver = { |
1066 | .probe = edma_probe, | 2393 | .probe = edma_probe, |
1067 | .remove = edma_remove, | 2394 | .remove = edma_remove, |
1068 | .driver = { | 2395 | .driver = { |
1069 | .name = "edma-dma-engine", | 2396 | .name = "edma", |
2397 | .pm = &edma_pm_ops, | ||
2398 | .of_match_table = edma_of_ids, | ||
1070 | }, | 2399 | }, |
1071 | }; | 2400 | }; |
1072 | 2401 | ||
1073 | bool edma_filter_fn(struct dma_chan *chan, void *param) | 2402 | bool edma_filter_fn(struct dma_chan *chan, void *param) |
1074 | { | 2403 | { |
2404 | bool match = false; | ||
2405 | |||
1075 | if (chan->device->dev->driver == &edma_driver.driver) { | 2406 | if (chan->device->dev->driver == &edma_driver.driver) { |
1076 | struct edma_chan *echan = to_edma_chan(chan); | 2407 | struct edma_chan *echan = to_edma_chan(chan); |
1077 | unsigned ch_req = *(unsigned *)param; | 2408 | unsigned ch_req = *(unsigned *)param; |
1078 | return ch_req == echan->ch_num; | 2409 | if (ch_req == echan->ch_num) { |
2410 | /* The channel is going to be used as HW synchronized */ | ||
2411 | echan->hw_triggered = true; | ||
2412 | match = true; | ||
2413 | } | ||
1079 | } | 2414 | } |
1080 | return false; | 2415 | return match; |
1081 | } | 2416 | } |
1082 | EXPORT_SYMBOL(edma_filter_fn); | 2417 | EXPORT_SYMBOL(edma_filter_fn); |
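
For board-file (non-DT) users, edma_filter_fn is passed to dma_request_channel() together with the packed controller/channel number; as of this patch the filter also marks the matched channel as HW-triggered. A usage sketch with a made-up request line:

    #include <linux/dmaengine.h>
    #include <linux/edma.h>                 /* edma_filter_fn declaration */
    #include <linux/platform_data/edma.h>   /* EDMA_CTLR_CHAN() */

    static struct dma_chan *demo_request_edma(void)
    {
            dma_cap_mask_t mask;
            unsigned int ch_req = EDMA_CTLR_CHAN(0, 12);    /* hypothetical */

            dma_cap_zero(mask);
            dma_cap_set(DMA_SLAVE, mask);
            return dma_request_channel(mask, edma_filter_fn, &ch_req);
    }
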
1083 | 2418 | ||
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c index 5cce8c9d0026..a415edbe61b1 100644 --- a/drivers/dma/ti-dma-crossbar.c +++ b/drivers/dma/ti-dma-crossbar.c | |||
@@ -17,13 +17,184 @@ | |||
17 | #include <linux/of_device.h> | 17 | #include <linux/of_device.h> |
18 | #include <linux/of_dma.h> | 18 | #include <linux/of_dma.h> |
19 | 19 | ||
20 | #define TI_XBAR_OUTPUTS 127 | 20 | #define TI_XBAR_DRA7 0 |
21 | #define TI_XBAR_INPUTS 256 | 21 | #define TI_XBAR_AM335X 1 |
22 | |||
23 | static const struct of_device_id ti_dma_xbar_match[] = { | ||
24 | { | ||
25 | .compatible = "ti,dra7-dma-crossbar", | ||
26 | .data = (void *)TI_XBAR_DRA7, | ||
27 | }, | ||
28 | { | ||
29 | .compatible = "ti,am335x-edma-crossbar", | ||
30 | .data = (void *)TI_XBAR_AM335X, | ||
31 | }, | ||
32 | {}, | ||
33 | }; | ||
34 | |||
35 | /* Crossbar on AM335x/AM437x family */ | ||
36 | #define TI_AM335X_XBAR_LINES 64 | ||
37 | |||
38 | struct ti_am335x_xbar_data { | ||
39 | void __iomem *iomem; | ||
40 | |||
41 | struct dma_router dmarouter; | ||
42 | |||
43 | u32 xbar_events; /* maximum number of events to select in xbar */ | ||
44 | u32 dma_requests; /* number of DMA requests on eDMA */ | ||
45 | }; | ||
46 | |||
47 | struct ti_am335x_xbar_map { | ||
48 | u16 dma_line; | ||
49 | u16 mux_val; | ||
50 | }; | ||
51 | |||
52 | static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u16 val) | ||
53 | { | ||
54 | writeb_relaxed(val & 0x1f, iomem + event); | ||
55 | } | ||
56 | |||
57 | static void ti_am335x_xbar_free(struct device *dev, void *route_data) | ||
58 | { | ||
59 | struct ti_am335x_xbar_data *xbar = dev_get_drvdata(dev); | ||
60 | struct ti_am335x_xbar_map *map = route_data; | ||
61 | |||
62 | dev_dbg(dev, "Unmapping XBAR event %u on channel %u\n", | ||
63 | map->mux_val, map->dma_line); | ||
64 | |||
65 | ti_am335x_xbar_write(xbar->iomem, map->dma_line, 0); | ||
66 | kfree(map); | ||
67 | } | ||
68 | |||
69 | static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec, | ||
70 | struct of_dma *ofdma) | ||
71 | { | ||
72 | struct platform_device *pdev = of_find_device_by_node(ofdma->of_node); | ||
73 | struct ti_am335x_xbar_data *xbar = platform_get_drvdata(pdev); | ||
74 | struct ti_am335x_xbar_map *map; | ||
75 | |||
76 | if (dma_spec->args_count != 3) | ||
77 | return ERR_PTR(-EINVAL); | ||
78 | |||
79 | if (dma_spec->args[2] >= xbar->xbar_events) { | ||
80 | dev_err(&pdev->dev, "Invalid XBAR event number: %d\n", | ||
81 | dma_spec->args[2]); | ||
82 | return ERR_PTR(-EINVAL); | ||
83 | } | ||
84 | |||
85 | if (dma_spec->args[0] >= xbar->dma_requests) { | ||
86 | dev_err(&pdev->dev, "Invalid DMA request line number: %d\n", | ||
87 | dma_spec->args[0]); | ||
88 | return ERR_PTR(-EINVAL); | ||
89 | } | ||
90 | |||
91 | /* The of_node_put() will be done in the core for the node */ | ||
92 | dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0); | ||
93 | if (!dma_spec->np) { | ||
94 | dev_err(&pdev->dev, "Can't get DMA master\n"); | ||
95 | return ERR_PTR(-EINVAL); | ||
96 | } | ||
97 | |||
98 | map = kzalloc(sizeof(*map), GFP_KERNEL); | ||
99 | if (!map) { | ||
100 | of_node_put(dma_spec->np); | ||
101 | return ERR_PTR(-ENOMEM); | ||
102 | } | ||
103 | |||
104 | map->dma_line = (u16)dma_spec->args[0]; | ||
105 | map->mux_val = (u16)dma_spec->args[2]; | ||
106 | |||
107 | dma_spec->args[2] = 0; | ||
108 | dma_spec->args_count = 2; | ||
109 | |||
110 | dev_dbg(&pdev->dev, "Mapping XBAR event%u to DMA%u\n", | ||
111 | map->mux_val, map->dma_line); | ||
112 | |||
113 | ti_am335x_xbar_write(xbar->iomem, map->dma_line, map->mux_val); | ||
114 | |||
115 | return map; | ||
116 | } | ||
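
The am335x allocator consumes the third dma-spec cell as the crossbar mux value, programs the mux, and then forwards a plain two-cell spec to the eDMA master. The rewrite step in isolation, as a sketch (the surrounding bookkeeping is in the function above):

    #include <linux/of.h>

    /* After the mux value has been latched into the map, the spec
     * handed to ti,edma3-tpcc must look like a direct request.
     */
    static void forward_spec_sketch(struct of_phandle_args *dma_spec)
    {
            dma_spec->args[2] = 0;      /* mux cell consumed by the router */
            dma_spec->args_count = 2;   /* master now sees two cells */
    }
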
117 | |||
118 | static const struct of_device_id ti_am335x_master_match[] = { | ||
119 | { .compatible = "ti,edma3-tpcc", }, | ||
120 | {}, | ||
121 | }; | ||
122 | |||
123 | static int ti_am335x_xbar_probe(struct platform_device *pdev) | ||
124 | { | ||
125 | struct device_node *node = pdev->dev.of_node; | ||
126 | const struct of_device_id *match; | ||
127 | struct device_node *dma_node; | ||
128 | struct ti_am335x_xbar_data *xbar; | ||
129 | struct resource *res; | ||
130 | void __iomem *iomem; | ||
131 | int i, ret; | ||
132 | |||
133 | if (!node) | ||
134 | return -ENODEV; | ||
135 | |||
136 | xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL); | ||
137 | if (!xbar) | ||
138 | return -ENOMEM; | ||
139 | |||
140 | dma_node = of_parse_phandle(node, "dma-masters", 0); | ||
141 | if (!dma_node) { | ||
142 | dev_err(&pdev->dev, "Can't get DMA master node\n"); | ||
143 | return -ENODEV; | ||
144 | } | ||
145 | |||
146 | match = of_match_node(ti_am335x_master_match, dma_node); | ||
147 | if (!match) { | ||
148 | dev_err(&pdev->dev, "DMA master is not supported\n"); | ||
149 | return -EINVAL; | ||
150 | } | ||
151 | |||
152 | if (of_property_read_u32(dma_node, "dma-requests", | ||
153 | &xbar->dma_requests)) { | ||
154 | dev_info(&pdev->dev, | ||
155 | "Missing XBAR output information, using %u.\n", | ||
156 | TI_AM335X_XBAR_LINES); | ||
157 | xbar->dma_requests = TI_AM335X_XBAR_LINES; | ||
158 | } | ||
159 | of_node_put(dma_node); | ||
160 | |||
161 | if (of_property_read_u32(node, "dma-requests", &xbar->xbar_events)) { | ||
162 | dev_info(&pdev->dev, | ||
163 | "Missing XBAR input information, using %u.\n", | ||
164 | TI_AM335X_XBAR_LINES); | ||
165 | xbar->xbar_events = TI_AM335X_XBAR_LINES; | ||
166 | } | ||
167 | |||
168 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
169 | iomem = devm_ioremap_resource(&pdev->dev, res); | ||
170 | if (IS_ERR(iomem)) | ||
171 | return PTR_ERR(iomem); | ||
172 | |||
173 | xbar->iomem = iomem; | ||
174 | |||
175 | xbar->dmarouter.dev = &pdev->dev; | ||
176 | xbar->dmarouter.route_free = ti_am335x_xbar_free; | ||
177 | |||
178 | platform_set_drvdata(pdev, xbar); | ||
179 | |||
180 | /* Reset the crossbar */ | ||
181 | for (i = 0; i < xbar->dma_requests; i++) | ||
182 | ti_am335x_xbar_write(xbar->iomem, i, 0); | ||
183 | |||
184 | ret = of_dma_router_register(node, ti_am335x_xbar_route_allocate, | ||
185 | &xbar->dmarouter); | ||
186 | |||
187 | return ret; | ||
188 | } | ||
189 | |||
190 | /* Crossbar on DRA7xx family */ | ||
191 | #define TI_DRA7_XBAR_OUTPUTS 127 | ||
192 | #define TI_DRA7_XBAR_INPUTS 256 | ||
22 | 193 | ||
23 | #define TI_XBAR_EDMA_OFFSET 0 | 194 | #define TI_XBAR_EDMA_OFFSET 0 |
24 | #define TI_XBAR_SDMA_OFFSET 1 | 195 | #define TI_XBAR_SDMA_OFFSET 1 |
25 | 196 | ||
26 | struct ti_dma_xbar_data { | 197 | struct ti_dra7_xbar_data { |
27 | void __iomem *iomem; | 198 | void __iomem *iomem; |
28 | 199 | ||
29 | struct dma_router dmarouter; | 200 | struct dma_router dmarouter; |
@@ -35,35 +206,35 @@ struct ti_dma_xbar_data { | |||
35 | u32 dma_offset; | 206 | u32 dma_offset; |
36 | }; | 207 | }; |
37 | 208 | ||
38 | struct ti_dma_xbar_map { | 209 | struct ti_dra7_xbar_map { |
39 | u16 xbar_in; | 210 | u16 xbar_in; |
40 | int xbar_out; | 211 | int xbar_out; |
41 | }; | 212 | }; |
42 | 213 | ||
43 | static inline void ti_dma_xbar_write(void __iomem *iomem, int xbar, u16 val) | 214 | static inline void ti_dra7_xbar_write(void __iomem *iomem, int xbar, u16 val) |
44 | { | 215 | { |
45 | writew_relaxed(val, iomem + (xbar * 2)); | 216 | writew_relaxed(val, iomem + (xbar * 2)); |
46 | } | 217 | } |
47 | 218 | ||
48 | static void ti_dma_xbar_free(struct device *dev, void *route_data) | 219 | static void ti_dra7_xbar_free(struct device *dev, void *route_data) |
49 | { | 220 | { |
50 | struct ti_dma_xbar_data *xbar = dev_get_drvdata(dev); | 221 | struct ti_dra7_xbar_data *xbar = dev_get_drvdata(dev); |
51 | struct ti_dma_xbar_map *map = route_data; | 222 | struct ti_dra7_xbar_map *map = route_data; |
52 | 223 | ||
53 | dev_dbg(dev, "Unmapping XBAR%u (was routed to %d)\n", | 224 | dev_dbg(dev, "Unmapping XBAR%u (was routed to %d)\n", |
54 | map->xbar_in, map->xbar_out); | 225 | map->xbar_in, map->xbar_out); |
55 | 226 | ||
56 | ti_dma_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val); | 227 | ti_dra7_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val); |
57 | idr_remove(&xbar->map_idr, map->xbar_out); | 228 | idr_remove(&xbar->map_idr, map->xbar_out); |
58 | kfree(map); | 229 | kfree(map); |
59 | } | 230 | } |
60 | 231 | ||
61 | static void *ti_dma_xbar_route_allocate(struct of_phandle_args *dma_spec, | 232 | static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec, |
62 | struct of_dma *ofdma) | 233 | struct of_dma *ofdma) |
63 | { | 234 | { |
64 | struct platform_device *pdev = of_find_device_by_node(ofdma->of_node); | 235 | struct platform_device *pdev = of_find_device_by_node(ofdma->of_node); |
65 | struct ti_dma_xbar_data *xbar = platform_get_drvdata(pdev); | 236 | struct ti_dra7_xbar_data *xbar = platform_get_drvdata(pdev); |
66 | struct ti_dma_xbar_map *map; | 237 | struct ti_dra7_xbar_map *map; |
67 | 238 | ||
68 | if (dma_spec->args[0] >= xbar->xbar_requests) { | 239 | if (dma_spec->args[0] >= xbar->xbar_requests) { |
69 | dev_err(&pdev->dev, "Invalid XBAR request number: %d\n", | 240 | dev_err(&pdev->dev, "Invalid XBAR request number: %d\n", |
@@ -93,12 +264,12 @@ static void *ti_dma_xbar_route_allocate(struct of_phandle_args *dma_spec, | |||
93 | dev_dbg(&pdev->dev, "Mapping XBAR%u to DMA%d\n", | 264 | dev_dbg(&pdev->dev, "Mapping XBAR%u to DMA%d\n", |
94 | map->xbar_in, map->xbar_out); | 265 | map->xbar_in, map->xbar_out); |
95 | 266 | ||
96 | ti_dma_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in); | 267 | ti_dra7_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in); |
97 | 268 | ||
98 | return map; | 269 | return map; |
99 | } | 270 | } |
100 | 271 | ||
101 | static const struct of_device_id ti_dma_master_match[] = { | 272 | static const struct of_device_id ti_dra7_master_match[] = { |
102 | { | 273 | { |
103 | .compatible = "ti,omap4430-sdma", | 274 | .compatible = "ti,omap4430-sdma", |
104 | .data = (void *)TI_XBAR_SDMA_OFFSET, | 275 | .data = (void *)TI_XBAR_SDMA_OFFSET, |
@@ -110,12 +281,12 @@ static const struct of_device_id ti_dma_master_match[] = { | |||
110 | {}, | 281 | {}, |
111 | }; | 282 | }; |
112 | 283 | ||
113 | static int ti_dma_xbar_probe(struct platform_device *pdev) | 284 | static int ti_dra7_xbar_probe(struct platform_device *pdev) |
114 | { | 285 | { |
115 | struct device_node *node = pdev->dev.of_node; | 286 | struct device_node *node = pdev->dev.of_node; |
116 | const struct of_device_id *match; | 287 | const struct of_device_id *match; |
117 | struct device_node *dma_node; | 288 | struct device_node *dma_node; |
118 | struct ti_dma_xbar_data *xbar; | 289 | struct ti_dra7_xbar_data *xbar; |
119 | struct resource *res; | 290 | struct resource *res; |
120 | u32 safe_val; | 291 | u32 safe_val; |
121 | void __iomem *iomem; | 292 | void __iomem *iomem; |
@@ -136,7 +307,7 @@ static int ti_dma_xbar_probe(struct platform_device *pdev) | |||
136 | return -ENODEV; | 307 | return -ENODEV; |
137 | } | 308 | } |
138 | 309 | ||
139 | match = of_match_node(ti_dma_master_match, dma_node); | 310 | match = of_match_node(ti_dra7_master_match, dma_node); |
140 | if (!match) { | 311 | if (!match) { |
141 | dev_err(&pdev->dev, "DMA master is not supported\n"); | 312 | dev_err(&pdev->dev, "DMA master is not supported\n"); |
142 | return -EINVAL; | 313 | return -EINVAL; |
@@ -146,16 +317,16 @@ static int ti_dma_xbar_probe(struct platform_device *pdev) | |||
146 | &xbar->dma_requests)) { | 317 | &xbar->dma_requests)) { |
147 | dev_info(&pdev->dev, | 318 | dev_info(&pdev->dev, |
148 | "Missing XBAR output information, using %u.\n", | 319 | "Missing XBAR output information, using %u.\n", |
149 | TI_XBAR_OUTPUTS); | 320 | TI_DRA7_XBAR_OUTPUTS); |
150 | xbar->dma_requests = TI_XBAR_OUTPUTS; | 321 | xbar->dma_requests = TI_DRA7_XBAR_OUTPUTS; |
151 | } | 322 | } |
152 | of_node_put(dma_node); | 323 | of_node_put(dma_node); |
153 | 324 | ||
154 | if (of_property_read_u32(node, "dma-requests", &xbar->xbar_requests)) { | 325 | if (of_property_read_u32(node, "dma-requests", &xbar->xbar_requests)) { |
155 | dev_info(&pdev->dev, | 326 | dev_info(&pdev->dev, |
156 | "Missing XBAR input information, using %u.\n", | 327 | "Missing XBAR input information, using %u.\n", |
157 | TI_XBAR_INPUTS); | 328 | TI_DRA7_XBAR_INPUTS); |
158 | xbar->xbar_requests = TI_XBAR_INPUTS; | 329 | xbar->xbar_requests = TI_DRA7_XBAR_INPUTS; |
159 | } | 330 | } |
160 | 331 | ||
161 | if (!of_property_read_u32(node, "ti,dma-safe-map", &safe_val)) | 332 | if (!of_property_read_u32(node, "ti,dma-safe-map", &safe_val)) |
@@ -169,30 +340,50 @@ static int ti_dma_xbar_probe(struct platform_device *pdev) | |||
169 | xbar->iomem = iomem; | 340 | xbar->iomem = iomem; |
170 | 341 | ||
171 | xbar->dmarouter.dev = &pdev->dev; | 342 | xbar->dmarouter.dev = &pdev->dev; |
172 | xbar->dmarouter.route_free = ti_dma_xbar_free; | 343 | xbar->dmarouter.route_free = ti_dra7_xbar_free; |
173 | xbar->dma_offset = (u32)match->data; | 344 | xbar->dma_offset = (u32)match->data; |
174 | 345 | ||
175 | platform_set_drvdata(pdev, xbar); | 346 | platform_set_drvdata(pdev, xbar); |
176 | 347 | ||
177 | /* Reset the crossbar */ | 348 | /* Reset the crossbar */ |
178 | for (i = 0; i < xbar->dma_requests; i++) | 349 | for (i = 0; i < xbar->dma_requests; i++) |
179 | ti_dma_xbar_write(xbar->iomem, i, xbar->safe_val); | 350 | ti_dra7_xbar_write(xbar->iomem, i, xbar->safe_val); |
180 | 351 | ||
181 | ret = of_dma_router_register(node, ti_dma_xbar_route_allocate, | 352 | ret = of_dma_router_register(node, ti_dra7_xbar_route_allocate, |
182 | &xbar->dmarouter); | 353 | &xbar->dmarouter); |
183 | if (ret) { | 354 | if (ret) { |
184 | /* Restore the defaults for the crossbar */ | 355 | /* Restore the defaults for the crossbar */ |
185 | for (i = 0; i < xbar->dma_requests; i++) | 356 | for (i = 0; i < xbar->dma_requests; i++) |
186 | ti_dma_xbar_write(xbar->iomem, i, i); | 357 | ti_dra7_xbar_write(xbar->iomem, i, i); |
187 | } | 358 | } |
188 | 359 | ||
189 | return ret; | 360 | return ret; |
190 | } | 361 | } |
191 | 362 | ||
192 | static const struct of_device_id ti_dma_xbar_match[] = { | 363 | static int ti_dma_xbar_probe(struct platform_device *pdev) |
193 | { .compatible = "ti,dra7-dma-crossbar" }, | 364 | { |
194 | {}, | 365 | const struct of_device_id *match; |
195 | }; | 366 | int ret; |
367 | |||
368 | match = of_match_node(ti_dma_xbar_match, pdev->dev.of_node); | ||
369 | if (unlikely(!match)) | ||
370 | return -EINVAL; | ||
371 | |||
372 | switch ((u32)match->data) { | ||
373 | case TI_XBAR_DRA7: | ||
374 | ret = ti_dra7_xbar_probe(pdev); | ||
375 | break; | ||
376 | case TI_XBAR_AM335X: | ||
377 | ret = ti_am335x_xbar_probe(pdev); | ||
378 | break; | ||
379 | default: | ||
380 | dev_err(&pdev->dev, "Unsupported crossbar\n"); | ||
381 | ret = -ENODEV; | ||
382 | break; | ||
383 | } | ||
384 | |||
385 | return ret; | ||
386 | } | ||
196 | 387 | ||
197 | static struct platform_driver ti_dma_xbar_driver = { | 388 | static struct platform_driver ti_dma_xbar_driver = { |
198 | .driver = { | 389 | .driver = { |
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h index bdb2710e2aab..e2878baeb90e 100644 --- a/include/linux/platform_data/edma.h +++ b/include/linux/platform_data/edma.h | |||
@@ -41,51 +41,6 @@ | |||
41 | #ifndef EDMA_H_ | 41 | #ifndef EDMA_H_ |
42 | #define EDMA_H_ | 42 | #define EDMA_H_ |
43 | 43 | ||
44 | /* PaRAM slots are laid out like this */ | ||
45 | struct edmacc_param { | ||
46 | u32 opt; | ||
47 | u32 src; | ||
48 | u32 a_b_cnt; | ||
49 | u32 dst; | ||
50 | u32 src_dst_bidx; | ||
51 | u32 link_bcntrld; | ||
52 | u32 src_dst_cidx; | ||
53 | u32 ccnt; | ||
54 | } __packed; | ||
55 | |||
56 | /* fields in edmacc_param.opt */ | ||
57 | #define SAM BIT(0) | ||
58 | #define DAM BIT(1) | ||
59 | #define SYNCDIM BIT(2) | ||
60 | #define STATIC BIT(3) | ||
61 | #define EDMA_FWID (0x07 << 8) | ||
62 | #define TCCMODE BIT(11) | ||
63 | #define EDMA_TCC(t) ((t) << 12) | ||
64 | #define TCINTEN BIT(20) | ||
65 | #define ITCINTEN BIT(21) | ||
66 | #define TCCHEN BIT(22) | ||
67 | #define ITCCHEN BIT(23) | ||
68 | |||
69 | /* ch_status parameter of callback function possible values */ | ||
70 | #define EDMA_DMA_COMPLETE 1 | ||
71 | #define EDMA_DMA_CC_ERROR 2 | ||
72 | #define EDMA_DMA_TC1_ERROR 3 | ||
73 | #define EDMA_DMA_TC2_ERROR 4 | ||
74 | |||
75 | enum address_mode { | ||
76 | INCR = 0, | ||
77 | FIFO = 1 | ||
78 | }; | ||
79 | |||
80 | enum fifo_width { | ||
81 | W8BIT = 0, | ||
82 | W16BIT = 1, | ||
83 | W32BIT = 2, | ||
84 | W64BIT = 3, | ||
85 | W128BIT = 4, | ||
86 | W256BIT = 5 | ||
87 | }; | ||
88 | |||
89 | enum dma_event_q { | 44 | enum dma_event_q { |
90 | EVENTQ_0 = 0, | 45 | EVENTQ_0 = 0, |
91 | EVENTQ_1 = 1, | 46 | EVENTQ_1 = 1, |
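
The opt-field bit definitions removed above were OR-ed together by legacy clients to build a PaRAM slot's option word. A minimal sketch of that idiom, built from the deleted definitions and the edma_read_slot()/edma_write_slot() helpers removed in the next hunk; "slot" and "channel" stand for hypothetical, previously allocated resources:

    #include <linux/platform_data/edma.h>

    /* Legacy idiom (now removed): request an interrupt on transfer
     * completion and route it to the channel's completion code.
     */
    static void example_enable_completion_irq(unsigned slot, unsigned channel)
    {
    	struct edmacc_param param;

    	edma_read_slot(slot, &param);
    	param.opt |= TCINTEN | EDMA_TCC(EDMA_CHAN_SLOT(channel));
    	edma_write_slot(slot, &param);
    }
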
@@ -94,64 +49,10 @@ enum dma_event_q { | |||
94 | EVENTQ_DEFAULT = -1 | 49 | EVENTQ_DEFAULT = -1 |
95 | }; | 50 | }; |
96 | 51 | ||
97 | enum sync_dimension { | ||
98 | ASYNC = 0, | ||
99 | ABSYNC = 1 | ||
100 | }; | ||
101 | |||
102 | #define EDMA_CTLR_CHAN(ctlr, chan) (((ctlr) << 16) | (chan)) | 52 | #define EDMA_CTLR_CHAN(ctlr, chan) (((ctlr) << 16) | (chan)) |
103 | #define EDMA_CTLR(i) ((i) >> 16) | 53 | #define EDMA_CTLR(i) ((i) >> 16) |
104 | #define EDMA_CHAN_SLOT(i) ((i) & 0xffff) | 54 | #define EDMA_CHAN_SLOT(i) ((i) & 0xffff) |
105 | 55 | ||
106 | #define EDMA_CHANNEL_ANY -1 /* for edma_alloc_channel() */ | ||
107 | #define EDMA_SLOT_ANY -1 /* for edma_alloc_slot() */ | ||
108 | #define EDMA_CONT_PARAMS_ANY 1001 | ||
109 | #define EDMA_CONT_PARAMS_FIXED_EXACT 1002 | ||
110 | #define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003 | ||
111 | |||
112 | #define EDMA_MAX_CC 2 | ||
113 | |||
114 | /* alloc/free DMA channels and their dedicated parameter RAM slots */ | ||
115 | int edma_alloc_channel(int channel, | ||
116 | void (*callback)(unsigned channel, u16 ch_status, void *data), | ||
117 | void *data, enum dma_event_q); | ||
118 | void edma_free_channel(unsigned channel); | ||
119 | |||
120 | /* alloc/free parameter RAM slots */ | ||
121 | int edma_alloc_slot(unsigned ctlr, int slot); | ||
122 | void edma_free_slot(unsigned slot); | ||
123 | |||
124 | /* alloc/free a set of contiguous parameter RAM slots */ | ||
125 | int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count); | ||
126 | int edma_free_cont_slots(unsigned slot, int count); | ||
127 | |||
128 | /* calls that operate on part of a parameter RAM slot */ | ||
129 | void edma_set_src(unsigned slot, dma_addr_t src_port, | ||
130 | enum address_mode mode, enum fifo_width); | ||
131 | void edma_set_dest(unsigned slot, dma_addr_t dest_port, | ||
132 | enum address_mode mode, enum fifo_width); | ||
133 | dma_addr_t edma_get_position(unsigned slot, bool dst); | ||
134 | void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx); | ||
135 | void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx); | ||
136 | void edma_set_transfer_params(unsigned slot, u16 acnt, u16 bcnt, u16 ccnt, | ||
137 | u16 bcnt_rld, enum sync_dimension sync_mode); | ||
138 | void edma_link(unsigned from, unsigned to); | ||
139 | void edma_unlink(unsigned from); | ||
140 | |||
141 | /* calls that operate on an entire parameter RAM slot */ | ||
142 | void edma_write_slot(unsigned slot, const struct edmacc_param *params); | ||
143 | void edma_read_slot(unsigned slot, struct edmacc_param *params); | ||
144 | |||
145 | /* channel control operations */ | ||
146 | int edma_start(unsigned channel); | ||
147 | void edma_stop(unsigned channel); | ||
148 | void edma_clean_channel(unsigned channel); | ||
149 | void edma_clear_event(unsigned channel); | ||
150 | void edma_pause(unsigned channel); | ||
151 | void edma_resume(unsigned channel); | ||
152 | |||
153 | void edma_assign_channel_eventq(unsigned channel, enum dma_event_q eventq_no); | ||
154 | |||
155 | struct edma_rsv_info { | 56 | struct edma_rsv_info { |
156 | 57 | ||
157 | const s16 (*rsv_chans)[2]; | 58 | const s16 (*rsv_chans)[2]; |
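
With the private channel, slot and control calls above gone, client drivers are expected to reach the eDMA engine through the generic dmaengine API instead. A minimal sketch of a slave RX transfer under that assumption; the "rx" channel name, the FIFO address and the trimmed error handling are illustrative only:

    #include <linux/dmaengine.h>
    #include <linux/dma-mapping.h>

    /* Hypothetical client: start one device-to-memory transfer on the
     * channel named "rx" in the client's dma-names property.
     */
    static int example_start_rx(struct device *dev, dma_addr_t buf,
    			    size_t len, dma_addr_t fifo)
    {
    	struct dma_slave_config cfg = {
    		.src_addr = fifo,
    		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
    		.src_maxburst = 1,
    	};
    	struct dma_async_tx_descriptor *desc;
    	struct dma_chan *chan;

    	chan = dma_request_slave_channel(dev, "rx");
    	if (!chan)
    		return -ENODEV;

    	dmaengine_slave_config(chan, &cfg);

    	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
    					   DMA_PREP_INTERRUPT);
    	if (!desc) {
    		dma_release_channel(chan);
    		return -EIO;
    	}

    	dmaengine_submit(desc);
    	dma_async_issue_pending(chan);
    	return 0;
    }

Because DMA routers are transparent to dmaengine clients, the same calls work whether the channel is requested directly from the edma node or routed through the crossbar.
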
@@ -170,10 +71,11 @@ struct edma_soc_info { | |||
170 | /* Resource reservation for other cores */ | 71 | /* Resource reservation for other cores */ |
171 | struct edma_rsv_info *rsv; | 72 | struct edma_rsv_info *rsv; |
172 | 73 | ||
74 | /* List of channels allocated for memcpy, terminated with -1 */ | ||
75 | s16 *memcpy_channels; | ||
76 | |||
173 | s8 (*queue_priority_mapping)[2]; | 77 | s8 (*queue_priority_mapping)[2]; |
174 | const s16 (*xbar_chans)[2]; | 78 | const s16 (*xbar_chans)[2]; |
175 | }; | 79 | }; |
176 | 80 | ||
177 | int edma_trigger_channel(unsigned); | ||
178 | |||
179 | #endif | 81 | #endif |
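
The new memcpy_channels field lets board code dedicate specific channels to memcpy duty. A minimal sketch of platform data populating it, with hypothetical channel numbers; per the comment in the hunk above, the list must be terminated with -1:

    #include <linux/platform_data/edma.h>

    /* Hypothetical board support: reserve channels 20 and 21 for memcpy.
     * The -1 terminator is required by the memcpy_channels contract.
     */
    static s16 board_memcpy_channels[] = { 20, 21, -1 };

    static struct edma_soc_info board_edma_pdata = {
    	.memcpy_channels = board_memcpy_channels,
    };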