 59 files changed, 4031 insertions(+), 984 deletions(-)
diff --git a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
index e577196a12c0..4659fd952301 100644
--- a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
+++ b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
@@ -47,6 +47,7 @@ The full ID of peripheral types can be found below.
 	20	ASRC
 	21	ESAI
 	22	SSI Dual FIFO	(needs firmware ver >= 2)
+	23	Shared ASRC
 
 The third cell specifies the transfer priority as below.
 
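For orientation, a client selects the new type through the middle cell of the
three-cell SDMA specifier (request ID, peripheral type, priority). A
hypothetical client node, with the node name and request IDs invented for
illustration:

	asrc: asrc@2034000 {
		...
		/* second cell: peripheral type 23 = Shared ASRC */
		dmas = <&sdma 17 23 1>, <&sdma 18 23 1>;
		dma-names = "rx", "tx";
	};
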
diff --git a/Documentation/devicetree/bindings/dma/mpc512x-dma.txt b/Documentation/devicetree/bindings/dma/mpc512x-dma.txt
new file mode 100644
index 000000000000..a6511df165c5
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/mpc512x-dma.txt
@@ -0,0 +1,29 @@
+* Freescale MPC512x and MPC8308 DMA Controller
+
+The DMA controller in Freescale MPC512x and MPC8308 SoCs can move
+blocks of memory contents between memory and peripherals or
+from memory to memory.
+
+Refer to "Generic DMA Controller and DMA request bindings" in
+the dma/dma.txt file for a more detailed description of the binding.
+
+Required properties:
+- compatible: should be "fsl,mpc5121-dma" or "fsl,mpc8308-dma";
+- reg: should contain the DMA controller registers location and length;
+- interrupts: the interrupt for the DMA controller; the syntax of interrupt
+  client nodes is described in the interrupt-controller/interrupts.txt file.
+- #dma-cells: the length of the DMA specifier, must be <1>.
+  Each channel of this DMA controller has a peripheral request line;
+  the assignment is fixed in hardware. This one cell in the dmas
+  property of a client device represents the channel number.
+
+Example:
+
+	dma0: dma@14000 {
+		compatible = "fsl,mpc5121-dma";
+		reg = <0x14000 0x1800>;
+		interrupts = <65 0x8>;
+		#dma-cells = <1>;
+	};
+
+DMA clients must use the format described in the dma/dma.txt file.
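A client-side sketch under this one-cell scheme (the device node and channel
number below are hypothetical, chosen only to illustrate the format):

	sdhc@1500 {
		...
		dmas = <&dma0 8>;
		dma-names = "rx-tx";
	};
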
diff --git a/Documentation/devicetree/bindings/dma/nbpfaxi.txt b/Documentation/devicetree/bindings/dma/nbpfaxi.txt
new file mode 100644
index 000000000000..d5e2522b9ec1
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/nbpfaxi.txt
@@ -0,0 +1,61 @@
+* Renesas "Type-AXI" NBPFAXI* DMA controllers
+
+* DMA controller
+
+Required properties:
+
+- compatible: must be one of
+		"renesas,nbpfaxi64dmac1b4"
+		"renesas,nbpfaxi64dmac1b8"
+		"renesas,nbpfaxi64dmac1b16"
+		"renesas,nbpfaxi64dmac4b4"
+		"renesas,nbpfaxi64dmac4b8"
+		"renesas,nbpfaxi64dmac4b16"
+		"renesas,nbpfaxi64dmac8b4"
+		"renesas,nbpfaxi64dmac8b8"
+		"renesas,nbpfaxi64dmac8b16"
+- #dma-cells: must be 2: the first cell is the terminal number to which this
+	      slave is connected; the second cell is flags, a bitmask
+	      with the following bits defined:
+
+#define NBPF_SLAVE_RQ_HIGH	1
+#define NBPF_SLAVE_RQ_LOW	2
+#define NBPF_SLAVE_RQ_LEVEL	4
+
+Optional properties:
+
+dma-channels and dma-requests can be used as described in dma.txt, although
+they won't be used; this information is derived from the compatible string.
+
+Example:
+
+	dma: dma-controller@48000000 {
+		compatible = "renesas,nbpfaxi64dmac8b4";
+		reg = <0x48000000 0x400>;
+		interrupts = <0 12 0x4
+			      0 13 0x4
+			      0 14 0x4
+			      0 15 0x4
+			      0 16 0x4
+			      0 17 0x4
+			      0 18 0x4
+			      0 19 0x4>;
+		#dma-cells = <2>;
+		dma-channels = <8>;
+		dma-requests = <8>;
+	};
+
+* DMA client
+
+Required properties:
+
+dmas and dma-names are required, as described in dma.txt.
+
+Example:
+
+#include <dt-bindings/dma/nbpfaxi.h>
+
+...
+	dmas = <&dma 0 (NBPF_SLAVE_RQ_HIGH | NBPF_SLAVE_RQ_LEVEL)
+		&dma 1 (NBPF_SLAVE_RQ_HIGH | NBPF_SLAVE_RQ_LEVEL)>;
+	dma-names = "rx", "tx";
diff --git a/Documentation/devicetree/bindings/dma/rcar-audmapp.txt b/Documentation/devicetree/bindings/dma/rcar-audmapp.txt
new file mode 100644
index 000000000000..9f1d750d76de
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/rcar-audmapp.txt
@@ -0,0 +1,29 @@
+* R-Car Audio DMAC peri peri Device Tree bindings
+
+Required properties:
+- compatible:	should be "renesas,rcar-audmapp"
+- #dma-cells:	should be <1>, see "dmas" property below
+
+Example:
+	audmapp: audio-dma-pp@0xec740000 {
+		compatible = "renesas,rcar-audmapp";
+		#dma-cells = <1>;
+
+		reg = <0 0xec740000 0 0x200>;
+	};
+
+
+* DMA client
+
+Required properties:
+- dmas:	a list of <[DMA multiplexer phandle] [SRS/DRS value]> pairs,
+	where SRS/DRS values are fixed handles, specified in the SoC
+	manual as the value that would be written into the PDMACHCR.
+- dma-names: a list of DMA channel names, one per "dmas" entry
+
+Example:
+
+	dmas = <&audmapp 0x2d00
+		&audmapp 0x3700>;
+	dma-names = "src0_ssiu0",
+		    "dvc0_ssiu0";
diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
new file mode 100644
index 000000000000..df0f48bcf75a
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
@@ -0,0 +1,95 @@
+* Renesas R-Car DMA Controller Device Tree bindings
+
+Renesas R-Car Generation 2 SoCs have multiple multi-channel DMA
+controller instances named DMAC, each capable of serving multiple clients.
+Channels can be dedicated to specific clients or shared between a large
+number of clients.
+
+Each DMA client is connected to one dedicated port of the DMAC, identified by
+an 8-bit port number called the MID/RID. A DMA controller can thus serve up to
+256 clients in total. When the number of hardware channels is lower than the
+number of clients to be served, channels must be shared between multiple DMA
+clients. The association of DMA clients to DMAC channels is fully dynamic and
+not described in these device tree bindings.
+
+Required Properties:
+
+- compatible: must contain "renesas,rcar-dmac"
+
+- reg: base address and length of the registers block for the DMAC
+
+- interrupts: interrupt specifiers for the DMAC, one for each entry in
+  interrupt-names.
+- interrupt-names: must contain one entry named "error" for the error
+  interrupt, plus one entry per channel, named "ch%u", where %u is the
+  channel number ranging from zero to the number of channels minus one.
+
+- clocks: a list of phandle + clock-specifier pairs, one for each entry
+  in clock-names.
+- clock-names: must contain "fck" for the functional clock.
+
+- #dma-cells: must be <1>, the cell specifies the MID/RID of the DMAC port
+  connected to the DMA client
+- dma-channels: number of DMA channels
+
+Example: R8A7790 (R-Car H2) SYS-DMACs
+
+	dmac0: dma-controller@e6700000 {
+		compatible = "renesas,rcar-dmac";
+		reg = <0 0xe6700000 0 0x20000>;
+		interrupts = <0 197 IRQ_TYPE_LEVEL_HIGH
+			      0 200 IRQ_TYPE_LEVEL_HIGH
+			      0 201 IRQ_TYPE_LEVEL_HIGH
+			      0 202 IRQ_TYPE_LEVEL_HIGH
+			      0 203 IRQ_TYPE_LEVEL_HIGH
+			      0 204 IRQ_TYPE_LEVEL_HIGH
+			      0 205 IRQ_TYPE_LEVEL_HIGH
+			      0 206 IRQ_TYPE_LEVEL_HIGH
+			      0 207 IRQ_TYPE_LEVEL_HIGH
+			      0 208 IRQ_TYPE_LEVEL_HIGH
+			      0 209 IRQ_TYPE_LEVEL_HIGH
+			      0 210 IRQ_TYPE_LEVEL_HIGH
+			      0 211 IRQ_TYPE_LEVEL_HIGH
+			      0 212 IRQ_TYPE_LEVEL_HIGH
+			      0 213 IRQ_TYPE_LEVEL_HIGH
+			      0 214 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "error",
+				  "ch0", "ch1", "ch2", "ch3",
+				  "ch4", "ch5", "ch6", "ch7",
+				  "ch8", "ch9", "ch10", "ch11",
+				  "ch12", "ch13", "ch14";
+		clocks = <&mstp2_clks R8A7790_CLK_SYS_DMAC0>;
+		clock-names = "fck";
+		#dma-cells = <1>;
+		dma-channels = <15>;
+	};
+
+	dmac1: dma-controller@e6720000 {
+		compatible = "renesas,rcar-dmac";
+		reg = <0 0xe6720000 0 0x20000>;
+		interrupts = <0 220 IRQ_TYPE_LEVEL_HIGH
+			      0 216 IRQ_TYPE_LEVEL_HIGH
+			      0 217 IRQ_TYPE_LEVEL_HIGH
+			      0 218 IRQ_TYPE_LEVEL_HIGH
+			      0 219 IRQ_TYPE_LEVEL_HIGH
+			      0 308 IRQ_TYPE_LEVEL_HIGH
+			      0 309 IRQ_TYPE_LEVEL_HIGH
+			      0 310 IRQ_TYPE_LEVEL_HIGH
+			      0 311 IRQ_TYPE_LEVEL_HIGH
+			      0 312 IRQ_TYPE_LEVEL_HIGH
+			      0 313 IRQ_TYPE_LEVEL_HIGH
+			      0 314 IRQ_TYPE_LEVEL_HIGH
+			      0 315 IRQ_TYPE_LEVEL_HIGH
+			      0 316 IRQ_TYPE_LEVEL_HIGH
+			      0 317 IRQ_TYPE_LEVEL_HIGH
+			      0 318 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "error",
+				  "ch0", "ch1", "ch2", "ch3",
+				  "ch4", "ch5", "ch6", "ch7",
+				  "ch8", "ch9", "ch10", "ch11",
+				  "ch12", "ch13", "ch14";
+		clocks = <&mstp2_clks R8A7790_CLK_SYS_DMAC1>;
+		clock-names = "fck";
+		#dma-cells = <1>;
+		dma-channels = <15>;
+	};
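The binding stops short of a client example; a minimal sketch follows, with
MID/RID values chosen for illustration rather than taken from a SoC manual:

	scifa0: serial@e6c40000 {
		...
		dmas = <&dmac0 0x21>, <&dmac0 0x22>;
		dma-names = "tx", "rx";
	};
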
diff --git a/Documentation/devicetree/bindings/dma/ste-dma40.txt b/Documentation/devicetree/bindings/dma/ste-dma40.txt
index 1f5729f10621..95800ab37bb0 100644
--- a/Documentation/devicetree/bindings/dma/ste-dma40.txt
+++ b/Documentation/devicetree/bindings/dma/ste-dma40.txt
@@ -35,9 +35,11 @@ Required properties:
 
 Each dmas request consists of 4 cells:
   1. A phandle pointing to the DMA controller
-  2. Device Type
+  2. Device signal number, the signal line for single and burst requests
+     connected from the device to the DMA40 engine
   3. The DMA request line number (only when 'use fixed channel' is set)
-  4. A 32bit mask specifying; mode, direction and endianness [NB: This list will grow]
+  4. A 32bit mask specifying mode, direction and endianness
+     [NB: This list will grow]
   0x00000001: Mode:
      Logical channel when unset
      Physical channel when set
@@ -54,6 +56,74 @@ Each dmas request consists of 4 cells:
      Normal priority when unset
      High priority when set
 
+Existing signal numbers for the DB8500 ASIC. Unless specified otherwise, the
+signals are bidirectional, i.e. the same for RX and TX operations:
+
+0: SPI controller 0
+1: SD/MMC controller 0 (unused)
+2: SD/MMC controller 1 (unused)
+3: SD/MMC controller 2 (unused)
+4: I2C port 1
+5: I2C port 3
+6: I2C port 2
+7: I2C port 4
+8: Synchronous Serial Port SSP0
+9: Synchronous Serial Port SSP1
+10: Multi-Channel Display Engine MCDE RX
+11: UART port 2
+12: UART port 1
+13: UART port 0
+14: Multirate Serial Port MSP2
+15: I2C port 0
+16: USB OTG in/out endpoints 7 & 15
+17: USB OTG in/out endpoints 6 & 14
+18: USB OTG in/out endpoints 5 & 13
+19: USB OTG in/out endpoints 4 & 12
+20: SLIMbus or HSI channel 0
+21: SLIMbus or HSI channel 1
+22: SLIMbus or HSI channel 2
+23: SLIMbus or HSI channel 3
+24: Multimedia DSP SXA0
+25: Multimedia DSP SXA1
+26: Multimedia DSP SXA2
+27: Multimedia DSP SXA3
+28: SD/MMC controller 2
+29: SD/MMC controller 0
+30: MSP port 1 on DB8500 v1, MSP port 3 on DB8500 v2
+31: MSP port 0 or SLIMbus channel 0
+32: SD/MMC controller 1
+33: SPI controller 2
+34: I2C port 3 RX2/TX2
+35: SPI controller 1
+36: USB OTG in/out endpoints 3 & 11
+37: USB OTG in/out endpoints 2 & 10
+38: USB OTG in/out endpoints 1 & 9
+39: USB OTG in/out endpoint 8
+40: SPI controller 3
+41: SD/MMC controller 3
+42: SD/MMC controller 4
+43: SD/MMC controller 5
+44: Multimedia DSP SXA4
+45: Multimedia DSP SXA5
+46: SLIMbus channel 8 or Multimedia DSP SXA6
+47: SLIMbus channel 9 or Multimedia DSP SXA7
+48: Crypto Accelerator 1
+49: Crypto Accelerator 1 TX or Hash Accelerator 1 TX
+50: Hash Accelerator 1 TX
+51: memcpy TX (to be used by the DMA driver for memcpy operations)
+52: SLIMbus or HSI channel 4
+53: SLIMbus or HSI channel 5
+54: SLIMbus or HSI channel 6
+55: SLIMbus or HSI channel 7
+56: memcpy (to be used by the DMA driver for memcpy operations)
+57: memcpy (to be used by the DMA driver for memcpy operations)
+58: memcpy (to be used by the DMA driver for memcpy operations)
+59: memcpy (to be used by the DMA driver for memcpy operations)
+60: memcpy (to be used by the DMA driver for memcpy operations)
+61: Crypto Accelerator 0
+62: Crypto Accelerator 0 TX or Hash Accelerator 0 TX
+63: Hash Accelerator 0 TX
+
 Example:
 
 uart@80120000 {
diff --git a/Documentation/devicetree/bindings/dma/sun6i-dma.txt b/Documentation/devicetree/bindings/dma/sun6i-dma.txt
new file mode 100644
index 000000000000..3e145c1675b1
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/sun6i-dma.txt
@@ -0,0 +1,45 @@
+Allwinner A31 DMA Controller
+
+This driver follows the generic DMA bindings defined in dma.txt.
+
+Required properties:
+
+- compatible:	Must be "allwinner,sun6i-a31-dma"
+- reg:		Should contain the registers base address and length
+- interrupts:	Should contain a reference to the interrupt used by this device
+- clocks:	Should contain a reference to the parent AHB clock
+- resets:	Should contain a reference to the reset controller asserting
+		this device in reset
+- #dma-cells:	Should be 1, a single cell holding a line request number
+
+Example:
+	dma: dma-controller@01c02000 {
+		compatible = "allwinner,sun6i-a31-dma";
+		reg = <0x01c02000 0x1000>;
+		interrupts = <0 50 4>;
+		clocks = <&ahb1_gates 6>;
+		resets = <&ahb1_rst 6>;
+		#dma-cells = <1>;
+	};
+
+Clients:
+
+DMA clients connected to the A31 DMA controller must use the format
+described in the dma.txt file, using a two-cell specifier for each
+channel: a phandle plus one integer cell.
+The two cells in order are:
+
+1. A phandle pointing to the DMA controller.
+2. The port ID as specified in the datasheet
+
+Example:
+	spi2: spi@01c6a000 {
+		compatible = "allwinner,sun6i-a31-spi";
+		reg = <0x01c6a000 0x1000>;
+		interrupts = <0 67 4>;
+		clocks = <&ahb1_gates 22>, <&spi2_clk>;
+		clock-names = "ahb", "mod";
+		dmas = <&dma 25>, <&dma 25>;
+		dma-names = "rx", "tx";
+		resets = <&ahb1_rst 22>;
+	};
diff --git a/Documentation/dmaengine.txt b/Documentation/dmaengine.txt
index 879b6e31e2da..573e28ce9751 100644
--- a/Documentation/dmaengine.txt
+++ b/Documentation/dmaengine.txt
@@ -84,31 +84,32 @@ The slave DMA usage consists of following steps:
    the given transaction.
 
    Interface:
-	struct dma_async_tx_descriptor *(*chan->device->device_prep_slave_sg)(
+	struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
 		struct dma_chan *chan, struct scatterlist *sgl,
 		unsigned int sg_len, enum dma_data_direction direction,
 		unsigned long flags);
 
-	struct dma_async_tx_descriptor *(*chan->device->device_prep_dma_cyclic)(
+	struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
 		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 		size_t period_len, enum dma_data_direction direction);
 
-	struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
+	struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
 		struct dma_chan *chan, struct dma_interleaved_template *xt,
 		unsigned long flags);
 
    The peripheral driver is expected to have mapped the scatterlist for
    the DMA operation prior to calling device_prep_slave_sg, and must
    keep the scatterlist mapped until the DMA operation has completed.
-   The scatterlist must be mapped using the DMA struct device. So,
-   normal setup should look like this:
+   The scatterlist must be mapped using the DMA struct device.
+   If a mapping needs to be synchronized later, dma_sync_*_for_*() must be
+   called using the DMA struct device, too.
+   So, normal setup should look like this:
 
 	nr_sg = dma_map_sg(chan->device->dev, sgl, sg_len);
 	if (nr_sg == 0)
 		/* error */
 
-	desc = chan->device->device_prep_slave_sg(chan, sgl, nr_sg,
-			direction, flags);
+	desc = dmaengine_prep_slave_sg(chan, sgl, nr_sg, direction, flags);
 
    Once a descriptor has been obtained, the callback information can be
    added and the descriptor must then be submitted. Some DMA engine
@@ -188,7 +189,7 @@ Further APIs:
    description of this API.
 
    This can be used in conjunction with dma_async_is_complete() and
-   the cookie returned from 'descriptor->submit()' to check for
+   the cookie returned from dmaengine_submit() to check for
    completion of a specific DMA transaction.
 
 Note:
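Tying the renamed wrappers together, a minimal end-to-end slave transfer looks
roughly like this. This is an illustrative sketch, not text from the patch:
error paths are abbreviated, and my_callback/my_data are placeholders.

	struct dma_async_tx_descriptor *desc;
	enum dma_status status;
	dma_cookie_t cookie;
	int nr_sg;

	/* map with the DMA struct device, as required above */
	nr_sg = dma_map_sg(chan->device->dev, sgl, sg_len, DMA_TO_DEVICE);
	if (!nr_sg)
		return -ENOMEM;

	desc = dmaengine_prep_slave_sg(chan, sgl, nr_sg, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		goto unmap;

	desc->callback = my_callback;
	desc->callback_param = my_data;
	cookie = dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	/* later, e.g. from a timeout path, pair the cookie with a query */
	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
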
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
index 485be42519b9..88099175fc56 100644
--- a/arch/arm/common/edma.c
+++ b/arch/arm/common/edma.c
@@ -1414,6 +1414,34 @@ void edma_clear_event(unsigned channel)
 }
 EXPORT_SYMBOL(edma_clear_event);
 
+/*
+ * edma_assign_channel_eventq - move given channel to desired eventq
+ * Arguments:
+ *	channel - channel number
+ *	eventq_no - queue to move the channel
+ *
+ * Can be used to move a channel to a selected event queue.
+ */
+void edma_assign_channel_eventq(unsigned channel, enum dma_event_q eventq_no)
+{
+	unsigned ctlr;
+
+	ctlr = EDMA_CTLR(channel);
+	channel = EDMA_CHAN_SLOT(channel);
+
+	if (channel >= edma_cc[ctlr]->num_channels)
+		return;
+
+	/* default to low priority queue */
+	if (eventq_no == EVENTQ_DEFAULT)
+		eventq_no = edma_cc[ctlr]->default_queue;
+	if (eventq_no >= edma_cc[ctlr]->num_tc)
+		return;
+
+	map_dmach_queue(ctlr, channel, eventq_no);
+}
+EXPORT_SYMBOL(edma_assign_channel_eventq);
+
 static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
 			      struct edma *edma_cc)
 {
@@ -1470,7 +1498,8 @@ static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
 		queue_priority_map[i][1] = -1;
 
 	pdata->queue_priority_mapping = queue_priority_map;
-	pdata->default_queue = 0;
+	/* Default queue has the lowest priority */
+	pdata->default_queue = i - 1;
 
 	return 0;
 }
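For context, a caller would typically pair the new helper with
edma_alloc_channel(); a hypothetical sequence (my_callback/my_data are
placeholders, not taken from this patch):

	int ch = edma_alloc_channel(EDMA_CHANNEL_ANY, my_callback, my_data,
				    EVENTQ_DEFAULT);
	if (ch >= 0)
		/* move off the default queue, which now has lowest priority */
		edma_assign_channel_eventq(ch, EVENTQ_1);
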
diff --git a/arch/powerpc/boot/dts/mpc5121.dtsi b/arch/powerpc/boot/dts/mpc5121.dtsi
index 2c0e1552d20b..7f9d14f5c4da 100644
--- a/arch/powerpc/boot/dts/mpc5121.dtsi
+++ b/arch/powerpc/boot/dts/mpc5121.dtsi
@@ -498,6 +498,7 @@
 			compatible = "fsl,mpc5121-dma";
 			reg = <0x14000 0x1800>;
 			interrupts = <65 0x8>;
+			#dma-cells = <1>;
 		};
 	};
 
diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c
index b22565623142..afde2a7d3eb3 100644
--- a/arch/sh/drivers/dma/dma-sh.c
+++ b/arch/sh/drivers/dma/dma-sh.c
@@ -25,7 +25,7 @@
  * Define the default configuration for dual address memory-memory transfer.
  * The 0x400 value represents auto-request, external->external.
  */
-#define RS_DUAL	(DM_INC | SM_INC | 0x400 | TS_INDEX2VAL(XMIT_SZ_32BIT))
+#define RS_DUAL	(DM_INC | SM_INC | RS_AUTO | TS_INDEX2VAL(XMIT_SZ_32BIT))
 
 static unsigned long dma_find_base(unsigned int chan)
 {
diff --git a/arch/sh/include/asm/dma-register.h b/arch/sh/include/asm/dma-register.h
index 51cd78feacff..c757b47e6b64 100644
--- a/arch/sh/include/asm/dma-register.h
+++ b/arch/sh/include/asm/dma-register.h
@@ -13,17 +13,17 @@
 #ifndef DMA_REGISTER_H
 #define DMA_REGISTER_H
 
-/* DMA register */
-#define SAR	0x00
-#define DAR	0x04
-#define TCR	0x08
-#define CHCR	0x0C
-#define DMAOR	0x40
+/* DMA registers */
+#define SAR	0x00	/* Source Address Register */
+#define DAR	0x04	/* Destination Address Register */
+#define TCR	0x08	/* Transfer Count Register */
+#define CHCR	0x0C	/* Channel Control Register */
+#define DMAOR	0x40	/* DMA Operation Register */
 
 /* DMAOR definitions */
-#define DMAOR_AE	0x00000004
+#define DMAOR_AE	0x00000004	/* Address Error Flag */
 #define DMAOR_NMIF	0x00000002
-#define DMAOR_DME	0x00000001
+#define DMAOR_DME	0x00000001	/* DMA Master Enable */
 
 /* Definitions for the SuperH DMAC */
 #define REQ_L	0x00000000
@@ -34,18 +34,20 @@
 #define ACK_W	0x00020000
 #define ACK_H	0x00000000
 #define ACK_L	0x00010000
-#define DM_INC	0x00004000
-#define DM_DEC	0x00008000
-#define DM_FIX	0x0000c000
-#define SM_INC	0x00001000
-#define SM_DEC	0x00002000
-#define SM_FIX	0x00003000
+#define DM_INC	0x00004000	/* Destination addresses are incremented */
+#define DM_DEC	0x00008000	/* Destination addresses are decremented */
+#define DM_FIX	0x0000c000	/* Destination address is fixed */
+#define SM_INC	0x00001000	/* Source addresses are incremented */
+#define SM_DEC	0x00002000	/* Source addresses are decremented */
+#define SM_FIX	0x00003000	/* Source address is fixed */
 #define RS_IN	0x00000200
 #define RS_OUT	0x00000300
+#define RS_AUTO	0x00000400	/* Auto Request */
+#define RS_ERS	0x00000800	/* DMA extended resource selector */
 #define TS_BLK	0x00000040
 #define TM_BUR	0x00000020
-#define CHCR_DE	0x00000001
-#define CHCR_TE	0x00000002
-#define CHCR_IE	0x00000004
+#define CHCR_DE	0x00000001	/* DMA Enable */
+#define CHCR_TE	0x00000002	/* Transfer End Flag */
+#define CHCR_IE	0x00000004	/* Interrupt Enable */
 
 #endif
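With the new mnemonics, a CHCR value can be assembled without magic numbers,
which is exactly what the setup-sh77xx conversions below do. For instance, a
hypothetical 8-bit peripheral-TX configuration:

	#define MY_TX_CHCR	(DM_FIX | SM_INC | RS_ERS | \
				 TS_INDEX2VAL(XMIT_SZ_8BIT))
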
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
index 57f83a92a505..7aa733307afc 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -30,62 +30,62 @@ static const struct sh_dmae_slave_config sh7722_dmae_slaves[] = {
 	{
 		.slave_id	= SHDMA_SLAVE_SCIF0_TX,
 		.addr		= 0xffe0000c,
-		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+		.chcr		= DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x21,
 	}, {
 		.slave_id	= SHDMA_SLAVE_SCIF0_RX,
 		.addr		= 0xffe00014,
-		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+		.chcr		= DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x22,
 	}, {
 		.slave_id	= SHDMA_SLAVE_SCIF1_TX,
 		.addr		= 0xffe1000c,
-		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+		.chcr		= DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x25,
 	}, {
 		.slave_id	= SHDMA_SLAVE_SCIF1_RX,
 		.addr		= 0xffe10014,
-		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+		.chcr		= DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x26,
 	}, {
 		.slave_id	= SHDMA_SLAVE_SCIF2_TX,
 		.addr		= 0xffe2000c,
-		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+		.chcr		= DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x29,
 	}, {
 		.slave_id	= SHDMA_SLAVE_SCIF2_RX,
 		.addr		= 0xffe20014,
-		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+		.chcr		= DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x2a,
 	}, {
 		.slave_id	= SHDMA_SLAVE_SIUA_TX,
 		.addr		= 0xa454c098,
-		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+		.chcr		= DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
 		.mid_rid	= 0xb1,
 	}, {
 		.slave_id	= SHDMA_SLAVE_SIUA_RX,
 		.addr		= 0xa454c090,
-		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+		.chcr		= DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
 		.mid_rid	= 0xb2,
 	}, {
 		.slave_id	= SHDMA_SLAVE_SIUB_TX,
 		.addr		= 0xa454c09c,
-		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+		.chcr		= DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
 		.mid_rid	= 0xb5,
 	}, {
 		.slave_id	= SHDMA_SLAVE_SIUB_RX,
 		.addr		= 0xa454c094,
-		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+		.chcr		= DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
 		.mid_rid	= 0xb6,
 	}, {
 		.slave_id	= SHDMA_SLAVE_SDHI0_TX,
 		.addr		= 0x04ce0030,
-		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+		.chcr		= DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_16BIT),
 		.mid_rid	= 0xc1,
 	}, {
 		.slave_id	= SHDMA_SLAVE_SDHI0_RX,
 		.addr		= 0x04ce0030,
-		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+		.chcr		= DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_16BIT),
 		.mid_rid	= 0xc2,
 	},
 };
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
index b9e84b1d3aa7..ea5780b3c7f6 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
@@ -36,122 +36,122 @@ static const struct sh_dmae_slave_config sh7724_dmae_slaves[] = {
 	{
 		.slave_id	= SHDMA_SLAVE_SCIF0_TX,
 		.addr		= 0xffe0000c,
-		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+		.chcr		= DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x21,
 	}, {
 		.slave_id	= SHDMA_SLAVE_SCIF0_RX,
 		.addr		= 0xffe00014,
-		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+		.chcr		= DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x22,
 	}, {
 		.slave_id	= SHDMA_SLAVE_SCIF1_TX,
 		.addr		= 0xffe1000c,
-		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+		.chcr		= DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x25,
 	}, {
 		.slave_id	= SHDMA_SLAVE_SCIF1_RX,
 		.addr		= 0xffe10014,
-		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+		.chcr		= DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x26,
 	}, {
 		.slave_id	= SHDMA_SLAVE_SCIF2_TX,
 		.addr		= 0xffe2000c,
-		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+		.chcr		= DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x29,
 	}, {
 		.slave_id	= SHDMA_SLAVE_SCIF2_RX,
 		.addr		= 0xffe20014,
-		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+		.chcr		= DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x2a,
 	}, {
 		.slave_id	= SHDMA_SLAVE_SCIF3_TX,
 		.addr		= 0xa4e30020,
-		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+		.chcr		= DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x2d,
 	}, {
 		.slave_id	= SHDMA_SLAVE_SCIF3_RX,
 		.addr		= 0xa4e30024,
-		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+		.chcr		= DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x2e,
 	}, {
 		.slave_id	= SHDMA_SLAVE_SCIF4_TX,
 		.addr		= 0xa4e40020,
-		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+		.chcr		= DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x31,
 	}, {
 		.slave_id	= SHDMA_SLAVE_SCIF4_RX,
 		.addr		= 0xa4e40024,
-		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+		.chcr		= DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x32,
 	}, {
 		.slave_id	= SHDMA_SLAVE_SCIF5_TX,
 		.addr		= 0xa4e50020,
-		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+		.chcr		= DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x35,
 	}, {
 		.slave_id	= SHDMA_SLAVE_SCIF5_RX,
 		.addr		= 0xa4e50024,
-		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+		.chcr		= DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x36,
 	}, {
 		.slave_id	= SHDMA_SLAVE_USB0D0_TX,
 		.addr		= 0xA4D80100,
-		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+		.chcr		= DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
 		.mid_rid	= 0x73,
 	}, {
 		.slave_id	= SHDMA_SLAVE_USB0D0_RX,
 		.addr		= 0xA4D80100,
-		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+		.chcr		= DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
 		.mid_rid	= 0x73,
 	}, {
 		.slave_id	= SHDMA_SLAVE_USB0D1_TX,
 		.addr		= 0xA4D80120,
-		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+		.chcr		= DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
 		.mid_rid	= 0x77,
 	}, {
 		.slave_id	= SHDMA_SLAVE_USB0D1_RX,
 		.addr		= 0xA4D80120,
-		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+		.chcr		= DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
 		.mid_rid	= 0x77,
 	}, {
 		.slave_id	= SHDMA_SLAVE_USB1D0_TX,
 		.addr		= 0xA4D90100,
-		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+		.chcr		= DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
 		.mid_rid	= 0xab,
 	}, {
 		.slave_id	= SHDMA_SLAVE_USB1D0_RX,
 		.addr		= 0xA4D90100,
-		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+		.chcr		= DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
 		.mid_rid	= 0xab,
 	}, {
 		.slave_id	= SHDMA_SLAVE_USB1D1_TX,
 		.addr		= 0xA4D90120,
-		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+		.chcr		= DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
 		.mid_rid	= 0xaf,
 	}, {
 		.slave_id	= SHDMA_SLAVE_USB1D1_RX,
 		.addr		= 0xA4D90120,
-		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+		.chcr		= DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT),
 		.mid_rid	= 0xaf,
 	}, {
 		.slave_id	= SHDMA_SLAVE_SDHI0_TX,
 		.addr		= 0x04ce0030,
-		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+		.chcr		= DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_16BIT),
 		.mid_rid	= 0xc1,
 	}, {
 		.slave_id	= SHDMA_SLAVE_SDHI0_RX,
 		.addr		= 0x04ce0030,
-		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+		.chcr		= DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_16BIT),
 		.mid_rid	= 0xc2,
 	}, {
 		.slave_id	= SHDMA_SLAVE_SDHI1_TX,
 		.addr		= 0x04cf0030,
-		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+		.chcr		= DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_16BIT),
 		.mid_rid	= 0xc9,
 	}, {
 		.slave_id	= SHDMA_SLAVE_SDHI1_RX,
 		.addr		= 0x04cf0030,
-		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+		.chcr		= DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_16BIT),
 		.mid_rid	= 0xca,
 	},
 };
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
index 7b24ec4b409a..18bcd70cd813 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
@@ -123,28 +123,28 @@ static const struct sh_dmae_slave_config sh7757_dmae0_slaves[] = {
 	{
 		.slave_id	= SHDMA_SLAVE_SDHI_TX,
 		.addr		= 0x1fe50030,
-		.chcr		= SM_INC | 0x800 | 0x40000000 |
+		.chcr		= SM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_16BIT),
 		.mid_rid	= 0xc5,
 	},
 	{
 		.slave_id	= SHDMA_SLAVE_SDHI_RX,
 		.addr		= 0x1fe50030,
-		.chcr		= DM_INC | 0x800 | 0x40000000 |
+		.chcr		= DM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_16BIT),
 		.mid_rid	= 0xc6,
 	},
 	{
 		.slave_id	= SHDMA_SLAVE_MMCIF_TX,
 		.addr		= 0x1fcb0034,
-		.chcr		= SM_INC | 0x800 | 0x40000000 |
+		.chcr		= SM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_32BIT),
 		.mid_rid	= 0xd3,
 	},
 	{
 		.slave_id	= SHDMA_SLAVE_MMCIF_RX,
 		.addr		= 0x1fcb0034,
-		.chcr		= DM_INC | 0x800 | 0x40000000 |
+		.chcr		= DM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_32BIT),
 		.mid_rid	= 0xd7,
 	},
@@ -154,56 +154,56 @@ static const struct sh_dmae_slave_config sh7757_dmae1_slaves[] = {
 	{
 		.slave_id	= SHDMA_SLAVE_SCIF2_TX,
 		.addr		= 0x1f4b000c,
-		.chcr		= SM_INC | 0x800 | 0x40000000 |
+		.chcr		= SM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x21,
 	},
 	{
 		.slave_id	= SHDMA_SLAVE_SCIF2_RX,
 		.addr		= 0x1f4b0014,
-		.chcr		= DM_INC | 0x800 | 0x40000000 |
+		.chcr		= DM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x22,
 	},
 	{
 		.slave_id	= SHDMA_SLAVE_SCIF3_TX,
 		.addr		= 0x1f4c000c,
-		.chcr		= SM_INC | 0x800 | 0x40000000 |
+		.chcr		= SM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x29,
 	},
 	{
 		.slave_id	= SHDMA_SLAVE_SCIF3_RX,
 		.addr		= 0x1f4c0014,
-		.chcr		= DM_INC | 0x800 | 0x40000000 |
+		.chcr		= DM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x2a,
 	},
 	{
 		.slave_id	= SHDMA_SLAVE_SCIF4_TX,
 		.addr		= 0x1f4d000c,
-		.chcr		= SM_INC | 0x800 | 0x40000000 |
+		.chcr		= SM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x41,
 	},
 	{
 		.slave_id	= SHDMA_SLAVE_SCIF4_RX,
 		.addr		= 0x1f4d0014,
-		.chcr		= DM_INC | 0x800 | 0x40000000 |
+		.chcr		= DM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x42,
 	},
 	{
 		.slave_id	= SHDMA_SLAVE_RSPI_TX,
 		.addr		= 0xfe480004,
-		.chcr		= SM_INC | 0x800 | 0x40000000 |
+		.chcr		= SM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_16BIT),
 		.mid_rid	= 0xc1,
 	},
 	{
 		.slave_id	= SHDMA_SLAVE_RSPI_RX,
 		.addr		= 0xfe480004,
-		.chcr		= DM_INC | 0x800 | 0x40000000 |
+		.chcr		= DM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_16BIT),
 		.mid_rid	= 0xc2,
 	},
@@ -213,70 +213,70 @@ static const struct sh_dmae_slave_config sh7757_dmae2_slaves[] = {
 	{
 		.slave_id	= SHDMA_SLAVE_RIIC0_TX,
 		.addr		= 0x1e500012,
-		.chcr		= SM_INC | 0x800 | 0x40000000 |
+		.chcr		= SM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x21,
 	},
 	{
 		.slave_id	= SHDMA_SLAVE_RIIC0_RX,
 		.addr		= 0x1e500013,
-		.chcr		= DM_INC | 0x800 | 0x40000000 |
+		.chcr		= DM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x22,
 	},
 	{
 		.slave_id	= SHDMA_SLAVE_RIIC1_TX,
 		.addr		= 0x1e510012,
-		.chcr		= SM_INC | 0x800 | 0x40000000 |
+		.chcr		= SM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x29,
 	},
 	{
 		.slave_id	= SHDMA_SLAVE_RIIC1_RX,
 		.addr		= 0x1e510013,
-		.chcr		= DM_INC | 0x800 | 0x40000000 |
+		.chcr		= DM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x2a,
 	},
 	{
 		.slave_id	= SHDMA_SLAVE_RIIC2_TX,
 		.addr		= 0x1e520012,
-		.chcr		= SM_INC | 0x800 | 0x40000000 |
+		.chcr		= SM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0xa1,
 	},
 	{
 		.slave_id	= SHDMA_SLAVE_RIIC2_RX,
 		.addr		= 0x1e520013,
-		.chcr		= DM_INC | 0x800 | 0x40000000 |
+		.chcr		= DM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0xa2,
 	},
 	{
 		.slave_id	= SHDMA_SLAVE_RIIC3_TX,
 		.addr		= 0x1e530012,
-		.chcr		= SM_INC | 0x800 | 0x40000000 |
+		.chcr		= SM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0xa9,
 	},
 	{
 		.slave_id	= SHDMA_SLAVE_RIIC3_RX,
 		.addr		= 0x1e530013,
-		.chcr		= DM_INC | 0x800 | 0x40000000 |
+		.chcr		= DM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0xaf,
 	},
 	{
 		.slave_id	= SHDMA_SLAVE_RIIC4_TX,
 		.addr		= 0x1e540012,
-		.chcr		= SM_INC | 0x800 | 0x40000000 |
+		.chcr		= SM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0xc5,
 	},
 	{
 		.slave_id	= SHDMA_SLAVE_RIIC4_RX,
 		.addr		= 0x1e540013,
-		.chcr		= DM_INC | 0x800 | 0x40000000 |
+		.chcr		= DM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0xc6,
 	},
@@ -286,70 +286,70 @@ static const struct sh_dmae_slave_config sh7757_dmae3_slaves[] = {
 	{
 		.slave_id	= SHDMA_SLAVE_RIIC5_TX,
 		.addr		= 0x1e550012,
-		.chcr		= SM_INC | 0x800 | 0x40000000 |
+		.chcr		= SM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x21,
 	},
 	{
 		.slave_id	= SHDMA_SLAVE_RIIC5_RX,
 		.addr		= 0x1e550013,
-		.chcr		= DM_INC | 0x800 | 0x40000000 |
+		.chcr		= DM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x22,
 	},
 	{
 		.slave_id	= SHDMA_SLAVE_RIIC6_TX,
 		.addr		= 0x1e560012,
-		.chcr		= SM_INC | 0x800 | 0x40000000 |
+		.chcr		= SM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x29,
 	},
 	{
 		.slave_id	= SHDMA_SLAVE_RIIC6_RX,
 		.addr		= 0x1e560013,
-		.chcr		= DM_INC | 0x800 | 0x40000000 |
+		.chcr		= DM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x2a,
 	},
 	{
 		.slave_id	= SHDMA_SLAVE_RIIC7_TX,
 		.addr		= 0x1e570012,
-		.chcr		= SM_INC | 0x800 | 0x40000000 |
+		.chcr		= SM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x41,
 	},
 	{
 		.slave_id	= SHDMA_SLAVE_RIIC7_RX,
 		.addr		= 0x1e570013,
-		.chcr		= DM_INC | 0x800 | 0x40000000 |
+		.chcr		= DM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x42,
 	},
 	{
 		.slave_id	= SHDMA_SLAVE_RIIC8_TX,
 		.addr		= 0x1e580012,
-		.chcr		= SM_INC | 0x800 | 0x40000000 |
+		.chcr		= SM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x45,
 	},
 	{
 		.slave_id	= SHDMA_SLAVE_RIIC8_RX,
 		.addr		= 0x1e580013,
-		.chcr		= DM_INC | 0x800 | 0x40000000 |
+		.chcr		= DM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x46,
 	},
 	{
 		.slave_id	= SHDMA_SLAVE_RIIC9_TX,
 		.addr		= 0x1e590012,
-		.chcr		= SM_INC | 0x800 | 0x40000000 |
+		.chcr		= SM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x51,
 	},
 	{
 		.slave_id	= SHDMA_SLAVE_RIIC9_RX,
 		.addr		= 0x1e590013,
-		.chcr		= DM_INC | 0x800 | 0x40000000 |
+		.chcr		= DM_INC | RS_ERS | 0x40000000 |
 				  TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x52,
 	},
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 8f6afbf9ba54..9b1ea0ef59af 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -393,6 +393,22 @@ config XILINX_VDMA
 	  channels, Memory Mapped to Stream (MM2S) and Stream to
 	  Memory Mapped (S2MM) for the data transfers.
 
+config DMA_SUN6I
+	tristate "Allwinner A31 SoCs DMA support"
+	depends on MACH_SUN6I || COMPILE_TEST
+	depends on RESET_CONTROLLER
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Support for the DMA engine found in Allwinner A31 SoCs.
+
+config NBPFAXI_DMA
+	tristate "Renesas Type-AXI NBPF DMA support"
+	select DMA_ENGINE
+	depends on ARM || COMPILE_TEST
+	help
+	  Support for "Type-AXI" NBPF DMA IPs from Renesas.
+
 config DMA_ENGINE
 	bool
 
@@ -406,6 +422,7 @@ config DMA_ACPI
 config DMA_OF
 	def_bool y
 	depends on OF
+	select DMA_ENGINE
 
 comment "DMA Clients"
 	depends on DMA_ENGINE
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index bd9e7fa928bd..c6adb925f0b9 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -1,5 +1,5 @@
-ccflags-$(CONFIG_DMADEVICES_DEBUG)  := -DDEBUG
-ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
+subdir-ccflags-$(CONFIG_DMADEVICES_DEBUG)  := -DDEBUG
+subdir-ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
 
 obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
 obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
@@ -48,3 +48,5 @@ obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
 obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
 obj-y += xilinx/
 obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
+obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
+obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
diff --git a/drivers/dma/TODO b/drivers/dma/TODO
index 734ed0206cd5..b8045cd42ee1 100644
--- a/drivers/dma/TODO
+++ b/drivers/dma/TODO
@@ -7,7 +7,6 @@ TODO for slave dma
 	- imx-dma
 	- imx-sdma
 	- mxs-dma.c
-	- dw_dmac
 	- intel_mid_dma
 4. Check other subsystems for dma drivers and merge/move to dmaengine
 5. Remove dma_slave_config's dma direction.
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 8114731a1c62..e34024b000a4 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c | |||
@@ -1040,7 +1040,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, | |||
1040 | 1040 | ||
1041 | if (early_bytes) { | 1041 | if (early_bytes) { |
1042 | dev_vdbg(&pl08x->adev->dev, | 1042 | dev_vdbg(&pl08x->adev->dev, |
1043 | "%s byte width LLIs (remain 0x%08x)\n", | 1043 | "%s byte width LLIs (remain 0x%08zx)\n", |
1044 | __func__, bd.remainder); | 1044 | __func__, bd.remainder); |
1045 | prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes, | 1045 | prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes, |
1046 | num_llis++, &total_bytes); | 1046 | num_llis++, &total_bytes); |
@@ -1653,7 +1653,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( | |||
1653 | static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic( | 1653 | static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic( |
1654 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | 1654 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, |
1655 | size_t period_len, enum dma_transfer_direction direction, | 1655 | size_t period_len, enum dma_transfer_direction direction, |
1656 | unsigned long flags, void *context) | 1656 | unsigned long flags) |
1657 | { | 1657 | { |
1658 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | 1658 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); |
1659 | struct pl08x_driver_data *pl08x = plchan->host; | 1659 | struct pl08x_driver_data *pl08x = plchan->host; |
@@ -1662,7 +1662,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic( | |||
1662 | dma_addr_t slave_addr; | 1662 | dma_addr_t slave_addr; |
1663 | 1663 | ||
1664 | dev_dbg(&pl08x->adev->dev, | 1664 | dev_dbg(&pl08x->adev->dev, |
1665 | "%s prepare cyclic transaction of %d/%d bytes %s %s\n", | 1665 | "%s prepare cyclic transaction of %zd/%zd bytes %s %s\n", |
1666 | __func__, period_len, buf_len, | 1666 | __func__, period_len, buf_len, |
1667 | direction == DMA_MEM_TO_DEV ? "to" : "from", | 1667 | direction == DMA_MEM_TO_DEV ? "to" : "from", |
1668 | plchan->name); | 1668 | plchan->name); |
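
Note on the API change in this hunk, repeated across the drivers below: the dmaengine core dropped the unused "void *context" argument from the cyclic-prep callback. A sketch of the post-change callback as it sits in struct dma_device (shown for reference, not a new definition):

	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags);

Out-of-tree drivers still implementing the old signature need the same one-line adjustment.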
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index c13a3bb0f594..ca9dd2613283 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c | |||
@@ -294,14 +294,16 @@ static int atc_get_bytes_left(struct dma_chan *chan) | |||
294 | ret = -EINVAL; | 294 | ret = -EINVAL; |
295 | goto out; | 295 | goto out; |
296 | } | 296 | } |
297 | atchan->remain_desc -= (desc_cur->lli.ctrla & ATC_BTSIZE_MAX) | 297 | |
298 | << (desc_first->tx_width); | 298 | count = (desc_cur->lli.ctrla & ATC_BTSIZE_MAX) |
299 | if (atchan->remain_desc < 0) { | 299 | << desc_first->tx_width; |
300 | if (atchan->remain_desc < count) { | ||
300 | ret = -EINVAL; | 301 | ret = -EINVAL; |
301 | goto out; | 302 | goto out; |
302 | } else { | ||
303 | ret = atchan->remain_desc; | ||
304 | } | 303 | } |
304 | |||
305 | atchan->remain_desc -= count; | ||
306 | ret = atchan->remain_desc; | ||
305 | } else { | 307 | } else { |
306 | /* | 308 | /* |
307 | * Get residual bytes when current | 309 | * Get residual bytes when current |
@@ -893,12 +895,11 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, | |||
893 | * @period_len: number of bytes for each period | 895 | * @period_len: number of bytes for each period |
894 | * @direction: transfer direction, to or from device | 896 | * @direction: transfer direction, to or from device |
895 | * @flags: tx descriptor status flags | 897 | * @flags: tx descriptor status flags |
896 | * @context: transfer context (ignored) | ||
897 | */ | 898 | */ |
898 | static struct dma_async_tx_descriptor * | 899 | static struct dma_async_tx_descriptor * |
899 | atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | 900 | atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, |
900 | size_t period_len, enum dma_transfer_direction direction, | 901 | size_t period_len, enum dma_transfer_direction direction, |
901 | unsigned long flags, void *context) | 902 | unsigned long flags) |
902 | { | 903 | { |
903 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | 904 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
904 | struct at_dma_slave *atslave = chan->private; | 905 | struct at_dma_slave *atslave = chan->private; |
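
The at_hdmac hunk above fixes residue accounting: the old code subtracted first and only range-checked afterwards, so a too-large count corrupted the counter before the error was noticed. The new code computes the decrement up front and validates it before updating. A minimal standalone sketch of the pattern (hypothetical names; 'remain' stands in for atchan->remain_desc):

	static int residue_sub(unsigned int *remain, unsigned int count)
	{
		/* reject the update if it would underflow the counter */
		if (*remain < count)
			return -EINVAL;
		*remain -= count;
		return *remain;	/* bytes still outstanding */
	}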
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index a03602164e3e..68007974961a 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c | |||
@@ -335,7 +335,7 @@ static void bcm2835_dma_issue_pending(struct dma_chan *chan) | |||
335 | static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic( | 335 | static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic( |
336 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | 336 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, |
337 | size_t period_len, enum dma_transfer_direction direction, | 337 | size_t period_len, enum dma_transfer_direction direction, |
338 | unsigned long flags, void *context) | 338 | unsigned long flags) |
339 | { | 339 | { |
340 | struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); | 340 | struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); |
341 | enum dma_slave_buswidth dev_width; | 341 | enum dma_slave_buswidth dev_width; |
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c index 94c380f07538..6a9d89c93b1f 100644 --- a/drivers/dma/dma-jz4740.c +++ b/drivers/dma/dma-jz4740.c | |||
@@ -433,7 +433,7 @@ static struct dma_async_tx_descriptor *jz4740_dma_prep_slave_sg( | |||
433 | static struct dma_async_tx_descriptor *jz4740_dma_prep_dma_cyclic( | 433 | static struct dma_async_tx_descriptor *jz4740_dma_prep_dma_cyclic( |
434 | struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len, | 434 | struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len, |
435 | size_t period_len, enum dma_transfer_direction direction, | 435 | size_t period_len, enum dma_transfer_direction direction, |
436 | unsigned long flags, void *context) | 436 | unsigned long flags) |
437 | { | 437 | { |
438 | struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); | 438 | struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); |
439 | struct jz4740_dma_desc *desc; | 439 | struct jz4740_dma_desc *desc; |
@@ -614,4 +614,4 @@ module_platform_driver(jz4740_dma_driver); | |||
614 | 614 | ||
615 | MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); | 615 | MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); |
616 | MODULE_DESCRIPTION("JZ4740 DMA driver"); | 616 | MODULE_DESCRIPTION("JZ4740 DMA driver"); |
617 | MODULE_LICENSE("GPLv2"); | 617 | MODULE_LICENSE("GPL v2"); |
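
The jz4740 hunk corrects the MODULE_LICENSE string: the module loader only recognizes a fixed set of license idents, and an unknown string such as "GPLv2" makes the module look non-GPL, tainting the kernel and cutting the module off from GPL-only symbols. The accepted spelling is:

	MODULE_LICENSE("GPL v2");	/* recognized; "GPLv2" is not */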
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index a27ded53ab4f..1af731b83b3f 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c | |||
@@ -279,6 +279,19 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) | |||
279 | channel_set_bit(dw, CH_EN, dwc->mask); | 279 | channel_set_bit(dw, CH_EN, dwc->mask); |
280 | } | 280 | } |
281 | 281 | ||
282 | static void dwc_dostart_first_queued(struct dw_dma_chan *dwc) | ||
283 | { | ||
284 | struct dw_desc *desc; | ||
285 | |||
286 | if (list_empty(&dwc->queue)) | ||
287 | return; | ||
288 | |||
289 | list_move(dwc->queue.next, &dwc->active_list); | ||
290 | desc = dwc_first_active(dwc); | ||
291 | dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie); | ||
292 | dwc_dostart(dwc, desc); | ||
293 | } | ||
294 | |||
282 | /*----------------------------------------------------------------------*/ | 295 | /*----------------------------------------------------------------------*/ |
283 | 296 | ||
284 | static void | 297 | static void |
@@ -335,10 +348,7 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
335 | * the completed ones. | 348 | * the completed ones. |
336 | */ | 349 | */ |
337 | list_splice_init(&dwc->active_list, &list); | 350 | list_splice_init(&dwc->active_list, &list); |
338 | if (!list_empty(&dwc->queue)) { | 351 | dwc_dostart_first_queued(dwc); |
339 | list_move(dwc->queue.next, &dwc->active_list); | ||
340 | dwc_dostart(dwc, dwc_first_active(dwc)); | ||
341 | } | ||
342 | 352 | ||
343 | spin_unlock_irqrestore(&dwc->lock, flags); | 353 | spin_unlock_irqrestore(&dwc->lock, flags); |
344 | 354 | ||
@@ -467,10 +477,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
467 | /* Try to continue after resetting the channel... */ | 477 | /* Try to continue after resetting the channel... */ |
468 | dwc_chan_disable(dw, dwc); | 478 | dwc_chan_disable(dw, dwc); |
469 | 479 | ||
470 | if (!list_empty(&dwc->queue)) { | 480 | dwc_dostart_first_queued(dwc); |
471 | list_move(dwc->queue.next, &dwc->active_list); | ||
472 | dwc_dostart(dwc, dwc_first_active(dwc)); | ||
473 | } | ||
474 | spin_unlock_irqrestore(&dwc->lock, flags); | 481 | spin_unlock_irqrestore(&dwc->lock, flags); |
475 | } | 482 | } |
476 | 483 | ||
@@ -677,17 +684,9 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) | |||
677 | * possible, perhaps even appending to those already submitted | 684 | * possible, perhaps even appending to those already submitted |
678 | * for DMA. But this is hard to do in a race-free manner. | 685 | * for DMA. But this is hard to do in a race-free manner. |
679 | */ | 686 | */ |
680 | if (list_empty(&dwc->active_list)) { | ||
681 | dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__, | ||
682 | desc->txd.cookie); | ||
683 | list_add_tail(&desc->desc_node, &dwc->active_list); | ||
684 | dwc_dostart(dwc, dwc_first_active(dwc)); | ||
685 | } else { | ||
686 | dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, | ||
687 | desc->txd.cookie); | ||
688 | 687 | ||
689 | list_add_tail(&desc->desc_node, &dwc->queue); | 688 | dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, desc->txd.cookie); |
690 | } | 689 | list_add_tail(&desc->desc_node, &dwc->queue); |
691 | 690 | ||
692 | spin_unlock_irqrestore(&dwc->lock, flags); | 691 | spin_unlock_irqrestore(&dwc->lock, flags); |
693 | 692 | ||
@@ -1092,9 +1091,12 @@ dwc_tx_status(struct dma_chan *chan, | |||
1092 | static void dwc_issue_pending(struct dma_chan *chan) | 1091 | static void dwc_issue_pending(struct dma_chan *chan) |
1093 | { | 1092 | { |
1094 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 1093 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
1094 | unsigned long flags; | ||
1095 | 1095 | ||
1096 | if (!list_empty(&dwc->queue)) | 1096 | spin_lock_irqsave(&dwc->lock, flags); |
1097 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); | 1097 | if (list_empty(&dwc->active_list)) |
1098 | dwc_dostart_first_queued(dwc); | ||
1099 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
1098 | } | 1100 | } |
1099 | 1101 | ||
1100 | static int dwc_alloc_chan_resources(struct dma_chan *chan) | 1102 | static int dwc_alloc_chan_resources(struct dma_chan *chan) |
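
The dw_dmac rework above makes tx_submit() purely enqueue the descriptor and funnels every hardware start through dwc_dostart_first_queued(), called from issue_pending() and the completion paths. This matches the documented dmaengine contract: nothing may touch the hardware until the client calls dma_async_issue_pending(). A hedged client-side sketch of the sequence this relies on ('chan', 'buf' as a mapped dma_addr_t, and 'len' are assumed to be set up by the caller):

	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_single(chan, buf, len,
					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	cookie = dmaengine_submit(desc);	/* lands on dwc->queue only */
	dma_async_issue_pending(chan);		/* first start happens here */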
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index b512caf46944..7b65633f495e 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/platform_device.h> | 23 | #include <linux/platform_device.h> |
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | #include <linux/spinlock.h> | 25 | #include <linux/spinlock.h> |
26 | #include <linux/of.h> | ||
26 | 27 | ||
27 | #include <linux/platform_data/edma.h> | 28 | #include <linux/platform_data/edma.h> |
28 | 29 | ||
@@ -256,8 +257,13 @@ static int edma_terminate_all(struct edma_chan *echan) | |||
256 | * echan->edesc is NULL and exit.) | 257 | * echan->edesc is NULL and exit.) |
257 | */ | 258 | */ |
258 | if (echan->edesc) { | 259 | if (echan->edesc) { |
260 | int cyclic = echan->edesc->cyclic; | ||
259 | echan->edesc = NULL; | 261 | echan->edesc = NULL; |
260 | edma_stop(echan->ch_num); | 262 | edma_stop(echan->ch_num); |
263 | /* Move the cyclic channel back to default queue */ | ||
264 | if (cyclic) | ||
265 | edma_assign_channel_eventq(echan->ch_num, | ||
266 | EVENTQ_DEFAULT); | ||
261 | } | 267 | } |
262 | 268 | ||
263 | vchan_get_all_descriptors(&echan->vchan, &head); | 269 | vchan_get_all_descriptors(&echan->vchan, &head); |
@@ -592,7 +598,7 @@ struct dma_async_tx_descriptor *edma_prep_dma_memcpy( | |||
592 | static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( | 598 | static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( |
593 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | 599 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, |
594 | size_t period_len, enum dma_transfer_direction direction, | 600 | size_t period_len, enum dma_transfer_direction direction, |
595 | unsigned long tx_flags, void *context) | 601 | unsigned long tx_flags) |
596 | { | 602 | { |
597 | struct edma_chan *echan = to_edma_chan(chan); | 603 | struct edma_chan *echan = to_edma_chan(chan); |
598 | struct device *dev = chan->device->dev; | 604 | struct device *dev = chan->device->dev; |
@@ -718,12 +724,15 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( | |||
718 | edesc->absync = ret; | 724 | edesc->absync = ret; |
719 | 725 | ||
720 | /* | 726 | /* |
721 | * Enable interrupts for every period because callback | 727 | * Enable period interrupt only if it is requested |
722 | * has to be called for every period. | ||
723 | */ | 728 | */ |
724 | edesc->pset[i].param.opt |= TCINTEN; | 729 | if (tx_flags & DMA_PREP_INTERRUPT) |
730 | edesc->pset[i].param.opt |= TCINTEN; | ||
725 | } | 731 | } |
726 | 732 | ||
733 | /* Place the cyclic channel to highest priority queue */ | ||
734 | edma_assign_channel_eventq(echan->ch_num, EVENTQ_0); | ||
735 | |||
727 | return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); | 736 | return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); |
728 | } | 737 | } |
729 | 738 | ||
@@ -993,7 +1002,7 @@ static int edma_dma_device_slave_caps(struct dma_chan *dchan, | |||
993 | caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | 1002 | caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); |
994 | caps->cmd_pause = true; | 1003 | caps->cmd_pause = true; |
995 | caps->cmd_terminate = true; | 1004 | caps->cmd_terminate = true; |
996 | caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; | 1005 | caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; |
997 | 1006 | ||
998 | return 0; | 1007 | return 0; |
999 | } | 1008 | } |
@@ -1040,7 +1049,7 @@ static int edma_probe(struct platform_device *pdev) | |||
1040 | ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY); | 1049 | ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY); |
1041 | if (ecc->dummy_slot < 0) { | 1050 | if (ecc->dummy_slot < 0) { |
1042 | dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n"); | 1051 | dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n"); |
1043 | return -EIO; | 1052 | return ecc->dummy_slot; |
1044 | } | 1053 | } |
1045 | 1054 | ||
1046 | dma_cap_zero(ecc->dma_slave.cap_mask); | 1055 | dma_cap_zero(ecc->dma_slave.cap_mask); |
@@ -1125,7 +1134,7 @@ static int edma_init(void) | |||
1125 | } | 1134 | } |
1126 | } | 1135 | } |
1127 | 1136 | ||
1128 | if (EDMA_CTLRS == 2) { | 1137 | if (!of_have_populated_dt() && EDMA_CTLRS == 2) { |
1129 | pdev1 = platform_device_register_full(&edma_dev_info1); | 1138 | pdev1 = platform_device_register_full(&edma_dev_info1); |
1130 | if (IS_ERR(pdev1)) { | 1139 | if (IS_ERR(pdev1)) { |
1131 | platform_driver_unregister(&edma_driver); | 1140 | platform_driver_unregister(&edma_driver); |
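
Two behavioural changes in the edma hunks are worth noting: cyclic channels are moved to the highest-priority event queue (EVENTQ_0) for the duration of the transfer and returned to EVENTQ_DEFAULT on terminate, and the per-period TCINTEN bit is now only programmed when the client asked for callbacks. A hedged client-side sketch ('chan', 'buf', 'buf_len' and 'period_len' assumed prepared by the caller) of a cyclic transfer that now runs interrupt-free:

	struct dma_async_tx_descriptor *desc;

	/* flags == 0: no DMA_PREP_INTERRUPT, so no per-period IRQ fires */
	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_DEV_TO_MEM, 0);
	if (desc) {
		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
	}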
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index cb4bf682a708..7650470196c4 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c | |||
@@ -1092,7 +1092,6 @@ fail: | |||
1092 | * @period_len: length of a single period | 1092 | * @period_len: length of a single period |
1093 | * @dir: direction of the operation | 1093 | * @dir: direction of the operation |
1094 | * @flags: tx descriptor status flags | 1094 | * @flags: tx descriptor status flags |
1095 | * @context: operation context (ignored) | ||
1096 | * | 1095 | * |
1097 | * Prepares a descriptor for cyclic DMA operation. This means that once the | 1096 | * Prepares a descriptor for cyclic DMA operation. This means that once the |
1098 | * descriptor is submitted, we will be submitting in a @period_len sized | 1097 | * descriptor is submitted, we will be submitting in a @period_len sized |
@@ -1105,8 +1104,7 @@ fail: | |||
1105 | static struct dma_async_tx_descriptor * | 1104 | static struct dma_async_tx_descriptor * |
1106 | ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, | 1105 | ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, |
1107 | size_t buf_len, size_t period_len, | 1106 | size_t buf_len, size_t period_len, |
1108 | enum dma_transfer_direction dir, unsigned long flags, | 1107 | enum dma_transfer_direction dir, unsigned long flags) |
1109 | void *context) | ||
1110 | { | 1108 | { |
1111 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); | 1109 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); |
1112 | struct ep93xx_dma_desc *desc, *first; | 1110 | struct ep93xx_dma_desc *desc, *first; |
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index b396a7fb53ab..3c5711d5fe97 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c | |||
@@ -248,11 +248,12 @@ static void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan, | |||
248 | unsigned int slot, bool enable) | 248 | unsigned int slot, bool enable) |
249 | { | 249 | { |
250 | u32 ch = fsl_chan->vchan.chan.chan_id; | 250 | u32 ch = fsl_chan->vchan.chan.chan_id; |
251 | void __iomem *muxaddr = fsl_chan->edma->muxbase[ch / DMAMUX_NR]; | 251 | void __iomem *muxaddr; |
252 | unsigned chans_per_mux, ch_off; | 252 | unsigned chans_per_mux, ch_off; |
253 | 253 | ||
254 | chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR; | 254 | chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR; |
255 | ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux; | 255 | ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux; |
256 | muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux]; | ||
256 | 257 | ||
257 | if (enable) | 258 | if (enable) |
258 | edma_writeb(fsl_chan->edma, | 259 | edma_writeb(fsl_chan->edma, |
@@ -516,7 +517,7 @@ err: | |||
516 | static struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic( | 517 | static struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic( |
517 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, | 518 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, |
518 | size_t period_len, enum dma_transfer_direction direction, | 519 | size_t period_len, enum dma_transfer_direction direction, |
519 | unsigned long flags, void *context) | 520 | unsigned long flags) |
520 | { | 521 | { |
521 | struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); | 522 | struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); |
522 | struct fsl_edma_desc *fsl_desc; | 523 | struct fsl_edma_desc *fsl_desc; |
@@ -724,6 +725,7 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec, | |||
724 | { | 725 | { |
725 | struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data; | 726 | struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data; |
726 | struct dma_chan *chan, *_chan; | 727 | struct dma_chan *chan, *_chan; |
728 | unsigned long chans_per_mux = fsl_edma->n_chans / DMAMUX_NR; | ||
727 | 729 | ||
728 | if (dma_spec->args_count != 2) | 730 | if (dma_spec->args_count != 2) |
729 | return NULL; | 731 | return NULL; |
@@ -732,7 +734,7 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec, | |||
732 | list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) { | 734 | list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) { |
733 | if (chan->client_count) | 735 | if (chan->client_count) |
734 | continue; | 736 | continue; |
735 | if ((chan->chan_id / DMAMUX_NR) == dma_spec->args[0]) { | 737 | if ((chan->chan_id / chans_per_mux) == dma_spec->args[0]) { |
736 | chan = dma_get_slave_channel(chan); | 738 | chan = dma_get_slave_channel(chan); |
737 | if (chan) { | 739 | if (chan) { |
738 | chan->device->privatecnt++; | 740 | chan->device->privatecnt++; |
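
The fsl-edma fix above corrects the mux lookup: muxbase[] has one entry per mux, so the index must be derived from channels-per-mux, not from the number of muxes. A worked example under assumed numbers (n_chans = 32, DMAMUX_NR = 2, hence 16 channels per mux):

	unsigned int n_chans = 32, nr_mux = 2;
	unsigned int chans_per_mux = n_chans / nr_mux;	/* 16 */
	unsigned int ch = 17;

	unsigned int mux    = ch / chans_per_mux;	/* 1: second mux        */
	unsigned int ch_off = ch % chans_per_mux;	/* 1: second slot on it */
	/* old code: ch / DMAMUX_NR == 8, indexing far past muxbase[1] */

The same chans_per_mux quantity now also drives the xlate match, keeping the first DT cell (the mux id) consistent with the register layout.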
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index e0fec68aed25..d5d6885ab341 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c | |||
@@ -396,10 +396,17 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
396 | struct fsldma_chan *chan = to_fsl_chan(tx->chan); | 396 | struct fsldma_chan *chan = to_fsl_chan(tx->chan); |
397 | struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); | 397 | struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); |
398 | struct fsl_desc_sw *child; | 398 | struct fsl_desc_sw *child; |
399 | unsigned long flags; | ||
400 | dma_cookie_t cookie = -EINVAL; | 399 | dma_cookie_t cookie = -EINVAL; |
401 | 400 | ||
402 | spin_lock_irqsave(&chan->desc_lock, flags); | 401 | spin_lock_bh(&chan->desc_lock); |
402 | |||
403 | #ifdef CONFIG_PM | ||
404 | if (unlikely(chan->pm_state != RUNNING)) { | ||
405 | chan_dbg(chan, "cannot submit due to suspend\n"); | ||
406 | spin_unlock_bh(&chan->desc_lock); | ||
407 | return -1; | ||
408 | } | ||
409 | #endif | ||
403 | 410 | ||
404 | /* | 411 | /* |
405 | * assign cookies to all of the software descriptors | 412 | * assign cookies to all of the software descriptors |
@@ -412,7 +419,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
412 | /* put this transaction onto the tail of the pending queue */ | 419 | /* put this transaction onto the tail of the pending queue */ |
413 | append_ld_queue(chan, desc); | 420 | append_ld_queue(chan, desc); |
414 | 421 | ||
415 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 422 | spin_unlock_bh(&chan->desc_lock); |
416 | 423 | ||
417 | return cookie; | 424 | return cookie; |
418 | } | 425 | } |
@@ -459,6 +466,88 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan) | |||
459 | } | 466 | } |
460 | 467 | ||
461 | /** | 468 | /** |
469 | * fsldma_clean_completed_descriptor - free all descriptors which | ||
470 | * have been completed and acked | ||
471 | * @chan: Freescale DMA channel | ||
472 | * | ||
473 | * This function is used on all completed and acked descriptors. | ||
474 | * All descriptors should only be freed in this function. | ||
475 | */ | ||
476 | static void fsldma_clean_completed_descriptor(struct fsldma_chan *chan) | ||
477 | { | ||
478 | struct fsl_desc_sw *desc, *_desc; | ||
479 | |||
480 | /* Free each acked descriptor, in order */ | ||
481 | list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node) | ||
482 | if (async_tx_test_ack(&desc->async_tx)) | ||
483 | fsl_dma_free_descriptor(chan, desc); | ||
484 | } | ||
485 | |||
486 | /** | ||
487 | * fsldma_run_tx_complete_actions - cleanup a single link descriptor | ||
488 | * @chan: Freescale DMA channel | ||
489 | * @desc: descriptor to cleanup and free | ||
490 | * @cookie: Freescale DMA transaction identifier | ||
491 | * | ||
492 | * This function is used on a descriptor which has been executed by the DMA | ||
493 | * controller. It will run any callbacks, submit any dependencies. | ||
494 | */ | ||
495 | static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan, | ||
496 | struct fsl_desc_sw *desc, dma_cookie_t cookie) | ||
497 | { | ||
498 | struct dma_async_tx_descriptor *txd = &desc->async_tx; | ||
499 | dma_cookie_t ret = cookie; | ||
500 | |||
501 | BUG_ON(txd->cookie < 0); | ||
502 | |||
503 | if (txd->cookie > 0) { | ||
504 | ret = txd->cookie; | ||
505 | |||
506 | /* Run the link descriptor callback function */ | ||
507 | if (txd->callback) { | ||
508 | chan_dbg(chan, "LD %p callback\n", desc); | ||
509 | txd->callback(txd->callback_param); | ||
510 | } | ||
511 | } | ||
512 | |||
513 | /* Run any dependencies */ | ||
514 | dma_run_dependencies(txd); | ||
515 | |||
516 | return ret; | ||
517 | } | ||
518 | |||
519 | /** | ||
520 | * fsldma_clean_running_descriptor - move the completed descriptor from | ||
521 | * ld_running to ld_completed | ||
522 | * @chan: Freescale DMA channel | ||
523 | * @desc: the descriptor which is completed | ||
524 | * | ||
525 | * Free the descriptor directly if already acked by the async_tx API, | ||
526 | * or move it to the ld_completed queue. | ||
527 | */ | ||
528 | static void fsldma_clean_running_descriptor(struct fsldma_chan *chan, | ||
529 | struct fsl_desc_sw *desc) | ||
530 | { | ||
531 | /* Remove from the list of transactions */ | ||
532 | list_del(&desc->node); | ||
533 | |||
534 | /* | ||
535 | * the client is allowed to attach dependent operations | ||
536 | * until 'ack' is set | ||
537 | */ | ||
538 | if (!async_tx_test_ack(&desc->async_tx)) { | ||
539 | /* | ||
540 | * Move this descriptor to the list of descriptors which is | ||
541 | * completed, but still awaiting the 'ack' bit to be set. | ||
542 | */ | ||
543 | list_add_tail(&desc->node, &chan->ld_completed); | ||
544 | return; | ||
545 | } | ||
546 | |||
547 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); | ||
548 | } | ||
549 | |||
550 | /** | ||
462 | * fsl_chan_xfer_ld_queue - transfer any pending transactions | 551 | * fsl_chan_xfer_ld_queue - transfer any pending transactions |
463 | * @chan : Freescale DMA channel | 552 | * @chan : Freescale DMA channel |
464 | * | 553 | * |
@@ -526,31 +615,58 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) | |||
526 | } | 615 | } |
527 | 616 | ||
528 | /** | 617 | /** |
529 | * fsldma_cleanup_descriptor - cleanup and free a single link descriptor | 618 | * fsldma_cleanup_descriptors - cleanup link descriptors which are completed |
619 | * and move them to ld_completed, where they are freed once 'ack' is set | ||
530 | * @chan: Freescale DMA channel | 620 | * @chan: Freescale DMA channel |
531 | * @desc: descriptor to cleanup and free | ||
532 | * | 621 | * |
533 | * This function is used on a descriptor which has been executed by the DMA | 622 | * This function is used on descriptors which have been executed by the DMA |
534 | * controller. It will run any callbacks, submit any dependencies, and then | 623 | * controller. It will run any callbacks, submit any dependencies, then |
535 | * free the descriptor. | 624 | * free these descriptors once the 'ack' flag is set. |
536 | */ | 625 | */ |
537 | static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, | 626 | static void fsldma_cleanup_descriptors(struct fsldma_chan *chan) |
538 | struct fsl_desc_sw *desc) | ||
539 | { | 627 | { |
540 | struct dma_async_tx_descriptor *txd = &desc->async_tx; | 628 | struct fsl_desc_sw *desc, *_desc; |
629 | dma_cookie_t cookie = 0; | ||
630 | dma_addr_t curr_phys = get_cdar(chan); | ||
631 | int seen_current = 0; | ||
632 | |||
633 | fsldma_clean_completed_descriptor(chan); | ||
634 | |||
635 | /* Run the callback for each descriptor, in order */ | ||
636 | list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) { | ||
637 | /* | ||
638 | * do not advance past the current descriptor loaded into the | ||
639 | * hardware channel; subsequent descriptors are either in | ||
640 | * progress or have not been submitted | ||
641 | */ | ||
642 | if (seen_current) | ||
643 | break; | ||
644 | |||
645 | /* | ||
646 | * stop the search if we reach the current descriptor and the | ||
647 | * channel is busy | ||
648 | */ | ||
649 | if (desc->async_tx.phys == curr_phys) { | ||
650 | seen_current = 1; | ||
651 | if (!dma_is_idle(chan)) | ||
652 | break; | ||
653 | } | ||
654 | |||
655 | cookie = fsldma_run_tx_complete_actions(chan, desc, cookie); | ||
541 | 656 | ||
542 | /* Run the link descriptor callback function */ | 657 | fsldma_clean_running_descriptor(chan, desc); |
543 | if (txd->callback) { | ||
544 | chan_dbg(chan, "LD %p callback\n", desc); | ||
545 | txd->callback(txd->callback_param); | ||
546 | } | 658 | } |
547 | 659 | ||
548 | /* Run any dependencies */ | 660 | /* |
549 | dma_run_dependencies(txd); | 661 | * Start any pending transactions automatically |
662 | * | ||
663 | * In the ideal case, we keep the DMA controller busy while we go | ||
664 | * ahead and free the completed descriptors above. | ||
665 | */ | ||
666 | fsl_chan_xfer_ld_queue(chan); | ||
550 | 667 | ||
551 | dma_descriptor_unmap(txd); | 668 | if (cookie > 0) |
552 | chan_dbg(chan, "LD %p free\n", desc); | 669 | chan->common.completed_cookie = cookie; |
553 | dma_pool_free(chan->desc_pool, desc, txd->phys); | ||
554 | } | 670 | } |
555 | 671 | ||
556 | /** | 672 | /** |
@@ -617,13 +733,14 @@ static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan, | |||
617 | static void fsl_dma_free_chan_resources(struct dma_chan *dchan) | 733 | static void fsl_dma_free_chan_resources(struct dma_chan *dchan) |
618 | { | 734 | { |
619 | struct fsldma_chan *chan = to_fsl_chan(dchan); | 735 | struct fsldma_chan *chan = to_fsl_chan(dchan); |
620 | unsigned long flags; | ||
621 | 736 | ||
622 | chan_dbg(chan, "free all channel resources\n"); | 737 | chan_dbg(chan, "free all channel resources\n"); |
623 | spin_lock_irqsave(&chan->desc_lock, flags); | 738 | spin_lock_bh(&chan->desc_lock); |
739 | fsldma_cleanup_descriptors(chan); | ||
624 | fsldma_free_desc_list(chan, &chan->ld_pending); | 740 | fsldma_free_desc_list(chan, &chan->ld_pending); |
625 | fsldma_free_desc_list(chan, &chan->ld_running); | 741 | fsldma_free_desc_list(chan, &chan->ld_running); |
626 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 742 | fsldma_free_desc_list(chan, &chan->ld_completed); |
743 | spin_unlock_bh(&chan->desc_lock); | ||
627 | 744 | ||
628 | dma_pool_destroy(chan->desc_pool); | 745 | dma_pool_destroy(chan->desc_pool); |
629 | chan->desc_pool = NULL; | 746 | chan->desc_pool = NULL; |
@@ -842,7 +959,6 @@ static int fsl_dma_device_control(struct dma_chan *dchan, | |||
842 | { | 959 | { |
843 | struct dma_slave_config *config; | 960 | struct dma_slave_config *config; |
844 | struct fsldma_chan *chan; | 961 | struct fsldma_chan *chan; |
845 | unsigned long flags; | ||
846 | int size; | 962 | int size; |
847 | 963 | ||
848 | if (!dchan) | 964 | if (!dchan) |
@@ -852,7 +968,7 @@ static int fsl_dma_device_control(struct dma_chan *dchan, | |||
852 | 968 | ||
853 | switch (cmd) { | 969 | switch (cmd) { |
854 | case DMA_TERMINATE_ALL: | 970 | case DMA_TERMINATE_ALL: |
855 | spin_lock_irqsave(&chan->desc_lock, flags); | 971 | spin_lock_bh(&chan->desc_lock); |
856 | 972 | ||
857 | /* Halt the DMA engine */ | 973 | /* Halt the DMA engine */ |
858 | dma_halt(chan); | 974 | dma_halt(chan); |
@@ -860,9 +976,10 @@ static int fsl_dma_device_control(struct dma_chan *dchan, | |||
860 | /* Remove and free all of the descriptors in the LD queue */ | 976 | /* Remove and free all of the descriptors in the LD queue */ |
861 | fsldma_free_desc_list(chan, &chan->ld_pending); | 977 | fsldma_free_desc_list(chan, &chan->ld_pending); |
862 | fsldma_free_desc_list(chan, &chan->ld_running); | 978 | fsldma_free_desc_list(chan, &chan->ld_running); |
979 | fsldma_free_desc_list(chan, &chan->ld_completed); | ||
863 | chan->idle = true; | 980 | chan->idle = true; |
864 | 981 | ||
865 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 982 | spin_unlock_bh(&chan->desc_lock); |
866 | return 0; | 983 | return 0; |
867 | 984 | ||
868 | case DMA_SLAVE_CONFIG: | 985 | case DMA_SLAVE_CONFIG: |
@@ -904,11 +1021,10 @@ static int fsl_dma_device_control(struct dma_chan *dchan, | |||
904 | static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan) | 1021 | static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan) |
905 | { | 1022 | { |
906 | struct fsldma_chan *chan = to_fsl_chan(dchan); | 1023 | struct fsldma_chan *chan = to_fsl_chan(dchan); |
907 | unsigned long flags; | ||
908 | 1024 | ||
909 | spin_lock_irqsave(&chan->desc_lock, flags); | 1025 | spin_lock_bh(&chan->desc_lock); |
910 | fsl_chan_xfer_ld_queue(chan); | 1026 | fsl_chan_xfer_ld_queue(chan); |
911 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 1027 | spin_unlock_bh(&chan->desc_lock); |
912 | } | 1028 | } |
913 | 1029 | ||
914 | /** | 1030 | /** |
@@ -919,6 +1035,17 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan, | |||
919 | dma_cookie_t cookie, | 1035 | dma_cookie_t cookie, |
920 | struct dma_tx_state *txstate) | 1036 | struct dma_tx_state *txstate) |
921 | { | 1037 | { |
1038 | struct fsldma_chan *chan = to_fsl_chan(dchan); | ||
1039 | enum dma_status ret; | ||
1040 | |||
1041 | ret = dma_cookie_status(dchan, cookie, txstate); | ||
1042 | if (ret == DMA_COMPLETE) | ||
1043 | return ret; | ||
1044 | |||
1045 | spin_lock_bh(&chan->desc_lock); | ||
1046 | fsldma_cleanup_descriptors(chan); | ||
1047 | spin_unlock_bh(&chan->desc_lock); | ||
1048 | |||
922 | return dma_cookie_status(dchan, cookie, txstate); | 1049 | return dma_cookie_status(dchan, cookie, txstate); |
923 | } | 1050 | } |
924 | 1051 | ||
@@ -996,52 +1123,18 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) | |||
996 | static void dma_do_tasklet(unsigned long data) | 1123 | static void dma_do_tasklet(unsigned long data) |
997 | { | 1124 | { |
998 | struct fsldma_chan *chan = (struct fsldma_chan *)data; | 1125 | struct fsldma_chan *chan = (struct fsldma_chan *)data; |
999 | struct fsl_desc_sw *desc, *_desc; | ||
1000 | LIST_HEAD(ld_cleanup); | ||
1001 | unsigned long flags; | ||
1002 | 1126 | ||
1003 | chan_dbg(chan, "tasklet entry\n"); | 1127 | chan_dbg(chan, "tasklet entry\n"); |
1004 | 1128 | ||
1005 | spin_lock_irqsave(&chan->desc_lock, flags); | 1129 | spin_lock_bh(&chan->desc_lock); |
1006 | |||
1007 | /* update the cookie if we have some descriptors to cleanup */ | ||
1008 | if (!list_empty(&chan->ld_running)) { | ||
1009 | dma_cookie_t cookie; | ||
1010 | |||
1011 | desc = to_fsl_desc(chan->ld_running.prev); | ||
1012 | cookie = desc->async_tx.cookie; | ||
1013 | dma_cookie_complete(&desc->async_tx); | ||
1014 | |||
1015 | chan_dbg(chan, "completed_cookie=%d\n", cookie); | ||
1016 | } | ||
1017 | |||
1018 | /* | ||
1019 | * move the descriptors to a temporary list so we can drop the lock | ||
1020 | * during the entire cleanup operation | ||
1021 | */ | ||
1022 | list_splice_tail_init(&chan->ld_running, &ld_cleanup); | ||
1023 | 1130 | ||
1024 | /* the hardware is now idle and ready for more */ | 1131 | /* the hardware is now idle and ready for more */ |
1025 | chan->idle = true; | 1132 | chan->idle = true; |
1026 | 1133 | ||
1027 | /* | 1134 | /* Run all cleanup for descriptors which have been completed */ |
1028 | * Start any pending transactions automatically | 1135 | fsldma_cleanup_descriptors(chan); |
1029 | * | ||
1030 | * In the ideal case, we keep the DMA controller busy while we go | ||
1031 | * ahead and free the descriptors below. | ||
1032 | */ | ||
1033 | fsl_chan_xfer_ld_queue(chan); | ||
1034 | spin_unlock_irqrestore(&chan->desc_lock, flags); | ||
1035 | |||
1036 | /* Run the callback for each descriptor, in order */ | ||
1037 | list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) { | ||
1038 | |||
1039 | /* Remove from the list of transactions */ | ||
1040 | list_del(&desc->node); | ||
1041 | 1136 | ||
1042 | /* Run all cleanup for this descriptor */ | 1137 | spin_unlock_bh(&chan->desc_lock); |
1043 | fsldma_cleanup_descriptor(chan, desc); | ||
1044 | } | ||
1045 | 1138 | ||
1046 | chan_dbg(chan, "tasklet exit\n"); | 1139 | chan_dbg(chan, "tasklet exit\n"); |
1047 | } | 1140 | } |
@@ -1225,7 +1318,11 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev, | |||
1225 | spin_lock_init(&chan->desc_lock); | 1318 | spin_lock_init(&chan->desc_lock); |
1226 | INIT_LIST_HEAD(&chan->ld_pending); | 1319 | INIT_LIST_HEAD(&chan->ld_pending); |
1227 | INIT_LIST_HEAD(&chan->ld_running); | 1320 | INIT_LIST_HEAD(&chan->ld_running); |
1321 | INIT_LIST_HEAD(&chan->ld_completed); | ||
1228 | chan->idle = true; | 1322 | chan->idle = true; |
1323 | #ifdef CONFIG_PM | ||
1324 | chan->pm_state = RUNNING; | ||
1325 | #endif | ||
1229 | 1326 | ||
1230 | chan->common.device = &fdev->common; | 1327 | chan->common.device = &fdev->common; |
1231 | dma_cookie_init(&chan->common); | 1328 | dma_cookie_init(&chan->common); |
@@ -1365,6 +1462,69 @@ static int fsldma_of_remove(struct platform_device *op) | |||
1365 | return 0; | 1462 | return 0; |
1366 | } | 1463 | } |
1367 | 1464 | ||
1465 | #ifdef CONFIG_PM | ||
1466 | static int fsldma_suspend_late(struct device *dev) | ||
1467 | { | ||
1468 | struct platform_device *pdev = to_platform_device(dev); | ||
1469 | struct fsldma_device *fdev = platform_get_drvdata(pdev); | ||
1470 | struct fsldma_chan *chan; | ||
1471 | int i; | ||
1472 | |||
1473 | for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { | ||
1474 | chan = fdev->chan[i]; | ||
1475 | if (!chan) | ||
1476 | continue; | ||
1477 | |||
1478 | spin_lock_bh(&chan->desc_lock); | ||
1479 | if (unlikely(!chan->idle)) | ||
1480 | goto out; | ||
1481 | chan->regs_save.mr = get_mr(chan); | ||
1482 | chan->pm_state = SUSPENDED; | ||
1483 | spin_unlock_bh(&chan->desc_lock); | ||
1484 | } | ||
1485 | return 0; | ||
1486 | |||
1487 | out: | ||
1488 | for (; i >= 0; i--) { | ||
1489 | chan = fdev->chan[i]; | ||
1490 | if (!chan) | ||
1491 | continue; | ||
1492 | chan->pm_state = RUNNING; | ||
1493 | spin_unlock_bh(&chan->desc_lock); | ||
1494 | } | ||
1495 | return -EBUSY; | ||
1496 | } | ||
1497 | |||
1498 | static int fsldma_resume_early(struct device *dev) | ||
1499 | { | ||
1500 | struct platform_device *pdev = to_platform_device(dev); | ||
1501 | struct fsldma_device *fdev = platform_get_drvdata(pdev); | ||
1502 | struct fsldma_chan *chan; | ||
1503 | u32 mode; | ||
1504 | int i; | ||
1505 | |||
1506 | for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { | ||
1507 | chan = fdev->chan[i]; | ||
1508 | if (!chan) | ||
1509 | continue; | ||
1510 | |||
1511 | spin_lock_bh(&chan->desc_lock); | ||
1512 | mode = chan->regs_save.mr | ||
1513 | & ~FSL_DMA_MR_CS & ~FSL_DMA_MR_CC & ~FSL_DMA_MR_CA; | ||
1514 | set_mr(chan, mode); | ||
1515 | chan->pm_state = RUNNING; | ||
1516 | spin_unlock_bh(&chan->desc_lock); | ||
1517 | } | ||
1518 | |||
1519 | return 0; | ||
1520 | } | ||
1521 | |||
1522 | static const struct dev_pm_ops fsldma_pm_ops = { | ||
1523 | .suspend_late = fsldma_suspend_late, | ||
1524 | .resume_early = fsldma_resume_early, | ||
1525 | }; | ||
1526 | #endif | ||
1527 | |||
1368 | static const struct of_device_id fsldma_of_ids[] = { | 1528 | static const struct of_device_id fsldma_of_ids[] = { |
1369 | { .compatible = "fsl,elo3-dma", }, | 1529 | { .compatible = "fsl,elo3-dma", }, |
1370 | { .compatible = "fsl,eloplus-dma", }, | 1530 | { .compatible = "fsl,eloplus-dma", }, |
@@ -1377,6 +1537,9 @@ static struct platform_driver fsldma_of_driver = { | |||
1377 | .name = "fsl-elo-dma", | 1537 | .name = "fsl-elo-dma", |
1378 | .owner = THIS_MODULE, | 1538 | .owner = THIS_MODULE, |
1379 | .of_match_table = fsldma_of_ids, | 1539 | .of_match_table = fsldma_of_ids, |
1540 | #ifdef CONFIG_PM | ||
1541 | .pm = &fsldma_pm_ops, | ||
1542 | #endif | ||
1380 | }, | 1543 | }, |
1381 | .probe = fsldma_of_probe, | 1544 | .probe = fsldma_of_probe, |
1382 | .remove = fsldma_of_remove, | 1545 | .remove = fsldma_of_remove, |
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index d56e83599825..239c20c84382 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h | |||
@@ -134,12 +134,36 @@ struct fsldma_device { | |||
134 | #define FSL_DMA_CHAN_PAUSE_EXT 0x00001000 | 134 | #define FSL_DMA_CHAN_PAUSE_EXT 0x00001000 |
135 | #define FSL_DMA_CHAN_START_EXT 0x00002000 | 135 | #define FSL_DMA_CHAN_START_EXT 0x00002000 |
136 | 136 | ||
137 | #ifdef CONFIG_PM | ||
138 | struct fsldma_chan_regs_save { | ||
139 | u32 mr; | ||
140 | }; | ||
141 | |||
142 | enum fsldma_pm_state { | ||
143 | RUNNING = 0, | ||
144 | SUSPENDED, | ||
145 | }; | ||
146 | #endif | ||
147 | |||
137 | struct fsldma_chan { | 148 | struct fsldma_chan { |
138 | char name[8]; /* Channel name */ | 149 | char name[8]; /* Channel name */ |
139 | struct fsldma_chan_regs __iomem *regs; | 150 | struct fsldma_chan_regs __iomem *regs; |
140 | spinlock_t desc_lock; /* Descriptor operation lock */ | 151 | spinlock_t desc_lock; /* Descriptor operation lock */ |
141 | struct list_head ld_pending; /* Link descriptors queue */ | 152 | /* |
142 | struct list_head ld_running; /* Link descriptors queue */ | 153 | * Descriptors which are queued to run, but have not yet been |
154 | * submitted to the hardware for execution | ||
155 | */ | ||
156 | struct list_head ld_pending; | ||
157 | /* | ||
158 | * Descriptors which are currently being executed by the hardware | ||
159 | */ | ||
160 | struct list_head ld_running; | ||
161 | /* | ||
162 | * Descriptors which have finished execution by the hardware. These | ||
163 | * descriptors have already had their cleanup actions run. They are | ||
164 | * waiting for the ACK bit to be set by the async_tx API. | ||
165 | */ | ||
166 | struct list_head ld_completed; /* Link descriptors queue */ | ||
143 | struct dma_chan common; /* DMA common channel */ | 167 | struct dma_chan common; /* DMA common channel */ |
144 | struct dma_pool *desc_pool; /* Descriptors pool */ | 168 | struct dma_pool *desc_pool; /* Descriptors pool */ |
145 | struct device *dev; /* Channel device */ | 169 | struct device *dev; /* Channel device */ |
@@ -148,6 +172,10 @@ struct fsldma_chan { | |||
148 | struct tasklet_struct tasklet; | 172 | struct tasklet_struct tasklet; |
149 | u32 feature; | 173 | u32 feature; |
150 | bool idle; /* DMA controller is idle */ | 174 | bool idle; /* DMA controller is idle */ |
175 | #ifdef CONFIG_PM | ||
176 | struct fsldma_chan_regs_save regs_save; | ||
177 | enum fsldma_pm_state pm_state; | ||
178 | #endif | ||
151 | 179 | ||
152 | void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable); | 180 | void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable); |
153 | void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable); | 181 | void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable); |
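
With ld_completed in place, an fsldma descriptor passes through three stages, and every free funnels through fsldma_clean_completed_descriptor(). A hedged illustration (the enum is hypothetical; the driver tracks the stage purely by list membership):

	/*
	 *  tx_submit          hw start          hw done + callbacks
	 * ld_pending  ---->  ld_running  ---->  ld_completed  ----> freed
	 *                                       (waits for async_tx ACK)
	 */
	enum fsldma_desc_stage {
		DESC_PENDING,	/* cookie assigned, not yet on hardware */
		DESC_RUNNING,	/* loaded into the hardware channel */
		DESC_COMPLETED,	/* callbacks run, awaiting ACK before free */
	};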
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 286660a12cc6..9d2c9e7374dc 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c | |||
@@ -866,7 +866,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( | |||
866 | static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( | 866 | static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( |
867 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, | 867 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, |
868 | size_t period_len, enum dma_transfer_direction direction, | 868 | size_t period_len, enum dma_transfer_direction direction, |
869 | unsigned long flags, void *context) | 869 | unsigned long flags) |
870 | { | 870 | { |
871 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | 871 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); |
872 | struct imxdma_engine *imxdma = imxdmac->imxdma; | 872 | struct imxdma_engine *imxdma = imxdmac->imxdma; |
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 14867e3ac8ff..f7626e37d0b8 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
@@ -271,6 +271,7 @@ struct sdma_channel { | |||
271 | unsigned int chn_count; | 271 | unsigned int chn_count; |
272 | unsigned int chn_real_count; | 272 | unsigned int chn_real_count; |
273 | struct tasklet_struct tasklet; | 273 | struct tasklet_struct tasklet; |
274 | struct imx_dma_data data; | ||
274 | }; | 275 | }; |
275 | 276 | ||
276 | #define IMX_DMA_SG_LOOP BIT(0) | 277 | #define IMX_DMA_SG_LOOP BIT(0) |
@@ -749,6 +750,11 @@ static void sdma_get_pc(struct sdma_channel *sdmac, | |||
749 | emi_2_per = sdma->script_addrs->asrc_2_mcu_addr; | 750 | emi_2_per = sdma->script_addrs->asrc_2_mcu_addr; |
750 | per_2_per = sdma->script_addrs->per_2_per_addr; | 751 | per_2_per = sdma->script_addrs->per_2_per_addr; |
751 | break; | 752 | break; |
753 | case IMX_DMATYPE_ASRC_SP: | ||
754 | per_2_emi = sdma->script_addrs->shp_2_mcu_addr; | ||
755 | emi_2_per = sdma->script_addrs->mcu_2_shp_addr; | ||
756 | per_2_per = sdma->script_addrs->per_2_per_addr; | ||
757 | break; | ||
752 | case IMX_DMATYPE_MSHC: | 758 | case IMX_DMATYPE_MSHC: |
753 | per_2_emi = sdma->script_addrs->mshc_2_mcu_addr; | 759 | per_2_emi = sdma->script_addrs->mshc_2_mcu_addr; |
754 | emi_2_per = sdma->script_addrs->mcu_2_mshc_addr; | 760 | emi_2_per = sdma->script_addrs->mcu_2_mshc_addr; |
@@ -911,14 +917,13 @@ static int sdma_request_channel(struct sdma_channel *sdmac) | |||
911 | int channel = sdmac->channel; | 917 | int channel = sdmac->channel; |
912 | int ret = -EBUSY; | 918 | int ret = -EBUSY; |
913 | 919 | ||
914 | sdmac->bd = dma_alloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, GFP_KERNEL); | 920 | sdmac->bd = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, |
921 | GFP_KERNEL); | ||
915 | if (!sdmac->bd) { | 922 | if (!sdmac->bd) { |
916 | ret = -ENOMEM; | 923 | ret = -ENOMEM; |
917 | goto out; | 924 | goto out; |
918 | } | 925 | } |
919 | 926 | ||
920 | memset(sdmac->bd, 0, PAGE_SIZE); | ||
921 | |||
922 | sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys; | 927 | sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys; |
923 | sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; | 928 | sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; |
924 | 929 | ||
@@ -1120,7 +1125,7 @@ err_out: | |||
1120 | static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( | 1125 | static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( |
1121 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, | 1126 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, |
1122 | size_t period_len, enum dma_transfer_direction direction, | 1127 | size_t period_len, enum dma_transfer_direction direction, |
1123 | unsigned long flags, void *context) | 1128 | unsigned long flags) |
1124 | { | 1129 | { |
1125 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 1130 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
1126 | struct sdma_engine *sdma = sdmac->sdma; | 1131 | struct sdma_engine *sdma = sdmac->sdma; |
@@ -1414,12 +1419,14 @@ err_dma_alloc: | |||
1414 | 1419 | ||
1415 | static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param) | 1420 | static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param) |
1416 | { | 1421 | { |
1422 | struct sdma_channel *sdmac = to_sdma_chan(chan); | ||
1417 | struct imx_dma_data *data = fn_param; | 1423 | struct imx_dma_data *data = fn_param; |
1418 | 1424 | ||
1419 | if (!imx_dma_is_general_purpose(chan)) | 1425 | if (!imx_dma_is_general_purpose(chan)) |
1420 | return false; | 1426 | return false; |
1421 | 1427 | ||
1422 | chan->private = data; | 1428 | sdmac->data = *data; |
1429 | chan->private = &sdmac->data; | ||
1423 | 1430 | ||
1424 | return true; | 1431 | return true; |
1425 | } | 1432 | } |
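
The imx-sdma filter change closes a use-after-scope hazard: the caller's imx_dma_data is often a stack variable, and chan->private used to keep pointing at it after dma_request_channel() returned; the channel now holds its own copy. A hedged sketch of the calling pattern that made this dangerous (field values are illustrative; in-tree users normally reach the filter through platform/OF glue rather than by name):

	struct imx_dma_data data = {	/* stack-allocated, goes out of scope */
		.dma_request = 1,	/* example request line */
		.peripheral_type = IMX_DMATYPE_SSI,
		.priority = DMA_PRIO_HIGH,
	};
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, sdma_filter_fn, &data);
	/* safe now: the channel copied 'data' instead of caching &data */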
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index 128ca143486d..bbf62927bd72 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c | |||
@@ -1532,11 +1532,17 @@ static int idmac_alloc_chan_resources(struct dma_chan *chan) | |||
1532 | #ifdef DEBUG | 1532 | #ifdef DEBUG |
1533 | if (chan->chan_id == IDMAC_IC_7) { | 1533 | if (chan->chan_id == IDMAC_IC_7) { |
1534 | ic_sof = ipu_irq_map(69); | 1534 | ic_sof = ipu_irq_map(69); |
1535 | if (ic_sof > 0) | 1535 | if (ic_sof > 0) { |
1536 | request_irq(ic_sof, ic_sof_irq, 0, "IC SOF", ichan); | 1536 | ret = request_irq(ic_sof, ic_sof_irq, 0, "IC SOF", ichan); |
1537 | if (ret) | ||
1538 | dev_err(&chan->dev->device, "request irq failed for IC SOF"); | ||
1539 | } | ||
1537 | ic_eof = ipu_irq_map(70); | 1540 | ic_eof = ipu_irq_map(70); |
1538 | if (ic_eof > 0) | 1541 | if (ic_eof > 0) { |
1539 | request_irq(ic_eof, ic_eof_irq, 0, "IC EOF", ichan); | 1542 | ret = request_irq(ic_eof, ic_eof_irq, 0, "IC EOF", ichan); |
1543 | if (ret) | ||
1544 | dev_err(&chan->dev->device, "request irq failed for IC EOF"); | ||
1545 | } | ||
1540 | } | 1546 | } |
1541 | #endif | 1547 | #endif |
1542 | 1548 | ||
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index a7b186d536b3..a1a4db5721b8 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c | |||
@@ -601,7 +601,7 @@ static struct dma_async_tx_descriptor * | |||
601 | mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan, | 601 | mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan, |
602 | dma_addr_t buf_addr, size_t len, size_t period_len, | 602 | dma_addr_t buf_addr, size_t len, size_t period_len, |
603 | enum dma_transfer_direction direction, | 603 | enum dma_transfer_direction direction, |
604 | unsigned long flags, void *context) | 604 | unsigned long flags) |
605 | { | 605 | { |
606 | struct mmp_pdma_chan *chan; | 606 | struct mmp_pdma_chan *chan; |
607 | struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; | 607 | struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; |
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index 724f7f4c9720..6ad30e2c5038 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c | |||
@@ -389,7 +389,7 @@ struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac) | |||
389 | static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic( | 389 | static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic( |
390 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, | 390 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, |
391 | size_t period_len, enum dma_transfer_direction direction, | 391 | size_t period_len, enum dma_transfer_direction direction, |
392 | unsigned long flags, void *context) | 392 | unsigned long flags) |
393 | { | 393 | { |
394 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); | 394 | struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); |
395 | struct mmp_tdma_desc *desc; | 395 | struct mmp_tdma_desc *desc; |
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index 2ad43738ac8b..881db2bcb48b 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c | |||
@@ -53,6 +53,7 @@ | |||
53 | #include <linux/of_address.h> | 53 | #include <linux/of_address.h> |
54 | #include <linux/of_device.h> | 54 | #include <linux/of_device.h> |
55 | #include <linux/of_irq.h> | 55 | #include <linux/of_irq.h> |
56 | #include <linux/of_dma.h> | ||
56 | #include <linux/of_platform.h> | 57 | #include <linux/of_platform.h> |
57 | 58 | ||
58 | #include <linux/random.h> | 59 | #include <linux/random.h> |
@@ -1036,7 +1037,15 @@ static int mpc_dma_probe(struct platform_device *op) | |||
1036 | if (retval) | 1037 | if (retval) |
1037 | goto err_free2; | 1038 | goto err_free2; |
1038 | 1039 | ||
1039 | return retval; | 1040 | /* Register with OF helpers for DMA lookups (nonfatal) */ |
1041 | if (dev->of_node) { | ||
1042 | retval = of_dma_controller_register(dev->of_node, | ||
1043 | of_dma_xlate_by_chan_id, mdma); | ||
1044 | if (retval) | ||
1045 | dev_warn(dev, "Could not register for OF lookup\n"); | ||
1046 | } | ||
1047 | |||
1048 | return 0; | ||
1040 | 1049 | ||
1041 | err_free2: | 1050 | err_free2: |
1042 | if (mdma->is_mpc8308) | 1051 | if (mdma->is_mpc8308) |
@@ -1057,6 +1066,8 @@ static int mpc_dma_remove(struct platform_device *op) | |||
1057 | struct device *dev = &op->dev; | 1066 | struct device *dev = &op->dev; |
1058 | struct mpc_dma *mdma = dev_get_drvdata(dev); | 1067 | struct mpc_dma *mdma = dev_get_drvdata(dev); |
1059 | 1068 | ||
1069 | if (dev->of_node) | ||
1070 | of_dma_controller_free(dev->of_node); | ||
1060 | dma_async_device_unregister(&mdma->dma); | 1071 | dma_async_device_unregister(&mdma->dma); |
1061 | if (mdma->is_mpc8308) { | 1072 | if (mdma->is_mpc8308) { |
1062 | free_irq(mdma->irq2, mdma); | 1073 | free_irq(mdma->irq2, mdma); |
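
With of_dma_xlate_by_chan_id registered (non-fatally, so the driver keeps working on boards without the DT node), MPC512x/MPC8308 clients can resolve channels straight from their one-cell dmas specifier. A hedged consumer-side sketch, assuming a client node carrying dmas = <&dma0 2> and dma-names = "rx":

	struct dma_chan *chan;

	chan = dma_request_slave_channel(&pdev->dev, "rx");
	if (!chan)
		dev_warn(&pdev->dev, "no DMA channel, falling back to PIO\n");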
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index ead491346da7..5ea61201dbf0 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c | |||
@@ -413,16 +413,14 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) | |||
413 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | 413 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
414 | int ret; | 414 | int ret; |
415 | 415 | ||
416 | mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, | 416 | mxs_chan->ccw = dma_zalloc_coherent(mxs_dma->dma_device.dev, |
417 | CCW_BLOCK_SIZE, &mxs_chan->ccw_phys, | 417 | CCW_BLOCK_SIZE, |
418 | GFP_KERNEL); | 418 | &mxs_chan->ccw_phys, GFP_KERNEL); |
419 | if (!mxs_chan->ccw) { | 419 | if (!mxs_chan->ccw) { |
420 | ret = -ENOMEM; | 420 | ret = -ENOMEM; |
421 | goto err_alloc; | 421 | goto err_alloc; |
422 | } | 422 | } |
423 | 423 | ||
424 | memset(mxs_chan->ccw, 0, CCW_BLOCK_SIZE); | ||
425 | |||
426 | if (mxs_chan->chan_irq != NO_IRQ) { | 424 | if (mxs_chan->chan_irq != NO_IRQ) { |
427 | ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, | 425 | ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, |
428 | 0, "mxs-dma", mxs_dma); | 426 | 0, "mxs-dma", mxs_dma); |
@@ -591,7 +589,7 @@ err_out: | |||
591 | static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( | 589 | static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( |
592 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, | 590 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, |
593 | size_t period_len, enum dma_transfer_direction direction, | 591 | size_t period_len, enum dma_transfer_direction direction, |
594 | unsigned long flags, void *context) | 592 | unsigned long flags) |
595 | { | 593 | { |
596 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | 594 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); |
597 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | 595 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
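
The mxs-dma hunk is a plain conversion to dma_zalloc_coherent(), which folds the allocation and the follow-up memset() into one call. The two forms are equivalent:

	/* before: two steps */
	ccw = dma_alloc_coherent(dev, CCW_BLOCK_SIZE, &phys, GFP_KERNEL);
	if (ccw)
		memset(ccw, 0, CCW_BLOCK_SIZE);

	/* after: one step, same zeroed buffer */
	ccw = dma_zalloc_coherent(dev, CCW_BLOCK_SIZE, &phys, GFP_KERNEL);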
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c new file mode 100644 index 000000000000..5aeada56a442 --- /dev/null +++ b/drivers/dma/nbpfaxi.c | |||
@@ -0,0 +1,1517 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd. | ||
3 | * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of version 2 of the GNU General Public License as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | |||
10 | #include <linux/bitmap.h> | ||
11 | #include <linux/bitops.h> | ||
12 | #include <linux/clk.h> | ||
13 | #include <linux/dma-mapping.h> | ||
14 | #include <linux/dmaengine.h> | ||
15 | #include <linux/err.h> | ||
16 | #include <linux/interrupt.h> | ||
17 | #include <linux/io.h> | ||
18 | #include <linux/log2.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/of.h> | ||
21 | #include <linux/of_device.h> | ||
22 | #include <linux/of_dma.h> | ||
23 | #include <linux/platform_device.h> | ||
24 | #include <linux/slab.h> | ||
25 | |||
26 | #include <dt-bindings/dma/nbpfaxi.h> | ||
27 | |||
28 | #include "dmaengine.h" | ||
29 | |||
30 | #define NBPF_REG_CHAN_OFFSET 0 | ||
31 | #define NBPF_REG_CHAN_SIZE 0x40 | ||
32 | |||
33 | /* Channel Current Transaction Byte register */ | ||
34 | #define NBPF_CHAN_CUR_TR_BYTE 0x20 | ||
35 | |||
36 | /* Channel Status register */ | ||
37 | #define NBPF_CHAN_STAT 0x24 | ||
38 | #define NBPF_CHAN_STAT_EN 1 | ||
39 | #define NBPF_CHAN_STAT_TACT 4 | ||
40 | #define NBPF_CHAN_STAT_ERR 0x10 | ||
41 | #define NBPF_CHAN_STAT_END 0x20 | ||
42 | #define NBPF_CHAN_STAT_TC 0x40 | ||
43 | #define NBPF_CHAN_STAT_DER 0x400 | ||
44 | |||
45 | /* Channel Control register */ | ||
46 | #define NBPF_CHAN_CTRL 0x28 | ||
47 | #define NBPF_CHAN_CTRL_SETEN 1 | ||
48 | #define NBPF_CHAN_CTRL_CLREN 2 | ||
49 | #define NBPF_CHAN_CTRL_STG 4 | ||
50 | #define NBPF_CHAN_CTRL_SWRST 8 | ||
51 | #define NBPF_CHAN_CTRL_CLRRQ 0x10 | ||
52 | #define NBPF_CHAN_CTRL_CLREND 0x20 | ||
53 | #define NBPF_CHAN_CTRL_CLRTC 0x40 | ||
54 | #define NBPF_CHAN_CTRL_SETSUS 0x100 | ||
55 | #define NBPF_CHAN_CTRL_CLRSUS 0x200 | ||
56 | |||
57 | /* Channel Configuration register */ | ||
58 | #define NBPF_CHAN_CFG 0x2c | ||
59 | #define NBPF_CHAN_CFG_SEL 7 /* terminal SELect: 0..7 */ | ||
60 | #define NBPF_CHAN_CFG_REQD 8 /* REQuest Direction: DMAREQ is 0: input, 1: output */ | ||
61 | #define NBPF_CHAN_CFG_LOEN 0x10 /* LOw ENable: low DMA request line is: 0: inactive, 1: active */ | ||
62 | #define NBPF_CHAN_CFG_HIEN 0x20 /* HIgh ENable: high DMA request line is: 0: inactive, 1: active */ | ||
63 | #define NBPF_CHAN_CFG_LVL 0x40 /* LeVeL: DMA request line is sensed as 0: edge, 1: level */ | ||
64 | #define NBPF_CHAN_CFG_AM 0x700 /* ACK Mode: 0: Pulse mode, 1: Level mode, b'1x: Bus Cycle */ | ||
65 | #define NBPF_CHAN_CFG_SDS 0xf000 /* Source Data Size: 0: 8 bits,... , 7: 1024 bits */ | ||
66 | #define NBPF_CHAN_CFG_DDS 0xf0000 /* Destination Data Size: as above */ | ||
67 | #define NBPF_CHAN_CFG_SAD 0x100000 /* Source ADdress counting: 0: increment, 1: fixed */ | ||
68 | #define NBPF_CHAN_CFG_DAD 0x200000 /* Destination ADdress counting: 0: increment, 1: fixed */ | ||
69 | #define NBPF_CHAN_CFG_TM 0x400000 /* Transfer Mode: 0: single, 1: block TM */ | ||
70 | #define NBPF_CHAN_CFG_DEM 0x1000000 /* DMAEND interrupt Mask */ | ||
71 | #define NBPF_CHAN_CFG_TCM 0x2000000 /* DMATCO interrupt Mask */ | ||
72 | #define NBPF_CHAN_CFG_SBE 0x8000000 /* Sweep Buffer Enable */ | ||
73 | #define NBPF_CHAN_CFG_RSEL 0x10000000 /* RM: Register Set sELect */ | ||
74 | #define NBPF_CHAN_CFG_RSW 0x20000000 /* RM: Register Select sWitch */ | ||
75 | #define NBPF_CHAN_CFG_REN 0x40000000 /* RM: Register Set Enable */ | ||
76 | #define NBPF_CHAN_CFG_DMS 0x80000000 /* 0: register mode (RM), 1: link mode (LM) */ | ||
77 | |||
78 | #define NBPF_CHAN_NXLA 0x38 | ||
79 | #define NBPF_CHAN_CRLA 0x3c | ||
80 | |||
81 | /* Link Header field */ | ||
82 | #define NBPF_HEADER_LV 1 | ||
83 | #define NBPF_HEADER_LE 2 | ||
84 | #define NBPF_HEADER_WBD 4 | ||
85 | #define NBPF_HEADER_DIM 8 | ||
86 | |||
87 | #define NBPF_CTRL 0x300 | ||
88 | #define NBPF_CTRL_PR 1 /* 0: fixed priority, 1: round robin */ | ||
89 | #define NBPF_CTRL_LVINT 2 /* DMAEND and DMAERR signalling: 0: pulse, 1: level */ | ||
90 | |||
91 | #define NBPF_DSTAT_ER 0x314 | ||
92 | #define NBPF_DSTAT_END 0x318 | ||
93 | |||
94 | #define NBPF_DMA_BUSWIDTHS \ | ||
95 | (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ | ||
96 | BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ | ||
97 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ | ||
98 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ | ||
99 | BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) | ||
100 | |||
101 | struct nbpf_config { | ||
102 | int num_channels; | ||
103 | int buffer_size; | ||
104 | }; | ||
105 | |||
106 | /* | ||
107 | * We've got 3 types of objects, used to describe DMA transfers: | ||
108 | * 1. high-level descriptor, containing a struct dma_async_tx_descriptor object | ||
109 | * in it, used to communicate with the user | ||
110 | * 2. hardware DMA link descriptors that we pass to the DMAC for DMA transfer | ||
111 | * queuing; these must be DMAable, using either the streaming DMA API or | ||
112 | * allocated from coherent memory - one per SG segment | ||
113 | * 3. per-SG-segment descriptors, used to manage the HW link descriptors from | ||
114 | * (2). They do not have to be DMAable. They can either be (a) allocated | ||
115 | * together with link descriptors as mixed (DMA / CPU) objects, or (b) | ||
116 | * separately. Even if allocated separately it would be best to link them | ||
117 | * to link descriptors once during channel resource allocation and always | ||
118 | * use them as a single object. | ||
119 | * Therefore for both cases (a) and (b) at run-time objects (2) and (3) shall be | ||
120 | * treated as a single SG segment descriptor. | ||
121 | */ | ||
122 | |||
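/*
 * Layout of one hardware link descriptor as fetched by the DMAC: eight
 * 32-bit words (32 bytes, __packed), reached through the address that is
 * programmed into NBPF_CHAN_NXLA.
 */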
123 | struct nbpf_link_reg { | ||
124 | u32 header; | ||
125 | u32 src_addr; | ||
126 | u32 dst_addr; | ||
127 | u32 transaction_size; | ||
128 | u32 config; | ||
129 | u32 interval; | ||
130 | u32 extension; | ||
131 | u32 next; | ||
132 | } __packed; | ||
133 | |||
134 | struct nbpf_device; | ||
135 | struct nbpf_channel; | ||
136 | struct nbpf_desc; | ||
137 | |||
138 | struct nbpf_link_desc { | ||
139 | struct nbpf_link_reg *hwdesc; | ||
140 | dma_addr_t hwdesc_dma_addr; | ||
141 | struct nbpf_desc *desc; | ||
142 | struct list_head node; | ||
143 | }; | ||
144 | |||
145 | /** | ||
146 | * struct nbpf_desc - DMA transfer descriptor | ||
147 | * @async_tx: dmaengine object | ||
148 | * @user_wait: waiting for a user ack | ||
149 | * @length: total transfer length | ||
| * @chan: the channel this descriptor belongs to | ||
150 | * @sg: list of hardware descriptors, represented by struct nbpf_link_desc | ||
151 | * @node: member in channel descriptor lists | ||
152 | */ | ||
153 | struct nbpf_desc { | ||
154 | struct dma_async_tx_descriptor async_tx; | ||
155 | bool user_wait; | ||
156 | size_t length; | ||
157 | struct nbpf_channel *chan; | ||
158 | struct list_head sg; | ||
159 | struct list_head node; | ||
160 | }; | ||
161 | |||
162 | /* Take a wild guess: allocate 4 segments per descriptor */ | ||
163 | #define NBPF_SEGMENTS_PER_DESC 4 | ||
164 | #define NBPF_DESCS_PER_PAGE ((PAGE_SIZE - sizeof(struct list_head)) / \ | ||
165 | (sizeof(struct nbpf_desc) + \ | ||
166 | NBPF_SEGMENTS_PER_DESC * \ | ||
167 | (sizeof(struct nbpf_link_desc) + sizeof(struct nbpf_link_reg)))) | ||
168 | #define NBPF_SEGMENTS_PER_PAGE (NBPF_SEGMENTS_PER_DESC * NBPF_DESCS_PER_PAGE) | ||
169 | |||
170 | struct nbpf_desc_page { | ||
171 | struct list_head node; | ||
172 | struct nbpf_desc desc[NBPF_DESCS_PER_PAGE]; | ||
173 | struct nbpf_link_desc ldesc[NBPF_SEGMENTS_PER_PAGE]; | ||
174 | struct nbpf_link_reg hwdesc[NBPF_SEGMENTS_PER_PAGE]; | ||
175 | }; | ||
176 | |||
177 | /** | ||
178 | * struct nbpf_channel - one DMAC channel | ||
179 | * @dma_chan: standard dmaengine channel object | ||
180 | * @base: register address base | ||
181 | * @nbpf: DMAC | ||
182 | * @name: IRQ name | ||
183 | * @irq: IRQ number | ||
184 | * @slave_src_addr: source address for slave DMA | ||
185 | * @slave_src_width: source slave data size, stored as a log2 size code | ||
186 | * @slave_src_burst: maximum source slave burst, stored as a log2 size code | ||
| * @slave_dst_addr: destination address for slave DMA | ||
| * @slave_dst_width: destination slave data size, stored as a log2 size code | ||
| * @slave_dst_burst: maximum destination slave burst, stored as a log2 size code | ||
187 | * @terminal: DMA terminal, assigned to this channel | ||
188 | * @dmarq_cfg: DMA request line configuration - high / low, edge / level for NBPF_CHAN_CFG | ||
189 | * @flags: configuration flags from DT | ||
190 | * @lock: protect descriptor lists | ||
191 | * @free_links: list of free link descriptors | ||
192 | * @free: list of free descriptors | ||
193 | * @queued: list of queued descriptors | ||
194 | * @active: list of descriptors, scheduled for processing | ||
195 | * @done: list of completed descriptors, waiting post-processing | ||
196 | * @desc_page: list of additionally allocated descriptor pages - if any | ||
197 | */ | ||
198 | struct nbpf_channel { | ||
199 | struct dma_chan dma_chan; | ||
200 | struct tasklet_struct tasklet; | ||
201 | void __iomem *base; | ||
202 | struct nbpf_device *nbpf; | ||
203 | char name[16]; | ||
204 | int irq; | ||
205 | dma_addr_t slave_src_addr; | ||
206 | size_t slave_src_width; | ||
207 | size_t slave_src_burst; | ||
208 | dma_addr_t slave_dst_addr; | ||
209 | size_t slave_dst_width; | ||
210 | size_t slave_dst_burst; | ||
211 | unsigned int terminal; | ||
212 | u32 dmarq_cfg; | ||
213 | unsigned long flags; | ||
214 | spinlock_t lock; | ||
215 | struct list_head free_links; | ||
216 | struct list_head free; | ||
217 | struct list_head queued; | ||
218 | struct list_head active; | ||
219 | struct list_head done; | ||
220 | struct list_head desc_page; | ||
221 | struct nbpf_desc *running; | ||
222 | bool paused; | ||
223 | }; | ||
224 | |||
225 | struct nbpf_device { | ||
226 | struct dma_device dma_dev; | ||
227 | void __iomem *base; | ||
228 | struct clk *clk; | ||
229 | const struct nbpf_config *config; | ||
230 | struct nbpf_channel chan[]; | ||
231 | }; | ||
232 | |||
233 | enum nbpf_model { | ||
234 | NBPF1B4, | ||
235 | NBPF1B8, | ||
236 | NBPF1B16, | ||
237 | NBPF4B4, | ||
238 | NBPF4B8, | ||
239 | NBPF4B16, | ||
240 | NBPF8B4, | ||
241 | NBPF8B8, | ||
242 | NBPF8B16, | ||
243 | }; | ||
244 | |||
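/*
 * Model naming: NBPF<c>B<b> = <c> DMA channels with a <b>-byte hardware
 * buffer, mirroring the "renesas,nbpfaxi64dmac<c>b<b>" compatible strings
 * in nbpf_match[] below.
 */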
245 | static struct nbpf_config nbpf_cfg[] = { | ||
246 | [NBPF1B4] = { | ||
247 | .num_channels = 1, | ||
248 | .buffer_size = 4, | ||
249 | }, | ||
250 | [NBPF1B8] = { | ||
251 | .num_channels = 1, | ||
252 | .buffer_size = 8, | ||
253 | }, | ||
254 | [NBPF1B16] = { | ||
255 | .num_channels = 1, | ||
256 | .buffer_size = 16, | ||
257 | }, | ||
258 | [NBPF4B4] = { | ||
259 | .num_channels = 4, | ||
260 | .buffer_size = 4, | ||
261 | }, | ||
262 | [NBPF4B8] = { | ||
263 | .num_channels = 4, | ||
264 | .buffer_size = 8, | ||
265 | }, | ||
266 | [NBPF4B16] = { | ||
267 | .num_channels = 4, | ||
268 | .buffer_size = 16, | ||
269 | }, | ||
270 | [NBPF8B4] = { | ||
271 | .num_channels = 8, | ||
272 | .buffer_size = 4, | ||
273 | }, | ||
274 | [NBPF8B8] = { | ||
275 | .num_channels = 8, | ||
276 | .buffer_size = 8, | ||
277 | }, | ||
278 | [NBPF8B16] = { | ||
279 | .num_channels = 8, | ||
280 | .buffer_size = 16, | ||
281 | }, | ||
282 | }; | ||
283 | |||
284 | #define nbpf_to_chan(d) container_of(d, struct nbpf_channel, dma_chan) | ||
285 | |||
286 | /* | ||
287 | * dmaengine drivers seem to have a lot in common and instead of sharing more | ||
288 | * code, they reimplement those common algorithms independently. In this driver | ||
289 | * we try to separate the hardware-specific part from the (largely) generic | ||
290 | * part. This improves code readability and makes it possible in the future to | ||
291 | * reuse the generic code in the form of a helper library. That generic code should | ||
292 | * be suitable for various DMA controllers, using transfer descriptors in RAM | ||
293 | * and pushing one SG list at a time to the DMA controller. | ||
294 | */ | ||
295 | |||
296 | /* Hardware-specific part */ | ||
297 | |||
298 | static inline u32 nbpf_chan_read(struct nbpf_channel *chan, | ||
299 | unsigned int offset) | ||
300 | { | ||
301 | u32 data = ioread32(chan->base + offset); | ||
302 | dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n", | ||
303 | __func__, chan->base, offset, data); | ||
304 | return data; | ||
305 | } | ||
306 | |||
307 | static inline void nbpf_chan_write(struct nbpf_channel *chan, | ||
308 | unsigned int offset, u32 data) | ||
309 | { | ||
310 | iowrite32(data, chan->base + offset); | ||
311 | dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n", | ||
312 | __func__, chan->base, offset, data); | ||
313 | } | ||
314 | |||
315 | static inline u32 nbpf_read(struct nbpf_device *nbpf, | ||
316 | unsigned int offset) | ||
317 | { | ||
318 | u32 data = ioread32(nbpf->base + offset); | ||
319 | dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n", | ||
320 | __func__, nbpf->base, offset, data); | ||
321 | return data; | ||
322 | } | ||
323 | |||
324 | static inline void nbpf_write(struct nbpf_device *nbpf, | ||
325 | unsigned int offset, u32 data) | ||
326 | { | ||
327 | iowrite32(data, nbpf->base + offset); | ||
328 | dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n", | ||
329 | __func__, nbpf->base, offset, data); | ||
330 | } | ||
331 | |||
332 | static void nbpf_chan_halt(struct nbpf_channel *chan) | ||
333 | { | ||
334 | nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN); | ||
335 | } | ||
336 | |||
337 | static bool nbpf_status_get(struct nbpf_channel *chan) | ||
338 | { | ||
339 | u32 status = nbpf_read(chan->nbpf, NBPF_DSTAT_END); | ||
340 | |||
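	/*
	 * One END status bit per channel: the bit index equals the channel's
	 * position in the nbpf->chan[] array.
	 */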
341 | return status & BIT(chan - chan->nbpf->chan); | ||
342 | } | ||
343 | |||
344 | static void nbpf_status_ack(struct nbpf_channel *chan) | ||
345 | { | ||
346 | nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREND); | ||
347 | } | ||
348 | |||
349 | static u32 nbpf_error_get(struct nbpf_device *nbpf) | ||
350 | { | ||
351 | return nbpf_read(nbpf, NBPF_DSTAT_ER); | ||
352 | } | ||
353 | |||
354 | static struct nbpf_channel *nbpf_error_get_channel(struct nbpf_device *nbpf, u32 error) | ||
355 | { | ||
356 | return nbpf->chan + __ffs(error); | ||
357 | } | ||
358 | |||
359 | static void nbpf_error_clear(struct nbpf_channel *chan) | ||
360 | { | ||
361 | u32 status; | ||
362 | int i; | ||
363 | |||
364 | /* Stop the channel, make sure DMA has been aborted */ | ||
365 | nbpf_chan_halt(chan); | ||
366 | |||
367 | for (i = 1000; i; i--) { | ||
368 | status = nbpf_chan_read(chan, NBPF_CHAN_STAT); | ||
369 | if (!(status & NBPF_CHAN_STAT_TACT)) | ||
370 | break; | ||
371 | cpu_relax(); | ||
372 | } | ||
373 | |||
374 | if (!i) | ||
375 | dev_err(chan->dma_chan.device->dev, | ||
376 | "%s(): abort timeout, channel status 0x%x\n", __func__, status); | ||
377 | |||
378 | nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SWRST); | ||
379 | } | ||
380 | |||
381 | static int nbpf_start(struct nbpf_desc *desc) | ||
382 | { | ||
383 | struct nbpf_channel *chan = desc->chan; | ||
384 | struct nbpf_link_desc *ldesc = list_first_entry(&desc->sg, struct nbpf_link_desc, node); | ||
385 | |||
386 | nbpf_chan_write(chan, NBPF_CHAN_NXLA, (u32)ldesc->hwdesc_dma_addr); | ||
387 | nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETEN | NBPF_CHAN_CTRL_CLRSUS); | ||
388 | chan->paused = false; | ||
389 | |||
390 | /* Software trigger MEMCPY - only MEMCPY uses the block mode */ | ||
391 | if (ldesc->hwdesc->config & NBPF_CHAN_CFG_TM) | ||
392 | nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_STG); | ||
393 | |||
394 | dev_dbg(chan->nbpf->dma_dev.dev, "%s(): next 0x%x, cur 0x%x\n", __func__, | ||
395 | nbpf_chan_read(chan, NBPF_CHAN_NXLA), nbpf_chan_read(chan, NBPF_CHAN_CRLA)); | ||
396 | |||
397 | return 0; | ||
398 | } | ||
399 | |||
400 | static void nbpf_chan_prepare(struct nbpf_channel *chan) | ||
401 | { | ||
402 | chan->dmarq_cfg = (chan->flags & NBPF_SLAVE_RQ_HIGH ? NBPF_CHAN_CFG_HIEN : 0) | | ||
403 | (chan->flags & NBPF_SLAVE_RQ_LOW ? NBPF_CHAN_CFG_LOEN : 0) | | ||
404 | (chan->flags & NBPF_SLAVE_RQ_LEVEL ? | ||
405 | NBPF_CHAN_CFG_LVL | (NBPF_CHAN_CFG_AM & 0x200) : 0) | | ||
406 | chan->terminal; | ||
407 | } | ||
408 | |||
409 | static void nbpf_chan_prepare_default(struct nbpf_channel *chan) | ||
410 | { | ||
411 | /* Don't output DMAACK */ | ||
412 | chan->dmarq_cfg = NBPF_CHAN_CFG_AM & 0x400; | ||
413 | chan->terminal = 0; | ||
414 | chan->flags = 0; | ||
415 | } | ||
416 | |||
417 | static void nbpf_chan_configure(struct nbpf_channel *chan) | ||
418 | { | ||
419 | /* | ||
420 | * We assume that only the link mode and DMA request line configuration | ||
421 | * have to be set in the configuration register manually. Dynamic | ||
422 | * per-transfer configuration will be loaded from transfer descriptors. | ||
423 | */ | ||
424 | nbpf_chan_write(chan, NBPF_CHAN_CFG, NBPF_CHAN_CFG_DMS | chan->dmarq_cfg); | ||
425 | } | ||
426 | |||
427 | static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size) | ||
428 | { | ||
429 | /* Maximum supported bursts depend on the buffer size */ | ||
430 | return min_t(int, __ffs(size), ilog2(nbpf->config->buffer_size * 8)); | ||
431 | } | ||
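/*
 * For illustration: the value returned above is the log2 data-size code
 * programmed into the SDS / DDS fields (code n = 2^n bytes). E.g. a 64-byte
 * request on a 16-byte-buffer variant gives min(__ffs(64), ilog2(16 * 8)) =
 * min(6, 7) = 6, i.e. 64-byte units.
 */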
432 | |||
433 | static size_t nbpf_xfer_size(struct nbpf_device *nbpf, | ||
434 | enum dma_slave_buswidth width, u32 burst) | ||
435 | { | ||
436 | size_t size; | ||
437 | |||
438 | if (!burst) | ||
439 | burst = 1; | ||
440 | |||
441 | switch (width) { | ||
442 | case DMA_SLAVE_BUSWIDTH_8_BYTES: | ||
443 | size = 8 * burst; | ||
444 | break; | ||
445 | |||
446 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
447 | size = 4 * burst; | ||
448 | break; | ||
449 | |||
450 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
451 | size = 2 * burst; | ||
452 | break; | ||
453 | |||
454 | default: | ||
455 | pr_warn("%s(): invalid bus width %u\n", __func__, width); | ||
| /* fall through - treat an invalid width as 1 byte */ | ||
456 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
457 | size = burst; | ||
458 | } | ||
459 | |||
460 | return nbpf_xfer_ds(nbpf, size); | ||
461 | } | ||
462 | |||
463 | /* | ||
464 | * We need a way to recognise slaves whose data is sent "raw" over the bus, | ||
465 | * i.e. when it isn't known in advance how many bytes will be received. The | ||
466 | * slave driver then has to provide a "large enough" buffer and either read it | ||
467 | * when it is full, or detect that some data has arrived and, if no more data | ||
468 | * arrives within a timeout, receive what's already there. We want to handle | ||
469 | * such slaves in a special way to allow an optimised mode for other users, | ||
470 | * for whom the amount of data is known in advance. So far there's no way to | ||
471 | * recognise such slaves. We use a data-width check to distinguish between | ||
472 | * the SD host and the PL011 UART. | ||
473 | */ | ||
474 | |||
475 | static int nbpf_prep_one(struct nbpf_link_desc *ldesc, | ||
476 | enum dma_transfer_direction direction, | ||
477 | dma_addr_t src, dma_addr_t dst, size_t size, bool last) | ||
478 | { | ||
479 | struct nbpf_link_reg *hwdesc = ldesc->hwdesc; | ||
480 | struct nbpf_desc *desc = ldesc->desc; | ||
481 | struct nbpf_channel *chan = desc->chan; | ||
482 | struct device *dev = chan->dma_chan.device->dev; | ||
483 | size_t mem_xfer, slave_xfer; | ||
484 | bool can_burst; | ||
485 | |||
486 | hwdesc->header = NBPF_HEADER_WBD | NBPF_HEADER_LV | | ||
487 | (last ? NBPF_HEADER_LE : 0); | ||
488 | |||
489 | hwdesc->src_addr = src; | ||
490 | hwdesc->dst_addr = dst; | ||
491 | hwdesc->transaction_size = size; | ||
492 | |||
493 | /* | ||
494 | * set config: SAD, DAD, DDS, SDS, etc. | ||
495 | * Note on transfer sizes: the DMAC can perform unaligned DMA transfers, | ||
496 | * but it is important to have the transaction size be a multiple of both | ||
497 | * receiver and transmitter transfer sizes. It is also possible to use | ||
498 | * different RAM and device transfer sizes, and it does work well with | ||
499 | * some devices, e.g. with V08R07S01E SD host controllers, which can use | ||
500 | * 128 byte transfers. But this doesn't work with other devices, | ||
501 | * especially when the transaction size is unknown. This is the case, | ||
502 | * e.g. with serial drivers like amba-pl011.c. For reception it sets up a | ||
503 | * transaction size of 4K, and if fewer bytes are received it pauses the | ||
504 | * DMA and reads out the data received via DMA as well as the bytes left | ||
505 | * in the Rx FIFO. For this to work with the RAM side using burst | ||
506 | * transfers we enable the SBE bit and terminate the transfer in our | ||
507 | * DMA_PAUSE handler. | ||
508 | */ | ||
509 | mem_xfer = nbpf_xfer_ds(chan->nbpf, size); | ||
510 | |||
511 | switch (direction) { | ||
512 | case DMA_DEV_TO_MEM: | ||
513 | can_burst = chan->slave_src_width >= 3; | ||
514 | slave_xfer = min(mem_xfer, can_burst ? | ||
515 | chan->slave_src_burst : chan->slave_src_width); | ||
516 | /* | ||
517 | * Is the slave narrower than 64 bits, i.e. not using the full | ||
518 | * bus width, and therefore unable to use bursts? | ||
519 | */ | ||
520 | if (mem_xfer > chan->slave_src_burst && !can_burst) | ||
521 | mem_xfer = chan->slave_src_burst; | ||
522 | /* Device-to-RAM DMA is unreliable without REQD set */ | ||
523 | hwdesc->config = NBPF_CHAN_CFG_SAD | (NBPF_CHAN_CFG_DDS & (mem_xfer << 16)) | | ||
524 | (NBPF_CHAN_CFG_SDS & (slave_xfer << 12)) | NBPF_CHAN_CFG_REQD | | ||
525 | NBPF_CHAN_CFG_SBE; | ||
526 | break; | ||
527 | |||
528 | case DMA_MEM_TO_DEV: | ||
529 | slave_xfer = min(mem_xfer, chan->slave_dst_width >= 3 ? | ||
530 | chan->slave_dst_burst : chan->slave_dst_width); | ||
531 | hwdesc->config = NBPF_CHAN_CFG_DAD | (NBPF_CHAN_CFG_SDS & (mem_xfer << 12)) | | ||
532 | (NBPF_CHAN_CFG_DDS & (slave_xfer << 16)) | NBPF_CHAN_CFG_REQD; | ||
533 | break; | ||
534 | |||
535 | case DMA_MEM_TO_MEM: | ||
536 | hwdesc->config = NBPF_CHAN_CFG_TCM | NBPF_CHAN_CFG_TM | | ||
537 | (NBPF_CHAN_CFG_SDS & (mem_xfer << 12)) | | ||
538 | (NBPF_CHAN_CFG_DDS & (mem_xfer << 16)); | ||
539 | break; | ||
540 | |||
541 | default: | ||
542 | return -EINVAL; | ||
543 | } | ||
544 | |||
545 | hwdesc->config |= chan->dmarq_cfg | (last ? 0 : NBPF_CHAN_CFG_DEM) | | ||
546 | NBPF_CHAN_CFG_DMS; | ||
547 | |||
548 | dev_dbg(dev, "%s(): desc @ %pad: hdr 0x%x, cfg 0x%x, %zu @ %pad -> %pad\n", | ||
549 | __func__, &ldesc->hwdesc_dma_addr, hwdesc->header, | ||
550 | hwdesc->config, size, &src, &dst); | ||
551 | |||
552 | dma_sync_single_for_device(dev, ldesc->hwdesc_dma_addr, sizeof(*hwdesc), | ||
553 | DMA_TO_DEVICE); | ||
554 | |||
555 | return 0; | ||
556 | } | ||
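/*
 * Worked example (illustrative): DMA_DEV_TO_MEM from a 4-byte-wide slave
 * (width code 2, so no slave-side bursts) with a 64-byte burst limit and
 * RAM-side code 6 composes SAD | (6 << 16) | (2 << 12) | REQD | SBE,
 * before the dmarq, DEM and DMS bits are ORed in above.
 */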
557 | |||
558 | static size_t nbpf_bytes_left(struct nbpf_channel *chan) | ||
559 | { | ||
560 | return nbpf_chan_read(chan, NBPF_CHAN_CUR_TR_BYTE); | ||
561 | } | ||
562 | |||
563 | static void nbpf_configure(struct nbpf_device *nbpf) | ||
564 | { | ||
565 | nbpf_write(nbpf, NBPF_CTRL, NBPF_CTRL_LVINT); | ||
566 | } | ||
567 | |||
568 | static void nbpf_pause(struct nbpf_channel *chan) | ||
569 | { | ||
570 | nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS); | ||
571 | /* See comment in nbpf_prep_one() */ | ||
572 | nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN); | ||
573 | } | ||
574 | |||
575 | /* Generic part */ | ||
576 | |||
577 | /* DMA ENGINE functions */ | ||
578 | static void nbpf_issue_pending(struct dma_chan *dchan) | ||
579 | { | ||
580 | struct nbpf_channel *chan = nbpf_to_chan(dchan); | ||
581 | unsigned long flags; | ||
582 | |||
583 | dev_dbg(dchan->device->dev, "Entry %s()\n", __func__); | ||
584 | |||
585 | spin_lock_irqsave(&chan->lock, flags); | ||
586 | if (list_empty(&chan->queued)) | ||
587 | goto unlock; | ||
588 | |||
589 | list_splice_tail_init(&chan->queued, &chan->active); | ||
590 | |||
591 | if (!chan->running) { | ||
592 | struct nbpf_desc *desc = list_first_entry(&chan->active, | ||
593 | struct nbpf_desc, node); | ||
594 | if (!nbpf_start(desc)) | ||
595 | chan->running = desc; | ||
596 | } | ||
597 | |||
598 | unlock: | ||
599 | spin_unlock_irqrestore(&chan->lock, flags); | ||
600 | } | ||
601 | |||
602 | static enum dma_status nbpf_tx_status(struct dma_chan *dchan, | ||
603 | dma_cookie_t cookie, struct dma_tx_state *state) | ||
604 | { | ||
605 | struct nbpf_channel *chan = nbpf_to_chan(dchan); | ||
606 | enum dma_status status = dma_cookie_status(dchan, cookie, state); | ||
607 | |||
608 | if (state) { | ||
609 | dma_cookie_t running; | ||
610 | unsigned long flags; | ||
611 | |||
612 | spin_lock_irqsave(&chan->lock, flags); | ||
613 | running = chan->running ? chan->running->async_tx.cookie : -EINVAL; | ||
614 | |||
615 | if (cookie == running) { | ||
616 | state->residue = nbpf_bytes_left(chan); | ||
617 | dev_dbg(dchan->device->dev, "%s(): residue %u\n", __func__, | ||
618 | state->residue); | ||
619 | } else if (status == DMA_IN_PROGRESS) { | ||
620 | struct nbpf_desc *desc; | ||
621 | bool found = false; | ||
622 | |||
623 | list_for_each_entry(desc, &chan->active, node) | ||
624 | if (desc->async_tx.cookie == cookie) { | ||
625 | found = true; | ||
626 | break; | ||
627 | } | ||
628 | |||
629 | if (!found) | ||
630 | list_for_each_entry(desc, &chan->queued, node) | ||
631 | if (desc->async_tx.cookie == cookie) { | ||
632 | found = true; | ||
633 | break; | ||
634 | } | ||
636 | |||
637 | state->residue = found ? desc->length : 0; | ||
638 | } | ||
639 | |||
640 | spin_unlock_irqrestore(&chan->lock, flags); | ||
641 | } | ||
642 | |||
643 | if (chan->paused) | ||
644 | status = DMA_PAUSED; | ||
645 | |||
646 | return status; | ||
647 | } | ||
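For reference, clients query this through the standard dmaengine wrapper; a minimal sketch, where my_chan and cookie are placeholders, not part of this driver:

	struct dma_tx_state state;
	enum dma_status status;

	/* On success, state.residue holds the bytes left for this cookie */
	status = dmaengine_tx_status(my_chan, cookie, &state);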
648 | |||
649 | static dma_cookie_t nbpf_tx_submit(struct dma_async_tx_descriptor *tx) | ||
650 | { | ||
651 | struct nbpf_desc *desc = container_of(tx, struct nbpf_desc, async_tx); | ||
652 | struct nbpf_channel *chan = desc->chan; | ||
653 | unsigned long flags; | ||
654 | dma_cookie_t cookie; | ||
655 | |||
656 | spin_lock_irqsave(&chan->lock, flags); | ||
657 | cookie = dma_cookie_assign(tx); | ||
658 | list_add_tail(&desc->node, &chan->queued); | ||
659 | spin_unlock_irqrestore(&chan->lock, flags); | ||
660 | |||
661 | dev_dbg(chan->dma_chan.device->dev, "Entry %s(%d)\n", __func__, cookie); | ||
662 | |||
663 | return cookie; | ||
664 | } | ||
665 | |||
666 | static int nbpf_desc_page_alloc(struct nbpf_channel *chan) | ||
667 | { | ||
668 | struct dma_chan *dchan = &chan->dma_chan; | ||
669 | struct nbpf_desc_page *dpage = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
670 | struct nbpf_link_desc *ldesc; | ||
671 | struct nbpf_link_reg *hwdesc; | ||
672 | struct nbpf_desc *desc; | ||
673 | LIST_HEAD(head); | ||
674 | LIST_HEAD(lhead); | ||
675 | int i; | ||
676 | struct device *dev = dchan->device->dev; | ||
677 | |||
678 | if (!dpage) | ||
679 | return -ENOMEM; | ||
680 | |||
681 | dev_dbg(dev, "%s(): alloc %lu descriptors, %lu segments, total alloc %zu\n", | ||
682 | __func__, NBPF_DESCS_PER_PAGE, NBPF_SEGMENTS_PER_PAGE, sizeof(*dpage)); | ||
683 | |||
684 | for (i = 0, ldesc = dpage->ldesc, hwdesc = dpage->hwdesc; | ||
685 | i < ARRAY_SIZE(dpage->ldesc); | ||
686 | i++, ldesc++, hwdesc++) { | ||
687 | ldesc->hwdesc = hwdesc; | ||
688 | list_add_tail(&ldesc->node, &lhead); | ||
689 | ldesc->hwdesc_dma_addr = dma_map_single(dchan->device->dev, | ||
690 | hwdesc, sizeof(*hwdesc), DMA_TO_DEVICE); | ||
691 | |||
692 | dev_dbg(dev, "%s(): mapped 0x%p to %pad\n", __func__, | ||
693 | hwdesc, &ldesc->hwdesc_dma_addr); | ||
694 | } | ||
695 | |||
696 | for (i = 0, desc = dpage->desc; | ||
697 | i < ARRAY_SIZE(dpage->desc); | ||
698 | i++, desc++) { | ||
699 | dma_async_tx_descriptor_init(&desc->async_tx, dchan); | ||
700 | desc->async_tx.tx_submit = nbpf_tx_submit; | ||
701 | desc->chan = chan; | ||
702 | INIT_LIST_HEAD(&desc->sg); | ||
703 | list_add_tail(&desc->node, &head); | ||
704 | } | ||
705 | |||
706 | /* | ||
707 | * This function cannot be called from interrupt context, so there is no | ||
708 | * need to save flags | ||
709 | */ | ||
710 | spin_lock_irq(&chan->lock); | ||
711 | list_splice_tail(&lhead, &chan->free_links); | ||
712 | list_splice_tail(&head, &chan->free); | ||
713 | list_add(&dpage->node, &chan->desc_page); | ||
714 | spin_unlock_irq(&chan->lock); | ||
715 | |||
716 | return ARRAY_SIZE(dpage->desc); | ||
717 | } | ||
718 | |||
719 | static void nbpf_desc_put(struct nbpf_desc *desc) | ||
720 | { | ||
721 | struct nbpf_channel *chan = desc->chan; | ||
722 | struct nbpf_link_desc *ldesc, *tmp; | ||
723 | unsigned long flags; | ||
724 | |||
725 | spin_lock_irqsave(&chan->lock, flags); | ||
726 | list_for_each_entry_safe(ldesc, tmp, &desc->sg, node) | ||
727 | list_move(&ldesc->node, &chan->free_links); | ||
728 | |||
729 | list_add(&desc->node, &chan->free); | ||
730 | spin_unlock_irqrestore(&chan->lock, flags); | ||
731 | } | ||
732 | |||
733 | static void nbpf_scan_acked(struct nbpf_channel *chan) | ||
734 | { | ||
735 | struct nbpf_desc *desc, *tmp; | ||
736 | unsigned long flags; | ||
737 | LIST_HEAD(head); | ||
738 | |||
739 | spin_lock_irqsave(&chan->lock, flags); | ||
740 | list_for_each_entry_safe(desc, tmp, &chan->done, node) | ||
741 | if (async_tx_test_ack(&desc->async_tx) && desc->user_wait) { | ||
742 | list_move(&desc->node, &head); | ||
743 | desc->user_wait = false; | ||
744 | } | ||
745 | spin_unlock_irqrestore(&chan->lock, flags); | ||
746 | |||
747 | list_for_each_entry_safe(desc, tmp, &head, node) { | ||
748 | list_del(&desc->node); | ||
749 | nbpf_desc_put(desc); | ||
750 | } | ||
751 | } | ||
752 | |||
753 | /* | ||
754 | * We have to allocate descriptors with the channel lock dropped. This means | ||
755 | * that before we re-acquire the lock, buffers can already have been taken by | ||
756 | * others, so we have to re-check after re-acquiring the lock and possibly | ||
757 | * retry, if buffers are gone again. | ||
758 | */ | ||
759 | static struct nbpf_desc *nbpf_desc_get(struct nbpf_channel *chan, size_t len) | ||
760 | { | ||
761 | struct nbpf_desc *desc = NULL; | ||
762 | struct nbpf_link_desc *ldesc, *prev = NULL; | ||
763 | |||
764 | nbpf_scan_acked(chan); | ||
765 | |||
766 | spin_lock_irq(&chan->lock); | ||
767 | |||
768 | do { | ||
769 | int i = 0, ret; | ||
770 | |||
771 | if (list_empty(&chan->free)) { | ||
772 | /* No more free descriptors */ | ||
773 | spin_unlock_irq(&chan->lock); | ||
774 | ret = nbpf_desc_page_alloc(chan); | ||
775 | if (ret < 0) | ||
776 | return NULL; | ||
777 | spin_lock_irq(&chan->lock); | ||
778 | continue; | ||
779 | } | ||
780 | desc = list_first_entry(&chan->free, struct nbpf_desc, node); | ||
781 | list_del(&desc->node); | ||
782 | |||
783 | do { | ||
784 | if (list_empty(&chan->free_links)) { | ||
785 | /* No more free link descriptors */ | ||
786 | spin_unlock_irq(&chan->lock); | ||
787 | ret = nbpf_desc_page_alloc(chan); | ||
788 | if (ret < 0) { | ||
789 | nbpf_desc_put(desc); | ||
790 | return NULL; | ||
791 | } | ||
792 | spin_lock_irq(&chan->lock); | ||
793 | continue; | ||
794 | } | ||
795 | |||
796 | ldesc = list_first_entry(&chan->free_links, | ||
797 | struct nbpf_link_desc, node); | ||
798 | ldesc->desc = desc; | ||
799 | if (prev) | ||
800 | prev->hwdesc->next = (u32)ldesc->hwdesc_dma_addr; | ||
801 | |||
802 | prev = ldesc; | ||
803 | list_move_tail(&ldesc->node, &desc->sg); | ||
804 | |||
805 | i++; | ||
806 | } while (i < len); | ||
807 | } while (!desc); | ||
808 | |||
809 | prev->hwdesc->next = 0; | ||
810 | |||
811 | spin_unlock_irq(&chan->lock); | ||
812 | |||
813 | return desc; | ||
814 | } | ||
815 | |||
816 | static void nbpf_chan_idle(struct nbpf_channel *chan) | ||
817 | { | ||
818 | struct nbpf_desc *desc, *tmp; | ||
819 | unsigned long flags; | ||
820 | LIST_HEAD(head); | ||
821 | |||
822 | spin_lock_irqsave(&chan->lock, flags); | ||
823 | |||
824 | list_splice_init(&chan->done, &head); | ||
825 | list_splice_init(&chan->active, &head); | ||
826 | list_splice_init(&chan->queued, &head); | ||
827 | |||
828 | chan->running = NULL; | ||
829 | |||
830 | spin_unlock_irqrestore(&chan->lock, flags); | ||
831 | |||
832 | list_for_each_entry_safe(desc, tmp, &head, node) { | ||
833 | dev_dbg(chan->nbpf->dma_dev.dev, "%s(): force-free desc %p cookie %d\n", | ||
834 | __func__, desc, desc->async_tx.cookie); | ||
835 | list_del(&desc->node); | ||
836 | nbpf_desc_put(desc); | ||
837 | } | ||
838 | } | ||
839 | |||
840 | static int nbpf_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, | ||
841 | unsigned long arg) | ||
842 | { | ||
843 | struct nbpf_channel *chan = nbpf_to_chan(dchan); | ||
844 | struct dma_slave_config *config; | ||
845 | |||
846 | dev_dbg(dchan->device->dev, "Entry %s(%d)\n", __func__, cmd); | ||
847 | |||
848 | switch (cmd) { | ||
849 | case DMA_TERMINATE_ALL: | ||
850 | dev_dbg(dchan->device->dev, "Terminating\n"); | ||
851 | nbpf_chan_halt(chan); | ||
852 | nbpf_chan_idle(chan); | ||
853 | break; | ||
854 | |||
855 | case DMA_SLAVE_CONFIG: | ||
856 | if (!arg) | ||
857 | return -EINVAL; | ||
858 | config = (struct dma_slave_config *)arg; | ||
859 | |||
860 | /* | ||
861 | * We could check config->slave_id to match chan->terminal here, | ||
862 | * but with DT they would be coming from the same source, so | ||
863 | * such a check would be superfluous | ||
864 | */ | ||
865 | |||
866 | chan->slave_dst_addr = config->dst_addr; | ||
867 | chan->slave_dst_width = nbpf_xfer_size(chan->nbpf, | ||
868 | config->dst_addr_width, 1); | ||
869 | chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf, | ||
870 | config->dst_addr_width, | ||
871 | config->dst_maxburst); | ||
872 | chan->slave_src_addr = config->src_addr; | ||
873 | chan->slave_src_width = nbpf_xfer_size(chan->nbpf, | ||
874 | config->src_addr_width, 1); | ||
875 | chan->slave_src_burst = nbpf_xfer_size(chan->nbpf, | ||
876 | config->src_addr_width, | ||
877 | config->src_maxburst); | ||
878 | break; | ||
879 | |||
880 | case DMA_PAUSE: | ||
881 | chan->paused = true; | ||
882 | nbpf_pause(chan); | ||
883 | break; | ||
884 | |||
885 | default: | ||
886 | return -ENXIO; | ||
887 | } | ||
888 | |||
889 | return 0; | ||
890 | } | ||
891 | |||
892 | static struct dma_async_tx_descriptor *nbpf_prep_sg(struct nbpf_channel *chan, | ||
893 | struct scatterlist *src_sg, struct scatterlist *dst_sg, | ||
894 | size_t len, enum dma_transfer_direction direction, | ||
895 | unsigned long flags) | ||
896 | { | ||
897 | struct nbpf_link_desc *ldesc; | ||
898 | struct scatterlist *mem_sg; | ||
899 | struct nbpf_desc *desc; | ||
900 | bool inc_src, inc_dst; | ||
901 | size_t data_len = 0; | ||
902 | int i = 0; | ||
903 | |||
904 | switch (direction) { | ||
905 | case DMA_DEV_TO_MEM: | ||
906 | mem_sg = dst_sg; | ||
907 | inc_src = false; | ||
908 | inc_dst = true; | ||
909 | break; | ||
910 | |||
911 | case DMA_MEM_TO_DEV: | ||
912 | mem_sg = src_sg; | ||
913 | inc_src = true; | ||
914 | inc_dst = false; | ||
915 | break; | ||
916 | |||
917 | default: | ||
918 | case DMA_MEM_TO_MEM: | ||
919 | mem_sg = src_sg; | ||
920 | inc_src = true; | ||
921 | inc_dst = true; | ||
922 | } | ||
923 | |||
924 | desc = nbpf_desc_get(chan, len); | ||
925 | if (!desc) | ||
926 | return NULL; | ||
927 | |||
928 | desc->async_tx.flags = flags; | ||
929 | desc->async_tx.cookie = -EBUSY; | ||
930 | desc->user_wait = false; | ||
931 | |||
932 | /* | ||
933 | * This is a private descriptor list, and we own the descriptor. No need | ||
934 | * to lock. | ||
935 | */ | ||
936 | list_for_each_entry(ldesc, &desc->sg, node) { | ||
937 | int ret = nbpf_prep_one(ldesc, direction, | ||
938 | sg_dma_address(src_sg), | ||
939 | sg_dma_address(dst_sg), | ||
940 | sg_dma_len(mem_sg), | ||
941 | i == len - 1); | ||
942 | if (ret < 0) { | ||
943 | nbpf_desc_put(desc); | ||
944 | return NULL; | ||
945 | } | ||
946 | data_len += sg_dma_len(mem_sg); | ||
947 | if (inc_src) | ||
948 | src_sg = sg_next(src_sg); | ||
949 | if (inc_dst) | ||
950 | dst_sg = sg_next(dst_sg); | ||
951 | mem_sg = direction == DMA_DEV_TO_MEM ? dst_sg : src_sg; | ||
952 | i++; | ||
953 | } | ||
954 | |||
955 | desc->length = data_len; | ||
956 | |||
957 | /* The user has to return the descriptor to us ASAP via .tx_submit() */ | ||
958 | return &desc->async_tx; | ||
959 | } | ||
960 | |||
961 | static struct dma_async_tx_descriptor *nbpf_prep_memcpy( | ||
962 | struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src, | ||
963 | size_t len, unsigned long flags) | ||
964 | { | ||
965 | struct nbpf_channel *chan = nbpf_to_chan(dchan); | ||
966 | struct scatterlist dst_sg; | ||
967 | struct scatterlist src_sg; | ||
968 | |||
969 | sg_init_table(&dst_sg, 1); | ||
970 | sg_init_table(&src_sg, 1); | ||
971 | |||
972 | sg_dma_address(&dst_sg) = dst; | ||
973 | sg_dma_address(&src_sg) = src; | ||
974 | |||
975 | sg_dma_len(&dst_sg) = len; | ||
976 | sg_dma_len(&src_sg) = len; | ||
977 | |||
978 | dev_dbg(dchan->device->dev, "%s(): %zu @ %pad -> %pad\n", | ||
979 | __func__, len, &src, &dst); | ||
980 | |||
981 | return nbpf_prep_sg(chan, &src_sg, &dst_sg, 1, | ||
982 | DMA_MEM_TO_MEM, flags); | ||
983 | } | ||
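A MEMCPY user would drive this path roughly as follows; a sketch under standard dmaengine conventions, where my_chan, dst, src and len are placeholders:

	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = my_chan->device->device_prep_dma_memcpy(my_chan, dst, src, len,
						     DMA_PREP_INTERRUPT);
	if (tx) {
		cookie = dmaengine_submit(tx);    /* lands in nbpf_tx_submit() */
		dma_async_issue_pending(my_chan); /* lands in nbpf_issue_pending() */
	}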
984 | |||
985 | static struct dma_async_tx_descriptor *nbpf_prep_memcpy_sg( | ||
986 | struct dma_chan *dchan, | ||
987 | struct scatterlist *dst_sg, unsigned int dst_nents, | ||
988 | struct scatterlist *src_sg, unsigned int src_nents, | ||
989 | unsigned long flags) | ||
990 | { | ||
991 | struct nbpf_channel *chan = nbpf_to_chan(dchan); | ||
992 | |||
993 | if (dst_nents != src_nents) | ||
994 | return NULL; | ||
995 | |||
996 | return nbpf_prep_sg(chan, src_sg, dst_sg, src_nents, | ||
997 | DMA_MEM_TO_MEM, flags); | ||
998 | } | ||
999 | |||
1000 | static struct dma_async_tx_descriptor *nbpf_prep_slave_sg( | ||
1001 | struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, | ||
1002 | enum dma_transfer_direction direction, unsigned long flags, void *context) | ||
1003 | { | ||
1004 | struct nbpf_channel *chan = nbpf_to_chan(dchan); | ||
1005 | struct scatterlist slave_sg; | ||
1006 | |||
1007 | dev_dbg(dchan->device->dev, "Entry %s()\n", __func__); | ||
1008 | |||
1009 | sg_init_table(&slave_sg, 1); | ||
1010 | |||
1011 | switch (direction) { | ||
1012 | case DMA_MEM_TO_DEV: | ||
1013 | sg_dma_address(&slave_sg) = chan->slave_dst_addr; | ||
1014 | return nbpf_prep_sg(chan, sgl, &slave_sg, sg_len, | ||
1015 | direction, flags); | ||
1016 | |||
1017 | case DMA_DEV_TO_MEM: | ||
1018 | sg_dma_address(&slave_sg) = chan->slave_src_addr; | ||
1019 | return nbpf_prep_sg(chan, &slave_sg, sgl, sg_len, | ||
1020 | direction, flags); | ||
1021 | |||
1022 | default: | ||
1023 | return NULL; | ||
1024 | } | ||
1025 | } | ||
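Slave users configure the channel first and then prepare through the generic wrappers; a hedged sketch, where my_chan, fifo_addr, sgl and nents are placeholders:

	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 16,
	};
	struct dma_async_tx_descriptor *tx;

	dmaengine_slave_config(my_chan, &cfg);	/* handled via nbpf_control() */
	tx = dmaengine_prep_slave_sg(my_chan, sgl, nents, DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT);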
1026 | |||
1027 | static int nbpf_alloc_chan_resources(struct dma_chan *dchan) | ||
1028 | { | ||
1029 | struct nbpf_channel *chan = nbpf_to_chan(dchan); | ||
1030 | int ret; | ||
1031 | |||
1032 | INIT_LIST_HEAD(&chan->free); | ||
1033 | INIT_LIST_HEAD(&chan->free_links); | ||
1034 | INIT_LIST_HEAD(&chan->queued); | ||
1035 | INIT_LIST_HEAD(&chan->active); | ||
1036 | INIT_LIST_HEAD(&chan->done); | ||
1037 | |||
1038 | ret = nbpf_desc_page_alloc(chan); | ||
1039 | if (ret < 0) | ||
1040 | return ret; | ||
1041 | |||
1042 | dev_dbg(dchan->device->dev, "Entry %s(): terminal %u\n", __func__, | ||
1043 | chan->terminal); | ||
1044 | |||
1045 | nbpf_chan_configure(chan); | ||
1046 | |||
1047 | return ret; | ||
1048 | } | ||
1049 | |||
1050 | static void nbpf_free_chan_resources(struct dma_chan *dchan) | ||
1051 | { | ||
1052 | struct nbpf_channel *chan = nbpf_to_chan(dchan); | ||
1053 | struct nbpf_desc_page *dpage, *tmp; | ||
1054 | |||
1055 | dev_dbg(dchan->device->dev, "Entry %s()\n", __func__); | ||
1056 | |||
1057 | nbpf_chan_halt(chan); | ||
1058 | nbpf_chan_idle(chan); | ||
1059 | /* Clean up in case a channel is re-used for MEMCPY after slave DMA */ | ||
1060 | nbpf_chan_prepare_default(chan); | ||
1061 | |||
1062 | list_for_each_entry_safe(dpage, tmp, &chan->desc_page, node) { | ||
1063 | struct nbpf_link_desc *ldesc; | ||
1064 | int i; | ||
1065 | list_del(&dpage->node); | ||
1066 | for (i = 0, ldesc = dpage->ldesc; | ||
1067 | i < ARRAY_SIZE(dpage->ldesc); | ||
1068 | i++, ldesc++) | ||
1069 | dma_unmap_single(dchan->device->dev, ldesc->hwdesc_dma_addr, | ||
1070 | sizeof(*ldesc->hwdesc), DMA_TO_DEVICE); | ||
1071 | free_page((unsigned long)dpage); | ||
1072 | } | ||
1073 | } | ||
1074 | |||
1075 | static int nbpf_slave_caps(struct dma_chan *dchan, | ||
1076 | struct dma_slave_caps *caps) | ||
1077 | { | ||
1078 | caps->src_addr_widths = NBPF_DMA_BUSWIDTHS; | ||
1079 | caps->dstn_addr_widths = NBPF_DMA_BUSWIDTHS; | ||
1080 | caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
1081 | caps->cmd_pause = false; | ||
1082 | caps->cmd_terminate = true; | ||
1083 | |||
1084 | return 0; | ||
1085 | } | ||
1086 | |||
1087 | static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec, | ||
1088 | struct of_dma *ofdma) | ||
1089 | { | ||
1090 | struct nbpf_device *nbpf = ofdma->of_dma_data; | ||
1091 | struct dma_chan *dchan; | ||
1092 | struct nbpf_channel *chan; | ||
1093 | |||
1094 | if (dma_spec->args_count != 2) | ||
1095 | return NULL; | ||
1096 | |||
1097 | dchan = dma_get_any_slave_channel(&nbpf->dma_dev); | ||
1098 | if (!dchan) | ||
1099 | return NULL; | ||
1100 | |||
1101 | dev_dbg(dchan->device->dev, "Entry %s(%s)\n", __func__, | ||
1102 | dma_spec->np->name); | ||
1103 | |||
1104 | chan = nbpf_to_chan(dchan); | ||
1105 | |||
1106 | chan->terminal = dma_spec->args[0]; | ||
1107 | chan->flags = dma_spec->args[1]; | ||
1108 | |||
1109 | nbpf_chan_prepare(chan); | ||
1110 | nbpf_chan_configure(chan); | ||
1111 | |||
1112 | return dchan; | ||
1113 | } | ||
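The two specifier cells consumed here are the terminal number and the request-line flags from a client's dmas property; a hypothetical client entry (terminal number invented for illustration) would look like:

	dmas = <&dmac 2 (NBPF_SLAVE_RQ_HIGH | NBPF_SLAVE_RQ_LEVEL)>;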
1114 | |||
1115 | static void nbpf_chan_tasklet(unsigned long data) | ||
1116 | { | ||
1117 | struct nbpf_channel *chan = (struct nbpf_channel *)data; | ||
1118 | struct nbpf_desc *desc, *tmp; | ||
1119 | dma_async_tx_callback callback; | ||
1120 | void *param; | ||
1121 | |||
1122 | while (!list_empty(&chan->done)) { | ||
1123 | bool found = false, must_put, recycling = false; | ||
1124 | |||
1125 | spin_lock_irq(&chan->lock); | ||
1126 | |||
1127 | list_for_each_entry_safe(desc, tmp, &chan->done, node) { | ||
1128 | if (!desc->user_wait) { | ||
1129 | /* Newly completed descriptor, have to process */ | ||
1130 | found = true; | ||
1131 | break; | ||
1132 | } else if (async_tx_test_ack(&desc->async_tx)) { | ||
1133 | /* | ||
1134 | * This descriptor was waiting for a user ACK, | ||
1135 | * it can be recycled now. | ||
1136 | */ | ||
1137 | list_del(&desc->node); | ||
1138 | spin_unlock_irq(&chan->lock); | ||
1139 | nbpf_desc_put(desc); | ||
1140 | recycling = true; | ||
1141 | break; | ||
1142 | } | ||
1143 | } | ||
1144 | |||
1145 | if (recycling) | ||
1146 | continue; | ||
1147 | |||
1148 | if (!found) { | ||
1149 | /* This can happen if TERMINATE_ALL has been called */ | ||
1150 | spin_unlock_irq(&chan->lock); | ||
1151 | break; | ||
1152 | } | ||
1153 | |||
1154 | dma_cookie_complete(&desc->async_tx); | ||
1155 | |||
1156 | /* | ||
1157 | * Once the lock is released we cannot dereference desc - it may | ||
1158 | * still be on the "done" list | ||
1159 | */ | ||
1160 | if (async_tx_test_ack(&desc->async_tx)) { | ||
1161 | list_del(&desc->node); | ||
1162 | must_put = true; | ||
1163 | } else { | ||
1164 | desc->user_wait = true; | ||
1165 | must_put = false; | ||
1166 | } | ||
1167 | |||
1168 | callback = desc->async_tx.callback; | ||
1169 | param = desc->async_tx.callback_param; | ||
1170 | |||
1171 | /* ack and callback completed descriptor */ | ||
1172 | spin_unlock_irq(&chan->lock); | ||
1173 | |||
1174 | if (callback) | ||
1175 | callback(param); | ||
1176 | |||
1177 | if (must_put) | ||
1178 | nbpf_desc_put(desc); | ||
1179 | } | ||
1180 | } | ||
1181 | |||
1182 | static irqreturn_t nbpf_chan_irq(int irq, void *dev) | ||
1183 | { | ||
1184 | struct nbpf_channel *chan = dev; | ||
1185 | bool done = nbpf_status_get(chan); | ||
1186 | struct nbpf_desc *desc; | ||
1187 | irqreturn_t ret; | ||
1188 | bool bh = false; | ||
1189 | |||
1190 | if (!done) | ||
1191 | return IRQ_NONE; | ||
1192 | |||
1193 | nbpf_status_ack(chan); | ||
1194 | |||
1195 | dev_dbg(&chan->dma_chan.dev->device, "%s()\n", __func__); | ||
1196 | |||
1197 | spin_lock(&chan->lock); | ||
1198 | desc = chan->running; | ||
1199 | if (WARN_ON(!desc)) { | ||
1200 | ret = IRQ_NONE; | ||
1201 | goto unlock; | ||
1202 | } else { | ||
1203 | ret = IRQ_HANDLED; | ||
1204 | bh = true; | ||
1205 | } | ||
1206 | |||
1207 | list_move_tail(&desc->node, &chan->done); | ||
1208 | chan->running = NULL; | ||
1209 | |||
1210 | if (!list_empty(&chan->active)) { | ||
1211 | desc = list_first_entry(&chan->active, | ||
1212 | struct nbpf_desc, node); | ||
1213 | if (!nbpf_start(desc)) | ||
1214 | chan->running = desc; | ||
1215 | } | ||
1216 | |||
1217 | unlock: | ||
1218 | spin_unlock(&chan->lock); | ||
1219 | |||
1220 | if (bh) | ||
1221 | tasklet_schedule(&chan->tasklet); | ||
1222 | |||
1223 | return ret; | ||
1224 | } | ||
1225 | |||
1226 | static irqreturn_t nbpf_err_irq(int irq, void *dev) | ||
1227 | { | ||
1228 | struct nbpf_device *nbpf = dev; | ||
1229 | u32 error = nbpf_error_get(nbpf); | ||
1230 | |||
1231 | if (!error) | ||
1232 | return IRQ_NONE; | ||
1233 | |||
1234 | dev_warn(nbpf->dma_dev.dev, "DMA error IRQ %u\n", irq); | ||
1235 | |||
1236 | do { | ||
1237 | struct nbpf_channel *chan = nbpf_error_get_channel(nbpf, error); | ||
1238 | /* On error: abort all queued transfers, no callback */ | ||
1239 | nbpf_error_clear(chan); | ||
1240 | nbpf_chan_idle(chan); | ||
1241 | error = nbpf_error_get(nbpf); | ||
1242 | } while (error); | ||
1243 | |||
1244 | return IRQ_HANDLED; | ||
1245 | } | ||
1246 | |||
1247 | static int nbpf_chan_probe(struct nbpf_device *nbpf, int n) | ||
1248 | { | ||
1249 | struct dma_device *dma_dev = &nbpf->dma_dev; | ||
1250 | struct nbpf_channel *chan = nbpf->chan + n; | ||
1251 | int ret; | ||
1252 | |||
1253 | chan->nbpf = nbpf; | ||
1254 | chan->base = nbpf->base + NBPF_REG_CHAN_OFFSET + NBPF_REG_CHAN_SIZE * n; | ||
1255 | INIT_LIST_HEAD(&chan->desc_page); | ||
1256 | spin_lock_init(&chan->lock); | ||
1257 | chan->dma_chan.device = dma_dev; | ||
1258 | dma_cookie_init(&chan->dma_chan); | ||
1259 | nbpf_chan_prepare_default(chan); | ||
1260 | |||
1261 | dev_dbg(dma_dev->dev, "%s(): channel %d: -> %p\n", __func__, n, chan->base); | ||
1262 | |||
1263 | snprintf(chan->name, sizeof(chan->name), "nbpf %d", n); | ||
1264 | |||
1265 | tasklet_init(&chan->tasklet, nbpf_chan_tasklet, (unsigned long)chan); | ||
1266 | ret = devm_request_irq(dma_dev->dev, chan->irq, | ||
1267 | nbpf_chan_irq, IRQF_SHARED, | ||
1268 | chan->name, chan); | ||
1269 | if (ret < 0) | ||
1270 | return ret; | ||
1271 | |||
1272 | /* Add the channel to DMA device channel list */ | ||
1273 | list_add_tail(&chan->dma_chan.device_node, | ||
1274 | &dma_dev->channels); | ||
1275 | |||
1276 | return 0; | ||
1277 | } | ||
1278 | |||
1279 | static const struct of_device_id nbpf_match[] = { | ||
1280 | {.compatible = "renesas,nbpfaxi64dmac1b4", .data = &nbpf_cfg[NBPF1B4]}, | ||
1281 | {.compatible = "renesas,nbpfaxi64dmac1b8", .data = &nbpf_cfg[NBPF1B8]}, | ||
1282 | {.compatible = "renesas,nbpfaxi64dmac1b16", .data = &nbpf_cfg[NBPF1B16]}, | ||
1283 | {.compatible = "renesas,nbpfaxi64dmac4b4", .data = &nbpf_cfg[NBPF4B4]}, | ||
1284 | {.compatible = "renesas,nbpfaxi64dmac4b8", .data = &nbpf_cfg[NBPF4B8]}, | ||
1285 | {.compatible = "renesas,nbpfaxi64dmac4b16", .data = &nbpf_cfg[NBPF4B16]}, | ||
1286 | {.compatible = "renesas,nbpfaxi64dmac8b4", .data = &nbpf_cfg[NBPF8B4]}, | ||
1287 | {.compatible = "renesas,nbpfaxi64dmac8b8", .data = &nbpf_cfg[NBPF8B8]}, | ||
1288 | {.compatible = "renesas,nbpfaxi64dmac8b16", .data = &nbpf_cfg[NBPF8B16]}, | ||
1289 | {} | ||
1290 | }; | ||
1291 | MODULE_DEVICE_TABLE(of, nbpf_match); | ||
1292 | |||
1293 | static int nbpf_probe(struct platform_device *pdev) | ||
1294 | { | ||
1295 | struct device *dev = &pdev->dev; | ||
1296 | const struct of_device_id *of_id = of_match_device(nbpf_match, dev); | ||
1297 | struct device_node *np = dev->of_node; | ||
1298 | struct nbpf_device *nbpf; | ||
1299 | struct dma_device *dma_dev; | ||
1300 | struct resource *iomem, *irq_res; | ||
1301 | const struct nbpf_config *cfg; | ||
1302 | int num_channels; | ||
1303 | int ret, irq, eirq, i; | ||
1304 | int irqbuf[9]; /* maximum 8 channels + error IRQ */ | ||
1305 | unsigned int irqs = 0; | ||
1306 | |||
1307 | BUILD_BUG_ON(sizeof(struct nbpf_desc_page) > PAGE_SIZE); | ||
1308 | |||
1309 | /* DT only */ | ||
1310 | if (!np || !of_id || !of_id->data) | ||
1311 | return -ENODEV; | ||
1312 | |||
1313 | cfg = of_id->data; | ||
1314 | num_channels = cfg->num_channels; | ||
1315 | |||
1316 | nbpf = devm_kzalloc(dev, sizeof(*nbpf) + num_channels * | ||
1317 | sizeof(nbpf->chan[0]), GFP_KERNEL); | ||
1318 | if (!nbpf) { | ||
1319 | dev_err(dev, "Memory allocation failed\n"); | ||
1320 | return -ENOMEM; | ||
1321 | } | ||
1322 | dma_dev = &nbpf->dma_dev; | ||
1323 | dma_dev->dev = dev; | ||
1324 | |||
1325 | iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1326 | nbpf->base = devm_ioremap_resource(dev, iomem); | ||
1327 | if (IS_ERR(nbpf->base)) | ||
1328 | return PTR_ERR(nbpf->base); | ||
1329 | |||
1330 | nbpf->clk = devm_clk_get(dev, NULL); | ||
1331 | if (IS_ERR(nbpf->clk)) | ||
1332 | return PTR_ERR(nbpf->clk); | ||
1333 | |||
1334 | nbpf->config = cfg; | ||
1335 | |||
1336 | for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) { | ||
1337 | irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i); | ||
1338 | if (!irq_res) | ||
1339 | break; | ||
1340 | |||
1341 | for (irq = irq_res->start; irq <= irq_res->end; | ||
1342 | irq++, irqs++) | ||
1343 | irqbuf[irqs] = irq; | ||
1344 | } | ||
1345 | |||
1346 | /* | ||
1347 | * 3 IRQ resource schemes are supported: | ||
1348 | * 1. 1 shared IRQ for error and all channels | ||
1349 | * 2. 2 IRQs: one for error and one shared for all channels | ||
1350 | * 3. 1 IRQ for error and one separate IRQ for each channel | ||
1351 | */ | ||
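	/*
	 * E.g. (hypothetical) an 8-channel device using scheme 3 would list
	 * nine IRQ resources, the one named "error" being picked out below.
	 */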
1352 | if (irqs != 1 && irqs != 2 && irqs != num_channels + 1) | ||
1353 | return -ENXIO; | ||
1354 | |||
1355 | if (irqs == 1) { | ||
1356 | eirq = irqbuf[0]; | ||
1357 | |||
1358 | for (i = 0; i < num_channels; i++) | ||
1359 | nbpf->chan[i].irq = irqbuf[0]; | ||
1360 | } else { | ||
1361 | eirq = platform_get_irq_byname(pdev, "error"); | ||
1362 | if (eirq < 0) | ||
1363 | return eirq; | ||
1364 | |||
1365 | if (irqs == num_channels + 1) { | ||
1366 | struct nbpf_channel *chan; | ||
1367 | |||
1368 | for (i = 0, chan = nbpf->chan; i <= num_channels; | ||
1369 | i++, chan++) { | ||
1370 | /* Skip the error IRQ */ | ||
1371 | if (irqbuf[i] == eirq) | ||
1372 | i++; | ||
1373 | chan->irq = irqbuf[i]; | ||
1374 | } | ||
1375 | |||
1376 | if (chan != nbpf->chan + num_channels) | ||
1377 | return -EINVAL; | ||
1378 | } else { | ||
1379 | /* 2 IRQs and more than one channel */ | ||
1380 | if (irqbuf[0] == eirq) | ||
1381 | irq = irqbuf[1]; | ||
1382 | else | ||
1383 | irq = irqbuf[0]; | ||
1384 | |||
1385 | for (i = 0; i < num_channels; i++) | ||
1386 | nbpf->chan[i].irq = irq; | ||
1387 | } | ||
1388 | } | ||
1389 | |||
1390 | ret = devm_request_irq(dev, eirq, nbpf_err_irq, | ||
1391 | IRQF_SHARED, "dma error", nbpf); | ||
1392 | if (ret < 0) | ||
1393 | return ret; | ||
1394 | |||
1395 | INIT_LIST_HEAD(&dma_dev->channels); | ||
1396 | |||
1397 | /* Create DMA Channel */ | ||
1398 | for (i = 0; i < num_channels; i++) { | ||
1399 | ret = nbpf_chan_probe(nbpf, i); | ||
1400 | if (ret < 0) | ||
1401 | return ret; | ||
1402 | } | ||
1403 | |||
1404 | dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); | ||
1405 | dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); | ||
1406 | dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask); | ||
1407 | dma_cap_set(DMA_SG, dma_dev->cap_mask); | ||
1408 | |||
1409 | /* Common and MEMCPY operations */ | ||
1410 | dma_dev->device_alloc_chan_resources | ||
1411 | = nbpf_alloc_chan_resources; | ||
1412 | dma_dev->device_free_chan_resources = nbpf_free_chan_resources; | ||
1413 | dma_dev->device_prep_dma_sg = nbpf_prep_memcpy_sg; | ||
1414 | dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy; | ||
1415 | dma_dev->device_tx_status = nbpf_tx_status; | ||
1416 | dma_dev->device_issue_pending = nbpf_issue_pending; | ||
1417 | dma_dev->device_slave_caps = nbpf_slave_caps; | ||
1418 | |||
1419 | /* | ||
1420 | * If we drop support for unaligned MEMCPY buffer addresses and / or | ||
1421 | * lengths by setting | ||
1422 | * dma_dev->copy_align = 4; | ||
1423 | * then we can set transfer length to 4 bytes in nbpf_prep_one() for | ||
1424 | * DMA_MEM_TO_MEM | ||
1425 | */ | ||
1426 | |||
1427 | /* Compulsory for DMA_SLAVE fields */ | ||
1428 | dma_dev->device_prep_slave_sg = nbpf_prep_slave_sg; | ||
1429 | dma_dev->device_control = nbpf_control; | ||
1430 | |||
1431 | platform_set_drvdata(pdev, nbpf); | ||
1432 | |||
1433 | ret = clk_prepare_enable(nbpf->clk); | ||
1434 | if (ret < 0) | ||
1435 | return ret; | ||
1436 | |||
1437 | nbpf_configure(nbpf); | ||
1438 | |||
1439 | ret = dma_async_device_register(dma_dev); | ||
1440 | if (ret < 0) | ||
1441 | goto e_clk_off; | ||
1442 | |||
1443 | ret = of_dma_controller_register(np, nbpf_of_xlate, nbpf); | ||
1444 | if (ret < 0) | ||
1445 | goto e_dma_dev_unreg; | ||
1446 | |||
1447 | return 0; | ||
1448 | |||
1449 | e_dma_dev_unreg: | ||
1450 | dma_async_device_unregister(dma_dev); | ||
1451 | e_clk_off: | ||
1452 | clk_disable_unprepare(nbpf->clk); | ||
1453 | |||
1454 | return ret; | ||
1455 | } | ||
1456 | |||
1457 | static int nbpf_remove(struct platform_device *pdev) | ||
1458 | { | ||
1459 | struct nbpf_device *nbpf = platform_get_drvdata(pdev); | ||
1460 | |||
1461 | of_dma_controller_free(pdev->dev.of_node); | ||
1462 | dma_async_device_unregister(&nbpf->dma_dev); | ||
1463 | clk_disable_unprepare(nbpf->clk); | ||
1464 | |||
1465 | return 0; | ||
1466 | } | ||
1467 | |||
1468 | static struct platform_device_id nbpf_ids[] = { | ||
1469 | {"nbpfaxi64dmac1b4", (kernel_ulong_t)&nbpf_cfg[NBPF1B4]}, | ||
1470 | {"nbpfaxi64dmac1b8", (kernel_ulong_t)&nbpf_cfg[NBPF1B8]}, | ||
1471 | {"nbpfaxi64dmac1b16", (kernel_ulong_t)&nbpf_cfg[NBPF1B16]}, | ||
1472 | {"nbpfaxi64dmac4b4", (kernel_ulong_t)&nbpf_cfg[NBPF4B4]}, | ||
1473 | {"nbpfaxi64dmac4b8", (kernel_ulong_t)&nbpf_cfg[NBPF4B8]}, | ||
1474 | {"nbpfaxi64dmac4b16", (kernel_ulong_t)&nbpf_cfg[NBPF4B16]}, | ||
1475 | {"nbpfaxi64dmac8b4", (kernel_ulong_t)&nbpf_cfg[NBPF8B4]}, | ||
1476 | {"nbpfaxi64dmac8b8", (kernel_ulong_t)&nbpf_cfg[NBPF8B8]}, | ||
1477 | {"nbpfaxi64dmac8b16", (kernel_ulong_t)&nbpf_cfg[NBPF8B16]}, | ||
1478 | {}, | ||
1479 | }; | ||
1480 | MODULE_DEVICE_TABLE(platform, nbpf_ids); | ||
1481 | |||
1482 | #ifdef CONFIG_PM_RUNTIME | ||
1483 | static int nbpf_runtime_suspend(struct device *dev) | ||
1484 | { | ||
1485 | struct nbpf_device *nbpf = platform_get_drvdata(to_platform_device(dev)); | ||
1486 | clk_disable_unprepare(nbpf->clk); | ||
1487 | return 0; | ||
1488 | } | ||
1489 | |||
1490 | static int nbpf_runtime_resume(struct device *dev) | ||
1491 | { | ||
1492 | struct nbpf_device *nbpf = platform_get_drvdata(to_platform_device(dev)); | ||
1493 | return clk_prepare_enable(nbpf->clk); | ||
1494 | } | ||
1495 | #endif | ||
1496 | |||
1497 | static const struct dev_pm_ops nbpf_pm_ops = { | ||
1498 | SET_RUNTIME_PM_OPS(nbpf_runtime_suspend, nbpf_runtime_resume, NULL) | ||
1499 | }; | ||
1500 | |||
1501 | static struct platform_driver nbpf_driver = { | ||
1502 | .driver = { | ||
1503 | .owner = THIS_MODULE, | ||
1504 | .name = "dma-nbpf", | ||
1505 | .of_match_table = nbpf_match, | ||
1506 | .pm = &nbpf_pm_ops, | ||
1507 | }, | ||
1508 | .id_table = nbpf_ids, | ||
1509 | .probe = nbpf_probe, | ||
1510 | .remove = nbpf_remove, | ||
1511 | }; | ||
1512 | |||
1513 | module_platform_driver(nbpf_driver); | ||
1514 | |||
1515 | MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>"); | ||
1516 | MODULE_DESCRIPTION("dmaengine driver for NBPFAXI64* DMACs"); | ||
1517 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index e8fe9dc455f4..d5fbeaa1e7ba 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c | |||
@@ -218,3 +218,38 @@ struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, | |||
218 | &dma_spec->args[0]); | 218 | &dma_spec->args[0]); |
219 | } | 219 | } |
220 | EXPORT_SYMBOL_GPL(of_dma_simple_xlate); | 220 | EXPORT_SYMBOL_GPL(of_dma_simple_xlate); |
221 | |||
222 | /** | ||
223 | * of_dma_xlate_by_chan_id - Translate dt property to DMA channel by channel id | ||
224 | * @dma_spec: pointer to DMA specifier as found in the device tree | ||
225 | * @of_dma: pointer to DMA controller data | ||
226 | * | ||
227 | * This function can be used as the of xlate callback by a DMA driver that wants | ||
228 | * to match a channel based on the channel id. When using this xlate function | ||
229 | * the #dma-cells property of the DMA controller dt node needs to be set to 1. | ||
230 | * The data parameter of of_dma_controller_register must be a pointer to the | ||
231 | * dma_device struct the function should match upon. | ||
232 | * | ||
233 | * Returns a pointer to the appropriate DMA channel on success or NULL on error. | ||
234 | */ | ||
235 | struct dma_chan *of_dma_xlate_by_chan_id(struct of_phandle_args *dma_spec, | ||
236 | struct of_dma *ofdma) | ||
237 | { | ||
238 | struct dma_device *dev = ofdma->of_dma_data; | ||
239 | struct dma_chan *chan, *candidate = NULL; | ||
240 | |||
241 | if (!dev || dma_spec->args_count != 1) | ||
242 | return NULL; | ||
243 | |||
244 | list_for_each_entry(chan, &dev->channels, device_node) | ||
245 | if (chan->chan_id == dma_spec->args[0]) { | ||
246 | candidate = chan; | ||
247 | break; | ||
248 | } | ||
249 | |||
250 | if (!candidate) | ||
251 | return NULL; | ||
252 | |||
253 | return dma_get_slave_channel(candidate); | ||
254 | } | ||
255 | EXPORT_SYMBOL_GPL(of_dma_xlate_by_chan_id); | ||
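A controller driver opting into this helper passes its dma_device as the xlate data; a minimal sketch, where mydev and pdev are placeholders:

	ret = of_dma_controller_register(pdev->dev.of_node,
					 of_dma_xlate_by_chan_id,
					 &mydev->dma_dev);
	if (ret)
		dev_err(&pdev->dev, "failed to register DMA controller\n");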
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index b19f04f4390b..4cf7d9a950d7 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c | |||
@@ -853,8 +853,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( | |||
853 | 853 | ||
854 | static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic( | 854 | static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic( |
855 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | 855 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, |
856 | size_t period_len, enum dma_transfer_direction dir, unsigned long flags, | 856 | size_t period_len, enum dma_transfer_direction dir, unsigned long flags) |
857 | void *context) | ||
858 | { | 857 | { |
859 | struct omap_dmadev *od = to_omap_dma_dev(chan->device); | 858 | struct omap_dmadev *od = to_omap_dma_dev(chan->device); |
860 | struct omap_chan *c = to_omap_dma_chan(chan); | 859 | struct omap_chan *c = to_omap_dma_chan(chan); |
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 73fa9b7a10ab..d5149aacd2fe 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -33,26 +33,15 @@ | |||
33 | #define PL330_MAX_IRQS 32 | 33 | #define PL330_MAX_IRQS 32 |
34 | #define PL330_MAX_PERI 32 | 34 | #define PL330_MAX_PERI 32 |
35 | 35 | ||
36 | enum pl330_srccachectrl { | 36 | enum pl330_cachectrl { |
37 | SCCTRL0, /* Noncacheable and nonbufferable */ | 37 | CCTRL0, /* Noncacheable and nonbufferable */ |
38 | SCCTRL1, /* Bufferable only */ | 38 | CCTRL1, /* Bufferable only */ |
39 | SCCTRL2, /* Cacheable, but do not allocate */ | 39 | CCTRL2, /* Cacheable, but do not allocate */ |
40 | SCCTRL3, /* Cacheable and bufferable, but do not allocate */ | 40 | CCTRL3, /* Cacheable and bufferable, but do not allocate */ |
41 | SINVALID1, | 41 | INVALID1, /* AWCACHE = 0x1000 */ |
42 | SINVALID2, | 42 | INVALID2, |
43 | SCCTRL6, /* Cacheable write-through, allocate on reads only */ | 43 | CCTRL6, /* Cacheable write-through, allocate on writes only */ |
44 | SCCTRL7, /* Cacheable write-back, allocate on reads only */ | 44 | CCTRL7, /* Cacheable write-back, allocate on writes only */ |
45 | }; | ||
46 | |||
47 | enum pl330_dstcachectrl { | ||
48 | DCCTRL0, /* Noncacheable and nonbufferable */ | ||
49 | DCCTRL1, /* Bufferable only */ | ||
50 | DCCTRL2, /* Cacheable, but do not allocate */ | ||
51 | DCCTRL3, /* Cacheable and bufferable, but do not allocate */ | ||
52 | DINVALID1, /* AWCACHE = 0x1000 */ | ||
53 | DINVALID2, | ||
54 | DCCTRL6, /* Cacheable write-through, allocate on writes only */ | ||
55 | DCCTRL7, /* Cacheable write-back, allocate on writes only */ | ||
56 | }; | 45 | }; |
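
Folding the identical source/destination enums into one pl330_cachectrl means a single value type can populate either cache-control field of the CCR. A minimal sketch of a hypothetical helper (not part of this patch), reusing the driver's existing CC_SRCCCTRL_SHFT/CC_DSTCCTRL_SHFT constants:

	static u32 ccr_pack_cachectrl(enum pl330_cachectrl scctl,
				      enum pl330_cachectrl dcctl)
	{
		/* one enum now feeds both the ARCACHE and AWCACHE fields */
		return ((u32)scctl << CC_SRCCCTRL_SHFT) |
		       ((u32)dcctl << CC_DSTCCTRL_SHFT);
	}
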
57 | 46 | ||
58 | enum pl330_byteswap { | 47 | enum pl330_byteswap { |
@@ -63,13 +52,6 @@ enum pl330_byteswap { | |||
63 | SWAP_16, | 52 | SWAP_16, |
64 | }; | 53 | }; |
65 | 54 | ||
66 | enum pl330_reqtype { | ||
67 | MEMTOMEM, | ||
68 | MEMTODEV, | ||
69 | DEVTOMEM, | ||
70 | DEVTODEV, | ||
71 | }; | ||
72 | |||
73 | /* Register and Bit field Definitions */ | 55 | /* Register and Bit field Definitions */ |
74 | #define DS 0x0 | 56 | #define DS 0x0 |
75 | #define DS_ST_STOP 0x0 | 57 | #define DS_ST_STOP 0x0 |
@@ -263,9 +245,6 @@ enum pl330_reqtype { | |||
263 | */ | 245 | */ |
264 | #define MCODE_BUFF_PER_REQ 256 | 246 | #define MCODE_BUFF_PER_REQ 256 |
265 | 247 | ||
266 | /* If the _pl330_req is available to the client */ | ||
267 | #define IS_FREE(req) (*((u8 *)((req)->mc_cpu)) == CMD_DMAEND) | ||
268 | |||
269 | /* Use this _only_ to wait on transient states */ | 248 | /* Use this _only_ to wait on transient states */ |
270 | #define UNTIL(t, s) while (!(_state(t) & (s))) cpu_relax(); | 249 | #define UNTIL(t, s) while (!(_state(t) & (s))) cpu_relax(); |
271 | 250 | ||
@@ -300,27 +279,6 @@ struct pl330_config { | |||
300 | u32 irq_ns; | 279 | u32 irq_ns; |
301 | }; | 280 | }; |
302 | 281 | ||
303 | /* Handle to the DMAC provided to the PL330 core */ | ||
304 | struct pl330_info { | ||
305 | /* Owning device */ | ||
306 | struct device *dev; | ||
307 | /* Size of MicroCode buffers for each channel. */ | ||
308 | unsigned mcbufsz; | ||
309 | /* ioremap'ed address of PL330 registers. */ | ||
310 | void __iomem *base; | ||
311 | /* Client can freely use it. */ | ||
312 | void *client_data; | ||
313 | /* PL330 core data, Client must not touch it. */ | ||
314 | void *pl330_data; | ||
315 | /* Populated by the PL330 core driver during pl330_add */ | ||
316 | struct pl330_config pcfg; | ||
317 | /* | ||
318 | * If the DMAC has some reset mechanism, then the | ||
319 | * client may want to provide pointer to the method. | ||
320 | */ | ||
321 | void (*dmac_reset)(struct pl330_info *pi); | ||
322 | }; | ||
323 | |||
324 | /** | 282 | /** |
325 | * Request Configuration. | 283 | * Request Configuration. |
326 | * The PL330 core does not modify this and uses the last | 284 | * The PL330 core does not modify this and uses the last |
@@ -344,8 +302,8 @@ struct pl330_reqcfg { | |||
344 | unsigned brst_len:5; | 302 | unsigned brst_len:5; |
345 | unsigned brst_size:3; /* in power of 2 */ | 303 | unsigned brst_size:3; /* in power of 2 */ |
346 | 304 | ||
347 | enum pl330_dstcachectrl dcctl; | 305 | enum pl330_cachectrl dcctl; |
348 | enum pl330_srccachectrl scctl; | 306 | enum pl330_cachectrl scctl; |
349 | enum pl330_byteswap swap; | 307 | enum pl330_byteswap swap; |
350 | struct pl330_config *pcfg; | 308 | struct pl330_config *pcfg; |
351 | }; | 309 | }; |
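
With both cache-control members sharing enum pl330_cachectrl, a request configuration is now filled symmetrically; a hypothetical illustration only, with arbitrarily chosen values:

	struct pl330_reqcfg cfg = {
		.scctl = CCTRL0,	/* noncacheable, nonbufferable source */
		.dcctl = CCTRL0,	/* destination takes the same enum */
		.swap = SWAP_NO,
		.brst_size = 2,		/* 2^2 = 4-byte beats */
		.brst_len = 1,
	};
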
@@ -359,11 +317,6 @@ struct pl330_xfer { | |||
359 | u32 dst_addr; | 317 | u32 dst_addr; |
360 | /* Size to xfer */ | 318 | /* Size to xfer */ |
361 | u32 bytes; | 319 | u32 bytes; |
362 | /* | ||
363 | * Pointer to next xfer in the list. | ||
364 | * The last xfer in the req must point to NULL. | ||
365 | */ | ||
366 | struct pl330_xfer *next; | ||
367 | }; | 320 | }; |
368 | 321 | ||
369 | /* The xfer callbacks are made with one of these arguments. */ | 322 | /* The xfer callbacks are made with one of these arguments. */ |
@@ -376,67 +329,6 @@ enum pl330_op_err { | |||
376 | PL330_ERR_FAIL, | 329 | PL330_ERR_FAIL, |
377 | }; | 330 | }; |
378 | 331 | ||
379 | /* A request defining Scatter-Gather List ending with NULL xfer. */ | ||
380 | struct pl330_req { | ||
381 | enum pl330_reqtype rqtype; | ||
382 | /* Index of peripheral for the xfer. */ | ||
383 | unsigned peri:5; | ||
384 | /* Unique token for this xfer, set by the client. */ | ||
385 | void *token; | ||
386 | /* Callback to be called after xfer. */ | ||
387 | void (*xfer_cb)(void *token, enum pl330_op_err err); | ||
388 | /* If NULL, req will be done at last set parameters. */ | ||
389 | struct pl330_reqcfg *cfg; | ||
390 | /* Pointer to first xfer in the request. */ | ||
391 | struct pl330_xfer *x; | ||
392 | /* Hook to attach to DMAC's list of reqs with due callback */ | ||
393 | struct list_head rqd; | ||
394 | }; | ||
395 | |||
396 | /* | ||
397 | * To know the status of the channel and DMAC, the client | ||
398 | * provides a pointer to this structure. The PL330 core | ||
399 | * fills it with current information. | ||
400 | */ | ||
401 | struct pl330_chanstatus { | ||
402 | /* | ||
403 | * If the DMAC engine halted due to some error, | ||
404 | * the client should remove-add DMAC. | ||
405 | */ | ||
406 | bool dmac_halted; | ||
407 | /* | ||
408 | * If channel is halted due to some error, | ||
409 | * the client should ABORT/FLUSH and START the channel. | ||
410 | */ | ||
411 | bool faulting; | ||
412 | /* Location of last load */ | ||
413 | u32 src_addr; | ||
414 | /* Location of last store */ | ||
415 | u32 dst_addr; | ||
416 | /* | ||
417 | * Pointer to the currently active req, NULL if channel is | ||
418 | * inactive, even though the requests may be present. | ||
419 | */ | ||
420 | struct pl330_req *top_req; | ||
421 | /* Pointer to req waiting second in the queue if any. */ | ||
422 | struct pl330_req *wait_req; | ||
423 | }; | ||
424 | |||
425 | enum pl330_chan_op { | ||
426 | /* Start the channel */ | ||
427 | PL330_OP_START, | ||
428 | /* Abort the active xfer */ | ||
429 | PL330_OP_ABORT, | ||
430 | /* Stop xfer and flush queue */ | ||
431 | PL330_OP_FLUSH, | ||
432 | }; | ||
433 | |||
434 | struct _xfer_spec { | ||
435 | u32 ccr; | ||
436 | struct pl330_req *r; | ||
437 | struct pl330_xfer *x; | ||
438 | }; | ||
439 | |||
440 | enum dmamov_dst { | 332 | enum dmamov_dst { |
441 | SAR = 0, | 333 | SAR = 0, |
442 | CCR, | 334 | CCR, |
@@ -454,12 +346,12 @@ enum pl330_cond { | |||
454 | ALWAYS, | 346 | ALWAYS, |
455 | }; | 347 | }; |
456 | 348 | ||
349 | struct dma_pl330_desc; | ||
350 | |||
457 | struct _pl330_req { | 351 | struct _pl330_req { |
458 | u32 mc_bus; | 352 | u32 mc_bus; |
459 | void *mc_cpu; | 353 | void *mc_cpu; |
460 | /* Number of bytes taken to setup MC for the req */ | 354 | struct dma_pl330_desc *desc; |
461 | u32 mc_len; | ||
462 | struct pl330_req *r; | ||
463 | }; | 355 | }; |
464 | 356 | ||
465 | /* ToBeDone for tasklet */ | 357 | /* ToBeDone for tasklet */ |
@@ -491,30 +383,6 @@ enum pl330_dmac_state { | |||
491 | DYING, | 383 | DYING, |
492 | }; | 384 | }; |
493 | 385 | ||
494 | /* A DMAC */ | ||
495 | struct pl330_dmac { | ||
496 | spinlock_t lock; | ||
497 | /* Holds list of reqs with due callbacks */ | ||
498 | struct list_head req_done; | ||
499 | /* Pointer to platform specific stuff */ | ||
500 | struct pl330_info *pinfo; | ||
501 | /* Maximum possible events/irqs */ | ||
502 | int events[32]; | ||
503 | /* BUS address of MicroCode buffer */ | ||
504 | dma_addr_t mcode_bus; | ||
505 | /* CPU address of MicroCode buffer */ | ||
506 | void *mcode_cpu; | ||
507 | /* List of all Channel threads */ | ||
508 | struct pl330_thread *channels; | ||
509 | /* Pointer to the MANAGER thread */ | ||
510 | struct pl330_thread *manager; | ||
511 | /* To handle bad news in interrupt */ | ||
512 | struct tasklet_struct tasks; | ||
513 | struct _pl330_tbd dmac_tbd; | ||
514 | /* State of DMAC operation */ | ||
515 | enum pl330_dmac_state state; | ||
516 | }; | ||
517 | |||
518 | enum desc_status { | 386 | enum desc_status { |
519 | /* In the DMAC pool */ | 387 | /* In the DMAC pool */ |
520 | FREE, | 388 | FREE, |
@@ -555,15 +423,16 @@ struct dma_pl330_chan { | |||
555 | * As the parent, this DMAC also provides descriptors | 423 | * As the parent, this DMAC also provides descriptors |
556 | * to the channel. | 424 | * to the channel. |
557 | */ | 425 | */ |
558 | struct dma_pl330_dmac *dmac; | 426 | struct pl330_dmac *dmac; |
559 | 427 | ||
560 | /* To protect channel manipulation */ | 428 | /* To protect channel manipulation */ |
561 | spinlock_t lock; | 429 | spinlock_t lock; |
562 | 430 | ||
563 | /* Token of a hardware channel thread of PL330 DMAC | 431 | /* |
564 | * NULL if the channel is available to be acquired. | 432 | * Hardware channel thread of PL330 DMAC. NULL if the channel is |
433 | * available. | ||
565 | */ | 434 | */ |
566 | void *pl330_chid; | 435 | struct pl330_thread *thread; |
567 | 436 | ||
568 | /* For D-to-M and M-to-D channels */ | 437 | /* For D-to-M and M-to-D channels */ |
569 | int burst_sz; /* the peripheral fifo width */ | 438 | int burst_sz; /* the peripheral fifo width */ |
@@ -574,9 +443,7 @@ struct dma_pl330_chan { | |||
574 | bool cyclic; | 443 | bool cyclic; |
575 | }; | 444 | }; |
576 | 445 | ||
577 | struct dma_pl330_dmac { | 446 | struct pl330_dmac { |
578 | struct pl330_info pif; | ||
579 | |||
580 | /* DMA-Engine Device */ | 447 | /* DMA-Engine Device */ |
581 | struct dma_device ddma; | 448 | struct dma_device ddma; |
582 | 449 | ||
@@ -588,6 +455,32 @@ struct dma_pl330_dmac { | |||
588 | /* To protect desc_pool manipulation */ | 455 | /* To protect desc_pool manipulation */ |
589 | spinlock_t pool_lock; | 456 | spinlock_t pool_lock; |
590 | 457 | ||
458 | /* Size of MicroCode buffers for each channel. */ | ||
459 | unsigned mcbufsz; | ||
460 | /* ioremap'ed address of PL330 registers. */ | ||
461 | void __iomem *base; | ||
462 | /* Populated by the PL330 core driver during pl330_add */ | ||
463 | struct pl330_config pcfg; | ||
464 | |||
465 | spinlock_t lock; | ||
466 | /* Maximum possible events/irqs */ | ||
467 | int events[32]; | ||
468 | /* BUS address of MicroCode buffer */ | ||
469 | dma_addr_t mcode_bus; | ||
470 | /* CPU address of MicroCode buffer */ | ||
471 | void *mcode_cpu; | ||
472 | /* List of all Channel threads */ | ||
473 | struct pl330_thread *channels; | ||
474 | /* Pointer to the MANAGER thread */ | ||
475 | struct pl330_thread *manager; | ||
476 | /* To handle bad news in interrupt */ | ||
477 | struct tasklet_struct tasks; | ||
478 | struct _pl330_tbd dmac_tbd; | ||
479 | /* State of DMAC operation */ | ||
480 | enum pl330_dmac_state state; | ||
481 | /* Holds list of reqs with due callbacks */ | ||
482 | struct list_head req_done; | ||
483 | |||
591 | /* Peripheral channels connected to this DMAC */ | 484 | /* Peripheral channels connected to this DMAC */ |
592 | unsigned int num_peripherals; | 485 | unsigned int num_peripherals; |
593 | struct dma_pl330_chan *peripherals; /* keep at end */ | 486 | struct dma_pl330_chan *peripherals; /* keep at end */ |
@@ -604,49 +497,43 @@ struct dma_pl330_desc { | |||
604 | struct pl330_xfer px; | 497 | struct pl330_xfer px; |
605 | 498 | ||
606 | struct pl330_reqcfg rqcfg; | 499 | struct pl330_reqcfg rqcfg; |
607 | struct pl330_req req; | ||
608 | 500 | ||
609 | enum desc_status status; | 501 | enum desc_status status; |
610 | 502 | ||
611 | /* The channel which currently holds this desc */ | 503 | /* The channel which currently holds this desc */ |
612 | struct dma_pl330_chan *pchan; | 504 | struct dma_pl330_chan *pchan; |
505 | |||
506 | enum dma_transfer_direction rqtype; | ||
507 | /* Index of peripheral for the xfer. */ | ||
508 | unsigned peri:5; | ||
509 | /* Hook to attach to DMAC's list of reqs with due callback */ | ||
510 | struct list_head rqd; | ||
613 | }; | 511 | }; |
614 | 512 | ||
615 | static inline void _callback(struct pl330_req *r, enum pl330_op_err err) | 513 | struct _xfer_spec { |
616 | { | 514 | u32 ccr; |
617 | if (r && r->xfer_cb) | 515 | struct dma_pl330_desc *desc; |
618 | r->xfer_cb(r->token, err); | 516 | }; |
619 | } | ||
620 | 517 | ||
621 | static inline bool _queue_empty(struct pl330_thread *thrd) | 518 | static inline bool _queue_empty(struct pl330_thread *thrd) |
622 | { | 519 | { |
623 | return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1])) | 520 | return thrd->req[0].desc == NULL && thrd->req[1].desc == NULL; |
624 | ? true : false; | ||
625 | } | 521 | } |
626 | 522 | ||
627 | static inline bool _queue_full(struct pl330_thread *thrd) | 523 | static inline bool _queue_full(struct pl330_thread *thrd) |
628 | { | 524 | { |
629 | return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1])) | 525 | return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL; |
630 | ? false : true; | ||
631 | } | 526 | } |
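
With the IS_FREE() microcode probe gone, slot occupancy reduces to a NULL test on the attached descriptor, as the two rewritten helpers above show; a hypothetical companion in the same style would pick the free slot:

	static inline int _free_slot(struct pl330_thread *thrd)
	{
		if (!thrd->req[0].desc)
			return 0;
		if (!thrd->req[1].desc)
			return 1;
		return -1;	/* both slots busy */
	}
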
632 | 527 | ||
633 | static inline bool is_manager(struct pl330_thread *thrd) | 528 | static inline bool is_manager(struct pl330_thread *thrd) |
634 | { | 529 | { |
635 | struct pl330_dmac *pl330 = thrd->dmac; | 530 | return thrd->dmac->manager == thrd; |
636 | |||
637 | /* MANAGER is indexed at the end */ | ||
638 | if (thrd->id == pl330->pinfo->pcfg.num_chan) | ||
639 | return true; | ||
640 | else | ||
641 | return false; | ||
642 | } | 531 | } |
643 | 532 | ||
644 | /* If manager of the thread is in Non-Secure mode */ | 533 | /* If manager of the thread is in Non-Secure mode */ |
645 | static inline bool _manager_ns(struct pl330_thread *thrd) | 534 | static inline bool _manager_ns(struct pl330_thread *thrd) |
646 | { | 535 | { |
647 | struct pl330_dmac *pl330 = thrd->dmac; | 536 | return (thrd->dmac->pcfg.mode & DMAC_MODE_NS) ? true : false; |
648 | |||
649 | return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false; | ||
650 | } | 537 | } |
651 | 538 | ||
652 | static inline u32 get_revision(u32 periph_id) | 539 | static inline u32 get_revision(u32 periph_id) |
@@ -1004,7 +891,7 @@ static inline u32 _emit_GO(unsigned dry_run, u8 buf[], | |||
1004 | /* Returns Time-Out */ | 891 | /* Returns Time-Out */ |
1005 | static bool _until_dmac_idle(struct pl330_thread *thrd) | 892 | static bool _until_dmac_idle(struct pl330_thread *thrd) |
1006 | { | 893 | { |
1007 | void __iomem *regs = thrd->dmac->pinfo->base; | 894 | void __iomem *regs = thrd->dmac->base; |
1008 | unsigned long loops = msecs_to_loops(5); | 895 | unsigned long loops = msecs_to_loops(5); |
1009 | 896 | ||
1010 | do { | 897 | do { |
@@ -1024,7 +911,7 @@ static bool _until_dmac_idle(struct pl330_thread *thrd) | |||
1024 | static inline void _execute_DBGINSN(struct pl330_thread *thrd, | 911 | static inline void _execute_DBGINSN(struct pl330_thread *thrd, |
1025 | u8 insn[], bool as_manager) | 912 | u8 insn[], bool as_manager) |
1026 | { | 913 | { |
1027 | void __iomem *regs = thrd->dmac->pinfo->base; | 914 | void __iomem *regs = thrd->dmac->base; |
1028 | u32 val; | 915 | u32 val; |
1029 | 916 | ||
1030 | val = (insn[0] << 16) | (insn[1] << 24); | 917 | val = (insn[0] << 16) | (insn[1] << 24); |
@@ -1039,7 +926,7 @@ static inline void _execute_DBGINSN(struct pl330_thread *thrd, | |||
1039 | 926 | ||
1040 | /* If timed out due to halted state-machine */ | 927 | /* If timed out due to halted state-machine */ |
1041 | if (_until_dmac_idle(thrd)) { | 928 | if (_until_dmac_idle(thrd)) { |
1042 | dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n"); | 929 | dev_err(thrd->dmac->ddma.dev, "DMAC halted!\n"); |
1043 | return; | 930 | return; |
1044 | } | 931 | } |
1045 | 932 | ||
@@ -1047,25 +934,9 @@ static inline void _execute_DBGINSN(struct pl330_thread *thrd, | |||
1047 | writel(0, regs + DBGCMD); | 934 | writel(0, regs + DBGCMD); |
1048 | } | 935 | } |
1049 | 936 | ||
1050 | /* | ||
1051 | * Mark a _pl330_req as free. | ||
1052 | * We do it by writing DMAEND as the first instruction | ||
1053 | * because no valid request is going to have DMAEND as | ||
1054 | * its first instruction to execute. | ||
1055 | */ | ||
1056 | static void mark_free(struct pl330_thread *thrd, int idx) | ||
1057 | { | ||
1058 | struct _pl330_req *req = &thrd->req[idx]; | ||
1059 | |||
1060 | _emit_END(0, req->mc_cpu); | ||
1061 | req->mc_len = 0; | ||
1062 | |||
1063 | thrd->req_running = -1; | ||
1064 | } | ||
1065 | |||
1066 | static inline u32 _state(struct pl330_thread *thrd) | 937 | static inline u32 _state(struct pl330_thread *thrd) |
1067 | { | 938 | { |
1068 | void __iomem *regs = thrd->dmac->pinfo->base; | 939 | void __iomem *regs = thrd->dmac->base; |
1069 | u32 val; | 940 | u32 val; |
1070 | 941 | ||
1071 | if (is_manager(thrd)) | 942 | if (is_manager(thrd)) |
@@ -1123,7 +994,7 @@ static inline u32 _state(struct pl330_thread *thrd) | |||
1123 | 994 | ||
1124 | static void _stop(struct pl330_thread *thrd) | 995 | static void _stop(struct pl330_thread *thrd) |
1125 | { | 996 | { |
1126 | void __iomem *regs = thrd->dmac->pinfo->base; | 997 | void __iomem *regs = thrd->dmac->base; |
1127 | u8 insn[6] = {0, 0, 0, 0, 0, 0}; | 998 | u8 insn[6] = {0, 0, 0, 0, 0, 0}; |
1128 | 999 | ||
1129 | if (_state(thrd) == PL330_STATE_FAULT_COMPLETING) | 1000 | if (_state(thrd) == PL330_STATE_FAULT_COMPLETING) |
@@ -1146,9 +1017,9 @@ static void _stop(struct pl330_thread *thrd) | |||
1146 | /* Start the next queued req of thread 'thrd' */ | 1017 | /* Start the next queued req of thread 'thrd' */ |
1147 | static bool _trigger(struct pl330_thread *thrd) | 1018 | static bool _trigger(struct pl330_thread *thrd) |
1148 | { | 1019 | { |
1149 | void __iomem *regs = thrd->dmac->pinfo->base; | 1020 | void __iomem *regs = thrd->dmac->base; |
1150 | struct _pl330_req *req; | 1021 | struct _pl330_req *req; |
1151 | struct pl330_req *r; | 1022 | struct dma_pl330_desc *desc; |
1152 | struct _arg_GO go; | 1023 | struct _arg_GO go; |
1153 | unsigned ns; | 1024 | unsigned ns; |
1154 | u8 insn[6] = {0, 0, 0, 0, 0, 0}; | 1025 | u8 insn[6] = {0, 0, 0, 0, 0, 0}; |
@@ -1159,32 +1030,27 @@ static bool _trigger(struct pl330_thread *thrd) | |||
1159 | return true; | 1030 | return true; |
1160 | 1031 | ||
1161 | idx = 1 - thrd->lstenq; | 1032 | idx = 1 - thrd->lstenq; |
1162 | if (!IS_FREE(&thrd->req[idx])) | 1033 | if (thrd->req[idx].desc != NULL) { |
1163 | req = &thrd->req[idx]; | 1034 | req = &thrd->req[idx]; |
1164 | else { | 1035 | } else { |
1165 | idx = thrd->lstenq; | 1036 | idx = thrd->lstenq; |
1166 | if (!IS_FREE(&thrd->req[idx])) | 1037 | if (thrd->req[idx].desc != NULL) |
1167 | req = &thrd->req[idx]; | 1038 | req = &thrd->req[idx]; |
1168 | else | 1039 | else |
1169 | req = NULL; | 1040 | req = NULL; |
1170 | } | 1041 | } |
1171 | 1042 | ||
1172 | /* Return if no request */ | 1043 | /* Return if no request */ |
1173 | if (!req || !req->r) | 1044 | if (!req) |
1174 | return true; | 1045 | return true; |
1175 | 1046 | ||
1176 | r = req->r; | 1047 | desc = req->desc; |
1177 | 1048 | ||
1178 | if (r->cfg) | 1049 | ns = desc->rqcfg.nonsecure ? 1 : 0; |
1179 | ns = r->cfg->nonsecure ? 1 : 0; | ||
1180 | else if (readl(regs + CS(thrd->id)) & CS_CNS) | ||
1181 | ns = 1; | ||
1182 | else | ||
1183 | ns = 0; | ||
1184 | 1050 | ||
1185 | /* See 'Abort Sources' point-4 at Page 2-25 */ | 1051 | /* See 'Abort Sources' point-4 at Page 2-25 */ |
1186 | if (_manager_ns(thrd) && !ns) | 1052 | if (_manager_ns(thrd) && !ns) |
1187 | dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n", | 1053 | dev_info(thrd->dmac->ddma.dev, "%s:%d Recipe for ABORT!\n", |
1188 | __func__, __LINE__); | 1054 | __func__, __LINE__); |
1189 | 1055 | ||
1190 | go.chan = thrd->id; | 1056 | go.chan = thrd->id; |
@@ -1240,7 +1106,7 @@ static inline int _ldst_memtomem(unsigned dry_run, u8 buf[], | |||
1240 | const struct _xfer_spec *pxs, int cyc) | 1106 | const struct _xfer_spec *pxs, int cyc) |
1241 | { | 1107 | { |
1242 | int off = 0; | 1108 | int off = 0; |
1243 | struct pl330_config *pcfg = pxs->r->cfg->pcfg; | 1109 | struct pl330_config *pcfg = pxs->desc->rqcfg.pcfg; |
1244 | 1110 | ||
1245 | /* check lock-up free version */ | 1111 | /* check lock-up free version */ |
1246 | if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) { | 1112 | if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) { |
@@ -1266,10 +1132,10 @@ static inline int _ldst_devtomem(unsigned dry_run, u8 buf[], | |||
1266 | int off = 0; | 1132 | int off = 0; |
1267 | 1133 | ||
1268 | while (cyc--) { | 1134 | while (cyc--) { |
1269 | off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri); | 1135 | off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri); |
1270 | off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri); | 1136 | off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->desc->peri); |
1271 | off += _emit_ST(dry_run, &buf[off], ALWAYS); | 1137 | off += _emit_ST(dry_run, &buf[off], ALWAYS); |
1272 | off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri); | 1138 | off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri); |
1273 | } | 1139 | } |
1274 | 1140 | ||
1275 | return off; | 1141 | return off; |
@@ -1281,10 +1147,10 @@ static inline int _ldst_memtodev(unsigned dry_run, u8 buf[], | |||
1281 | int off = 0; | 1147 | int off = 0; |
1282 | 1148 | ||
1283 | while (cyc--) { | 1149 | while (cyc--) { |
1284 | off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri); | 1150 | off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri); |
1285 | off += _emit_LD(dry_run, &buf[off], ALWAYS); | 1151 | off += _emit_LD(dry_run, &buf[off], ALWAYS); |
1286 | off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri); | 1152 | off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->desc->peri); |
1287 | off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri); | 1153 | off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri); |
1288 | } | 1154 | } |
1289 | 1155 | ||
1290 | return off; | 1156 | return off; |
@@ -1295,14 +1161,14 @@ static int _bursts(unsigned dry_run, u8 buf[], | |||
1295 | { | 1161 | { |
1296 | int off = 0; | 1162 | int off = 0; |
1297 | 1163 | ||
1298 | switch (pxs->r->rqtype) { | 1164 | switch (pxs->desc->rqtype) { |
1299 | case MEMTODEV: | 1165 | case DMA_MEM_TO_DEV: |
1300 | off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc); | 1166 | off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc); |
1301 | break; | 1167 | break; |
1302 | case DEVTOMEM: | 1168 | case DMA_DEV_TO_MEM: |
1303 | off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc); | 1169 | off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc); |
1304 | break; | 1170 | break; |
1305 | case MEMTOMEM: | 1171 | case DMA_MEM_TO_MEM: |
1306 | off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc); | 1172 | off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc); |
1307 | break; | 1173 | break; |
1308 | default: | 1174 | default: |
@@ -1395,7 +1261,7 @@ static inline int _loop(unsigned dry_run, u8 buf[], | |||
1395 | static inline int _setup_loops(unsigned dry_run, u8 buf[], | 1261 | static inline int _setup_loops(unsigned dry_run, u8 buf[], |
1396 | const struct _xfer_spec *pxs) | 1262 | const struct _xfer_spec *pxs) |
1397 | { | 1263 | { |
1398 | struct pl330_xfer *x = pxs->x; | 1264 | struct pl330_xfer *x = &pxs->desc->px; |
1399 | u32 ccr = pxs->ccr; | 1265 | u32 ccr = pxs->ccr; |
1400 | unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr); | 1266 | unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr); |
1401 | int off = 0; | 1267 | int off = 0; |
@@ -1412,7 +1278,7 @@ static inline int _setup_loops(unsigned dry_run, u8 buf[], | |||
1412 | static inline int _setup_xfer(unsigned dry_run, u8 buf[], | 1278 | static inline int _setup_xfer(unsigned dry_run, u8 buf[], |
1413 | const struct _xfer_spec *pxs) | 1279 | const struct _xfer_spec *pxs) |
1414 | { | 1280 | { |
1415 | struct pl330_xfer *x = pxs->x; | 1281 | struct pl330_xfer *x = &pxs->desc->px; |
1416 | int off = 0; | 1282 | int off = 0; |
1417 | 1283 | ||
1418 | /* DMAMOV SAR, x->src_addr */ | 1284 | /* DMAMOV SAR, x->src_addr */ |
@@ -1443,17 +1309,12 @@ static int _setup_req(unsigned dry_run, struct pl330_thread *thrd, | |||
1443 | /* DMAMOV CCR, ccr */ | 1309 | /* DMAMOV CCR, ccr */ |
1444 | off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr); | 1310 | off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr); |
1445 | 1311 | ||
1446 | x = pxs->r->x; | 1312 | x = &pxs->desc->px; |
1447 | do { | 1313 | /* Error if xfer length is not aligned at burst size */ |
1448 | /* Error if xfer length is not aligned at burst size */ | 1314 | if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr))) |
1449 | if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr))) | 1315 | return -EINVAL; |
1450 | return -EINVAL; | ||
1451 | |||
1452 | pxs->x = x; | ||
1453 | off += _setup_xfer(dry_run, &buf[off], pxs); | ||
1454 | 1316 | ||
1455 | x = x->next; | 1317 | off += _setup_xfer(dry_run, &buf[off], pxs); |
1456 | } while (x); | ||
1457 | 1318 | ||
1458 | /* DMASEV peripheral/event */ | 1319 | /* DMASEV peripheral/event */ |
1459 | off += _emit_SEV(dry_run, &buf[off], thrd->ev); | 1320 | off += _emit_SEV(dry_run, &buf[off], thrd->ev); |
@@ -1495,31 +1356,15 @@ static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc) | |||
1495 | return ccr; | 1356 | return ccr; |
1496 | } | 1357 | } |
1497 | 1358 | ||
1498 | static inline bool _is_valid(u32 ccr) | ||
1499 | { | ||
1500 | enum pl330_dstcachectrl dcctl; | ||
1501 | enum pl330_srccachectrl scctl; | ||
1502 | |||
1503 | dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DRCCCTRL_MASK; | ||
1504 | scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK; | ||
1505 | |||
1506 | if (dcctl == DINVALID1 || dcctl == DINVALID2 | ||
1507 | || scctl == SINVALID1 || scctl == SINVALID2) | ||
1508 | return false; | ||
1509 | else | ||
1510 | return true; | ||
1511 | } | ||
1512 | |||
1513 | /* | 1359 | /* |
1514 | * Submit a list of xfers after which the client wants notification. | 1360 | * Submit a list of xfers after which the client wants notification. |
1515 | * Client is not notified after each xfer unit, just once after all | 1361 | * Client is not notified after each xfer unit, just once after all |
1516 | * xfer units are done or some error occurs. | 1362 | * xfer units are done or some error occurs. |
1517 | */ | 1363 | */ |
1518 | static int pl330_submit_req(void *ch_id, struct pl330_req *r) | 1364 | static int pl330_submit_req(struct pl330_thread *thrd, |
1365 | struct dma_pl330_desc *desc) | ||
1519 | { | 1366 | { |
1520 | struct pl330_thread *thrd = ch_id; | 1367 | struct pl330_dmac *pl330 = thrd->dmac; |
1521 | struct pl330_dmac *pl330; | ||
1522 | struct pl330_info *pi; | ||
1523 | struct _xfer_spec xs; | 1368 | struct _xfer_spec xs; |
1524 | unsigned long flags; | 1369 | unsigned long flags; |
1525 | void __iomem *regs; | 1370 | void __iomem *regs; |
@@ -1528,25 +1373,24 @@ static int pl330_submit_req(void *ch_id, struct pl330_req *r) | |||
1528 | int ret = 0; | 1373 | int ret = 0; |
1529 | 1374 | ||
1530 | /* No Req or Unacquired Channel or DMAC */ | 1375 | /* No Req or Unacquired Channel or DMAC */ |
1531 | if (!r || !thrd || thrd->free) | 1376 | if (!desc || !thrd || thrd->free) |
1532 | return -EINVAL; | 1377 | return -EINVAL; |
1533 | 1378 | ||
1534 | pl330 = thrd->dmac; | 1379 | regs = thrd->dmac->base; |
1535 | pi = pl330->pinfo; | ||
1536 | regs = pi->base; | ||
1537 | 1380 | ||
1538 | if (pl330->state == DYING | 1381 | if (pl330->state == DYING |
1539 | || pl330->dmac_tbd.reset_chan & (1 << thrd->id)) { | 1382 | || pl330->dmac_tbd.reset_chan & (1 << thrd->id)) { |
1540 | dev_info(thrd->dmac->pinfo->dev, "%s:%d\n", | 1383 | dev_info(thrd->dmac->ddma.dev, "%s:%d\n", |
1541 | __func__, __LINE__); | 1384 | __func__, __LINE__); |
1542 | return -EAGAIN; | 1385 | return -EAGAIN; |
1543 | } | 1386 | } |
1544 | 1387 | ||
1545 | /* If request for non-existing peripheral */ | 1388 | /* If request for non-existing peripheral */ |
1546 | if (r->rqtype != MEMTOMEM && r->peri >= pi->pcfg.num_peri) { | 1389 | if (desc->rqtype != DMA_MEM_TO_MEM && |
1547 | dev_info(thrd->dmac->pinfo->dev, | 1390 | desc->peri >= pl330->pcfg.num_peri) { |
1391 | dev_info(thrd->dmac->ddma.dev, | ||
1548 | "%s:%d Invalid peripheral(%u)!\n", | 1392 | "%s:%d Invalid peripheral(%u)!\n", |
1549 | __func__, __LINE__, r->peri); | 1393 | __func__, __LINE__, desc->peri); |
1550 | return -EINVAL; | 1394 | return -EINVAL; |
1551 | } | 1395 | } |
1552 | 1396 | ||
@@ -1557,41 +1401,26 @@ static int pl330_submit_req(void *ch_id, struct pl330_req *r) | |||
1557 | goto xfer_exit; | 1401 | goto xfer_exit; |
1558 | } | 1402 | } |
1559 | 1403 | ||
1404 | /* Prefer Secure Channel */ | ||
1405 | if (!_manager_ns(thrd)) | ||
1406 | desc->rqcfg.nonsecure = 0; | ||
1407 | else | ||
1408 | desc->rqcfg.nonsecure = 1; | ||
1560 | 1409 | ||
1561 | /* Use last settings, if not provided */ | 1410 | ccr = _prepare_ccr(&desc->rqcfg); |
1562 | if (r->cfg) { | ||
1563 | /* Prefer Secure Channel */ | ||
1564 | if (!_manager_ns(thrd)) | ||
1565 | r->cfg->nonsecure = 0; | ||
1566 | else | ||
1567 | r->cfg->nonsecure = 1; | ||
1568 | |||
1569 | ccr = _prepare_ccr(r->cfg); | ||
1570 | } else { | ||
1571 | ccr = readl(regs + CC(thrd->id)); | ||
1572 | } | ||
1573 | |||
1574 | /* If this req doesn't have valid xfer settings */ | ||
1575 | if (!_is_valid(ccr)) { | ||
1576 | ret = -EINVAL; | ||
1577 | dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n", | ||
1578 | __func__, __LINE__, ccr); | ||
1579 | goto xfer_exit; | ||
1580 | } | ||
1581 | 1411 | ||
1582 | idx = IS_FREE(&thrd->req[0]) ? 0 : 1; | 1412 | idx = thrd->req[0].desc == NULL ? 0 : 1; |
1583 | 1413 | ||
1584 | xs.ccr = ccr; | 1414 | xs.ccr = ccr; |
1585 | xs.r = r; | 1415 | xs.desc = desc; |
1586 | 1416 | ||
1587 | /* First dry run to check if req is acceptable */ | 1417 | /* First dry run to check if req is acceptable */ |
1588 | ret = _setup_req(1, thrd, idx, &xs); | 1418 | ret = _setup_req(1, thrd, idx, &xs); |
1589 | if (ret < 0) | 1419 | if (ret < 0) |
1590 | goto xfer_exit; | 1420 | goto xfer_exit; |
1591 | 1421 | ||
1592 | if (ret > pi->mcbufsz / 2) { | 1422 | if (ret > pl330->mcbufsz / 2) { |
1593 | dev_info(thrd->dmac->pinfo->dev, | 1423 | dev_info(pl330->ddma.dev, "%s:%d Try increasing mcbufsz\n", |
1594 | "%s:%d Try increasing mcbufsz\n", | ||
1595 | __func__, __LINE__); | 1424 | __func__, __LINE__); |
1596 | ret = -ENOMEM; | 1425 | ret = -ENOMEM; |
1597 | goto xfer_exit; | 1426 | goto xfer_exit; |
@@ -1599,8 +1428,8 @@ static int pl330_submit_req(void *ch_id, struct pl330_req *r) | |||
1599 | 1428 | ||
1600 | /* Hook the request */ | 1429 | /* Hook the request */ |
1601 | thrd->lstenq = idx; | 1430 | thrd->lstenq = idx; |
1602 | thrd->req[idx].mc_len = _setup_req(0, thrd, idx, &xs); | 1431 | thrd->req[idx].desc = desc; |
1603 | thrd->req[idx].r = r; | 1432 | _setup_req(0, thrd, idx, &xs); |
1604 | 1433 | ||
1605 | ret = 0; | 1434 | ret = 0; |
1606 | 1435 | ||
@@ -1610,10 +1439,32 @@ xfer_exit: | |||
1610 | return ret; | 1439 | return ret; |
1611 | } | 1440 | } |
1612 | 1441 | ||
1442 | static void dma_pl330_rqcb(struct dma_pl330_desc *desc, enum pl330_op_err err) | ||
1443 | { | ||
1444 | struct dma_pl330_chan *pch; | ||
1445 | unsigned long flags; | ||
1446 | |||
1447 | if (!desc) | ||
1448 | return; | ||
1449 | |||
1450 | pch = desc->pchan; | ||
1451 | |||
1452 | /* If desc aborted */ | ||
1453 | if (!pch) | ||
1454 | return; | ||
1455 | |||
1456 | spin_lock_irqsave(&pch->lock, flags); | ||
1457 | |||
1458 | desc->status = DONE; | ||
1459 | |||
1460 | spin_unlock_irqrestore(&pch->lock, flags); | ||
1461 | |||
1462 | tasklet_schedule(&pch->task); | ||
1463 | } | ||
1464 | |||
1613 | static void pl330_dotask(unsigned long data) | 1465 | static void pl330_dotask(unsigned long data) |
1614 | { | 1466 | { |
1615 | struct pl330_dmac *pl330 = (struct pl330_dmac *) data; | 1467 | struct pl330_dmac *pl330 = (struct pl330_dmac *) data; |
1616 | struct pl330_info *pi = pl330->pinfo; | ||
1617 | unsigned long flags; | 1468 | unsigned long flags; |
1618 | int i; | 1469 | int i; |
1619 | 1470 | ||
@@ -1631,16 +1482,16 @@ static void pl330_dotask(unsigned long data) | |||
1631 | if (pl330->dmac_tbd.reset_mngr) { | 1482 | if (pl330->dmac_tbd.reset_mngr) { |
1632 | _stop(pl330->manager); | 1483 | _stop(pl330->manager); |
1633 | /* Reset all channels */ | 1484 | /* Reset all channels */ |
1634 | pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1; | 1485 | pl330->dmac_tbd.reset_chan = (1 << pl330->pcfg.num_chan) - 1; |
1635 | /* Clear the reset flag */ | 1486 | /* Clear the reset flag */ |
1636 | pl330->dmac_tbd.reset_mngr = false; | 1487 | pl330->dmac_tbd.reset_mngr = false; |
1637 | } | 1488 | } |
1638 | 1489 | ||
1639 | for (i = 0; i < pi->pcfg.num_chan; i++) { | 1490 | for (i = 0; i < pl330->pcfg.num_chan; i++) { |
1640 | 1491 | ||
1641 | if (pl330->dmac_tbd.reset_chan & (1 << i)) { | 1492 | if (pl330->dmac_tbd.reset_chan & (1 << i)) { |
1642 | struct pl330_thread *thrd = &pl330->channels[i]; | 1493 | struct pl330_thread *thrd = &pl330->channels[i]; |
1643 | void __iomem *regs = pi->base; | 1494 | void __iomem *regs = pl330->base; |
1644 | enum pl330_op_err err; | 1495 | enum pl330_op_err err; |
1645 | 1496 | ||
1646 | _stop(thrd); | 1497 | _stop(thrd); |
@@ -1651,16 +1502,13 @@ static void pl330_dotask(unsigned long data) | |||
1651 | err = PL330_ERR_ABORT; | 1502 | err = PL330_ERR_ABORT; |
1652 | 1503 | ||
1653 | spin_unlock_irqrestore(&pl330->lock, flags); | 1504 | spin_unlock_irqrestore(&pl330->lock, flags); |
1654 | 1505 | dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, err); | |
1655 | _callback(thrd->req[1 - thrd->lstenq].r, err); | 1506 | dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, err); |
1656 | _callback(thrd->req[thrd->lstenq].r, err); | ||
1657 | |||
1658 | spin_lock_irqsave(&pl330->lock, flags); | 1507 | spin_lock_irqsave(&pl330->lock, flags); |
1659 | 1508 | ||
1660 | thrd->req[0].r = NULL; | 1509 | thrd->req[0].desc = NULL; |
1661 | thrd->req[1].r = NULL; | 1510 | thrd->req[1].desc = NULL; |
1662 | mark_free(thrd, 0); | 1511 | thrd->req_running = -1; |
1663 | mark_free(thrd, 1); | ||
1664 | 1512 | ||
1665 | /* Clear the reset flag */ | 1513 | /* Clear the reset flag */ |
1666 | pl330->dmac_tbd.reset_chan &= ~(1 << i); | 1514 | pl330->dmac_tbd.reset_chan &= ~(1 << i); |
@@ -1673,20 +1521,15 @@ static void pl330_dotask(unsigned long data) | |||
1673 | } | 1521 | } |
1674 | 1522 | ||
1675 | /* Returns 1 if state was updated, 0 otherwise */ | 1523 | /* Returns 1 if state was updated, 0 otherwise */ |
1676 | static int pl330_update(const struct pl330_info *pi) | 1524 | static int pl330_update(struct pl330_dmac *pl330) |
1677 | { | 1525 | { |
1678 | struct pl330_req *rqdone, *tmp; | 1526 | struct dma_pl330_desc *descdone, *tmp; |
1679 | struct pl330_dmac *pl330; | ||
1680 | unsigned long flags; | 1527 | unsigned long flags; |
1681 | void __iomem *regs; | 1528 | void __iomem *regs; |
1682 | u32 val; | 1529 | u32 val; |
1683 | int id, ev, ret = 0; | 1530 | int id, ev, ret = 0; |
1684 | 1531 | ||
1685 | if (!pi || !pi->pl330_data) | 1532 | regs = pl330->base; |
1686 | return 0; | ||
1687 | |||
1688 | regs = pi->base; | ||
1689 | pl330 = pi->pl330_data; | ||
1690 | 1533 | ||
1691 | spin_lock_irqsave(&pl330->lock, flags); | 1534 | spin_lock_irqsave(&pl330->lock, flags); |
1692 | 1535 | ||
@@ -1696,13 +1539,13 @@ static int pl330_update(const struct pl330_info *pi) | |||
1696 | else | 1539 | else |
1697 | pl330->dmac_tbd.reset_mngr = false; | 1540 | pl330->dmac_tbd.reset_mngr = false; |
1698 | 1541 | ||
1699 | val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1); | 1542 | val = readl(regs + FSC) & ((1 << pl330->pcfg.num_chan) - 1); |
1700 | pl330->dmac_tbd.reset_chan |= val; | 1543 | pl330->dmac_tbd.reset_chan |= val; |
1701 | if (val) { | 1544 | if (val) { |
1702 | int i = 0; | 1545 | int i = 0; |
1703 | while (i < pi->pcfg.num_chan) { | 1546 | while (i < pl330->pcfg.num_chan) { |
1704 | if (val & (1 << i)) { | 1547 | if (val & (1 << i)) { |
1705 | dev_info(pi->dev, | 1548 | dev_info(pl330->ddma.dev, |
1706 | "Reset Channel-%d\t CS-%x FTC-%x\n", | 1549 | "Reset Channel-%d\t CS-%x FTC-%x\n", |
1707 | i, readl(regs + CS(i)), | 1550 | i, readl(regs + CS(i)), |
1708 | readl(regs + FTC(i))); | 1551 | readl(regs + FTC(i))); |
@@ -1714,15 +1557,16 @@ static int pl330_update(const struct pl330_info *pi) | |||
1714 | 1557 | ||
1715 | /* Check which event happened i.e, thread notified */ | 1558 | /* Check which event happened i.e, thread notified */ |
1716 | val = readl(regs + ES); | 1559 | val = readl(regs + ES); |
1717 | if (pi->pcfg.num_events < 32 | 1560 | if (pl330->pcfg.num_events < 32 |
1718 | && val & ~((1 << pi->pcfg.num_events) - 1)) { | 1561 | && val & ~((1 << pl330->pcfg.num_events) - 1)) { |
1719 | pl330->dmac_tbd.reset_dmac = true; | 1562 | pl330->dmac_tbd.reset_dmac = true; |
1720 | dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__); | 1563 | dev_err(pl330->ddma.dev, "%s:%d Unexpected!\n", __func__, |
1564 | __LINE__); | ||
1721 | ret = 1; | 1565 | ret = 1; |
1722 | goto updt_exit; | 1566 | goto updt_exit; |
1723 | } | 1567 | } |
1724 | 1568 | ||
1725 | for (ev = 0; ev < pi->pcfg.num_events; ev++) { | 1569 | for (ev = 0; ev < pl330->pcfg.num_events; ev++) { |
1726 | if (val & (1 << ev)) { /* Event occurred */ | 1570 | if (val & (1 << ev)) { /* Event occurred */ |
1727 | struct pl330_thread *thrd; | 1571 | struct pl330_thread *thrd; |
1728 | u32 inten = readl(regs + INTEN); | 1572 | u32 inten = readl(regs + INTEN); |
@@ -1743,25 +1587,22 @@ static int pl330_update(const struct pl330_info *pi) | |||
1743 | continue; | 1587 | continue; |
1744 | 1588 | ||
1745 | /* Detach the req */ | 1589 | /* Detach the req */ |
1746 | rqdone = thrd->req[active].r; | 1590 | descdone = thrd->req[active].desc; |
1747 | thrd->req[active].r = NULL; | 1591 | thrd->req[active].desc = NULL; |
1748 | |||
1749 | mark_free(thrd, active); | ||
1750 | 1592 | ||
1751 | /* Get going again ASAP */ | 1593 | /* Get going again ASAP */ |
1752 | _start(thrd); | 1594 | _start(thrd); |
1753 | 1595 | ||
1754 | /* For now, just make a list of callbacks to be done */ | 1596 | /* For now, just make a list of callbacks to be done */ |
1755 | list_add_tail(&rqdone->rqd, &pl330->req_done); | 1597 | list_add_tail(&descdone->rqd, &pl330->req_done); |
1756 | } | 1598 | } |
1757 | } | 1599 | } |
1758 | 1600 | ||
1759 | /* Now that we are in no hurry, do the callbacks */ | 1601 | /* Now that we are in no hurry, do the callbacks */ |
1760 | list_for_each_entry_safe(rqdone, tmp, &pl330->req_done, rqd) { | 1602 | list_for_each_entry_safe(descdone, tmp, &pl330->req_done, rqd) { |
1761 | list_del(&rqdone->rqd); | 1603 | list_del(&descdone->rqd); |
1762 | |||
1763 | spin_unlock_irqrestore(&pl330->lock, flags); | 1604 | spin_unlock_irqrestore(&pl330->lock, flags); |
1764 | _callback(rqdone, PL330_ERR_NONE); | 1605 | dma_pl330_rqcb(descdone, PL330_ERR_NONE); |
1765 | spin_lock_irqsave(&pl330->lock, flags); | 1606 | spin_lock_irqsave(&pl330->lock, flags); |
1766 | } | 1607 | } |
1767 | 1608 | ||
@@ -1778,65 +1619,13 @@ updt_exit: | |||
1778 | return ret; | 1619 | return ret; |
1779 | } | 1620 | } |
1780 | 1621 | ||
1781 | static int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op) | ||
1782 | { | ||
1783 | struct pl330_thread *thrd = ch_id; | ||
1784 | struct pl330_dmac *pl330; | ||
1785 | unsigned long flags; | ||
1786 | int ret = 0, active; | ||
1787 | |||
1788 | if (!thrd || thrd->free || thrd->dmac->state == DYING) | ||
1789 | return -EINVAL; | ||
1790 | |||
1791 | pl330 = thrd->dmac; | ||
1792 | active = thrd->req_running; | ||
1793 | |||
1794 | spin_lock_irqsave(&pl330->lock, flags); | ||
1795 | |||
1796 | switch (op) { | ||
1797 | case PL330_OP_FLUSH: | ||
1798 | /* Make sure the channel is stopped */ | ||
1799 | _stop(thrd); | ||
1800 | |||
1801 | thrd->req[0].r = NULL; | ||
1802 | thrd->req[1].r = NULL; | ||
1803 | mark_free(thrd, 0); | ||
1804 | mark_free(thrd, 1); | ||
1805 | break; | ||
1806 | |||
1807 | case PL330_OP_ABORT: | ||
1808 | /* Make sure the channel is stopped */ | ||
1809 | _stop(thrd); | ||
1810 | |||
1811 | /* ABORT is only for the active req */ | ||
1812 | if (active == -1) | ||
1813 | break; | ||
1814 | |||
1815 | thrd->req[active].r = NULL; | ||
1816 | mark_free(thrd, active); | ||
1817 | |||
1818 | /* Start the next */ | ||
1819 | case PL330_OP_START: | ||
1820 | if ((active == -1) && !_start(thrd)) | ||
1821 | ret = -EIO; | ||
1822 | break; | ||
1823 | |||
1824 | default: | ||
1825 | ret = -EINVAL; | ||
1826 | } | ||
1827 | |||
1828 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
1829 | return ret; | ||
1830 | } | ||
1831 | |||
1832 | /* Reserve an event */ | 1622 | /* Reserve an event */ |
1833 | static inline int _alloc_event(struct pl330_thread *thrd) | 1623 | static inline int _alloc_event(struct pl330_thread *thrd) |
1834 | { | 1624 | { |
1835 | struct pl330_dmac *pl330 = thrd->dmac; | 1625 | struct pl330_dmac *pl330 = thrd->dmac; |
1836 | struct pl330_info *pi = pl330->pinfo; | ||
1837 | int ev; | 1626 | int ev; |
1838 | 1627 | ||
1839 | for (ev = 0; ev < pi->pcfg.num_events; ev++) | 1628 | for (ev = 0; ev < pl330->pcfg.num_events; ev++) |
1840 | if (pl330->events[ev] == -1) { | 1629 | if (pl330->events[ev] == -1) { |
1841 | pl330->events[ev] = thrd->id; | 1630 | pl330->events[ev] = thrd->id; |
1842 | return ev; | 1631 | return ev; |
@@ -1845,45 +1634,38 @@ static inline int _alloc_event(struct pl330_thread *thrd) | |||
1845 | return -1; | 1634 | return -1; |
1846 | } | 1635 | } |
1847 | 1636 | ||
1848 | static bool _chan_ns(const struct pl330_info *pi, int i) | 1637 | static bool _chan_ns(const struct pl330_dmac *pl330, int i) |
1849 | { | 1638 | { |
1850 | return pi->pcfg.irq_ns & (1 << i); | 1639 | return pl330->pcfg.irq_ns & (1 << i); |
1851 | } | 1640 | } |
1852 | 1641 | ||
1853 | /* Upon success, returns IdentityToken for the | 1642 | /* Upon success, returns IdentityToken for the |
1854 | * allocated channel, NULL otherwise. | 1643 | * allocated channel, NULL otherwise. |
1855 | */ | 1644 | */ |
1856 | static void *pl330_request_channel(const struct pl330_info *pi) | 1645 | static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330) |
1857 | { | 1646 | { |
1858 | struct pl330_thread *thrd = NULL; | 1647 | struct pl330_thread *thrd = NULL; |
1859 | struct pl330_dmac *pl330; | ||
1860 | unsigned long flags; | 1648 | unsigned long flags; |
1861 | int chans, i; | 1649 | int chans, i; |
1862 | 1650 | ||
1863 | if (!pi || !pi->pl330_data) | ||
1864 | return NULL; | ||
1865 | |||
1866 | pl330 = pi->pl330_data; | ||
1867 | |||
1868 | if (pl330->state == DYING) | 1651 | if (pl330->state == DYING) |
1869 | return NULL; | 1652 | return NULL; |
1870 | 1653 | ||
1871 | chans = pi->pcfg.num_chan; | 1654 | chans = pl330->pcfg.num_chan; |
1872 | 1655 | ||
1873 | spin_lock_irqsave(&pl330->lock, flags); | 1656 | spin_lock_irqsave(&pl330->lock, flags); |
1874 | 1657 | ||
1875 | for (i = 0; i < chans; i++) { | 1658 | for (i = 0; i < chans; i++) { |
1876 | thrd = &pl330->channels[i]; | 1659 | thrd = &pl330->channels[i]; |
1877 | if ((thrd->free) && (!_manager_ns(thrd) || | 1660 | if ((thrd->free) && (!_manager_ns(thrd) || |
1878 | _chan_ns(pi, i))) { | 1661 | _chan_ns(pl330, i))) { |
1879 | thrd->ev = _alloc_event(thrd); | 1662 | thrd->ev = _alloc_event(thrd); |
1880 | if (thrd->ev >= 0) { | 1663 | if (thrd->ev >= 0) { |
1881 | thrd->free = false; | 1664 | thrd->free = false; |
1882 | thrd->lstenq = 1; | 1665 | thrd->lstenq = 1; |
1883 | thrd->req[0].r = NULL; | 1666 | thrd->req[0].desc = NULL; |
1884 | mark_free(thrd, 0); | 1667 | thrd->req[1].desc = NULL; |
1885 | thrd->req[1].r = NULL; | 1668 | thrd->req_running = -1; |
1886 | mark_free(thrd, 1); | ||
1887 | break; | 1669 | break; |
1888 | } | 1670 | } |
1889 | } | 1671 | } |
@@ -1899,17 +1681,15 @@ static void *pl330_request_channel(const struct pl330_info *pi) | |||
1899 | static inline void _free_event(struct pl330_thread *thrd, int ev) | 1681 | static inline void _free_event(struct pl330_thread *thrd, int ev) |
1900 | { | 1682 | { |
1901 | struct pl330_dmac *pl330 = thrd->dmac; | 1683 | struct pl330_dmac *pl330 = thrd->dmac; |
1902 | struct pl330_info *pi = pl330->pinfo; | ||
1903 | 1684 | ||
1904 | /* If the event is valid and was held by the thread */ | 1685 | /* If the event is valid and was held by the thread */ |
1905 | if (ev >= 0 && ev < pi->pcfg.num_events | 1686 | if (ev >= 0 && ev < pl330->pcfg.num_events |
1906 | && pl330->events[ev] == thrd->id) | 1687 | && pl330->events[ev] == thrd->id) |
1907 | pl330->events[ev] = -1; | 1688 | pl330->events[ev] = -1; |
1908 | } | 1689 | } |
1909 | 1690 | ||
1910 | static void pl330_release_channel(void *ch_id) | 1691 | static void pl330_release_channel(struct pl330_thread *thrd) |
1911 | { | 1692 | { |
1912 | struct pl330_thread *thrd = ch_id; | ||
1913 | struct pl330_dmac *pl330; | 1693 | struct pl330_dmac *pl330; |
1914 | unsigned long flags; | 1694 | unsigned long flags; |
1915 | 1695 | ||
@@ -1918,8 +1698,8 @@ static void pl330_release_channel(void *ch_id) | |||
1918 | 1698 | ||
1919 | _stop(thrd); | 1699 | _stop(thrd); |
1920 | 1700 | ||
1921 | _callback(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT); | 1701 | dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, PL330_ERR_ABORT); |
1922 | _callback(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT); | 1702 | dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, PL330_ERR_ABORT); |
1923 | 1703 | ||
1924 | pl330 = thrd->dmac; | 1704 | pl330 = thrd->dmac; |
1925 | 1705 | ||
@@ -1932,72 +1712,70 @@ static void pl330_release_channel(void *ch_id) | |||
1932 | /* Initialize the structure for PL330 configuration that can be used | 1712 | /* Initialize the structure for PL330 configuration that can be used |
1933 | * by the client driver to make best use of the DMAC | 1713 | * by the client driver to make best use of the DMAC |
1934 | */ | 1714 | */ |
1935 | static void read_dmac_config(struct pl330_info *pi) | 1715 | static void read_dmac_config(struct pl330_dmac *pl330) |
1936 | { | 1716 | { |
1937 | void __iomem *regs = pi->base; | 1717 | void __iomem *regs = pl330->base; |
1938 | u32 val; | 1718 | u32 val; |
1939 | 1719 | ||
1940 | val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT; | 1720 | val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT; |
1941 | val &= CRD_DATA_WIDTH_MASK; | 1721 | val &= CRD_DATA_WIDTH_MASK; |
1942 | pi->pcfg.data_bus_width = 8 * (1 << val); | 1722 | pl330->pcfg.data_bus_width = 8 * (1 << val); |
1943 | 1723 | ||
1944 | val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT; | 1724 | val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT; |
1945 | val &= CRD_DATA_BUFF_MASK; | 1725 | val &= CRD_DATA_BUFF_MASK; |
1946 | pi->pcfg.data_buf_dep = val + 1; | 1726 | pl330->pcfg.data_buf_dep = val + 1; |
1947 | 1727 | ||
1948 | val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT; | 1728 | val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT; |
1949 | val &= CR0_NUM_CHANS_MASK; | 1729 | val &= CR0_NUM_CHANS_MASK; |
1950 | val += 1; | 1730 | val += 1; |
1951 | pi->pcfg.num_chan = val; | 1731 | pl330->pcfg.num_chan = val; |
1952 | 1732 | ||
1953 | val = readl(regs + CR0); | 1733 | val = readl(regs + CR0); |
1954 | if (val & CR0_PERIPH_REQ_SET) { | 1734 | if (val & CR0_PERIPH_REQ_SET) { |
1955 | val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK; | 1735 | val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK; |
1956 | val += 1; | 1736 | val += 1; |
1957 | pi->pcfg.num_peri = val; | 1737 | pl330->pcfg.num_peri = val; |
1958 | pi->pcfg.peri_ns = readl(regs + CR4); | 1738 | pl330->pcfg.peri_ns = readl(regs + CR4); |
1959 | } else { | 1739 | } else { |
1960 | pi->pcfg.num_peri = 0; | 1740 | pl330->pcfg.num_peri = 0; |
1961 | } | 1741 | } |
1962 | 1742 | ||
1963 | val = readl(regs + CR0); | 1743 | val = readl(regs + CR0); |
1964 | if (val & CR0_BOOT_MAN_NS) | 1744 | if (val & CR0_BOOT_MAN_NS) |
1965 | pi->pcfg.mode |= DMAC_MODE_NS; | 1745 | pl330->pcfg.mode |= DMAC_MODE_NS; |
1966 | else | 1746 | else |
1967 | pi->pcfg.mode &= ~DMAC_MODE_NS; | 1747 | pl330->pcfg.mode &= ~DMAC_MODE_NS; |
1968 | 1748 | ||
1969 | val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT; | 1749 | val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT; |
1970 | val &= CR0_NUM_EVENTS_MASK; | 1750 | val &= CR0_NUM_EVENTS_MASK; |
1971 | val += 1; | 1751 | val += 1; |
1972 | pi->pcfg.num_events = val; | 1752 | pl330->pcfg.num_events = val; |
1973 | 1753 | ||
1974 | pi->pcfg.irq_ns = readl(regs + CR3); | 1754 | pl330->pcfg.irq_ns = readl(regs + CR3); |
1975 | } | 1755 | } |
1976 | 1756 | ||
1977 | static inline void _reset_thread(struct pl330_thread *thrd) | 1757 | static inline void _reset_thread(struct pl330_thread *thrd) |
1978 | { | 1758 | { |
1979 | struct pl330_dmac *pl330 = thrd->dmac; | 1759 | struct pl330_dmac *pl330 = thrd->dmac; |
1980 | struct pl330_info *pi = pl330->pinfo; | ||
1981 | 1760 | ||
1982 | thrd->req[0].mc_cpu = pl330->mcode_cpu | 1761 | thrd->req[0].mc_cpu = pl330->mcode_cpu |
1983 | + (thrd->id * pi->mcbufsz); | 1762 | + (thrd->id * pl330->mcbufsz); |
1984 | thrd->req[0].mc_bus = pl330->mcode_bus | 1763 | thrd->req[0].mc_bus = pl330->mcode_bus |
1985 | + (thrd->id * pi->mcbufsz); | 1764 | + (thrd->id * pl330->mcbufsz); |
1986 | thrd->req[0].r = NULL; | 1765 | thrd->req[0].desc = NULL; |
1987 | mark_free(thrd, 0); | ||
1988 | 1766 | ||
1989 | thrd->req[1].mc_cpu = thrd->req[0].mc_cpu | 1767 | thrd->req[1].mc_cpu = thrd->req[0].mc_cpu |
1990 | + pi->mcbufsz / 2; | 1768 | + pl330->mcbufsz / 2; |
1991 | thrd->req[1].mc_bus = thrd->req[0].mc_bus | 1769 | thrd->req[1].mc_bus = thrd->req[0].mc_bus |
1992 | + pi->mcbufsz / 2; | 1770 | + pl330->mcbufsz / 2; |
1993 | thrd->req[1].r = NULL; | 1771 | thrd->req[1].desc = NULL; |
1994 | mark_free(thrd, 1); | 1772 | |
1773 | thrd->req_running = -1; | ||
1995 | } | 1774 | } |
1996 | 1775 | ||
1997 | static int dmac_alloc_threads(struct pl330_dmac *pl330) | 1776 | static int dmac_alloc_threads(struct pl330_dmac *pl330) |
1998 | { | 1777 | { |
1999 | struct pl330_info *pi = pl330->pinfo; | 1778 | int chans = pl330->pcfg.num_chan; |
2000 | int chans = pi->pcfg.num_chan; | ||
2001 | struct pl330_thread *thrd; | 1779 | struct pl330_thread *thrd; |
2002 | int i; | 1780 | int i; |
2003 | 1781 | ||
@@ -2028,29 +1806,28 @@ static int dmac_alloc_threads(struct pl330_dmac *pl330) | |||
2028 | 1806 | ||
2029 | static int dmac_alloc_resources(struct pl330_dmac *pl330) | 1807 | static int dmac_alloc_resources(struct pl330_dmac *pl330) |
2030 | { | 1808 | { |
2031 | struct pl330_info *pi = pl330->pinfo; | 1809 | int chans = pl330->pcfg.num_chan; |
2032 | int chans = pi->pcfg.num_chan; | ||
2033 | int ret; | 1810 | int ret; |
2034 | 1811 | ||
2035 | /* | 1812 | /* |
2036 | * Alloc MicroCode buffer for 'chans' Channel threads. | 1813 | * Alloc MicroCode buffer for 'chans' Channel threads. |
2037 | * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN) | 1814 | * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN) |
2038 | */ | 1815 | */ |
2039 | pl330->mcode_cpu = dma_alloc_coherent(pi->dev, | 1816 | pl330->mcode_cpu = dma_alloc_coherent(pl330->ddma.dev, |
2040 | chans * pi->mcbufsz, | 1817 | chans * pl330->mcbufsz, |
2041 | &pl330->mcode_bus, GFP_KERNEL); | 1818 | &pl330->mcode_bus, GFP_KERNEL); |
2042 | if (!pl330->mcode_cpu) { | 1819 | if (!pl330->mcode_cpu) { |
2043 | dev_err(pi->dev, "%s:%d Can't allocate memory!\n", | 1820 | dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n", |
2044 | __func__, __LINE__); | 1821 | __func__, __LINE__); |
2045 | return -ENOMEM; | 1822 | return -ENOMEM; |
2046 | } | 1823 | } |
2047 | 1824 | ||
2048 | ret = dmac_alloc_threads(pl330); | 1825 | ret = dmac_alloc_threads(pl330); |
2049 | if (ret) { | 1826 | if (ret) { |
2050 | dev_err(pi->dev, "%s:%d Can't create channels for DMAC!\n", | 1827 | dev_err(pl330->ddma.dev, "%s:%d Can't create channels for DMAC!\n", |
2051 | __func__, __LINE__); | 1828 | __func__, __LINE__); |
2052 | dma_free_coherent(pi->dev, | 1829 | dma_free_coherent(pl330->ddma.dev, |
2053 | chans * pi->mcbufsz, | 1830 | chans * pl330->mcbufsz, |
2054 | pl330->mcode_cpu, pl330->mcode_bus); | 1831 | pl330->mcode_cpu, pl330->mcode_bus); |
2055 | return ret; | 1832 | return ret; |
2056 | } | 1833 | } |
@@ -2058,71 +1835,45 @@ static int dmac_alloc_resources(struct pl330_dmac *pl330) | |||
2058 | return 0; | 1835 | return 0; |
2059 | } | 1836 | } |
2060 | 1837 | ||
2061 | static int pl330_add(struct pl330_info *pi) | 1838 | static int pl330_add(struct pl330_dmac *pl330) |
2062 | { | 1839 | { |
2063 | struct pl330_dmac *pl330; | ||
2064 | void __iomem *regs; | 1840 | void __iomem *regs; |
2065 | int i, ret; | 1841 | int i, ret; |
2066 | 1842 | ||
2067 | if (!pi || !pi->dev) | 1843 | regs = pl330->base; |
2068 | return -EINVAL; | ||
2069 | |||
2070 | /* If already added */ | ||
2071 | if (pi->pl330_data) | ||
2072 | return -EINVAL; | ||
2073 | |||
2074 | /* | ||
2075 | * If the SoC can perform reset on the DMAC, then do it | ||
2076 | * before reading its configuration. | ||
2077 | */ | ||
2078 | if (pi->dmac_reset) | ||
2079 | pi->dmac_reset(pi); | ||
2080 | |||
2081 | regs = pi->base; | ||
2082 | 1844 | ||
2083 | /* Check if we can handle this DMAC */ | 1845 | /* Check if we can handle this DMAC */ |
2084 | if ((pi->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) { | 1846 | if ((pl330->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) { |
2085 | dev_err(pi->dev, "PERIPH_ID 0x%x !\n", pi->pcfg.periph_id); | 1847 | dev_err(pl330->ddma.dev, "PERIPH_ID 0x%x !\n", |
1848 | pl330->pcfg.periph_id); | ||
2086 | return -EINVAL; | 1849 | return -EINVAL; |
2087 | } | 1850 | } |
2088 | 1851 | ||
2089 | /* Read the configuration of the DMAC */ | 1852 | /* Read the configuration of the DMAC */ |
2090 | read_dmac_config(pi); | 1853 | read_dmac_config(pl330); |
2091 | 1854 | ||
2092 | if (pi->pcfg.num_events == 0) { | 1855 | if (pl330->pcfg.num_events == 0) { |
2093 | dev_err(pi->dev, "%s:%d Can't work without events!\n", | 1856 | dev_err(pl330->ddma.dev, "%s:%d Can't work without events!\n", |
2094 | __func__, __LINE__); | 1857 | __func__, __LINE__); |
2095 | return -EINVAL; | 1858 | return -EINVAL; |
2096 | } | 1859 | } |
2097 | 1860 | ||
2098 | pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL); | ||
2099 | if (!pl330) { | ||
2100 | dev_err(pi->dev, "%s:%d Can't allocate memory!\n", | ||
2101 | __func__, __LINE__); | ||
2102 | return -ENOMEM; | ||
2103 | } | ||
2104 | |||
2105 | /* Assign the info structure and private data */ | ||
2106 | pl330->pinfo = pi; | ||
2107 | pi->pl330_data = pl330; | ||
2108 | |||
2109 | spin_lock_init(&pl330->lock); | 1861 | spin_lock_init(&pl330->lock); |
2110 | 1862 | ||
2111 | INIT_LIST_HEAD(&pl330->req_done); | 1863 | INIT_LIST_HEAD(&pl330->req_done); |
2112 | 1864 | ||
2113 | /* Use default MC buffer size if not provided */ | 1865 | /* Use default MC buffer size if not provided */ |
2114 | if (!pi->mcbufsz) | 1866 | if (!pl330->mcbufsz) |
2115 | pi->mcbufsz = MCODE_BUFF_PER_REQ * 2; | 1867 | pl330->mcbufsz = MCODE_BUFF_PER_REQ * 2; |
2116 | 1868 | ||
2117 | /* Mark all events as free */ | 1869 | /* Mark all events as free */ |
2118 | for (i = 0; i < pi->pcfg.num_events; i++) | 1870 | for (i = 0; i < pl330->pcfg.num_events; i++) |
2119 | pl330->events[i] = -1; | 1871 | pl330->events[i] = -1; |
2120 | 1872 | ||
2121 | /* Allocate resources needed by the DMAC */ | 1873 | /* Allocate resources needed by the DMAC */ |
2122 | ret = dmac_alloc_resources(pl330); | 1874 | ret = dmac_alloc_resources(pl330); |
2123 | if (ret) { | 1875 | if (ret) { |
2124 | dev_err(pi->dev, "Unable to create channels for DMAC\n"); | 1876 | dev_err(pl330->ddma.dev, "Unable to create channels for DMAC\n"); |
2125 | kfree(pl330); | ||
2126 | return ret; | 1877 | return ret; |
2127 | } | 1878 | } |
2128 | 1879 | ||
@@ -2135,15 +1886,13 @@ static int pl330_add(struct pl330_info *pi) | |||
2135 | 1886 | ||
2136 | static int dmac_free_threads(struct pl330_dmac *pl330) | 1887 | static int dmac_free_threads(struct pl330_dmac *pl330) |
2137 | { | 1888 | { |
2138 | struct pl330_info *pi = pl330->pinfo; | ||
2139 | int chans = pi->pcfg.num_chan; | ||
2140 | struct pl330_thread *thrd; | 1889 | struct pl330_thread *thrd; |
2141 | int i; | 1890 | int i; |
2142 | 1891 | ||
2143 | /* Release Channel threads */ | 1892 | /* Release Channel threads */ |
2144 | for (i = 0; i < chans; i++) { | 1893 | for (i = 0; i < pl330->pcfg.num_chan; i++) { |
2145 | thrd = &pl330->channels[i]; | 1894 | thrd = &pl330->channels[i]; |
2146 | pl330_release_channel((void *)thrd); | 1895 | pl330_release_channel(thrd); |
2147 | } | 1896 | } |
2148 | 1897 | ||
2149 | /* Free memory */ | 1898 | /* Free memory */ |
@@ -2152,35 +1901,18 @@ static int dmac_free_threads(struct pl330_dmac *pl330) | |||
2152 | return 0; | 1901 | return 0; |
2153 | } | 1902 | } |
2154 | 1903 | ||
2155 | static void dmac_free_resources(struct pl330_dmac *pl330) | 1904 | static void pl330_del(struct pl330_dmac *pl330) |
2156 | { | 1905 | { |
2157 | struct pl330_info *pi = pl330->pinfo; | ||
2158 | int chans = pi->pcfg.num_chan; | ||
2159 | |||
2160 | dmac_free_threads(pl330); | ||
2161 | |||
2162 | dma_free_coherent(pi->dev, chans * pi->mcbufsz, | ||
2163 | pl330->mcode_cpu, pl330->mcode_bus); | ||
2164 | } | ||
2165 | |||
2166 | static void pl330_del(struct pl330_info *pi) | ||
2167 | { | ||
2168 | struct pl330_dmac *pl330; | ||
2169 | |||
2170 | if (!pi || !pi->pl330_data) | ||
2171 | return; | ||
2172 | |||
2173 | pl330 = pi->pl330_data; | ||
2174 | |||
2175 | pl330->state = UNINIT; | 1906 | pl330->state = UNINIT; |
2176 | 1907 | ||
2177 | tasklet_kill(&pl330->tasks); | 1908 | tasklet_kill(&pl330->tasks); |
2178 | 1909 | ||
2179 | /* Free DMAC resources */ | 1910 | /* Free DMAC resources */ |
2180 | dmac_free_resources(pl330); | 1911 | dmac_free_threads(pl330); |
2181 | 1912 | ||
2182 | kfree(pl330); | 1913 | dma_free_coherent(pl330->ddma.dev, |
2183 | pi->pl330_data = NULL; | 1914 | pl330->pcfg.num_chan * pl330->mcbufsz, pl330->mcode_cpu, |
1915 | pl330->mcode_bus); | ||
2184 | } | 1916 | } |
2185 | 1917 | ||
2186 | /* forward declaration */ | 1918 | /* forward declaration */ |
@@ -2212,8 +1944,7 @@ static inline void fill_queue(struct dma_pl330_chan *pch) | |||
2212 | if (desc->status == BUSY) | 1944 | if (desc->status == BUSY) |
2213 | continue; | 1945 | continue; |
2214 | 1946 | ||
2215 | ret = pl330_submit_req(pch->pl330_chid, | 1947 | ret = pl330_submit_req(pch->thread, desc); |
2216 | &desc->req); | ||
2217 | if (!ret) { | 1948 | if (!ret) { |
2218 | desc->status = BUSY; | 1949 | desc->status = BUSY; |
2219 | } else if (ret == -EAGAIN) { | 1950 | } else if (ret == -EAGAIN) { |
@@ -2222,7 +1953,7 @@ static inline void fill_queue(struct dma_pl330_chan *pch) | |||
2222 | } else { | 1953 | } else { |
2223 | /* Unacceptable request */ | 1954 | /* Unacceptable request */ |
2224 | desc->status = DONE; | 1955 | desc->status = DONE; |
2225 | dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n", | 1956 | dev_err(pch->dmac->ddma.dev, "%s:%d Bad Desc(%d)\n", |
2226 | __func__, __LINE__, desc->txd.cookie); | 1957 | __func__, __LINE__, desc->txd.cookie); |
2227 | tasklet_schedule(&pch->task); | 1958 | tasklet_schedule(&pch->task); |
2228 | } | 1959 | } |
@@ -2249,7 +1980,9 @@ static void pl330_tasklet(unsigned long data) | |||
2249 | fill_queue(pch); | 1980 | fill_queue(pch); |
2250 | 1981 | ||
2251 | /* Make sure the PL330 Channel thread is active */ | 1982 | /* Make sure the PL330 Channel thread is active */ |
2252 | pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START); | 1983 | spin_lock(&pch->thread->dmac->lock); |
1984 | _start(pch->thread); | ||
1985 | spin_unlock(&pch->thread->dmac->lock); | ||
2253 | 1986 | ||
2254 | while (!list_empty(&pch->completed_list)) { | 1987 | while (!list_empty(&pch->completed_list)) { |
2255 | dma_async_tx_callback callback; | 1988 | dma_async_tx_callback callback; |
@@ -2280,25 +2013,6 @@ static void pl330_tasklet(unsigned long data) | |||
2280 | spin_unlock_irqrestore(&pch->lock, flags); | 2013 | spin_unlock_irqrestore(&pch->lock, flags); |
2281 | } | 2014 | } |
2282 | 2015 | ||
2283 | static void dma_pl330_rqcb(void *token, enum pl330_op_err err) | ||
2284 | { | ||
2285 | struct dma_pl330_desc *desc = token; | ||
2286 | struct dma_pl330_chan *pch = desc->pchan; | ||
2287 | unsigned long flags; | ||
2288 | |||
2289 | /* If desc aborted */ | ||
2290 | if (!pch) | ||
2291 | return; | ||
2292 | |||
2293 | spin_lock_irqsave(&pch->lock, flags); | ||
2294 | |||
2295 | desc->status = DONE; | ||
2296 | |||
2297 | spin_unlock_irqrestore(&pch->lock, flags); | ||
2298 | |||
2299 | tasklet_schedule(&pch->task); | ||
2300 | } | ||
2301 | |||
2302 | bool pl330_filter(struct dma_chan *chan, void *param) | 2016 | bool pl330_filter(struct dma_chan *chan, void *param) |
2303 | { | 2017 | { |
2304 | u8 *peri_id; | 2018 | u8 *peri_id; |
@@ -2315,23 +2029,26 @@ static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec, | |||
2315 | struct of_dma *ofdma) | 2029 | struct of_dma *ofdma) |
2316 | { | 2030 | { |
2317 | int count = dma_spec->args_count; | 2031 | int count = dma_spec->args_count; |
2318 | struct dma_pl330_dmac *pdmac = ofdma->of_dma_data; | 2032 | struct pl330_dmac *pl330 = ofdma->of_dma_data; |
2319 | unsigned int chan_id; | 2033 | unsigned int chan_id; |
2320 | 2034 | ||
2035 | if (!pl330) | ||
2036 | return NULL; | ||
2037 | |||
2321 | if (count != 1) | 2038 | if (count != 1) |
2322 | return NULL; | 2039 | return NULL; |
2323 | 2040 | ||
2324 | chan_id = dma_spec->args[0]; | 2041 | chan_id = dma_spec->args[0]; |
2325 | if (chan_id >= pdmac->num_peripherals) | 2042 | if (chan_id >= pl330->num_peripherals) |
2326 | return NULL; | 2043 | return NULL; |
2327 | 2044 | ||
2328 | return dma_get_slave_channel(&pdmac->peripherals[chan_id].chan); | 2045 | return dma_get_slave_channel(&pl330->peripherals[chan_id].chan); |
2329 | } | 2046 | } |
2330 | 2047 | ||
2331 | static int pl330_alloc_chan_resources(struct dma_chan *chan) | 2048 | static int pl330_alloc_chan_resources(struct dma_chan *chan) |
2332 | { | 2049 | { |
2333 | struct dma_pl330_chan *pch = to_pchan(chan); | 2050 | struct dma_pl330_chan *pch = to_pchan(chan); |
2334 | struct dma_pl330_dmac *pdmac = pch->dmac; | 2051 | struct pl330_dmac *pl330 = pch->dmac; |
2335 | unsigned long flags; | 2052 | unsigned long flags; |
2336 | 2053 | ||
2337 | spin_lock_irqsave(&pch->lock, flags); | 2054 | spin_lock_irqsave(&pch->lock, flags); |
@@ -2339,8 +2056,8 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan) | |||
2339 | dma_cookie_init(chan); | 2056 | dma_cookie_init(chan); |
2340 | pch->cyclic = false; | 2057 | pch->cyclic = false; |
2341 | 2058 | ||
2342 | pch->pl330_chid = pl330_request_channel(&pdmac->pif); | 2059 | pch->thread = pl330_request_channel(pl330); |
2343 | if (!pch->pl330_chid) { | 2060 | if (!pch->thread) { |
2344 | spin_unlock_irqrestore(&pch->lock, flags); | 2061 | spin_unlock_irqrestore(&pch->lock, flags); |
2345 | return -ENOMEM; | 2062 | return -ENOMEM; |
2346 | } | 2063 | } |
@@ -2357,7 +2074,7 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned | |||
2357 | struct dma_pl330_chan *pch = to_pchan(chan); | 2074 | struct dma_pl330_chan *pch = to_pchan(chan); |
2358 | struct dma_pl330_desc *desc; | 2075 | struct dma_pl330_desc *desc; |
2359 | unsigned long flags; | 2076 | unsigned long flags; |
2360 | struct dma_pl330_dmac *pdmac = pch->dmac; | 2077 | struct pl330_dmac *pl330 = pch->dmac; |
2361 | struct dma_slave_config *slave_config; | 2078 | struct dma_slave_config *slave_config; |
2362 | LIST_HEAD(list); | 2079 | LIST_HEAD(list); |
2363 | 2080 | ||
@@ -2365,8 +2082,13 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned | |||
2365 | case DMA_TERMINATE_ALL: | 2082 | case DMA_TERMINATE_ALL: |
2366 | spin_lock_irqsave(&pch->lock, flags); | 2083 | spin_lock_irqsave(&pch->lock, flags); |
2367 | 2084 | ||
2368 | /* FLUSH the PL330 Channel thread */ | 2085 | spin_lock(&pl330->lock); |
2369 | pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH); | 2086 | _stop(pch->thread); |
2087 | spin_unlock(&pl330->lock); | ||
2088 | |||
2089 | pch->thread->req[0].desc = NULL; | ||
2090 | pch->thread->req[1].desc = NULL; | ||
2091 | pch->thread->req_running = -1; | ||
2370 | 2092 | ||
2371 | /* Mark all desc done */ | 2093 | /* Mark all desc done */ |
2372 | list_for_each_entry(desc, &pch->submitted_list, node) { | 2094 | list_for_each_entry(desc, &pch->submitted_list, node) { |
@@ -2384,9 +2106,9 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned | |||
2384 | dma_cookie_complete(&desc->txd); | 2106 | dma_cookie_complete(&desc->txd); |
2385 | } | 2107 | } |
2386 | 2108 | ||
2387 | list_splice_tail_init(&pch->submitted_list, &pdmac->desc_pool); | 2109 | list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool); |
2388 | list_splice_tail_init(&pch->work_list, &pdmac->desc_pool); | 2110 | list_splice_tail_init(&pch->work_list, &pl330->desc_pool); |
2389 | list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool); | 2111 | list_splice_tail_init(&pch->completed_list, &pl330->desc_pool); |
2390 | spin_unlock_irqrestore(&pch->lock, flags); | 2112 | spin_unlock_irqrestore(&pch->lock, flags); |
2391 | break; | 2113 | break; |
2392 | case DMA_SLAVE_CONFIG: | 2114 | case DMA_SLAVE_CONFIG: |
@@ -2409,7 +2131,7 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned | |||
2409 | } | 2131 | } |
2410 | break; | 2132 | break; |
2411 | default: | 2133 | default: |
2412 | dev_err(pch->dmac->pif.dev, "Not supported command.\n"); | 2134 | dev_err(pch->dmac->ddma.dev, "Not supported command.\n"); |
2413 | return -ENXIO; | 2135 | return -ENXIO; |
2414 | } | 2136 | } |
2415 | 2137 | ||
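[Note] The DMA_TERMINATE_ALL rework above drops the pl330_chan_ctrl()/PL330_OP_FLUSH indirection: the channel thread is stopped directly under the DMAC lock and both thread request slots are invalidated by hand. Consumers are unaffected and keep terminating through the generic wrapper. A minimal sketch, assuming `chan` was obtained earlier from dma_request_channel(); the function name example_teardown is illustrative only:

    #include <linux/dmaengine.h>

    static void example_teardown(struct dma_chan *chan)
    {
            /*
             * dmaengine_terminate_all() resolves to the driver's
             * device_control(chan, DMA_TERMINATE_ALL, 0) callback,
             * i.e. the pl330_control() path shown in the hunk above.
             */
            dmaengine_terminate_all(chan);
    }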
@@ -2425,8 +2147,8 @@ static void pl330_free_chan_resources(struct dma_chan *chan) | |||
2425 | 2147 | ||
2426 | spin_lock_irqsave(&pch->lock, flags); | 2148 | spin_lock_irqsave(&pch->lock, flags); |
2427 | 2149 | ||
2428 | pl330_release_channel(pch->pl330_chid); | 2150 | pl330_release_channel(pch->thread); |
2429 | pch->pl330_chid = NULL; | 2151 | pch->thread = NULL; |
2430 | 2152 | ||
2431 | if (pch->cyclic) | 2153 | if (pch->cyclic) |
2432 | list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool); | 2154 | list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool); |
@@ -2489,57 +2211,46 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx) | |||
2489 | 2211 | ||
2490 | static inline void _init_desc(struct dma_pl330_desc *desc) | 2212 | static inline void _init_desc(struct dma_pl330_desc *desc) |
2491 | { | 2213 | { |
2492 | desc->req.x = &desc->px; | ||
2493 | desc->req.token = desc; | ||
2494 | desc->rqcfg.swap = SWAP_NO; | 2214 | desc->rqcfg.swap = SWAP_NO; |
2495 | desc->rqcfg.scctl = SCCTRL0; | 2215 | desc->rqcfg.scctl = CCTRL0; |
2496 | desc->rqcfg.dcctl = DCCTRL0; | 2216 | desc->rqcfg.dcctl = CCTRL0; |
2497 | desc->req.cfg = &desc->rqcfg; | ||
2498 | desc->req.xfer_cb = dma_pl330_rqcb; | ||
2499 | desc->txd.tx_submit = pl330_tx_submit; | 2217 | desc->txd.tx_submit = pl330_tx_submit; |
2500 | 2218 | ||
2501 | INIT_LIST_HEAD(&desc->node); | 2219 | INIT_LIST_HEAD(&desc->node); |
2502 | } | 2220 | } |
2503 | 2221 | ||
2504 | /* Returns the number of descriptors added to the DMAC pool */ | 2222 | /* Returns the number of descriptors added to the DMAC pool */ |
2505 | static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count) | 2223 | static int add_desc(struct pl330_dmac *pl330, gfp_t flg, int count) |
2506 | { | 2224 | { |
2507 | struct dma_pl330_desc *desc; | 2225 | struct dma_pl330_desc *desc; |
2508 | unsigned long flags; | 2226 | unsigned long flags; |
2509 | int i; | 2227 | int i; |
2510 | 2228 | ||
2511 | if (!pdmac) | ||
2512 | return 0; | ||
2513 | |||
2514 | desc = kcalloc(count, sizeof(*desc), flg); | 2229 | desc = kcalloc(count, sizeof(*desc), flg); |
2515 | if (!desc) | 2230 | if (!desc) |
2516 | return 0; | 2231 | return 0; |
2517 | 2232 | ||
2518 | spin_lock_irqsave(&pdmac->pool_lock, flags); | 2233 | spin_lock_irqsave(&pl330->pool_lock, flags); |
2519 | 2234 | ||
2520 | for (i = 0; i < count; i++) { | 2235 | for (i = 0; i < count; i++) { |
2521 | _init_desc(&desc[i]); | 2236 | _init_desc(&desc[i]); |
2522 | list_add_tail(&desc[i].node, &pdmac->desc_pool); | 2237 | list_add_tail(&desc[i].node, &pl330->desc_pool); |
2523 | } | 2238 | } |
2524 | 2239 | ||
2525 | spin_unlock_irqrestore(&pdmac->pool_lock, flags); | 2240 | spin_unlock_irqrestore(&pl330->pool_lock, flags); |
2526 | 2241 | ||
2527 | return count; | 2242 | return count; |
2528 | } | 2243 | } |
2529 | 2244 | ||
2530 | static struct dma_pl330_desc * | 2245 | static struct dma_pl330_desc *pluck_desc(struct pl330_dmac *pl330) |
2531 | pluck_desc(struct dma_pl330_dmac *pdmac) | ||
2532 | { | 2246 | { |
2533 | struct dma_pl330_desc *desc = NULL; | 2247 | struct dma_pl330_desc *desc = NULL; |
2534 | unsigned long flags; | 2248 | unsigned long flags; |
2535 | 2249 | ||
2536 | if (!pdmac) | 2250 | spin_lock_irqsave(&pl330->pool_lock, flags); |
2537 | return NULL; | ||
2538 | |||
2539 | spin_lock_irqsave(&pdmac->pool_lock, flags); | ||
2540 | 2251 | ||
2541 | if (!list_empty(&pdmac->desc_pool)) { | 2252 | if (!list_empty(&pl330->desc_pool)) { |
2542 | desc = list_entry(pdmac->desc_pool.next, | 2253 | desc = list_entry(pl330->desc_pool.next, |
2543 | struct dma_pl330_desc, node); | 2254 | struct dma_pl330_desc, node); |
2544 | 2255 | ||
2545 | list_del_init(&desc->node); | 2256 | list_del_init(&desc->node); |
@@ -2548,29 +2259,29 @@ pluck_desc(struct dma_pl330_dmac *pdmac) | |||
2548 | desc->txd.callback = NULL; | 2259 | desc->txd.callback = NULL; |
2549 | } | 2260 | } |
2550 | 2261 | ||
2551 | spin_unlock_irqrestore(&pdmac->pool_lock, flags); | 2262 | spin_unlock_irqrestore(&pl330->pool_lock, flags); |
2552 | 2263 | ||
2553 | return desc; | 2264 | return desc; |
2554 | } | 2265 | } |
2555 | 2266 | ||
2556 | static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) | 2267 | static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) |
2557 | { | 2268 | { |
2558 | struct dma_pl330_dmac *pdmac = pch->dmac; | 2269 | struct pl330_dmac *pl330 = pch->dmac; |
2559 | u8 *peri_id = pch->chan.private; | 2270 | u8 *peri_id = pch->chan.private; |
2560 | struct dma_pl330_desc *desc; | 2271 | struct dma_pl330_desc *desc; |
2561 | 2272 | ||
2562 | /* Pluck one desc from the pool of DMAC */ | 2273 | /* Pluck one desc from the pool of DMAC */ |
2563 | desc = pluck_desc(pdmac); | 2274 | desc = pluck_desc(pl330); |
2564 | 2275 | ||
2565 | /* If the DMAC pool is empty, alloc new */ | 2276 | /* If the DMAC pool is empty, alloc new */ |
2566 | if (!desc) { | 2277 | if (!desc) { |
2567 | if (!add_desc(pdmac, GFP_ATOMIC, 1)) | 2278 | if (!add_desc(pl330, GFP_ATOMIC, 1)) |
2568 | return NULL; | 2279 | return NULL; |
2569 | 2280 | ||
2570 | /* Try again */ | 2281 | /* Try again */ |
2571 | desc = pluck_desc(pdmac); | 2282 | desc = pluck_desc(pl330); |
2572 | if (!desc) { | 2283 | if (!desc) { |
2573 | dev_err(pch->dmac->pif.dev, | 2284 | dev_err(pch->dmac->ddma.dev, |
2574 | "%s:%d ALERT!\n", __func__, __LINE__); | 2285 | "%s:%d ALERT!\n", __func__, __LINE__); |
2575 | return NULL; | 2286 | return NULL; |
2576 | } | 2287 | } |
@@ -2581,8 +2292,8 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) | |||
2581 | desc->txd.cookie = 0; | 2292 | desc->txd.cookie = 0; |
2582 | async_tx_ack(&desc->txd); | 2293 | async_tx_ack(&desc->txd); |
2583 | 2294 | ||
2584 | desc->req.peri = peri_id ? pch->chan.chan_id : 0; | 2295 | desc->peri = peri_id ? pch->chan.chan_id : 0; |
2585 | desc->rqcfg.pcfg = &pch->dmac->pif.pcfg; | 2296 | desc->rqcfg.pcfg = &pch->dmac->pcfg; |
2586 | 2297 | ||
2587 | dma_async_tx_descriptor_init(&desc->txd, &pch->chan); | 2298 | dma_async_tx_descriptor_init(&desc->txd, &pch->chan); |
2588 | 2299 | ||
@@ -2592,7 +2303,6 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) | |||
2592 | static inline void fill_px(struct pl330_xfer *px, | 2303 | static inline void fill_px(struct pl330_xfer *px, |
2593 | dma_addr_t dst, dma_addr_t src, size_t len) | 2304 | dma_addr_t dst, dma_addr_t src, size_t len) |
2594 | { | 2305 | { |
2595 | px->next = NULL; | ||
2596 | px->bytes = len; | 2306 | px->bytes = len; |
2597 | px->dst_addr = dst; | 2307 | px->dst_addr = dst; |
2598 | px->src_addr = src; | 2308 | px->src_addr = src; |
@@ -2605,7 +2315,7 @@ __pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst, | |||
2605 | struct dma_pl330_desc *desc = pl330_get_desc(pch); | 2315 | struct dma_pl330_desc *desc = pl330_get_desc(pch); |
2606 | 2316 | ||
2607 | if (!desc) { | 2317 | if (!desc) { |
2608 | dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n", | 2318 | dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n", |
2609 | __func__, __LINE__); | 2319 | __func__, __LINE__); |
2610 | return NULL; | 2320 | return NULL; |
2611 | } | 2321 | } |
@@ -2629,11 +2339,11 @@ __pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst, | |||
2629 | static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len) | 2339 | static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len) |
2630 | { | 2340 | { |
2631 | struct dma_pl330_chan *pch = desc->pchan; | 2341 | struct dma_pl330_chan *pch = desc->pchan; |
2632 | struct pl330_info *pi = &pch->dmac->pif; | 2342 | struct pl330_dmac *pl330 = pch->dmac; |
2633 | int burst_len; | 2343 | int burst_len; |
2634 | 2344 | ||
2635 | burst_len = pi->pcfg.data_bus_width / 8; | 2345 | burst_len = pl330->pcfg.data_bus_width / 8; |
2636 | burst_len *= pi->pcfg.data_buf_dep; | 2346 | burst_len *= pl330->pcfg.data_buf_dep; |
2637 | burst_len >>= desc->rqcfg.brst_size; | 2347 | burst_len >>= desc->rqcfg.brst_size; |
2638 | 2348 | ||
2639 | /* src/dst_burst_len can't be more than 16 */ | 2349 | /* src/dst_burst_len can't be more than 16 */ |
@@ -2652,11 +2362,11 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len) | |||
2652 | static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( | 2362 | static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( |
2653 | struct dma_chan *chan, dma_addr_t dma_addr, size_t len, | 2363 | struct dma_chan *chan, dma_addr_t dma_addr, size_t len, |
2654 | size_t period_len, enum dma_transfer_direction direction, | 2364 | size_t period_len, enum dma_transfer_direction direction, |
2655 | unsigned long flags, void *context) | 2365 | unsigned long flags) |
2656 | { | 2366 | { |
2657 | struct dma_pl330_desc *desc = NULL, *first = NULL; | 2367 | struct dma_pl330_desc *desc = NULL, *first = NULL; |
2658 | struct dma_pl330_chan *pch = to_pchan(chan); | 2368 | struct dma_pl330_chan *pch = to_pchan(chan); |
2659 | struct dma_pl330_dmac *pdmac = pch->dmac; | 2369 | struct pl330_dmac *pl330 = pch->dmac; |
2660 | unsigned int i; | 2370 | unsigned int i; |
2661 | dma_addr_t dst; | 2371 | dma_addr_t dst; |
2662 | dma_addr_t src; | 2372 | dma_addr_t src; |
@@ -2665,7 +2375,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( | |||
2665 | return NULL; | 2375 | return NULL; |
2666 | 2376 | ||
2667 | if (!is_slave_direction(direction)) { | 2377 | if (!is_slave_direction(direction)) { |
2668 | dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n", | 2378 | dev_err(pch->dmac->ddma.dev, "%s:%d Invalid dma direction\n", |
2669 | __func__, __LINE__); | 2379 | __func__, __LINE__); |
2670 | return NULL; | 2380 | return NULL; |
2671 | } | 2381 | } |
@@ -2673,23 +2383,23 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( | |||
2673 | for (i = 0; i < len / period_len; i++) { | 2383 | for (i = 0; i < len / period_len; i++) { |
2674 | desc = pl330_get_desc(pch); | 2384 | desc = pl330_get_desc(pch); |
2675 | if (!desc) { | 2385 | if (!desc) { |
2676 | dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n", | 2386 | dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n", |
2677 | __func__, __LINE__); | 2387 | __func__, __LINE__); |
2678 | 2388 | ||
2679 | if (!first) | 2389 | if (!first) |
2680 | return NULL; | 2390 | return NULL; |
2681 | 2391 | ||
2682 | spin_lock_irqsave(&pdmac->pool_lock, flags); | 2392 | spin_lock_irqsave(&pl330->pool_lock, flags); |
2683 | 2393 | ||
2684 | while (!list_empty(&first->node)) { | 2394 | while (!list_empty(&first->node)) { |
2685 | desc = list_entry(first->node.next, | 2395 | desc = list_entry(first->node.next, |
2686 | struct dma_pl330_desc, node); | 2396 | struct dma_pl330_desc, node); |
2687 | list_move_tail(&desc->node, &pdmac->desc_pool); | 2397 | list_move_tail(&desc->node, &pl330->desc_pool); |
2688 | } | 2398 | } |
2689 | 2399 | ||
2690 | list_move_tail(&first->node, &pdmac->desc_pool); | 2400 | list_move_tail(&first->node, &pl330->desc_pool); |
2691 | 2401 | ||
2692 | spin_unlock_irqrestore(&pdmac->pool_lock, flags); | 2402 | spin_unlock_irqrestore(&pl330->pool_lock, flags); |
2693 | 2403 | ||
2694 | return NULL; | 2404 | return NULL; |
2695 | } | 2405 | } |
@@ -2698,14 +2408,12 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( | |||
2698 | case DMA_MEM_TO_DEV: | 2408 | case DMA_MEM_TO_DEV: |
2699 | desc->rqcfg.src_inc = 1; | 2409 | desc->rqcfg.src_inc = 1; |
2700 | desc->rqcfg.dst_inc = 0; | 2410 | desc->rqcfg.dst_inc = 0; |
2701 | desc->req.rqtype = MEMTODEV; | ||
2702 | src = dma_addr; | 2411 | src = dma_addr; |
2703 | dst = pch->fifo_addr; | 2412 | dst = pch->fifo_addr; |
2704 | break; | 2413 | break; |
2705 | case DMA_DEV_TO_MEM: | 2414 | case DMA_DEV_TO_MEM: |
2706 | desc->rqcfg.src_inc = 0; | 2415 | desc->rqcfg.src_inc = 0; |
2707 | desc->rqcfg.dst_inc = 1; | 2416 | desc->rqcfg.dst_inc = 1; |
2708 | desc->req.rqtype = DEVTOMEM; | ||
2709 | src = pch->fifo_addr; | 2417 | src = pch->fifo_addr; |
2710 | dst = dma_addr; | 2418 | dst = dma_addr; |
2711 | break; | 2419 | break; |
@@ -2713,6 +2421,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( | |||
2713 | break; | 2421 | break; |
2714 | } | 2422 | } |
2715 | 2423 | ||
2424 | desc->rqtype = direction; | ||
2716 | desc->rqcfg.brst_size = pch->burst_sz; | 2425 | desc->rqcfg.brst_size = pch->burst_sz; |
2717 | desc->rqcfg.brst_len = 1; | 2426 | desc->rqcfg.brst_len = 1; |
2718 | fill_px(&desc->px, dst, src, period_len); | 2427 | fill_px(&desc->px, dst, src, period_len); |
@@ -2740,24 +2449,22 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, | |||
2740 | { | 2449 | { |
2741 | struct dma_pl330_desc *desc; | 2450 | struct dma_pl330_desc *desc; |
2742 | struct dma_pl330_chan *pch = to_pchan(chan); | 2451 | struct dma_pl330_chan *pch = to_pchan(chan); |
2743 | struct pl330_info *pi; | 2452 | struct pl330_dmac *pl330 = pch->dmac; |
2744 | int burst; | 2453 | int burst; |
2745 | 2454 | ||
2746 | if (unlikely(!pch || !len)) | 2455 | if (unlikely(!pch || !len)) |
2747 | return NULL; | 2456 | return NULL; |
2748 | 2457 | ||
2749 | pi = &pch->dmac->pif; | ||
2750 | |||
2751 | desc = __pl330_prep_dma_memcpy(pch, dst, src, len); | 2458 | desc = __pl330_prep_dma_memcpy(pch, dst, src, len); |
2752 | if (!desc) | 2459 | if (!desc) |
2753 | return NULL; | 2460 | return NULL; |
2754 | 2461 | ||
2755 | desc->rqcfg.src_inc = 1; | 2462 | desc->rqcfg.src_inc = 1; |
2756 | desc->rqcfg.dst_inc = 1; | 2463 | desc->rqcfg.dst_inc = 1; |
2757 | desc->req.rqtype = MEMTOMEM; | 2464 | desc->rqtype = DMA_MEM_TO_MEM; |
2758 | 2465 | ||
2759 | /* Select max possible burst size */ | 2466 | /* Select max possible burst size */ |
2760 | burst = pi->pcfg.data_bus_width / 8; | 2467 | burst = pl330->pcfg.data_bus_width / 8; |
2761 | 2468 | ||
2762 | while (burst > 1) { | 2469 | while (burst > 1) { |
2763 | if (!(len % burst)) | 2470 | if (!(len % burst)) |
@@ -2776,7 +2483,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, | |||
2776 | return &desc->txd; | 2483 | return &desc->txd; |
2777 | } | 2484 | } |
2778 | 2485 | ||
2779 | static void __pl330_giveback_desc(struct dma_pl330_dmac *pdmac, | 2486 | static void __pl330_giveback_desc(struct pl330_dmac *pl330, |
2780 | struct dma_pl330_desc *first) | 2487 | struct dma_pl330_desc *first) |
2781 | { | 2488 | { |
2782 | unsigned long flags; | 2489 | unsigned long flags; |
@@ -2785,17 +2492,17 @@ static void __pl330_giveback_desc(struct dma_pl330_dmac *pdmac, | |||
2785 | if (!first) | 2492 | if (!first) |
2786 | return; | 2493 | return; |
2787 | 2494 | ||
2788 | spin_lock_irqsave(&pdmac->pool_lock, flags); | 2495 | spin_lock_irqsave(&pl330->pool_lock, flags); |
2789 | 2496 | ||
2790 | while (!list_empty(&first->node)) { | 2497 | while (!list_empty(&first->node)) { |
2791 | desc = list_entry(first->node.next, | 2498 | desc = list_entry(first->node.next, |
2792 | struct dma_pl330_desc, node); | 2499 | struct dma_pl330_desc, node); |
2793 | list_move_tail(&desc->node, &pdmac->desc_pool); | 2500 | list_move_tail(&desc->node, &pl330->desc_pool); |
2794 | } | 2501 | } |
2795 | 2502 | ||
2796 | list_move_tail(&first->node, &pdmac->desc_pool); | 2503 | list_move_tail(&first->node, &pl330->desc_pool); |
2797 | 2504 | ||
2798 | spin_unlock_irqrestore(&pdmac->pool_lock, flags); | 2505 | spin_unlock_irqrestore(&pl330->pool_lock, flags); |
2799 | } | 2506 | } |
2800 | 2507 | ||
2801 | static struct dma_async_tx_descriptor * | 2508 | static struct dma_async_tx_descriptor * |
@@ -2820,12 +2527,12 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
2820 | 2527 | ||
2821 | desc = pl330_get_desc(pch); | 2528 | desc = pl330_get_desc(pch); |
2822 | if (!desc) { | 2529 | if (!desc) { |
2823 | struct dma_pl330_dmac *pdmac = pch->dmac; | 2530 | struct pl330_dmac *pl330 = pch->dmac; |
2824 | 2531 | ||
2825 | dev_err(pch->dmac->pif.dev, | 2532 | dev_err(pch->dmac->ddma.dev, |
2826 | "%s:%d Unable to fetch desc\n", | 2533 | "%s:%d Unable to fetch desc\n", |
2827 | __func__, __LINE__); | 2534 | __func__, __LINE__); |
2828 | __pl330_giveback_desc(pdmac, first); | 2535 | __pl330_giveback_desc(pl330, first); |
2829 | 2536 | ||
2830 | return NULL; | 2537 | return NULL; |
2831 | } | 2538 | } |
@@ -2838,19 +2545,18 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
2838 | if (direction == DMA_MEM_TO_DEV) { | 2545 | if (direction == DMA_MEM_TO_DEV) { |
2839 | desc->rqcfg.src_inc = 1; | 2546 | desc->rqcfg.src_inc = 1; |
2840 | desc->rqcfg.dst_inc = 0; | 2547 | desc->rqcfg.dst_inc = 0; |
2841 | desc->req.rqtype = MEMTODEV; | ||
2842 | fill_px(&desc->px, | 2548 | fill_px(&desc->px, |
2843 | addr, sg_dma_address(sg), sg_dma_len(sg)); | 2549 | addr, sg_dma_address(sg), sg_dma_len(sg)); |
2844 | } else { | 2550 | } else { |
2845 | desc->rqcfg.src_inc = 0; | 2551 | desc->rqcfg.src_inc = 0; |
2846 | desc->rqcfg.dst_inc = 1; | 2552 | desc->rqcfg.dst_inc = 1; |
2847 | desc->req.rqtype = DEVTOMEM; | ||
2848 | fill_px(&desc->px, | 2553 | fill_px(&desc->px, |
2849 | sg_dma_address(sg), addr, sg_dma_len(sg)); | 2554 | sg_dma_address(sg), addr, sg_dma_len(sg)); |
2850 | } | 2555 | } |
2851 | 2556 | ||
2852 | desc->rqcfg.brst_size = pch->burst_sz; | 2557 | desc->rqcfg.brst_size = pch->burst_sz; |
2853 | desc->rqcfg.brst_len = 1; | 2558 | desc->rqcfg.brst_len = 1; |
2559 | desc->rqtype = direction; | ||
2854 | } | 2560 | } |
2855 | 2561 | ||
2856 | /* Return the last desc in the chain */ | 2562 | /* Return the last desc in the chain */ |
@@ -2890,9 +2596,9 @@ static int | |||
2890 | pl330_probe(struct amba_device *adev, const struct amba_id *id) | 2596 | pl330_probe(struct amba_device *adev, const struct amba_id *id) |
2891 | { | 2597 | { |
2892 | struct dma_pl330_platdata *pdat; | 2598 | struct dma_pl330_platdata *pdat; |
2893 | struct dma_pl330_dmac *pdmac; | 2599 | struct pl330_config *pcfg; |
2600 | struct pl330_dmac *pl330; | ||
2894 | struct dma_pl330_chan *pch, *_p; | 2601 | struct dma_pl330_chan *pch, *_p; |
2895 | struct pl330_info *pi; | ||
2896 | struct dma_device *pd; | 2602 | struct dma_device *pd; |
2897 | struct resource *res; | 2603 | struct resource *res; |
2898 | int i, ret, irq; | 2604 | int i, ret, irq; |
@@ -2905,30 +2611,27 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2905 | return ret; | 2611 | return ret; |
2906 | 2612 | ||
2907 | /* Allocate a new DMAC and its Channels */ | 2613 | /* Allocate a new DMAC and its Channels */ |
2908 | pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL); | 2614 | pl330 = devm_kzalloc(&adev->dev, sizeof(*pl330), GFP_KERNEL); |
2909 | if (!pdmac) { | 2615 | if (!pl330) { |
2910 | dev_err(&adev->dev, "unable to allocate mem\n"); | 2616 | dev_err(&adev->dev, "unable to allocate mem\n"); |
2911 | return -ENOMEM; | 2617 | return -ENOMEM; |
2912 | } | 2618 | } |
2913 | 2619 | ||
2914 | pi = &pdmac->pif; | 2620 | pl330->mcbufsz = pdat ? pdat->mcbuf_sz : 0; |
2915 | pi->dev = &adev->dev; | ||
2916 | pi->pl330_data = NULL; | ||
2917 | pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0; | ||
2918 | 2621 | ||
2919 | res = &adev->res; | 2622 | res = &adev->res; |
2920 | pi->base = devm_ioremap_resource(&adev->dev, res); | 2623 | pl330->base = devm_ioremap_resource(&adev->dev, res); |
2921 | if (IS_ERR(pi->base)) | 2624 | if (IS_ERR(pl330->base)) |
2922 | return PTR_ERR(pi->base); | 2625 | return PTR_ERR(pl330->base); |
2923 | 2626 | ||
2924 | amba_set_drvdata(adev, pdmac); | 2627 | amba_set_drvdata(adev, pl330); |
2925 | 2628 | ||
2926 | for (i = 0; i < AMBA_NR_IRQS; i++) { | 2629 | for (i = 0; i < AMBA_NR_IRQS; i++) { |
2927 | irq = adev->irq[i]; | 2630 | irq = adev->irq[i]; |
2928 | if (irq) { | 2631 | if (irq) { |
2929 | ret = devm_request_irq(&adev->dev, irq, | 2632 | ret = devm_request_irq(&adev->dev, irq, |
2930 | pl330_irq_handler, 0, | 2633 | pl330_irq_handler, 0, |
2931 | dev_name(&adev->dev), pi); | 2634 | dev_name(&adev->dev), pl330); |
2932 | if (ret) | 2635 | if (ret) |
2933 | return ret; | 2636 | return ret; |
2934 | } else { | 2637 | } else { |
@@ -2936,38 +2639,40 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2936 | } | 2639 | } |
2937 | } | 2640 | } |
2938 | 2641 | ||
2939 | pi->pcfg.periph_id = adev->periphid; | 2642 | pcfg = &pl330->pcfg; |
2940 | ret = pl330_add(pi); | 2643 | |
2644 | pcfg->periph_id = adev->periphid; | ||
2645 | ret = pl330_add(pl330); | ||
2941 | if (ret) | 2646 | if (ret) |
2942 | return ret; | 2647 | return ret; |
2943 | 2648 | ||
2944 | INIT_LIST_HEAD(&pdmac->desc_pool); | 2649 | INIT_LIST_HEAD(&pl330->desc_pool); |
2945 | spin_lock_init(&pdmac->pool_lock); | 2650 | spin_lock_init(&pl330->pool_lock); |
2946 | 2651 | ||
2947 | /* Create a descriptor pool of default size */ | 2652 | /* Create a descriptor pool of default size */ |
2948 | if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC)) | 2653 | if (!add_desc(pl330, GFP_KERNEL, NR_DEFAULT_DESC)) |
2949 | dev_warn(&adev->dev, "unable to allocate desc\n"); | 2654 | dev_warn(&adev->dev, "unable to allocate desc\n"); |
2950 | 2655 | ||
2951 | pd = &pdmac->ddma; | 2656 | pd = &pl330->ddma; |
2952 | INIT_LIST_HEAD(&pd->channels); | 2657 | INIT_LIST_HEAD(&pd->channels); |
2953 | 2658 | ||
2954 | /* Initialize channel parameters */ | 2659 | /* Initialize channel parameters */ |
2955 | if (pdat) | 2660 | if (pdat) |
2956 | num_chan = max_t(int, pdat->nr_valid_peri, pi->pcfg.num_chan); | 2661 | num_chan = max_t(int, pdat->nr_valid_peri, pcfg->num_chan); |
2957 | else | 2662 | else |
2958 | num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan); | 2663 | num_chan = max_t(int, pcfg->num_peri, pcfg->num_chan); |
2959 | 2664 | ||
2960 | pdmac->num_peripherals = num_chan; | 2665 | pl330->num_peripherals = num_chan; |
2961 | 2666 | ||
2962 | pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); | 2667 | pl330->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); |
2963 | if (!pdmac->peripherals) { | 2668 | if (!pl330->peripherals) { |
2964 | ret = -ENOMEM; | 2669 | ret = -ENOMEM; |
2965 | dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n"); | 2670 | dev_err(&adev->dev, "unable to allocate pl330->peripherals\n"); |
2966 | goto probe_err2; | 2671 | goto probe_err2; |
2967 | } | 2672 | } |
2968 | 2673 | ||
2969 | for (i = 0; i < num_chan; i++) { | 2674 | for (i = 0; i < num_chan; i++) { |
2970 | pch = &pdmac->peripherals[i]; | 2675 | pch = &pl330->peripherals[i]; |
2971 | if (!adev->dev.of_node) | 2676 | if (!adev->dev.of_node) |
2972 | pch->chan.private = pdat ? &pdat->peri_id[i] : NULL; | 2677 | pch->chan.private = pdat ? &pdat->peri_id[i] : NULL; |
2973 | else | 2678 | else |
@@ -2977,9 +2682,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2977 | INIT_LIST_HEAD(&pch->work_list); | 2682 | INIT_LIST_HEAD(&pch->work_list); |
2978 | INIT_LIST_HEAD(&pch->completed_list); | 2683 | INIT_LIST_HEAD(&pch->completed_list); |
2979 | spin_lock_init(&pch->lock); | 2684 | spin_lock_init(&pch->lock); |
2980 | pch->pl330_chid = NULL; | 2685 | pch->thread = NULL; |
2981 | pch->chan.device = pd; | 2686 | pch->chan.device = pd; |
2982 | pch->dmac = pdmac; | 2687 | pch->dmac = pl330; |
2983 | 2688 | ||
2984 | /* Add the channel to the DMAC list */ | 2689 | /* Add the channel to the DMAC list */ |
2985 | list_add_tail(&pch->chan.device_node, &pd->channels); | 2690 | list_add_tail(&pch->chan.device_node, &pd->channels); |
@@ -2990,7 +2695,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2990 | pd->cap_mask = pdat->cap_mask; | 2695 | pd->cap_mask = pdat->cap_mask; |
2991 | } else { | 2696 | } else { |
2992 | dma_cap_set(DMA_MEMCPY, pd->cap_mask); | 2697 | dma_cap_set(DMA_MEMCPY, pd->cap_mask); |
2993 | if (pi->pcfg.num_peri) { | 2698 | if (pcfg->num_peri) { |
2994 | dma_cap_set(DMA_SLAVE, pd->cap_mask); | 2699 | dma_cap_set(DMA_SLAVE, pd->cap_mask); |
2995 | dma_cap_set(DMA_CYCLIC, pd->cap_mask); | 2700 | dma_cap_set(DMA_CYCLIC, pd->cap_mask); |
2996 | dma_cap_set(DMA_PRIVATE, pd->cap_mask); | 2701 | dma_cap_set(DMA_PRIVATE, pd->cap_mask); |
@@ -3015,14 +2720,14 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
3015 | 2720 | ||
3016 | if (adev->dev.of_node) { | 2721 | if (adev->dev.of_node) { |
3017 | ret = of_dma_controller_register(adev->dev.of_node, | 2722 | ret = of_dma_controller_register(adev->dev.of_node, |
3018 | of_dma_pl330_xlate, pdmac); | 2723 | of_dma_pl330_xlate, pl330); |
3019 | if (ret) { | 2724 | if (ret) { |
3020 | dev_err(&adev->dev, | 2725 | dev_err(&adev->dev, |
3021 | "unable to register DMA to the generic DT DMA helpers\n"); | 2726 | "unable to register DMA to the generic DT DMA helpers\n"); |
3022 | } | 2727 | } |
3023 | } | 2728 | } |
3024 | 2729 | ||
3025 | adev->dev.dma_parms = &pdmac->dma_parms; | 2730 | adev->dev.dma_parms = &pl330->dma_parms; |
3026 | 2731 | ||
3027 | /* | 2732 | /* |
3028 | * This is the limit for transfers with a buswidth of 1, larger | 2733 | * This is the limit for transfers with a buswidth of 1, larger |
@@ -3037,14 +2742,13 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
3037 | "Loaded driver for PL330 DMAC-%d\n", adev->periphid); | 2742 | "Loaded driver for PL330 DMAC-%d\n", adev->periphid); |
3038 | dev_info(&adev->dev, | 2743 | dev_info(&adev->dev, |
3039 | "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n", | 2744 | "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n", |
3040 | pi->pcfg.data_buf_dep, | 2745 | pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan, |
3041 | pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan, | 2746 | pcfg->num_peri, pcfg->num_events); |
3042 | pi->pcfg.num_peri, pi->pcfg.num_events); | ||
3043 | 2747 | ||
3044 | return 0; | 2748 | return 0; |
3045 | probe_err3: | 2749 | probe_err3: |
3046 | /* Idle the DMAC */ | 2750 | /* Idle the DMAC */ |
3047 | list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, | 2751 | list_for_each_entry_safe(pch, _p, &pl330->ddma.channels, |
3048 | chan.device_node) { | 2752 | chan.device_node) { |
3049 | 2753 | ||
3050 | /* Remove the channel */ | 2754 | /* Remove the channel */ |
@@ -3055,27 +2759,23 @@ probe_err3: | |||
3055 | pl330_free_chan_resources(&pch->chan); | 2759 | pl330_free_chan_resources(&pch->chan); |
3056 | } | 2760 | } |
3057 | probe_err2: | 2761 | probe_err2: |
3058 | pl330_del(pi); | 2762 | pl330_del(pl330); |
3059 | 2763 | ||
3060 | return ret; | 2764 | return ret; |
3061 | } | 2765 | } |
3062 | 2766 | ||
3063 | static int pl330_remove(struct amba_device *adev) | 2767 | static int pl330_remove(struct amba_device *adev) |
3064 | { | 2768 | { |
3065 | struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev); | 2769 | struct pl330_dmac *pl330 = amba_get_drvdata(adev); |
3066 | struct dma_pl330_chan *pch, *_p; | 2770 | struct dma_pl330_chan *pch, *_p; |
3067 | struct pl330_info *pi; | ||
3068 | |||
3069 | if (!pdmac) | ||
3070 | return 0; | ||
3071 | 2771 | ||
3072 | if (adev->dev.of_node) | 2772 | if (adev->dev.of_node) |
3073 | of_dma_controller_free(adev->dev.of_node); | 2773 | of_dma_controller_free(adev->dev.of_node); |
3074 | 2774 | ||
3075 | dma_async_device_unregister(&pdmac->ddma); | 2775 | dma_async_device_unregister(&pl330->ddma); |
3076 | 2776 | ||
3077 | /* Idle the DMAC */ | 2777 | /* Idle the DMAC */ |
3078 | list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, | 2778 | list_for_each_entry_safe(pch, _p, &pl330->ddma.channels, |
3079 | chan.device_node) { | 2779 | chan.device_node) { |
3080 | 2780 | ||
3081 | /* Remove the channel */ | 2781 | /* Remove the channel */ |
@@ -3086,9 +2786,7 @@ static int pl330_remove(struct amba_device *adev) | |||
3086 | pl330_free_chan_resources(&pch->chan); | 2786 | pl330_free_chan_resources(&pch->chan); |
3087 | } | 2787 | } |
3088 | 2788 | ||
3089 | pi = &pdmac->pif; | 2789 | pl330_del(pl330); |
3090 | |||
3091 | pl330_del(pi); | ||
3092 | 2790 | ||
3093 | return 0; | 2791 | return 0; |
3094 | } | 2792 | } |
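[Note] To summarize the pl330.c refactor: struct pl330_info and struct dma_pl330_dmac are folded into a single struct pl330_dmac, pl330_request_channel() now hands back a struct pl330_thread directly, and the dma_pl330_rqcb() callback indirection is gone. The driver's external contract is untouched; on non-DT platforms a slave channel is still claimed through the exported pl330_filter(). A minimal sketch, where the peripheral request line value 3 is a made-up example (real IDs come from platform data):

    #include <linux/dmaengine.h>
    #include <linux/amba/pl330.h>

    static struct dma_chan *example_get_pl330_chan(void)
    {
            dma_cap_mask_t mask;

            dma_cap_zero(mask);
            dma_cap_set(DMA_SLAVE, mask);

            /* pl330_filter() compares this value against the peri_id
             * stored in chan->private by the probe path above. */
            return dma_request_channel(mask, pl330_filter,
                                       (void *)(unsigned long)3);
    }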
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c index 82c923146e49..7a4bbb0f80a5 100644 --- a/drivers/dma/qcom_bam_dma.c +++ b/drivers/dma/qcom_bam_dma.c | |||
@@ -61,12 +61,17 @@ struct bam_desc_hw { | |||
61 | #define DESC_FLAG_INT BIT(15) | 61 | #define DESC_FLAG_INT BIT(15) |
62 | #define DESC_FLAG_EOT BIT(14) | 62 | #define DESC_FLAG_EOT BIT(14) |
63 | #define DESC_FLAG_EOB BIT(13) | 63 | #define DESC_FLAG_EOB BIT(13) |
64 | #define DESC_FLAG_NWD BIT(12) | ||
64 | 65 | ||
65 | struct bam_async_desc { | 66 | struct bam_async_desc { |
66 | struct virt_dma_desc vd; | 67 | struct virt_dma_desc vd; |
67 | 68 | ||
68 | u32 num_desc; | 69 | u32 num_desc; |
69 | u32 xfer_len; | 70 | u32 xfer_len; |
71 | |||
72 | /* transaction flags, EOT|EOB|NWD */ | ||
73 | u16 flags; | ||
74 | |||
70 | struct bam_desc_hw *curr_desc; | 75 | struct bam_desc_hw *curr_desc; |
71 | 76 | ||
72 | enum dma_transfer_direction dir; | 77 | enum dma_transfer_direction dir; |
@@ -490,6 +495,14 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan, | |||
490 | if (!async_desc) | 495 | if (!async_desc) |
491 | goto err_out; | 496 | goto err_out; |
492 | 497 | ||
498 | if (flags & DMA_PREP_FENCE) | ||
499 | async_desc->flags |= DESC_FLAG_NWD; | ||
500 | |||
501 | if (flags & DMA_PREP_INTERRUPT) | ||
502 | async_desc->flags |= DESC_FLAG_EOT; | ||
503 | else | ||
504 | async_desc->flags |= DESC_FLAG_INT; | ||
505 | |||
493 | async_desc->num_desc = num_alloc; | 506 | async_desc->num_desc = num_alloc; |
494 | async_desc->curr_desc = async_desc->desc; | 507 | async_desc->curr_desc = async_desc->desc; |
495 | async_desc->dir = direction; | 508 | async_desc->dir = direction; |
@@ -793,8 +806,11 @@ static void bam_start_dma(struct bam_chan *bchan) | |||
793 | else | 806 | else |
794 | async_desc->xfer_len = async_desc->num_desc; | 807 | async_desc->xfer_len = async_desc->num_desc; |
795 | 808 | ||
796 | /* set INT on last descriptor */ | 809 | /* set any special flags on the last descriptor */ |
797 | desc[async_desc->xfer_len - 1].flags |= DESC_FLAG_INT; | 810 | if (async_desc->num_desc == async_desc->xfer_len) |
811 | desc[async_desc->xfer_len - 1].flags = async_desc->flags; | ||
812 | else | ||
813 | desc[async_desc->xfer_len - 1].flags |= DESC_FLAG_INT; | ||
798 | 814 | ||
799 | if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) { | 815 | if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) { |
800 | u32 partial = MAX_DESCRIPTORS - bchan->tail; | 816 | u32 partial = MAX_DESCRIPTORS - bchan->tail; |
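[Note] The bam_prep_slave_sg() change above latches two prep-time flags into per-transaction hardware descriptor bits: DMA_PREP_FENCE sets NWD, and DMA_PREP_INTERRUPT selects EOT instead of INT on the final descriptor (applied only when the whole transaction is submitted at once, per bam_start_dma()). A consumer opts in through the standard prep call; a minimal sketch, assuming sgl/nents describe an already DMA-mapped scatterlist:

    #include <linux/dmaengine.h>
    #include <linux/scatterlist.h>

    static struct dma_async_tx_descriptor *
    example_prep(struct dma_chan *chan, struct scatterlist *sgl,
                 unsigned int nents)
    {
            /* Request EOT on completion plus the NWD barrier on the
             * last hardware descriptor, per the mapping above. */
            return dmaengine_prep_slave_sg(chan, sgl, nents,
                                           DMA_MEM_TO_DEV,
                                           DMA_PREP_INTERRUPT |
                                           DMA_PREP_FENCE);
    }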
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c index 012520c9fd79..7416572d1e40 100644 --- a/drivers/dma/s3c24xx-dma.c +++ b/drivers/dma/s3c24xx-dma.c | |||
@@ -889,8 +889,7 @@ static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy( | |||
889 | 889 | ||
890 | static struct dma_async_tx_descriptor *s3c24xx_dma_prep_dma_cyclic( | 890 | static struct dma_async_tx_descriptor *s3c24xx_dma_prep_dma_cyclic( |
891 | struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period, | 891 | struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period, |
892 | enum dma_transfer_direction direction, unsigned long flags, | 892 | enum dma_transfer_direction direction, unsigned long flags) |
893 | void *context) | ||
894 | { | 893 | { |
895 | struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); | 894 | struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); |
896 | struct s3c24xx_dma_engine *s3cdma = s3cchan->host; | 895 | struct s3c24xx_dma_engine *s3cdma = s3cchan->host; |
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index 5ebdfbc1051e..4b0ef043729a 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c | |||
@@ -612,7 +612,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( | |||
612 | 612 | ||
613 | static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic( | 613 | static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic( |
614 | struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period, | 614 | struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period, |
615 | enum dma_transfer_direction dir, unsigned long flags, void *context) | 615 | enum dma_transfer_direction dir, unsigned long flags) |
616 | { | 616 | { |
617 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); | 617 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); |
618 | struct sa11x0_dma_desc *txd; | 618 | struct sa11x0_dma_desc *txd; |
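[Note] The s3c24xx-dma.c and sa11x0-dma.c hunks above belong to a tree-wide cleanup dropping the unused `void *context` argument from the device_prep_dma_cyclic() callback. Consumers are unaffected because they go through the generic wrapper, which never exposed that argument. A minimal sketch, assuming buf/buf_len/period_len describe a DMA-mapped ring buffer:

    #include <linux/dmaengine.h>

    static struct dma_async_tx_descriptor *
    example_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t buf_len,
                   size_t period_len)
    {
            /* Wrapper signature is the same before and after this
             * series; only the driver-side callback loses "context". */
            return dmaengine_prep_dma_cyclic(chan, buf, buf_len,
                                             period_len, DMA_DEV_TO_MEM,
                                             DMA_PREP_INTERRUPT);
    }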
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig index 0f719816c91b..0349125a2e20 100644 --- a/drivers/dma/sh/Kconfig +++ b/drivers/dma/sh/Kconfig | |||
@@ -2,21 +2,39 @@ | |||
2 | # DMA engine configuration for sh | 2 | # DMA engine configuration for sh |
3 | # | 3 | # |
4 | 4 | ||
5 | # | ||
6 | # DMA Engine Helpers | ||
7 | # | ||
8 | |||
5 | config SH_DMAE_BASE | 9 | config SH_DMAE_BASE |
6 | bool "Renesas SuperH DMA Engine support" | 10 | bool "Renesas SuperH DMA Engine support" |
7 | depends on (SUPERH && SH_DMA) || ARCH_SHMOBILE || COMPILE_TEST | 11 | depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST |
12 | depends on !SUPERH || SH_DMA | ||
8 | depends on !SH_DMA_API | 13 | depends on !SH_DMA_API |
9 | default y | 14 | default y |
10 | select DMA_ENGINE | 15 | select DMA_ENGINE |
11 | help | 16 | help |
12 | Enable support for the Renesas SuperH DMA controllers. | 17 | Enable support for the Renesas SuperH DMA controllers. |
13 | 18 | ||
19 | # | ||
20 | # DMA Controllers | ||
21 | # | ||
22 | |||
14 | config SH_DMAE | 23 | config SH_DMAE |
15 | tristate "Renesas SuperH DMAC support" | 24 | tristate "Renesas SuperH DMAC support" |
16 | depends on SH_DMAE_BASE | 25 | depends on SH_DMAE_BASE |
17 | help | 26 | help |
18 | Enable support for the Renesas SuperH DMA controllers. | 27 | Enable support for the Renesas SuperH DMA controllers. |
19 | 28 | ||
29 | if SH_DMAE | ||
30 | |||
31 | config SH_DMAE_R8A73A4 | ||
32 | def_bool y | ||
33 | depends on ARCH_R8A73A4 | ||
34 | depends on OF | ||
35 | |||
36 | endif | ||
37 | |||
20 | config SUDMAC | 38 | config SUDMAC |
21 | tristate "Renesas SUDMAC support" | 39 | tristate "Renesas SUDMAC support" |
22 | depends on SH_DMAE_BASE | 40 | depends on SH_DMAE_BASE |
@@ -34,7 +52,3 @@ config RCAR_AUDMAC_PP | |||
34 | depends on SH_DMAE_BASE | 52 | depends on SH_DMAE_BASE |
35 | help | 53 | help |
36 | Enable support for the Renesas R-Car Audio DMAC Peripheral Peripheral controllers. | 54 | Enable support for the Renesas R-Car Audio DMAC Peripheral Peripheral controllers. |
37 | |||
38 | config SHDMA_R8A73A4 | ||
39 | def_bool y | ||
40 | depends on ARCH_R8A73A4 && SH_DMAE != n | ||
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile index 1ce88b28cfc6..0a5cfdb76e45 100644 --- a/drivers/dma/sh/Makefile +++ b/drivers/dma/sh/Makefile | |||
@@ -1,10 +1,18 @@ | |||
1 | # | ||
2 | # DMA Engine Helpers | ||
3 | # | ||
4 | |||
1 | obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o shdma-of.o | 5 | obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o shdma-of.o |
2 | obj-$(CONFIG_SH_DMAE) += shdma.o | 6 | |
7 | # | ||
8 | # DMA Controllers | ||
9 | # | ||
10 | |||
3 | shdma-y := shdmac.o | 11 | shdma-y := shdmac.o |
4 | ifeq ($(CONFIG_OF),y) | 12 | shdma-$(CONFIG_SH_DMAE_R8A73A4) += shdma-r8a73a4.o |
5 | shdma-$(CONFIG_SHDMA_R8A73A4) += shdma-r8a73a4.o | ||
6 | endif | ||
7 | shdma-objs := $(shdma-y) | 13 | shdma-objs := $(shdma-y) |
14 | obj-$(CONFIG_SH_DMAE) += shdma.o | ||
15 | |||
8 | obj-$(CONFIG_SUDMAC) += sudmac.o | 16 | obj-$(CONFIG_SUDMAC) += sudmac.o |
9 | obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o | 17 | obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o |
10 | obj-$(CONFIG_RCAR_AUDMAC_PP) += rcar-audmapp.o | 18 | obj-$(CONFIG_RCAR_AUDMAC_PP) += rcar-audmapp.o |
diff --git a/drivers/dma/sh/rcar-audmapp.c b/drivers/dma/sh/rcar-audmapp.c index 2de77289a2e9..dabbf0aba2e9 100644 --- a/drivers/dma/sh/rcar-audmapp.c +++ b/drivers/dma/sh/rcar-audmapp.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include <linux/dmaengine.h> | 24 | #include <linux/dmaengine.h> |
25 | #include <linux/of_dma.h> | ||
25 | #include <linux/platform_data/dma-rcar-audmapp.h> | 26 | #include <linux/platform_data/dma-rcar-audmapp.h> |
26 | #include <linux/platform_device.h> | 27 | #include <linux/platform_device.h> |
27 | #include <linux/shdma-base.h> | 28 | #include <linux/shdma-base.h> |
@@ -45,8 +46,9 @@ | |||
45 | 46 | ||
46 | struct audmapp_chan { | 47 | struct audmapp_chan { |
47 | struct shdma_chan shdma_chan; | 48 | struct shdma_chan shdma_chan; |
48 | struct audmapp_slave_config *config; | ||
49 | void __iomem *base; | 49 | void __iomem *base; |
50 | dma_addr_t slave_addr; | ||
51 | u32 chcr; | ||
50 | }; | 52 | }; |
51 | 53 | ||
52 | struct audmapp_device { | 54 | struct audmapp_device { |
@@ -56,7 +58,16 @@ struct audmapp_device { | |||
56 | void __iomem *chan_reg; | 58 | void __iomem *chan_reg; |
57 | }; | 59 | }; |
58 | 60 | ||
61 | struct audmapp_desc { | ||
62 | struct shdma_desc shdma_desc; | ||
63 | dma_addr_t src; | ||
64 | dma_addr_t dst; | ||
65 | }; | ||
66 | |||
67 | #define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan) | ||
68 | |||
59 | #define to_chan(chan) container_of(chan, struct audmapp_chan, shdma_chan) | 69 | #define to_chan(chan) container_of(chan, struct audmapp_chan, shdma_chan) |
70 | #define to_desc(sdesc) container_of(sdesc, struct audmapp_desc, shdma_desc) | ||
60 | #define to_dev(chan) container_of(chan->shdma_chan.dma_chan.device, \ | 71 | #define to_dev(chan) container_of(chan->shdma_chan.dma_chan.device, \ |
61 | struct audmapp_device, shdma_dev.dma_dev) | 72 | struct audmapp_device, shdma_dev.dma_dev) |
62 | 73 | ||
@@ -90,70 +101,82 @@ static void audmapp_halt(struct shdma_chan *schan) | |||
90 | } | 101 | } |
91 | 102 | ||
92 | static void audmapp_start_xfer(struct shdma_chan *schan, | 103 | static void audmapp_start_xfer(struct shdma_chan *schan, |
93 | struct shdma_desc *sdecs) | 104 | struct shdma_desc *sdesc) |
94 | { | 105 | { |
95 | struct audmapp_chan *auchan = to_chan(schan); | 106 | struct audmapp_chan *auchan = to_chan(schan); |
96 | struct audmapp_device *audev = to_dev(auchan); | 107 | struct audmapp_device *audev = to_dev(auchan); |
97 | struct audmapp_slave_config *cfg = auchan->config; | 108 | struct audmapp_desc *desc = to_desc(sdesc); |
98 | struct device *dev = audev->dev; | 109 | struct device *dev = audev->dev; |
99 | u32 chcr = cfg->chcr | PDMACHCR_DE; | 110 | u32 chcr = auchan->chcr | PDMACHCR_DE; |
100 | 111 | ||
101 | dev_dbg(dev, "src/dst/chcr = %pad/%pad/%x\n", | 112 | dev_dbg(dev, "src/dst/chcr = %pad/%pad/%08x\n", |
102 | &cfg->src, &cfg->dst, cfg->chcr); | 113 | &desc->src, &desc->dst, chcr); |
103 | 114 | ||
104 | audmapp_write(auchan, cfg->src, PDMASAR); | 115 | audmapp_write(auchan, desc->src, PDMASAR); |
105 | audmapp_write(auchan, cfg->dst, PDMADAR); | 116 | audmapp_write(auchan, desc->dst, PDMADAR); |
106 | audmapp_write(auchan, chcr, PDMACHCR); | 117 | audmapp_write(auchan, chcr, PDMACHCR); |
107 | } | 118 | } |
108 | 119 | ||
109 | static struct audmapp_slave_config * | 120 | static void audmapp_get_config(struct audmapp_chan *auchan, int slave_id, |
110 | audmapp_find_slave(struct audmapp_chan *auchan, int slave_id) | 121 | u32 *chcr, dma_addr_t *dst) |
111 | { | 122 | { |
112 | struct audmapp_device *audev = to_dev(auchan); | 123 | struct audmapp_device *audev = to_dev(auchan); |
113 | struct audmapp_pdata *pdata = audev->pdata; | 124 | struct audmapp_pdata *pdata = audev->pdata; |
114 | struct audmapp_slave_config *cfg; | 125 | struct audmapp_slave_config *cfg; |
115 | int i; | 126 | int i; |
116 | 127 | ||
128 | *chcr = 0; | ||
129 | *dst = 0; | ||
130 | |||
131 | if (!pdata) { /* DT */ | ||
132 | *chcr = ((u32)slave_id) << 16; | ||
133 | auchan->shdma_chan.slave_id = (slave_id) >> 8; | ||
134 | return; | ||
135 | } | ||
136 | |||
137 | /* non-DT */ | ||
138 | |||
117 | if (slave_id >= AUDMAPP_SLAVE_NUMBER) | 139 | if (slave_id >= AUDMAPP_SLAVE_NUMBER) |
118 | return NULL; | 140 | return; |
119 | 141 | ||
120 | for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) | 142 | for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) |
121 | if (cfg->slave_id == slave_id) | 143 | if (cfg->slave_id == slave_id) { |
122 | return cfg; | 144 | *chcr = cfg->chcr; |
123 | 145 | *dst = cfg->dst; | |
124 | return NULL; | 146 | break; |
147 | } | ||
125 | } | 148 | } |
126 | 149 | ||
127 | static int audmapp_set_slave(struct shdma_chan *schan, int slave_id, | 150 | static int audmapp_set_slave(struct shdma_chan *schan, int slave_id, |
128 | dma_addr_t slave_addr, bool try) | 151 | dma_addr_t slave_addr, bool try) |
129 | { | 152 | { |
130 | struct audmapp_chan *auchan = to_chan(schan); | 153 | struct audmapp_chan *auchan = to_chan(schan); |
131 | struct audmapp_slave_config *cfg = | 154 | u32 chcr; |
132 | audmapp_find_slave(auchan, slave_id); | 155 | dma_addr_t dst; |
156 | |||
157 | audmapp_get_config(auchan, slave_id, &chcr, &dst); | ||
133 | 158 | ||
134 | if (!cfg) | ||
135 | return -ENODEV; | ||
136 | if (try) | 159 | if (try) |
137 | return 0; | 160 | return 0; |
138 | 161 | ||
139 | auchan->config = cfg; | 162 | auchan->chcr = chcr; |
163 | auchan->slave_addr = slave_addr ? : dst; | ||
140 | 164 | ||
141 | return 0; | 165 | return 0; |
142 | } | 166 | } |
143 | 167 | ||
144 | static int audmapp_desc_setup(struct shdma_chan *schan, | 168 | static int audmapp_desc_setup(struct shdma_chan *schan, |
145 | struct shdma_desc *sdecs, | 169 | struct shdma_desc *sdesc, |
146 | dma_addr_t src, dma_addr_t dst, size_t *len) | 170 | dma_addr_t src, dma_addr_t dst, size_t *len) |
147 | { | 171 | { |
148 | struct audmapp_chan *auchan = to_chan(schan); | 172 | struct audmapp_desc *desc = to_desc(sdesc); |
149 | struct audmapp_slave_config *cfg = auchan->config; | ||
150 | |||
151 | if (!cfg) | ||
152 | return -ENODEV; | ||
153 | 173 | ||
154 | if (*len > (size_t)AUDMAPP_LEN_MAX) | 174 | if (*len > (size_t)AUDMAPP_LEN_MAX) |
155 | *len = (size_t)AUDMAPP_LEN_MAX; | 175 | *len = (size_t)AUDMAPP_LEN_MAX; |
156 | 176 | ||
177 | desc->src = src; | ||
178 | desc->dst = dst; | ||
179 | |||
157 | return 0; | 180 | return 0; |
158 | } | 181 | } |
159 | 182 | ||
@@ -164,7 +187,9 @@ static void audmapp_setup_xfer(struct shdma_chan *schan, | |||
164 | 187 | ||
165 | static dma_addr_t audmapp_slave_addr(struct shdma_chan *schan) | 188 | static dma_addr_t audmapp_slave_addr(struct shdma_chan *schan) |
166 | { | 189 | { |
167 | return 0; /* always fixed address */ | 190 | struct audmapp_chan *auchan = to_chan(schan); |
191 | |||
192 | return auchan->slave_addr; | ||
168 | } | 193 | } |
169 | 194 | ||
170 | static bool audmapp_channel_busy(struct shdma_chan *schan) | 195 | static bool audmapp_channel_busy(struct shdma_chan *schan) |
@@ -183,7 +208,7 @@ static bool audmapp_desc_completed(struct shdma_chan *schan, | |||
183 | 208 | ||
184 | static struct shdma_desc *audmapp_embedded_desc(void *buf, int i) | 209 | static struct shdma_desc *audmapp_embedded_desc(void *buf, int i) |
185 | { | 210 | { |
186 | return &((struct shdma_desc *)buf)[i]; | 211 | return &((struct audmapp_desc *)buf)[i].shdma_desc; |
187 | } | 212 | } |
188 | 213 | ||
189 | static const struct shdma_ops audmapp_shdma_ops = { | 214 | static const struct shdma_ops audmapp_shdma_ops = { |
@@ -234,16 +259,39 @@ static void audmapp_chan_remove(struct audmapp_device *audev) | |||
234 | dma_dev->chancnt = 0; | 259 | dma_dev->chancnt = 0; |
235 | } | 260 | } |
236 | 261 | ||
262 | static struct dma_chan *audmapp_of_xlate(struct of_phandle_args *dma_spec, | ||
263 | struct of_dma *ofdma) | ||
264 | { | ||
265 | dma_cap_mask_t mask; | ||
266 | struct dma_chan *chan; | ||
267 | u32 chcr = dma_spec->args[0]; | ||
268 | |||
269 | if (dma_spec->args_count != 1) | ||
270 | return NULL; | ||
271 | |||
272 | dma_cap_zero(mask); | ||
273 | dma_cap_set(DMA_SLAVE, mask); | ||
274 | |||
275 | chan = dma_request_channel(mask, shdma_chan_filter, NULL); | ||
276 | if (chan) | ||
277 | to_shdma_chan(chan)->hw_req = chcr; | ||
278 | |||
279 | return chan; | ||
280 | } | ||
281 | |||
237 | static int audmapp_probe(struct platform_device *pdev) | 282 | static int audmapp_probe(struct platform_device *pdev) |
238 | { | 283 | { |
239 | struct audmapp_pdata *pdata = pdev->dev.platform_data; | 284 | struct audmapp_pdata *pdata = pdev->dev.platform_data; |
285 | struct device_node *np = pdev->dev.of_node; | ||
240 | struct audmapp_device *audev; | 286 | struct audmapp_device *audev; |
241 | struct shdma_dev *sdev; | 287 | struct shdma_dev *sdev; |
242 | struct dma_device *dma_dev; | 288 | struct dma_device *dma_dev; |
243 | struct resource *res; | 289 | struct resource *res; |
244 | int err, i; | 290 | int err, i; |
245 | 291 | ||
246 | if (!pdata) | 292 | if (np) |
293 | of_dma_controller_register(np, audmapp_of_xlate, pdev); | ||
294 | else if (!pdata) | ||
247 | return -ENODEV; | 295 | return -ENODEV; |
248 | 296 | ||
249 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 297 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
@@ -260,7 +308,7 @@ static int audmapp_probe(struct platform_device *pdev) | |||
260 | 308 | ||
261 | sdev = &audev->shdma_dev; | 309 | sdev = &audev->shdma_dev; |
262 | sdev->ops = &audmapp_shdma_ops; | 310 | sdev->ops = &audmapp_shdma_ops; |
263 | sdev->desc_size = sizeof(struct shdma_desc); | 311 | sdev->desc_size = sizeof(struct audmapp_desc); |
264 | 312 | ||
265 | dma_dev = &sdev->dma_dev; | 313 | dma_dev = &sdev->dma_dev; |
266 | dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE; | 314 | dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE; |
@@ -305,12 +353,18 @@ static int audmapp_remove(struct platform_device *pdev) | |||
305 | return 0; | 353 | return 0; |
306 | } | 354 | } |
307 | 355 | ||
356 | static const struct of_device_id audmapp_of_match[] = { | ||
357 | { .compatible = "renesas,rcar-audmapp", }, | ||
358 | {}, | ||
359 | }; | ||
360 | |||
308 | static struct platform_driver audmapp_driver = { | 361 | static struct platform_driver audmapp_driver = { |
309 | .probe = audmapp_probe, | 362 | .probe = audmapp_probe, |
310 | .remove = audmapp_remove, | 363 | .remove = audmapp_remove, |
311 | .driver = { | 364 | .driver = { |
312 | .owner = THIS_MODULE, | 365 | .owner = THIS_MODULE, |
313 | .name = "rcar-audmapp-engine", | 366 | .name = "rcar-audmapp-engine", |
367 | .of_match_table = audmapp_of_match, | ||
314 | }, | 368 | }, |
315 | }; | 369 | }; |
316 | module_platform_driver(audmapp_driver); | 370 | module_platform_driver(audmapp_driver); |
diff --git a/drivers/dma/sh/shdma-arm.h b/drivers/dma/sh/shdma-arm.h index a2b8258426c9..a1b0ef45d6a2 100644 --- a/drivers/dma/sh/shdma-arm.h +++ b/drivers/dma/sh/shdma-arm.h | |||
@@ -45,7 +45,7 @@ enum { | |||
45 | ((((i) & TS_LOW_BIT) << TS_LOW_SHIFT) |\ | 45 | ((((i) & TS_LOW_BIT) << TS_LOW_SHIFT) |\ |
46 | (((i) & TS_HI_BIT) << TS_HI_SHIFT)) | 46 | (((i) & TS_HI_BIT) << TS_HI_SHIFT)) |
47 | 47 | ||
48 | #define CHCR_TX(xmit_sz) (DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL((xmit_sz))) | 48 | #define CHCR_TX(xmit_sz) (DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL((xmit_sz))) |
49 | #define CHCR_RX(xmit_sz) (DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL((xmit_sz))) | 49 | #define CHCR_RX(xmit_sz) (DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL((xmit_sz))) |
50 | 50 | ||
51 | #endif | 51 | #endif |
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index b35007e21e6b..42d497416196 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c | |||
@@ -206,45 +206,6 @@ static int shdma_setup_slave(struct shdma_chan *schan, int slave_id, | |||
206 | return 0; | 206 | return 0; |
207 | } | 207 | } |
208 | 208 | ||
209 | /* | ||
210 | * This is the standard shdma filter function to be used as a replacement to the | ||
211 | * "old" method, using the .private pointer. If for some reason you allocate a | ||
212 | * channel without slave data, use something like ERR_PTR(-EINVAL) as a filter | ||
213 | * parameter. If this filter is used, the slave driver, after calling | ||
214 | * dma_request_channel(), will also have to call dmaengine_slave_config() with | ||
215 | * .slave_id, .direction, and either .src_addr or .dst_addr set. | ||
216 | * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE | ||
217 | * capability! If this becomes a requirement, hardware glue drivers using these | ||
218 | * services would have to provide their own filters, which first would check | ||
219 | * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do | ||
220 | * this, and only then, in case of a match, call this common filter. | ||
221 | * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate(). | ||
222 | * In that case the MID-RID value is used for slave channel filtering and is | ||
223 | * passed to this function in the "arg" parameter. | ||
224 | */ | ||
225 | bool shdma_chan_filter(struct dma_chan *chan, void *arg) | ||
226 | { | ||
227 | struct shdma_chan *schan = to_shdma_chan(chan); | ||
228 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); | ||
229 | const struct shdma_ops *ops = sdev->ops; | ||
230 | int match = (long)arg; | ||
231 | int ret; | ||
232 | |||
233 | if (match < 0) | ||
234 | /* No slave requested - arbitrary channel */ | ||
235 | return true; | ||
236 | |||
237 | if (!schan->dev->of_node && match >= slave_num) | ||
238 | return false; | ||
239 | |||
240 | ret = ops->set_slave(schan, match, 0, true); | ||
241 | if (ret < 0) | ||
242 | return false; | ||
243 | |||
244 | return true; | ||
245 | } | ||
246 | EXPORT_SYMBOL(shdma_chan_filter); | ||
247 | |||
248 | static int shdma_alloc_chan_resources(struct dma_chan *chan) | 209 | static int shdma_alloc_chan_resources(struct dma_chan *chan) |
249 | { | 210 | { |
250 | struct shdma_chan *schan = to_shdma_chan(chan); | 211 | struct shdma_chan *schan = to_shdma_chan(chan); |
@@ -295,6 +256,51 @@ esetslave: | |||
295 | return ret; | 256 | return ret; |
296 | } | 257 | } |
297 | 258 | ||
259 | /* | ||
260 | * This is the standard shdma filter function to be used as a replacement for the | ||
261 | * "old" method, using the .private pointer. If for some reason you allocate a | ||
262 | * channel without slave data, use something like ERR_PTR(-EINVAL) as a filter | ||
263 | * parameter. If this filter is used, the slave driver, after calling | ||
264 | * dma_request_channel(), will also have to call dmaengine_slave_config() with | ||
265 | * .slave_id, .direction, and either .src_addr or .dst_addr set. | ||
266 | * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE | ||
267 | * capability! If this becomes a requirement, hardware glue drivers using these | ||
268 | * services would have to provide their own filters, which first would check | ||
269 | * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do | ||
270 | * this, and only then, in case of a match, call this common filter. | ||
271 | * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate(). | ||
272 | * In that case the MID-RID value is used for slave channel filtering and is | ||
273 | * passed to this function in the "arg" parameter. | ||
274 | */ | ||
275 | bool shdma_chan_filter(struct dma_chan *chan, void *arg) | ||
276 | { | ||
277 | struct shdma_chan *schan; | ||
278 | struct shdma_dev *sdev; | ||
279 | int match = (long)arg; | ||
280 | int ret; | ||
281 | |||
282 | /* Only support channels handled by this driver. */ | ||
283 | if (chan->device->device_alloc_chan_resources != | ||
284 | shdma_alloc_chan_resources) | ||
285 | return false; | ||
286 | |||
287 | if (match < 0) | ||
288 | /* No slave requested - arbitrary channel */ | ||
289 | return true; | ||
290 | |||
291 | schan = to_shdma_chan(chan); | ||
292 | if (!schan->dev->of_node && match >= slave_num) | ||
293 | return false; | ||
294 | |||
295 | sdev = to_shdma_dev(schan->dma_chan.device); | ||
296 | ret = sdev->ops->set_slave(schan, match, 0, true); | ||
297 | if (ret < 0) | ||
298 | return false; | ||
299 | |||
300 | return true; | ||
301 | } | ||
302 | EXPORT_SYMBOL(shdma_chan_filter); | ||
303 | |||
298 | static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all) | 304 | static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all) |
299 | { | 305 | { |
300 | struct shdma_desc *desc, *_desc; | 306 | struct shdma_desc *desc, *_desc; |
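The comment block above doubles as the client contract, so it is worth seeing in code. A hedged sketch of a slave driver allocating a TX channel through this filter; the 0x21 MID/RID value is invented for illustration, and error handling is kept minimal:

    #include <linux/dmaengine.h>
    #include <linux/shdma-base.h>

    static struct dma_chan *request_tx_chan(dma_addr_t fifo_addr)
    {
            struct dma_slave_config cfg = {
                    .slave_id  = 0x21,              /* hypothetical MID/RID */
                    .direction = DMA_MEM_TO_DEV,
                    .dst_addr  = fifo_addr,
            };
            dma_cap_mask_t mask;
            struct dma_chan *chan;

            dma_cap_zero(mask);
            dma_cap_set(DMA_SLAVE, mask);

            chan = dma_request_channel(mask, shdma_chan_filter,
                                       (void *)(long)cfg.slave_id);
            if (!chan)
                    return NULL;

            /* Mandatory follow-up, as the comment above requires. */
            if (dmaengine_slave_config(chan, &cfg)) {
                    dma_release_channel(chan);
                    return NULL;
            }
            return chan;
    }

Note what the move buys: the filter can now compare against shdma_alloc_chan_resources() directly, so it safely ignores channels owned by other DMAC drivers instead of blindly casting them to shdma_chan.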
@@ -662,15 +668,16 @@ static struct dma_async_tx_descriptor *shdma_prep_slave_sg( | |||
662 | static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic( | 668 | static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic( |
663 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | 669 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, |
664 | size_t period_len, enum dma_transfer_direction direction, | 670 | size_t period_len, enum dma_transfer_direction direction, |
665 | unsigned long flags, void *context) | 671 | unsigned long flags) |
666 | { | 672 | { |
667 | struct shdma_chan *schan = to_shdma_chan(chan); | 673 | struct shdma_chan *schan = to_shdma_chan(chan); |
668 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); | 674 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); |
675 | struct dma_async_tx_descriptor *desc; | ||
669 | const struct shdma_ops *ops = sdev->ops; | 676 | const struct shdma_ops *ops = sdev->ops; |
670 | unsigned int sg_len = buf_len / period_len; | 677 | unsigned int sg_len = buf_len / period_len; |
671 | int slave_id = schan->slave_id; | 678 | int slave_id = schan->slave_id; |
672 | dma_addr_t slave_addr; | 679 | dma_addr_t slave_addr; |
673 | struct scatterlist sgl[SHDMA_MAX_SG_LEN]; | 680 | struct scatterlist *sgl; |
674 | int i; | 681 | int i; |
675 | 682 | ||
676 | if (!chan) | 683 | if (!chan) |
@@ -694,7 +701,16 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic( | |||
694 | 701 | ||
695 | slave_addr = ops->slave_addr(schan); | 702 | slave_addr = ops->slave_addr(schan); |
696 | 703 | ||
704 | /* | ||
705 | * Allocate the sg list dynamically as it would consume too much stack | ||
706 | * space. | ||
707 | */ | ||
708 | sgl = kcalloc(sg_len, sizeof(*sgl), GFP_KERNEL); | ||
709 | if (!sgl) | ||
710 | return NULL; | ||
711 | |||
697 | sg_init_table(sgl, sg_len); | 712 | sg_init_table(sgl, sg_len); |
713 | |||
698 | for (i = 0; i < sg_len; i++) { | 714 | for (i = 0; i < sg_len; i++) { |
699 | dma_addr_t src = buf_addr + (period_len * i); | 715 | dma_addr_t src = buf_addr + (period_len * i); |
700 | 716 | ||
@@ -704,8 +720,11 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic( | |||
704 | sg_dma_len(&sgl[i]) = period_len; | 720 | sg_dma_len(&sgl[i]) = period_len; |
705 | } | 721 | } |
706 | 722 | ||
707 | return shdma_prep_sg(schan, sgl, sg_len, &slave_addr, | 723 | desc = shdma_prep_sg(schan, sgl, sg_len, &slave_addr, |
708 | direction, flags, true); | 724 | direction, flags, true); |
725 | |||
726 | kfree(sgl); | ||
727 | return desc; | ||
709 | } | 728 | } |
710 | 729 | ||
711 | static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 730 | static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
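Nothing changes for clients of the cyclic API here: the kcalloc() only moves the scatterlist off the kernel stack, and an allocation failure surfaces as the usual NULL descriptor. For context, a hedged sketch of the caller's side, arming a four-period receive ring (sizes are illustrative and chan is assumed to be a configured slave channel):

    #include <linux/dmaengine.h>

    static int start_rx_ring(struct dma_chan *chan, dma_addr_t buf)
    {
            struct dma_async_tx_descriptor *desc;

            desc = dmaengine_prep_dma_cyclic(chan, buf, 4 * 4096, 4096,
                                             DMA_DEV_TO_MEM,
                                             DMA_PREP_INTERRUPT);
            if (!desc)      /* covers the new kcalloc() failure path too */
                    return -ENOMEM;

            dmaengine_submit(desc);
            dma_async_issue_pending(chan);
            return 0;
    }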
diff --git a/drivers/dma/sh/shdma.h b/drivers/dma/sh/shdma.h index 758a57b51875..2c0a969adc9f 100644 --- a/drivers/dma/sh/shdma.h +++ b/drivers/dma/sh/shdma.h | |||
@@ -62,7 +62,7 @@ struct sh_dmae_desc { | |||
62 | #define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\ | 62 | #define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\ |
63 | struct sh_dmae_device, shdma_dev.dma_dev) | 63 | struct sh_dmae_device, shdma_dev.dma_dev) |
64 | 64 | ||
65 | #ifdef CONFIG_SHDMA_R8A73A4 | 65 | #ifdef CONFIG_SH_DMAE_R8A73A4 |
66 | extern const struct sh_dmae_pdata r8a73a4_dma_pdata; | 66 | extern const struct sh_dmae_pdata r8a73a4_dma_pdata; |
67 | #define r8a73a4_shdma_devid (&r8a73a4_dma_pdata) | 67 | #define r8a73a4_shdma_devid (&r8a73a4_dma_pdata) |
68 | #else | 68 | #else |
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c index 146d5df926db..58eb85770eba 100644 --- a/drivers/dma/sh/shdmac.c +++ b/drivers/dma/sh/shdmac.c | |||
@@ -38,12 +38,12 @@ | |||
38 | #include "../dmaengine.h" | 38 | #include "../dmaengine.h" |
39 | #include "shdma.h" | 39 | #include "shdma.h" |
40 | 40 | ||
41 | /* DMA register */ | 41 | /* DMA registers */ |
42 | #define SAR 0x00 | 42 | #define SAR 0x00 /* Source Address Register */ |
43 | #define DAR 0x04 | 43 | #define DAR 0x04 /* Destination Address Register */ |
44 | #define TCR 0x08 | 44 | #define TCR 0x08 /* Transfer Count Register */ |
45 | #define CHCR 0x0C | 45 | #define CHCR 0x0C /* Channel Control Register */ |
46 | #define DMAOR 0x40 | 46 | #define DMAOR 0x40 /* DMA Operation Register */ |
47 | 47 | ||
48 | #define TEND 0x18 /* USB-DMAC */ | 48 | #define TEND 0x18 /* USB-DMAC */ |
49 | 49 | ||
@@ -239,9 +239,8 @@ static void dmae_init(struct sh_dmae_chan *sh_chan) | |||
239 | { | 239 | { |
240 | /* | 240 | /* |
241 | * Default configuration for dual address memory-memory transfer. | 241 | * Default configuration for dual address memory-memory transfer. |
242 | * 0x400 represents auto-request. | ||
243 | */ | 242 | */ |
244 | u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan, | 243 | u32 chcr = DM_INC | SM_INC | RS_AUTO | log2size_to_chcr(sh_chan, |
245 | LOG2_DEFAULT_XFER_SIZE); | 244 | LOG2_DEFAULT_XFER_SIZE); |
246 | sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr); | 245 | sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr); |
247 | chcr_write(sh_chan, chcr); | 246 | chcr_write(sh_chan, chcr); |
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index 03f7820fa333..aac03ab10c54 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c | |||
@@ -580,7 +580,7 @@ err_dir: | |||
580 | static struct dma_async_tx_descriptor * | 580 | static struct dma_async_tx_descriptor * |
581 | sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr, | 581 | sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr, |
582 | size_t buf_len, size_t period_len, | 582 | size_t buf_len, size_t period_len, |
583 | enum dma_transfer_direction direction, unsigned long flags, void *context) | 583 | enum dma_transfer_direction direction, unsigned long flags) |
584 | { | 584 | { |
585 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | 585 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); |
586 | struct sirfsoc_dma_desc *sdesc = NULL; | 586 | struct sirfsoc_dma_desc *sdesc = NULL; |
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index c7984459ede7..5fe59335e247 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
@@ -2531,8 +2531,7 @@ d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
2531 | static struct dma_async_tx_descriptor * | 2531 | static struct dma_async_tx_descriptor * |
2532 | dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, | 2532 | dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, |
2533 | size_t buf_len, size_t period_len, | 2533 | size_t buf_len, size_t period_len, |
2534 | enum dma_transfer_direction direction, unsigned long flags, | 2534 | enum dma_transfer_direction direction, unsigned long flags) |
2535 | void *context) | ||
2536 | { | 2535 | { |
2537 | unsigned int periods = buf_len / period_len; | 2536 | unsigned int periods = buf_len / period_len; |
2538 | struct dma_async_tx_descriptor *txd; | 2537 | struct dma_async_tx_descriptor *txd; |
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c new file mode 100644 index 000000000000..1f92a56fd2b6 --- /dev/null +++ b/drivers/dma/sun6i-dma.c | |||
@@ -0,0 +1,1053 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013-2014 Allwinner Tech Co., Ltd | ||
3 | * Author: Sugar <shuge@allwinnertech.com> | ||
4 | * | ||
5 | * Copyright (C) 2014 Maxime Ripard | ||
6 | * Maxime Ripard <maxime.ripard@free-electrons.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | */ | ||
13 | |||
14 | #include <linux/clk.h> | ||
15 | #include <linux/delay.h> | ||
16 | #include <linux/dmaengine.h> | ||
17 | #include <linux/dmapool.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/of_dma.h> | ||
21 | #include <linux/platform_device.h> | ||
22 | #include <linux/reset.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/types.h> | ||
25 | |||
26 | #include "virt-dma.h" | ||
27 | |||
28 | /* | ||
29 | * There are 16 physical channels that can work in parallel. | ||
30 | * | ||
31 | * However we have 30 different endpoints for our requests. | ||
32 | * | ||
33 | * Since the channels are able to handle only a unidirectional | ||
34 | * transfer, we need to allocate more virtual channels so that | ||
35 | * everyone can grab one channel. | ||
36 | * | ||
37 | * Some devices can't work in both directions (mostly because it | ||
38 | * wouldn't make sense), so we have a bit fewer virtual channels than | ||
39 | * 2 channels per endpoint. | ||
40 | */ | ||
41 | |||
42 | #define NR_MAX_CHANNELS 16 | ||
43 | #define NR_MAX_REQUESTS 30 | ||
44 | #define NR_MAX_VCHANS 53 | ||
45 | |||
46 | /* | ||
47 | * Common registers | ||
48 | */ | ||
49 | #define DMA_IRQ_EN(x) ((x) * 0x04) | ||
50 | #define DMA_IRQ_HALF BIT(0) | ||
51 | #define DMA_IRQ_PKG BIT(1) | ||
52 | #define DMA_IRQ_QUEUE BIT(2) | ||
53 | |||
54 | #define DMA_IRQ_CHAN_NR 8 | ||
55 | #define DMA_IRQ_CHAN_WIDTH 4 | ||
56 | |||
57 | |||
58 | #define DMA_IRQ_STAT(x) ((x) * 0x04 + 0x10) | ||
59 | |||
60 | #define DMA_STAT 0x30 | ||
61 | |||
62 | /* | ||
63 | * Channel-specific registers | ||
64 | */ | ||
65 | #define DMA_CHAN_ENABLE 0x00 | ||
66 | #define DMA_CHAN_ENABLE_START BIT(0) | ||
67 | #define DMA_CHAN_ENABLE_STOP 0 | ||
68 | |||
69 | #define DMA_CHAN_PAUSE 0x04 | ||
70 | #define DMA_CHAN_PAUSE_PAUSE BIT(1) | ||
71 | #define DMA_CHAN_PAUSE_RESUME 0 | ||
72 | |||
73 | #define DMA_CHAN_LLI_ADDR 0x08 | ||
74 | |||
75 | #define DMA_CHAN_CUR_CFG 0x0c | ||
76 | #define DMA_CHAN_CFG_SRC_DRQ(x) ((x) & 0x1f) | ||
77 | #define DMA_CHAN_CFG_SRC_IO_MODE BIT(5) | ||
78 | #define DMA_CHAN_CFG_SRC_LINEAR_MODE (0 << 5) | ||
79 | #define DMA_CHAN_CFG_SRC_BURST(x) (((x) & 0x3) << 7) | ||
80 | #define DMA_CHAN_CFG_SRC_WIDTH(x) (((x) & 0x3) << 9) | ||
81 | |||
82 | #define DMA_CHAN_CFG_DST_DRQ(x) (DMA_CHAN_CFG_SRC_DRQ(x) << 16) | ||
83 | #define DMA_CHAN_CFG_DST_IO_MODE (DMA_CHAN_CFG_SRC_IO_MODE << 16) | ||
84 | #define DMA_CHAN_CFG_DST_LINEAR_MODE (DMA_CHAN_CFG_SRC_LINEAR_MODE << 16) | ||
85 | #define DMA_CHAN_CFG_DST_BURST(x) (DMA_CHAN_CFG_SRC_BURST(x) << 16) | ||
86 | #define DMA_CHAN_CFG_DST_WIDTH(x) (DMA_CHAN_CFG_SRC_WIDTH(x) << 16) | ||
87 | |||
88 | #define DMA_CHAN_CUR_SRC 0x10 | ||
89 | |||
90 | #define DMA_CHAN_CUR_DST 0x14 | ||
91 | |||
92 | #define DMA_CHAN_CUR_CNT 0x18 | ||
93 | |||
94 | #define DMA_CHAN_CUR_PARA 0x1c | ||
95 | |||
96 | |||
97 | /* | ||
98 | * Various hardware related defines | ||
99 | */ | ||
100 | #define LLI_LAST_ITEM 0xfffff800 | ||
101 | #define NORMAL_WAIT 8 | ||
102 | #define DRQ_SDRAM 1 | ||
103 | |||
104 | /* | ||
105 | * Hardware representation of the LLI | ||
106 | * | ||
107 | * The hardware will be fed the physical address of this structure, | ||
108 | * and read its content in order to start the transfer. | ||
109 | */ | ||
110 | struct sun6i_dma_lli { | ||
111 | u32 cfg; | ||
112 | u32 src; | ||
113 | u32 dst; | ||
114 | u32 len; | ||
115 | u32 para; | ||
116 | u32 p_lli_next; | ||
117 | |||
118 | /* | ||
119 | * This field is not used by the DMA controller, but will be | ||
120 | * used by the CPU to go through the list (mostly for dumping | ||
121 | * or freeing it). | ||
122 | */ | ||
123 | struct sun6i_dma_lli *v_lli_next; | ||
124 | }; | ||
125 | |||
126 | |||
127 | struct sun6i_desc { | ||
128 | struct virt_dma_desc vd; | ||
129 | dma_addr_t p_lli; | ||
130 | struct sun6i_dma_lli *v_lli; | ||
131 | }; | ||
132 | |||
133 | struct sun6i_pchan { | ||
134 | u32 idx; | ||
135 | void __iomem *base; | ||
136 | struct sun6i_vchan *vchan; | ||
137 | struct sun6i_desc *desc; | ||
138 | struct sun6i_desc *done; | ||
139 | }; | ||
140 | |||
141 | struct sun6i_vchan { | ||
142 | struct virt_dma_chan vc; | ||
143 | struct list_head node; | ||
144 | struct dma_slave_config cfg; | ||
145 | struct sun6i_pchan *phy; | ||
146 | u8 port; | ||
147 | }; | ||
148 | |||
149 | struct sun6i_dma_dev { | ||
150 | struct dma_device slave; | ||
151 | void __iomem *base; | ||
152 | struct clk *clk; | ||
153 | int irq; | ||
154 | spinlock_t lock; | ||
155 | struct reset_control *rstc; | ||
156 | struct tasklet_struct task; | ||
157 | atomic_t tasklet_shutdown; | ||
158 | struct list_head pending; | ||
159 | struct dma_pool *pool; | ||
160 | struct sun6i_pchan *pchans; | ||
161 | struct sun6i_vchan *vchans; | ||
162 | }; | ||
163 | |||
164 | static struct device *chan2dev(struct dma_chan *chan) | ||
165 | { | ||
166 | return &chan->dev->device; | ||
167 | } | ||
168 | |||
169 | static inline struct sun6i_dma_dev *to_sun6i_dma_dev(struct dma_device *d) | ||
170 | { | ||
171 | return container_of(d, struct sun6i_dma_dev, slave); | ||
172 | } | ||
173 | |||
174 | static inline struct sun6i_vchan *to_sun6i_vchan(struct dma_chan *chan) | ||
175 | { | ||
176 | return container_of(chan, struct sun6i_vchan, vc.chan); | ||
177 | } | ||
178 | |||
179 | static inline struct sun6i_desc * | ||
180 | to_sun6i_desc(struct dma_async_tx_descriptor *tx) | ||
181 | { | ||
182 | return container_of(tx, struct sun6i_desc, vd.tx); | ||
183 | } | ||
184 | |||
185 | static inline void sun6i_dma_dump_com_regs(struct sun6i_dma_dev *sdev) | ||
186 | { | ||
187 | dev_dbg(sdev->slave.dev, "Common register:\n" | ||
188 | "\tmask0(%04x): 0x%08x\n" | ||
189 | "\tmask1(%04x): 0x%08x\n" | ||
190 | "\tpend0(%04x): 0x%08x\n" | ||
191 | "\tpend1(%04x): 0x%08x\n" | ||
192 | "\tstats(%04x): 0x%08x\n", | ||
193 | DMA_IRQ_EN(0), readl(sdev->base + DMA_IRQ_EN(0)), | ||
194 | DMA_IRQ_EN(1), readl(sdev->base + DMA_IRQ_EN(1)), | ||
195 | DMA_IRQ_STAT(0), readl(sdev->base + DMA_IRQ_STAT(0)), | ||
196 | DMA_IRQ_STAT(1), readl(sdev->base + DMA_IRQ_STAT(1)), | ||
197 | DMA_STAT, readl(sdev->base + DMA_STAT)); | ||
198 | } | ||
199 | |||
200 | static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev, | ||
201 | struct sun6i_pchan *pchan) | ||
202 | { | ||
203 | phys_addr_t reg = virt_to_phys(pchan->base); | ||
204 | |||
205 | dev_dbg(sdev->slave.dev, "Chan %d reg: %pa\n" | ||
206 | "\t___en(%04x): \t0x%08x\n" | ||
207 | "\tpause(%04x): \t0x%08x\n" | ||
208 | "\tstart(%04x): \t0x%08x\n" | ||
209 | "\t__cfg(%04x): \t0x%08x\n" | ||
210 | "\t__src(%04x): \t0x%08x\n" | ||
211 | "\t__dst(%04x): \t0x%08x\n" | ||
212 | "\tcount(%04x): \t0x%08x\n" | ||
213 | "\t_para(%04x): \t0x%08x\n\n", | ||
214 | pchan->idx, ®, | ||
215 | DMA_CHAN_ENABLE, | ||
216 | readl(pchan->base + DMA_CHAN_ENABLE), | ||
217 | DMA_CHAN_PAUSE, | ||
218 | readl(pchan->base + DMA_CHAN_PAUSE), | ||
219 | DMA_CHAN_LLI_ADDR, | ||
220 | readl(pchan->base + DMA_CHAN_LLI_ADDR), | ||
221 | DMA_CHAN_CUR_CFG, | ||
222 | readl(pchan->base + DMA_CHAN_CUR_CFG), | ||
223 | DMA_CHAN_CUR_SRC, | ||
224 | readl(pchan->base + DMA_CHAN_CUR_SRC), | ||
225 | DMA_CHAN_CUR_DST, | ||
226 | readl(pchan->base + DMA_CHAN_CUR_DST), | ||
227 | DMA_CHAN_CUR_CNT, | ||
228 | readl(pchan->base + DMA_CHAN_CUR_CNT), | ||
229 | DMA_CHAN_CUR_PARA, | ||
230 | readl(pchan->base + DMA_CHAN_CUR_PARA)); | ||
231 | } | ||
232 | |||
233 | static inline int convert_burst(u32 maxburst, u8 *burst) | ||
234 | { | ||
235 | switch (maxburst) { | ||
236 | case 1: | ||
237 | *burst = 0; | ||
238 | break; | ||
239 | case 8: | ||
240 | *burst = 2; | ||
241 | break; | ||
242 | default: | ||
243 | return -EINVAL; | ||
244 | } | ||
245 | |||
246 | return 0; | ||
247 | } | ||
248 | |||
249 | static inline int convert_buswidth(enum dma_slave_buswidth addr_width, u8 *width) | ||
250 | { | ||
251 | if ((addr_width < DMA_SLAVE_BUSWIDTH_1_BYTE) || | ||
252 | (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)) | ||
253 | return -EINVAL; | ||
254 | |||
255 | *width = addr_width >> 1; | ||
256 | return 0; | ||
257 | } | ||
258 | |||
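Both converters encode the hardware's log2-style fields. The DMA_SLAVE_BUSWIDTH_* values are the byte counts themselves (1, 2, 4), so a single right shift yields the register encoding 0/1/2, and only single-beat and eight-beat bursts are representable. What that means concretely, assuming nothing beyond the two functions above:

    u8 v;

    convert_buswidth(DMA_SLAVE_BUSWIDTH_1_BYTE, &v);   /* v == 0 */
    convert_buswidth(DMA_SLAVE_BUSWIDTH_2_BYTES, &v);  /* v == 1 */
    convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES, &v);  /* v == 2 */

    convert_burst(1, &v);   /* v == 0 */
    convert_burst(8, &v);   /* v == 2 */
    convert_burst(4, &v);   /* rejected: returns -EINVAL */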
259 | static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev, | ||
260 | struct sun6i_dma_lli *next, | ||
261 | dma_addr_t next_phy, | ||
262 | struct sun6i_desc *txd) | ||
263 | { | ||
264 | if ((!prev && !txd) || !next) | ||
265 | return NULL; | ||
266 | |||
267 | if (!prev) { | ||
268 | txd->p_lli = next_phy; | ||
269 | txd->v_lli = next; | ||
270 | } else { | ||
271 | prev->p_lli_next = next_phy; | ||
272 | prev->v_lli_next = next; | ||
273 | } | ||
274 | |||
275 | next->p_lli_next = LLI_LAST_ITEM; | ||
276 | next->v_lli_next = NULL; | ||
277 | |||
278 | return next; | ||
279 | } | ||
280 | |||
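sun6i_dma_lli_add() links one node at a time: the first call (prev == NULL) anchors the chain in the descriptor, each later call appends to the running tail, and the newest node always carries LLI_LAST_ITEM until another node displaces it. A two-node chain, assuming txd and sdev as in the prep callbacks below (error handling omitted):

    struct sun6i_dma_lli *first, *second, *prev;
    dma_addr_t p_first, p_second;

    first = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_first);
    second = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_second);

    /* Anchors the chain: txd->p_lli/v_lli now point at 'first'. */
    prev = sun6i_dma_lli_add(NULL, first, p_first, txd);

    /* Appends: first->p_lli_next = p_second, and 'second' becomes
     * the LLI_LAST_ITEM-terminated tail. */
    prev = sun6i_dma_lli_add(prev, second, p_second, txd);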
281 | static inline int sun6i_dma_cfg_lli(struct sun6i_dma_lli *lli, | ||
282 | dma_addr_t src, | ||
283 | dma_addr_t dst, u32 len, | ||
284 | struct dma_slave_config *config) | ||
285 | { | ||
286 | u8 src_width, dst_width, src_burst, dst_burst; | ||
287 | int ret; | ||
288 | |||
289 | if (!config) | ||
290 | return -EINVAL; | ||
291 | |||
292 | ret = convert_burst(config->src_maxburst, &src_burst); | ||
293 | if (ret) | ||
294 | return ret; | ||
295 | |||
296 | ret = convert_burst(config->dst_maxburst, &dst_burst); | ||
297 | if (ret) | ||
298 | return ret; | ||
299 | |||
300 | ret = convert_buswidth(config->src_addr_width, &src_width); | ||
301 | if (ret) | ||
302 | return ret; | ||
303 | |||
304 | ret = convert_buswidth(config->dst_addr_width, &dst_width); | ||
305 | if (ret) | ||
306 | return ret; | ||
307 | |||
308 | lli->cfg = DMA_CHAN_CFG_SRC_BURST(src_burst) | | ||
309 | DMA_CHAN_CFG_SRC_WIDTH(src_width) | | ||
310 | DMA_CHAN_CFG_DST_BURST(dst_burst) | | ||
311 | DMA_CHAN_CFG_DST_WIDTH(dst_width); | ||
312 | |||
313 | lli->src = src; | ||
314 | lli->dst = dst; | ||
315 | lli->len = len; | ||
316 | lli->para = NORMAL_WAIT; | ||
317 | |||
318 | return 0; | ||
319 | } | ||
320 | |||
321 | static inline void sun6i_dma_dump_lli(struct sun6i_vchan *vchan, | ||
322 | struct sun6i_dma_lli *lli) | ||
323 | { | ||
324 | phys_addr_t p_lli = virt_to_phys(lli); | ||
325 | |||
326 | dev_dbg(chan2dev(&vchan->vc.chan), | ||
327 | "\n\tdesc: p - %pa v - 0x%p\n" | ||
328 | "\t\tc - 0x%08x s - 0x%08x d - 0x%08x\n" | ||
329 | "\t\tl - 0x%08x p - 0x%08x n - 0x%08x\n", | ||
330 | &p_lli, lli, | ||
331 | lli->cfg, lli->src, lli->dst, | ||
332 | lli->len, lli->para, lli->p_lli_next); | ||
333 | } | ||
334 | |||
335 | static void sun6i_dma_free_desc(struct virt_dma_desc *vd) | ||
336 | { | ||
337 | struct sun6i_desc *txd = to_sun6i_desc(&vd->tx); | ||
338 | struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vd->tx.chan->device); | ||
339 | struct sun6i_dma_lli *v_lli, *v_next; | ||
340 | dma_addr_t p_lli, p_next; | ||
341 | |||
342 | if (unlikely(!txd)) | ||
343 | return; | ||
344 | |||
345 | p_lli = txd->p_lli; | ||
346 | v_lli = txd->v_lli; | ||
347 | |||
348 | while (v_lli) { | ||
349 | v_next = v_lli->v_lli_next; | ||
350 | p_next = v_lli->p_lli_next; | ||
351 | |||
352 | dma_pool_free(sdev->pool, v_lli, p_lli); | ||
353 | |||
354 | v_lli = v_next; | ||
355 | p_lli = p_next; | ||
356 | } | ||
357 | |||
358 | kfree(txd); | ||
359 | } | ||
360 | |||
361 | static int sun6i_dma_terminate_all(struct sun6i_vchan *vchan) | ||
362 | { | ||
363 | struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device); | ||
364 | struct sun6i_pchan *pchan = vchan->phy; | ||
365 | unsigned long flags; | ||
366 | LIST_HEAD(head); | ||
367 | |||
368 | spin_lock(&sdev->lock); | ||
369 | list_del_init(&vchan->node); | ||
370 | spin_unlock(&sdev->lock); | ||
371 | |||
372 | spin_lock_irqsave(&vchan->vc.lock, flags); | ||
373 | |||
374 | vchan_get_all_descriptors(&vchan->vc, &head); | ||
375 | |||
376 | if (pchan) { | ||
377 | writel(DMA_CHAN_ENABLE_STOP, pchan->base + DMA_CHAN_ENABLE); | ||
378 | writel(DMA_CHAN_PAUSE_RESUME, pchan->base + DMA_CHAN_PAUSE); | ||
379 | |||
380 | vchan->phy = NULL; | ||
381 | pchan->vchan = NULL; | ||
382 | pchan->desc = NULL; | ||
383 | pchan->done = NULL; | ||
384 | } | ||
385 | |||
386 | spin_unlock_irqrestore(&vchan->vc.lock, flags); | ||
387 | |||
388 | vchan_dma_desc_free_list(&vchan->vc, &head); | ||
389 | |||
390 | return 0; | ||
391 | } | ||
392 | |||
393 | static int sun6i_dma_start_desc(struct sun6i_vchan *vchan) | ||
394 | { | ||
395 | struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device); | ||
396 | struct virt_dma_desc *desc = vchan_next_desc(&vchan->vc); | ||
397 | struct sun6i_pchan *pchan = vchan->phy; | ||
398 | u32 irq_val, irq_reg, irq_offset; | ||
399 | |||
400 | if (!pchan) | ||
401 | return -EAGAIN; | ||
402 | |||
403 | if (!desc) { | ||
404 | pchan->desc = NULL; | ||
405 | pchan->done = NULL; | ||
406 | return -EAGAIN; | ||
407 | } | ||
408 | |||
409 | list_del(&desc->node); | ||
410 | |||
411 | pchan->desc = to_sun6i_desc(&desc->tx); | ||
412 | pchan->done = NULL; | ||
413 | |||
414 | sun6i_dma_dump_lli(vchan, pchan->desc->v_lli); | ||
415 | |||
416 | irq_reg = pchan->idx / DMA_IRQ_CHAN_NR; | ||
417 | irq_offset = pchan->idx % DMA_IRQ_CHAN_NR; | ||
418 | |||
419 | irq_val = readl(sdev->base + DMA_IRQ_EN(irq_offset)); | ||
420 | irq_val |= DMA_IRQ_QUEUE << (irq_offset * DMA_IRQ_CHAN_WIDTH); | ||
421 | writel(irq_val, sdev->base + DMA_IRQ_EN(irq_offset)); | ||
422 | |||
423 | writel(pchan->desc->p_lli, pchan->base + DMA_CHAN_LLI_ADDR); | ||
424 | writel(DMA_CHAN_ENABLE_START, pchan->base + DMA_CHAN_ENABLE); | ||
425 | |||
426 | sun6i_dma_dump_com_regs(sdev); | ||
427 | sun6i_dma_dump_chan_regs(sdev, pchan); | ||
428 | |||
429 | return 0; | ||
430 | } | ||
431 | |||
432 | static void sun6i_dma_tasklet(unsigned long data) | ||
433 | { | ||
434 | struct sun6i_dma_dev *sdev = (struct sun6i_dma_dev *)data; | ||
435 | struct sun6i_vchan *vchan; | ||
436 | struct sun6i_pchan *pchan; | ||
437 | unsigned int pchan_alloc = 0; | ||
438 | unsigned int pchan_idx; | ||
439 | |||
440 | list_for_each_entry(vchan, &sdev->slave.channels, vc.chan.device_node) { | ||
441 | spin_lock_irq(&vchan->vc.lock); | ||
442 | |||
443 | pchan = vchan->phy; | ||
444 | |||
445 | if (pchan && pchan->done) { | ||
446 | if (sun6i_dma_start_desc(vchan)) { | ||
447 | /* | ||
448 | * No current txd associated with this channel | ||
449 | */ | ||
450 | dev_dbg(sdev->slave.dev, "pchan %u: free\n", | ||
451 | pchan->idx); | ||
452 | |||
453 | /* Mark this channel free */ | ||
454 | vchan->phy = NULL; | ||
455 | pchan->vchan = NULL; | ||
456 | } | ||
457 | } | ||
458 | spin_unlock_irq(&vchan->vc.lock); | ||
459 | } | ||
460 | |||
461 | spin_lock_irq(&sdev->lock); | ||
462 | for (pchan_idx = 0; pchan_idx < NR_MAX_CHANNELS; pchan_idx++) { | ||
463 | pchan = &sdev->pchans[pchan_idx]; | ||
464 | |||
465 | if (pchan->vchan || list_empty(&sdev->pending)) | ||
466 | continue; | ||
467 | |||
468 | vchan = list_first_entry(&sdev->pending, | ||
469 | struct sun6i_vchan, node); | ||
470 | |||
471 | /* Remove from pending channels */ | ||
472 | list_del_init(&vchan->node); | ||
473 | pchan_alloc |= BIT(pchan_idx); | ||
474 | |||
475 | /* Mark this channel allocated */ | ||
476 | pchan->vchan = vchan; | ||
477 | vchan->phy = pchan; | ||
478 | dev_dbg(sdev->slave.dev, "pchan %u: alloc vchan %p\n", | ||
479 | pchan->idx, &vchan->vc); | ||
480 | } | ||
481 | spin_unlock_irq(&sdev->lock); | ||
482 | |||
483 | for (pchan_idx = 0; pchan_idx < NR_MAX_CHANNELS; pchan_idx++) { | ||
484 | if (!(pchan_alloc & BIT(pchan_idx))) | ||
485 | continue; | ||
486 | |||
487 | pchan = sdev->pchans + pchan_idx; | ||
488 | vchan = pchan->vchan; | ||
489 | if (vchan) { | ||
490 | spin_lock_irq(&vchan->vc.lock); | ||
491 | sun6i_dma_start_desc(vchan); | ||
492 | spin_unlock_irq(&vchan->vc.lock); | ||
493 | } | ||
494 | } | ||
495 | } | ||
496 | |||
497 | static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id) | ||
498 | { | ||
499 | struct sun6i_dma_dev *sdev = dev_id; | ||
500 | struct sun6i_vchan *vchan; | ||
501 | struct sun6i_pchan *pchan; | ||
502 | int i, j, ret = IRQ_NONE; | ||
503 | u32 status; | ||
504 | |||
505 | for (i = 0; i < 2; i++) { | ||
506 | status = readl(sdev->base + DMA_IRQ_STAT(i)); | ||
507 | if (!status) | ||
508 | continue; | ||
509 | |||
510 | dev_dbg(sdev->slave.dev, "DMA irq status %s: 0x%x\n", | ||
511 | i ? "high" : "low", status); | ||
512 | |||
513 | writel(status, sdev->base + DMA_IRQ_STAT(i)); | ||
514 | |||
515 | for (j = 0; (j < 8) && status; j++) { | ||
516 | if (status & DMA_IRQ_QUEUE) { | ||
517 | pchan = sdev->pchans + j; | ||
518 | vchan = pchan->vchan; | ||
519 | |||
520 | if (vchan) { | ||
521 | spin_lock(&vchan->vc.lock); | ||
522 | vchan_cookie_complete(&pchan->desc->vd); | ||
523 | pchan->done = pchan->desc; | ||
524 | spin_unlock(&vchan->vc.lock); | ||
525 | } | ||
526 | } | ||
527 | |||
528 | status = status >> 4; | ||
529 | } | ||
530 | |||
531 | if (!atomic_read(&sdev->tasklet_shutdown)) | ||
532 | tasklet_schedule(&sdev->task); | ||
533 | ret = IRQ_HANDLED; | ||
534 | } | ||
535 | |||
536 | return ret; | ||
537 | } | ||
538 | |||
539 | static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( | ||
540 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | ||
541 | size_t len, unsigned long flags) | ||
542 | { | ||
543 | struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); | ||
544 | struct sun6i_vchan *vchan = to_sun6i_vchan(chan); | ||
545 | struct dma_slave_config *sconfig = &vchan->cfg; | ||
546 | struct sun6i_dma_lli *v_lli; | ||
547 | struct sun6i_desc *txd; | ||
548 | dma_addr_t p_lli; | ||
549 | int ret; | ||
550 | |||
551 | dev_dbg(chan2dev(chan), | ||
552 | "%s; chan: %d, dest: %pad, src: %pad, len: %zu. flags: 0x%08lx\n", | ||
553 | __func__, vchan->vc.chan.chan_id, &dest, &src, len, flags); | ||
554 | |||
555 | if (!len) | ||
556 | return NULL; | ||
557 | |||
558 | txd = kzalloc(sizeof(*txd), GFP_NOWAIT); | ||
559 | if (!txd) | ||
560 | return NULL; | ||
561 | |||
562 | v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli); | ||
563 | if (!v_lli) { | ||
564 | dev_err(sdev->slave.dev, "Failed to alloc lli memory\n"); | ||
565 | goto err_txd_free; | ||
566 | } | ||
567 | |||
568 | ret = sun6i_dma_cfg_lli(v_lli, src, dest, len, sconfig); | ||
569 | if (ret) | ||
570 | goto err_dma_free; | ||
571 | |||
572 | v_lli->cfg |= DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | | ||
573 | DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | | ||
574 | DMA_CHAN_CFG_DST_LINEAR_MODE | | ||
575 | DMA_CHAN_CFG_SRC_LINEAR_MODE; | ||
576 | |||
577 | sun6i_dma_lli_add(NULL, v_lli, p_lli, txd); | ||
578 | |||
579 | sun6i_dma_dump_lli(vchan, v_lli); | ||
580 | |||
581 | return vchan_tx_prep(&vchan->vc, &txd->vd, flags); | ||
582 | |||
583 | err_dma_free: | ||
584 | dma_pool_free(sdev->pool, v_lli, p_lli); | ||
585 | err_txd_free: | ||
586 | kfree(txd); | ||
587 | return NULL; | ||
588 | } | ||
589 | |||
590 | static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( | ||
591 | struct dma_chan *chan, struct scatterlist *sgl, | ||
592 | unsigned int sg_len, enum dma_transfer_direction dir, | ||
593 | unsigned long flags, void *context) | ||
594 | { | ||
595 | struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); | ||
596 | struct sun6i_vchan *vchan = to_sun6i_vchan(chan); | ||
597 | struct dma_slave_config *sconfig = &vchan->cfg; | ||
598 | struct sun6i_dma_lli *v_lli, *prev = NULL; | ||
599 | struct sun6i_desc *txd; | ||
600 | struct scatterlist *sg; | ||
601 | dma_addr_t p_lli; | ||
602 | int i, ret; | ||
603 | |||
604 | if (!sgl) | ||
605 | return NULL; | ||
606 | |||
607 | if (!is_slave_direction(dir)) { | ||
608 | dev_err(chan2dev(chan), "Invalid DMA direction\n"); | ||
609 | return NULL; | ||
610 | } | ||
611 | |||
612 | txd = kzalloc(sizeof(*txd), GFP_NOWAIT); | ||
613 | if (!txd) | ||
614 | return NULL; | ||
615 | |||
616 | for_each_sg(sgl, sg, sg_len, i) { | ||
617 | v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli); | ||
618 | if (!v_lli) | ||
619 | goto err_lli_free; | ||
620 | |||
621 | if (dir == DMA_MEM_TO_DEV) { | ||
622 | ret = sun6i_dma_cfg_lli(v_lli, sg_dma_address(sg), | ||
623 | sconfig->dst_addr, sg_dma_len(sg), | ||
624 | sconfig); | ||
625 | if (ret) | ||
626 | goto err_cur_lli_free; | ||
627 | |||
628 | v_lli->cfg |= DMA_CHAN_CFG_DST_IO_MODE | | ||
629 | DMA_CHAN_CFG_SRC_LINEAR_MODE | | ||
630 | DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | | ||
631 | DMA_CHAN_CFG_DST_DRQ(vchan->port); | ||
632 | |||
633 | dev_dbg(chan2dev(chan), | ||
634 | "%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n", | ||
635 | __func__, vchan->vc.chan.chan_id, | ||
636 | &sconfig->dst_addr, &sg_dma_address(sg), | ||
637 | sg_dma_len(sg), flags); | ||
638 | |||
639 | } else { | ||
640 | ret = sun6i_dma_cfg_lli(v_lli, sconfig->src_addr, | ||
641 | sg_dma_address(sg), sg_dma_len(sg), | ||
642 | sconfig); | ||
643 | if (ret) | ||
644 | goto err_cur_lli_free; | ||
645 | |||
646 | v_lli->cfg |= DMA_CHAN_CFG_DST_LINEAR_MODE | | ||
647 | DMA_CHAN_CFG_SRC_IO_MODE | | ||
648 | DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | | ||
649 | DMA_CHAN_CFG_SRC_DRQ(vchan->port); | ||
650 | |||
651 | dev_dbg(chan2dev(chan), | ||
652 | "%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n", | ||
653 | __func__, vchan->vc.chan.chan_id, | ||
654 | &sg_dma_address(sg), &sconfig->src_addr, | ||
655 | sg_dma_len(sg), flags); | ||
656 | } | ||
657 | |||
658 | prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd); | ||
659 | } | ||
660 | |||
661 | dev_dbg(chan2dev(chan), "First: %pad\n", &txd->p_lli); | ||
662 | for (prev = txd->v_lli; prev; prev = prev->v_lli_next) | ||
663 | sun6i_dma_dump_lli(vchan, prev); | ||
664 | |||
665 | return vchan_tx_prep(&vchan->vc, &txd->vd, flags); | ||
666 | |||
667 | err_cur_lli_free: | ||
668 | dma_pool_free(sdev->pool, v_lli, p_lli); | ||
669 | err_lli_free: | ||
670 | for (prev = txd->v_lli; prev; prev = prev->v_lli_next) | ||
671 | dma_pool_free(sdev->pool, prev, virt_to_phys(prev)); | ||
672 | kfree(txd); | ||
673 | return NULL; | ||
674 | } | ||
675 | |||
676 | static int sun6i_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
677 | unsigned long arg) | ||
678 | { | ||
679 | struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); | ||
680 | struct sun6i_vchan *vchan = to_sun6i_vchan(chan); | ||
681 | struct sun6i_pchan *pchan = vchan->phy; | ||
682 | unsigned long flags; | ||
683 | int ret = 0; | ||
684 | |||
685 | switch (cmd) { | ||
686 | case DMA_RESUME: | ||
687 | dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc); | ||
688 | |||
689 | spin_lock_irqsave(&vchan->vc.lock, flags); | ||
690 | |||
691 | if (pchan) { | ||
692 | writel(DMA_CHAN_PAUSE_RESUME, | ||
693 | pchan->base + DMA_CHAN_PAUSE); | ||
694 | } else if (!list_empty(&vchan->vc.desc_issued)) { | ||
695 | spin_lock(&sdev->lock); | ||
696 | list_add_tail(&vchan->node, &sdev->pending); | ||
697 | spin_unlock(&sdev->lock); | ||
698 | } | ||
699 | |||
700 | spin_unlock_irqrestore(&vchan->vc.lock, flags); | ||
701 | break; | ||
702 | |||
703 | case DMA_PAUSE: | ||
704 | dev_dbg(chan2dev(chan), "vchan %p: pause\n", &vchan->vc); | ||
705 | |||
706 | if (pchan) { | ||
707 | writel(DMA_CHAN_PAUSE_PAUSE, | ||
708 | pchan->base + DMA_CHAN_PAUSE); | ||
709 | } else { | ||
710 | spin_lock(&sdev->lock); | ||
711 | list_del_init(&vchan->node); | ||
712 | spin_unlock(&sdev->lock); | ||
713 | } | ||
714 | break; | ||
715 | |||
716 | case DMA_TERMINATE_ALL: | ||
717 | ret = sun6i_dma_terminate_all(vchan); | ||
718 | break; | ||
719 | case DMA_SLAVE_CONFIG: | ||
720 | memcpy(&vchan->cfg, (void *)arg, sizeof(struct dma_slave_config)); | ||
721 | break; | ||
722 | default: | ||
723 | ret = -ENXIO; | ||
724 | break; | ||
725 | } | ||
726 | return ret; | ||
727 | } | ||
728 | |||
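None of these commands are issued directly; they arrive through the generic dmaengine wrappers. A sketch of the calls that land in each case above, assuming chan came from this controller and fifo is a hypothetical device FIFO address:

    struct dma_slave_config cfg = {
            .src_addr       = fifo,
            .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
            .src_maxburst   = 8,    /* 1 or 8 only, per convert_burst() */
            .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
            .dst_maxburst   = 8,
    };

    dmaengine_slave_config(chan, &cfg); /* DMA_SLAVE_CONFIG: copied to vchan->cfg */
    dmaengine_pause(chan);              /* DMA_PAUSE */
    dmaengine_resume(chan);             /* DMA_RESUME */
    dmaengine_terminate_all(chan);      /* DMA_TERMINATE_ALL */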
729 | static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan, | ||
730 | dma_cookie_t cookie, | ||
731 | struct dma_tx_state *state) | ||
732 | { | ||
733 | struct sun6i_vchan *vchan = to_sun6i_vchan(chan); | ||
734 | struct sun6i_pchan *pchan = vchan->phy; | ||
735 | struct sun6i_dma_lli *lli; | ||
736 | struct virt_dma_desc *vd; | ||
737 | struct sun6i_desc *txd; | ||
738 | enum dma_status ret; | ||
739 | unsigned long flags; | ||
740 | size_t bytes = 0; | ||
741 | |||
742 | ret = dma_cookie_status(chan, cookie, state); | ||
743 | if (ret == DMA_COMPLETE) | ||
744 | return ret; | ||
745 | |||
746 | spin_lock_irqsave(&vchan->vc.lock, flags); | ||
747 | |||
748 | vd = vchan_find_desc(&vchan->vc, cookie); | ||
749 | txd = to_sun6i_desc(&vd->tx); | ||
750 | |||
751 | if (vd) { | ||
752 | for (lli = txd->v_lli; lli != NULL; lli = lli->v_lli_next) | ||
753 | bytes += lli->len; | ||
754 | } else if (!pchan || !pchan->desc) { | ||
755 | bytes = 0; | ||
756 | } else { | ||
757 | bytes = readl(pchan->base + DMA_CHAN_CUR_CNT); | ||
758 | } | ||
759 | |||
760 | spin_unlock_irqrestore(&vchan->vc.lock, flags); | ||
761 | |||
762 | dma_set_residue(state, bytes); | ||
763 | |||
764 | return ret; | ||
765 | } | ||
766 | |||
767 | static void sun6i_dma_issue_pending(struct dma_chan *chan) | ||
768 | { | ||
769 | struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); | ||
770 | struct sun6i_vchan *vchan = to_sun6i_vchan(chan); | ||
771 | unsigned long flags; | ||
772 | |||
773 | spin_lock_irqsave(&vchan->vc.lock, flags); | ||
774 | |||
775 | if (vchan_issue_pending(&vchan->vc)) { | ||
776 | spin_lock(&sdev->lock); | ||
777 | |||
778 | if (!vchan->phy && list_empty(&vchan->node)) { | ||
779 | list_add_tail(&vchan->node, &sdev->pending); | ||
780 | tasklet_schedule(&sdev->task); | ||
781 | dev_dbg(chan2dev(chan), "vchan %p: issued\n", | ||
782 | &vchan->vc); | ||
783 | } | ||
784 | |||
785 | spin_unlock(&sdev->lock); | ||
786 | } else { | ||
787 | dev_dbg(chan2dev(chan), "vchan %p: nothing to issue\n", | ||
788 | &vchan->vc); | ||
789 | } | ||
790 | |||
791 | spin_unlock_irqrestore(&vchan->vc.lock, flags); | ||
792 | } | ||
793 | |||
794 | static int sun6i_dma_alloc_chan_resources(struct dma_chan *chan) | ||
795 | { | ||
796 | return 0; | ||
797 | } | ||
798 | |||
799 | static void sun6i_dma_free_chan_resources(struct dma_chan *chan) | ||
800 | { | ||
801 | struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); | ||
802 | struct sun6i_vchan *vchan = to_sun6i_vchan(chan); | ||
803 | unsigned long flags; | ||
804 | |||
805 | spin_lock_irqsave(&sdev->lock, flags); | ||
806 | list_del_init(&vchan->node); | ||
807 | spin_unlock_irqrestore(&sdev->lock, flags); | ||
808 | |||
809 | vchan_free_chan_resources(&vchan->vc); | ||
810 | } | ||
811 | |||
812 | static struct dma_chan *sun6i_dma_of_xlate(struct of_phandle_args *dma_spec, | ||
813 | struct of_dma *ofdma) | ||
814 | { | ||
815 | struct sun6i_dma_dev *sdev = ofdma->of_dma_data; | ||
816 | struct sun6i_vchan *vchan; | ||
817 | struct dma_chan *chan; | ||
818 | u8 port = dma_spec->args[0]; | ||
819 | |||
820 | if (port > NR_MAX_REQUESTS) | ||
821 | return NULL; | ||
822 | |||
823 | chan = dma_get_any_slave_channel(&sdev->slave); | ||
824 | if (!chan) | ||
825 | return NULL; | ||
826 | |||
827 | vchan = to_sun6i_vchan(chan); | ||
828 | vchan->port = port; | ||
829 | |||
830 | return chan; | ||
831 | } | ||
832 | |||
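The xlate is deliberately thin: any free virtual channel will do, and the only information taken from the DT specifier is args[0], the DRQ port, which is stashed in the vchan and later folded into the LLI cfg word by the prep callbacks. A client therefore just does the usual named lookup; "rx" here stands for whatever the consumer's dma-names property says:

    struct dma_chan *chan;

    /* Resolved through sun6i_dma_of_xlate(); the single specifier
     * cell becomes vchan->port. */
    chan = dma_request_slave_channel(dev, "rx");
    if (!chan)
            return -ENODEV;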
833 | static inline void sun6i_kill_tasklet(struct sun6i_dma_dev *sdev) | ||
834 | { | ||
835 | /* Disable all interrupts from DMA */ | ||
836 | writel(0, sdev->base + DMA_IRQ_EN(0)); | ||
837 | writel(0, sdev->base + DMA_IRQ_EN(1)); | ||
838 | |||
839 | /* Prevent spurious interrupts from scheduling the tasklet */ | ||
840 | atomic_inc(&sdev->tasklet_shutdown); | ||
841 | |||
842 | /* Make sure we won't have any further interrupts */ | ||
843 | devm_free_irq(sdev->slave.dev, sdev->irq, sdev); | ||
844 | |||
845 | /* Actually prevent the tasklet from being scheduled */ | ||
846 | tasklet_kill(&sdev->task); | ||
847 | } | ||
848 | |||
849 | static inline void sun6i_dma_free(struct sun6i_dma_dev *sdev) | ||
850 | { | ||
851 | int i; | ||
852 | |||
853 | for (i = 0; i < NR_MAX_VCHANS; i++) { | ||
854 | struct sun6i_vchan *vchan = &sdev->vchans[i]; | ||
855 | |||
856 | list_del(&vchan->vc.chan.device_node); | ||
857 | tasklet_kill(&vchan->vc.task); | ||
858 | } | ||
859 | } | ||
860 | |||
861 | static int sun6i_dma_probe(struct platform_device *pdev) | ||
862 | { | ||
863 | struct sun6i_dma_dev *sdc; | ||
864 | struct resource *res; | ||
865 | struct clk *mux, *pll6; | ||
866 | int ret, i; | ||
867 | |||
868 | sdc = devm_kzalloc(&pdev->dev, sizeof(*sdc), GFP_KERNEL); | ||
869 | if (!sdc) | ||
870 | return -ENOMEM; | ||
871 | |||
872 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
873 | sdc->base = devm_ioremap_resource(&pdev->dev, res); | ||
874 | if (IS_ERR(sdc->base)) | ||
875 | return PTR_ERR(sdc->base); | ||
876 | |||
877 | sdc->irq = platform_get_irq(pdev, 0); | ||
878 | if (sdc->irq < 0) { | ||
879 | dev_err(&pdev->dev, "Cannot claim IRQ\n"); | ||
880 | return sdc->irq; | ||
881 | } | ||
882 | |||
883 | sdc->clk = devm_clk_get(&pdev->dev, NULL); | ||
884 | if (IS_ERR(sdc->clk)) { | ||
885 | dev_err(&pdev->dev, "No clock specified\n"); | ||
886 | return PTR_ERR(sdc->clk); | ||
887 | } | ||
888 | |||
889 | mux = clk_get(NULL, "ahb1_mux"); | ||
890 | if (IS_ERR(mux)) { | ||
891 | dev_err(&pdev->dev, "Couldn't get AHB1 Mux\n"); | ||
892 | return PTR_ERR(mux); | ||
893 | } | ||
894 | |||
895 | pll6 = clk_get(NULL, "pll6"); | ||
896 | if (IS_ERR(pll6)) { | ||
897 | dev_err(&pdev->dev, "Couldn't get PLL6\n"); | ||
898 | clk_put(mux); | ||
899 | return PTR_ERR(pll6); | ||
900 | } | ||
901 | |||
902 | ret = clk_set_parent(mux, pll6); | ||
903 | clk_put(pll6); | ||
904 | clk_put(mux); | ||
905 | |||
906 | if (ret) { | ||
907 | dev_err(&pdev->dev, "Couldn't reparent AHB1 on PLL6\n"); | ||
908 | return ret; | ||
909 | } | ||
910 | |||
911 | sdc->rstc = devm_reset_control_get(&pdev->dev, NULL); | ||
912 | if (IS_ERR(sdc->rstc)) { | ||
913 | dev_err(&pdev->dev, "No reset controller specified\n"); | ||
914 | return PTR_ERR(sdc->rstc); | ||
915 | } | ||
916 | |||
917 | sdc->pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev, | ||
918 | sizeof(struct sun6i_dma_lli), 4, 0); | ||
919 | if (!sdc->pool) { | ||
920 | dev_err(&pdev->dev, "No memory for descriptor dma pool\n"); | ||
921 | return -ENOMEM; | ||
922 | } | ||
923 | |||
924 | platform_set_drvdata(pdev, sdc); | ||
925 | INIT_LIST_HEAD(&sdc->pending); | ||
926 | spin_lock_init(&sdc->lock); | ||
927 | |||
928 | dma_cap_set(DMA_PRIVATE, sdc->slave.cap_mask); | ||
929 | dma_cap_set(DMA_MEMCPY, sdc->slave.cap_mask); | ||
930 | dma_cap_set(DMA_SLAVE, sdc->slave.cap_mask); | ||
931 | |||
932 | INIT_LIST_HEAD(&sdc->slave.channels); | ||
933 | sdc->slave.device_alloc_chan_resources = sun6i_dma_alloc_chan_resources; | ||
934 | sdc->slave.device_free_chan_resources = sun6i_dma_free_chan_resources; | ||
935 | sdc->slave.device_tx_status = sun6i_dma_tx_status; | ||
936 | sdc->slave.device_issue_pending = sun6i_dma_issue_pending; | ||
937 | sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg; | ||
938 | sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy; | ||
939 | sdc->slave.device_control = sun6i_dma_control; | ||
940 | sdc->slave.chancnt = NR_MAX_VCHANS; | ||
941 | |||
942 | sdc->slave.dev = &pdev->dev; | ||
943 | |||
944 | sdc->pchans = devm_kcalloc(&pdev->dev, NR_MAX_CHANNELS, | ||
945 | sizeof(struct sun6i_pchan), GFP_KERNEL); | ||
946 | if (!sdc->pchans) | ||
947 | return -ENOMEM; | ||
948 | |||
949 | sdc->vchans = devm_kcalloc(&pdev->dev, NR_MAX_VCHANS, | ||
950 | sizeof(struct sun6i_vchan), GFP_KERNEL); | ||
951 | if (!sdc->vchans) | ||
952 | return -ENOMEM; | ||
953 | |||
954 | tasklet_init(&sdc->task, sun6i_dma_tasklet, (unsigned long)sdc); | ||
955 | |||
956 | for (i = 0; i < NR_MAX_CHANNELS; i++) { | ||
957 | struct sun6i_pchan *pchan = &sdc->pchans[i]; | ||
958 | |||
959 | pchan->idx = i; | ||
960 | pchan->base = sdc->base + 0x100 + i * 0x40; | ||
961 | } | ||
962 | |||
963 | for (i = 0; i < NR_MAX_VCHANS; i++) { | ||
964 | struct sun6i_vchan *vchan = &sdc->vchans[i]; | ||
965 | |||
966 | INIT_LIST_HEAD(&vchan->node); | ||
967 | vchan->vc.desc_free = sun6i_dma_free_desc; | ||
968 | vchan_init(&vchan->vc, &sdc->slave); | ||
969 | } | ||
970 | |||
971 | ret = reset_control_deassert(sdc->rstc); | ||
972 | if (ret) { | ||
973 | dev_err(&pdev->dev, "Couldn't deassert the device from reset\n"); | ||
974 | goto err_chan_free; | ||
975 | } | ||
976 | |||
977 | ret = clk_prepare_enable(sdc->clk); | ||
978 | if (ret) { | ||
979 | dev_err(&pdev->dev, "Couldn't enable the clock\n"); | ||
980 | goto err_reset_assert; | ||
981 | } | ||
982 | |||
983 | ret = devm_request_irq(&pdev->dev, sdc->irq, sun6i_dma_interrupt, 0, | ||
984 | dev_name(&pdev->dev), sdc); | ||
985 | if (ret) { | ||
986 | dev_err(&pdev->dev, "Cannot request IRQ\n"); | ||
987 | goto err_clk_disable; | ||
988 | } | ||
989 | |||
990 | ret = dma_async_device_register(&sdc->slave); | ||
991 | if (ret) { | ||
992 | dev_warn(&pdev->dev, "Failed to register DMA engine device\n"); | ||
993 | goto err_irq_disable; | ||
994 | } | ||
995 | |||
996 | ret = of_dma_controller_register(pdev->dev.of_node, sun6i_dma_of_xlate, | ||
997 | sdc); | ||
998 | if (ret) { | ||
999 | dev_err(&pdev->dev, "of_dma_controller_register failed\n"); | ||
1000 | goto err_dma_unregister; | ||
1001 | } | ||
1002 | |||
1003 | return 0; | ||
1004 | |||
1005 | err_dma_unregister: | ||
1006 | dma_async_device_unregister(&sdc->slave); | ||
1007 | err_irq_disable: | ||
1008 | sun6i_kill_tasklet(sdc); | ||
1009 | err_clk_disable: | ||
1010 | clk_disable_unprepare(sdc->clk); | ||
1011 | err_reset_assert: | ||
1012 | reset_control_assert(sdc->rstc); | ||
1013 | err_chan_free: | ||
1014 | sun6i_dma_free(sdc); | ||
1015 | return ret; | ||
1016 | } | ||
1017 | |||
1018 | static int sun6i_dma_remove(struct platform_device *pdev) | ||
1019 | { | ||
1020 | struct sun6i_dma_dev *sdc = platform_get_drvdata(pdev); | ||
1021 | |||
1022 | of_dma_controller_free(pdev->dev.of_node); | ||
1023 | dma_async_device_unregister(&sdc->slave); | ||
1024 | |||
1025 | sun6i_kill_tasklet(sdc); | ||
1026 | |||
1027 | clk_disable_unprepare(sdc->clk); | ||
1028 | reset_control_assert(sdc->rstc); | ||
1029 | |||
1030 | sun6i_dma_free(sdc); | ||
1031 | |||
1032 | return 0; | ||
1033 | } | ||
1034 | |||
1035 | static struct of_device_id sun6i_dma_match[] = { | ||
1036 | { .compatible = "allwinner,sun6i-a31-dma" }, | ||
1037 | { /* sentinel */ } | ||
1038 | }; | ||
1039 | |||
1040 | static struct platform_driver sun6i_dma_driver = { | ||
1041 | .probe = sun6i_dma_probe, | ||
1042 | .remove = sun6i_dma_remove, | ||
1043 | .driver = { | ||
1044 | .name = "sun6i-dma", | ||
1045 | .of_match_table = sun6i_dma_match, | ||
1046 | }, | ||
1047 | }; | ||
1048 | module_platform_driver(sun6i_dma_driver); | ||
1049 | |||
1050 | MODULE_DESCRIPTION("Allwinner A31 DMA Controller Driver"); | ||
1051 | MODULE_AUTHOR("Sugar <shuge@allwinnertech.com>"); | ||
1052 | MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>"); | ||
1053 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 03ad64ecaaf0..16efa603ff65 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c | |||
@@ -1055,7 +1055,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( | |||
1055 | static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( | 1055 | static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( |
1056 | struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len, | 1056 | struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len, |
1057 | size_t period_len, enum dma_transfer_direction direction, | 1057 | size_t period_len, enum dma_transfer_direction direction, |
1058 | unsigned long flags, void *context) | 1058 | unsigned long flags) |
1059 | { | 1059 | { |
1060 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); | 1060 | struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); |
1061 | struct tegra_dma_desc *dma_desc = NULL; | 1061 | struct tegra_dma_desc *dma_desc = NULL; |
diff --git a/include/dt-bindings/dma/nbpfaxi.h b/include/dt-bindings/dma/nbpfaxi.h new file mode 100644 index 000000000000..c1a5b9e0d6a4 --- /dev/null +++ b/include/dt-bindings/dma/nbpfaxi.h | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd. | ||
3 | * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of version 2 of the GNU General Public License as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | |||
10 | #ifndef DT_BINDINGS_NBPFAXI_H | ||
11 | #define DT_BINDINGS_NBPFAXI_H | ||
12 | |||
13 | /** | ||
14 | * Use "#dma-cells = <2>;" with the second integer defining slave DMA flags: | ||
15 | */ | ||
16 | #define NBPF_SLAVE_RQ_HIGH 1 | ||
17 | #define NBPF_SLAVE_RQ_LOW 2 | ||
18 | #define NBPF_SLAVE_RQ_LEVEL 4 | ||
19 | |||
20 | #endif | ||
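Because this header is shared between device trees and C, the flags are plain power-of-two macros meant to be OR-ed into the second specifier cell. For instance, a client with an active-low, level-sensitive request line might combine them like this (the macro name is illustrative):

    #include <dt-bindings/dma/nbpfaxi.h>

    /* Second #dma-cells value for an active-low, level-triggered line. */
    #define MY_DMA_FLAGS    (NBPF_SLAVE_RQ_LOW | NBPF_SLAVE_RQ_LEVEL)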
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 3d1c2aa51530..1f9e642c66ad 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -37,7 +37,6 @@ | |||
37 | */ | 37 | */ |
38 | typedef s32 dma_cookie_t; | 38 | typedef s32 dma_cookie_t; |
39 | #define DMA_MIN_COOKIE 1 | 39 | #define DMA_MIN_COOKIE 1 |
40 | #define DMA_MAX_COOKIE INT_MAX | ||
41 | 40 | ||
42 | static inline int dma_submit_error(dma_cookie_t cookie) | 41 | static inline int dma_submit_error(dma_cookie_t cookie) |
43 | { | 42 | { |
@@ -671,7 +670,7 @@ struct dma_device { | |||
671 | struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)( | 670 | struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)( |
672 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | 671 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, |
673 | size_t period_len, enum dma_transfer_direction direction, | 672 | size_t period_len, enum dma_transfer_direction direction, |
674 | unsigned long flags, void *context); | 673 | unsigned long flags); |
675 | struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)( | 674 | struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)( |
676 | struct dma_chan *chan, struct dma_interleaved_template *xt, | 675 | struct dma_chan *chan, struct dma_interleaved_template *xt, |
677 | unsigned long flags); | 676 | unsigned long flags); |
@@ -746,7 +745,7 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic( | |||
746 | unsigned long flags) | 745 | unsigned long flags) |
747 | { | 746 | { |
748 | return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len, | 747 | return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len, |
749 | period_len, dir, flags, NULL); | 748 | period_len, dir, flags); |
750 | } | 749 | } |
751 | 750 | ||
752 | static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma( | 751 | static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma( |
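Since every caller already goes through the dmaengine_prep_dma_cyclic() wrapper, which passed NULL for context anyway, only providers of the callback need touching, and the conversion is mechanical: drop the trailing parameter, exactly as the sirf, ste_dma40, tegra and shdma hunks above do. For an out-of-tree driver, with a hypothetical foo_prep_dma_cyclic:

    /* before */
    static struct dma_async_tx_descriptor *foo_prep_dma_cyclic(
            struct dma_chan *chan, dma_addr_t buf, size_t buf_len,
            size_t period_len, enum dma_transfer_direction dir,
            unsigned long flags, void *context);

    /* after: 'context' was always NULL for cyclic transfers */
    static struct dma_async_tx_descriptor *foo_prep_dma_cyclic(
            struct dma_chan *chan, dma_addr_t buf, size_t buf_len,
            size_t period_len, enum dma_transfer_direction dir,
            unsigned long flags);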
diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h index ae36298ba076..56bc026c143f 100644 --- a/include/linux/of_dma.h +++ b/include/linux/of_dma.h | |||
@@ -41,6 +41,8 @@ extern struct dma_chan *of_dma_request_slave_channel(struct device_node *np, | |||
41 | const char *name); | 41 | const char *name); |
42 | extern struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, | 42 | extern struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, |
43 | struct of_dma *ofdma); | 43 | struct of_dma *ofdma); |
44 | extern struct dma_chan *of_dma_xlate_by_chan_id(struct of_phandle_args *dma_spec, | ||
45 | struct of_dma *ofdma); | ||
44 | #else | 46 | #else |
45 | static inline int of_dma_controller_register(struct device_node *np, | 47 | static inline int of_dma_controller_register(struct device_node *np, |
46 | struct dma_chan *(*of_dma_xlate) | 48 | struct dma_chan *(*of_dma_xlate) |
@@ -66,6 +68,8 @@ static inline struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_s | |||
66 | return NULL; | 68 | return NULL; |
67 | } | 69 | } |
68 | 70 | ||
71 | #define of_dma_xlate_by_chan_id NULL | ||
72 | |||
69 | #endif | 73 | #endif |
70 | 74 | ||
71 | #endif /* __LINUX_OF_DMA_H */ | 75 | #endif /* __LINUX_OF_DMA_H */ |
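of_dma_xlate_by_chan_id() gives controllers whose request lines map one-to-one onto channel numbers a ready-made translator, so they need no private xlate of their own. Registration is a single call from probe; a sketch assuming ddev points at the driver's already-registered struct dma_device:

    #include <linux/of_dma.h>

    ret = of_dma_controller_register(pdev->dev.of_node,
                                     of_dma_xlate_by_chan_id, ddev);
    if (ret)
            dev_err(&pdev->dev, "failed to register DT DMA controller\n");

The NULL fallback definition in the !CONFIG_OF branch keeps such drivers compiling when OF support is disabled.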
diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h index d05542aafa3e..6a1357d31871 100644 --- a/include/linux/platform_data/dma-imx.h +++ b/include/linux/platform_data/dma-imx.h | |||
@@ -40,6 +40,7 @@ enum sdma_peripheral_type { | |||
40 | IMX_DMATYPE_ASRC, /* ASRC */ | 40 | IMX_DMATYPE_ASRC, /* ASRC */ |
41 | IMX_DMATYPE_ESAI, /* ESAI */ | 41 | IMX_DMATYPE_ESAI, /* ESAI */ |
42 | IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */ | 42 | IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */ |
43 | IMX_DMATYPE_ASRC_SP, /* Shared ASRC */ | ||
43 | }; | 44 | }; |
44 | 45 | ||
45 | enum imx_dma_prio { | 46 | enum imx_dma_prio { |
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h index eb8d5627d080..bdb2710e2aab 100644 --- a/include/linux/platform_data/edma.h +++ b/include/linux/platform_data/edma.h | |||
@@ -150,6 +150,8 @@ void edma_clear_event(unsigned channel); | |||
150 | void edma_pause(unsigned channel); | 150 | void edma_pause(unsigned channel); |
151 | void edma_resume(unsigned channel); | 151 | void edma_resume(unsigned channel); |
152 | 152 | ||
153 | void edma_assign_channel_eventq(unsigned channel, enum dma_event_q eventq_no); | ||
154 | |||
153 | struct edma_rsv_info { | 155 | struct edma_rsv_info { |
154 | 156 | ||
155 | const s16 (*rsv_chans)[2]; | 157 | const s16 (*rsv_chans)[2]; |
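edma_assign_channel_eventq() lets a client move an already-allocated channel onto a specific transfer-controller event queue, for example to keep latency-sensitive traffic off a busy default queue. A hedged one-liner; the channel number and queue are illustrative, and EVENTQ_1 is assumed to be one of the enum dma_event_q values:

    #include <linux/platform_data/edma.h>

    /* Steer channel 12 onto event queue 1 (illustrative values). */
    edma_assign_channel_eventq(12, EVENTQ_1);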
diff --git a/include/linux/sh_dma.h b/include/linux/sh_dma.h index b7b43b82231e..56b97eed28a4 100644 --- a/include/linux/sh_dma.h +++ b/include/linux/sh_dma.h | |||
@@ -95,19 +95,21 @@ struct sh_dmae_pdata { | |||
95 | }; | 95 | }; |
96 | 96 | ||
97 | /* DMAOR definitions */ | 97 | /* DMAOR definitions */ |
98 | #define DMAOR_AE 0x00000004 | 98 | #define DMAOR_AE 0x00000004 /* Address Error Flag */ |
99 | #define DMAOR_NMIF 0x00000002 | 99 | #define DMAOR_NMIF 0x00000002 |
100 | #define DMAOR_DME 0x00000001 | 100 | #define DMAOR_DME 0x00000001 /* DMA Master Enable */ |
101 | 101 | ||
102 | /* Definitions for the SuperH DMAC */ | 102 | /* Definitions for the SuperH DMAC */ |
103 | #define DM_INC 0x00004000 | 103 | #define DM_INC 0x00004000 /* Destination addresses are incremented */ |
104 | #define DM_DEC 0x00008000 | 104 | #define DM_DEC 0x00008000 /* Destination addresses are decremented */ |
105 | #define DM_FIX 0x0000c000 | 105 | #define DM_FIX 0x0000c000 /* Destination address is fixed */ |
106 | #define SM_INC 0x00001000 | 106 | #define SM_INC 0x00001000 /* Source addresses are incremented */ |
107 | #define SM_DEC 0x00002000 | 107 | #define SM_DEC 0x00002000 /* Source addresses are decremented */ |
108 | #define SM_FIX 0x00003000 | 108 | #define SM_FIX 0x00003000 /* Source address is fixed */ |
109 | #define CHCR_DE 0x00000001 | 109 | #define RS_AUTO 0x00000400 /* Auto Request */ |
110 | #define CHCR_TE 0x00000002 | 110 | #define RS_ERS 0x00000800 /* DMA extended resource selector */ |
111 | #define CHCR_IE 0x00000004 | 111 | #define CHCR_DE 0x00000001 /* DMA Enable */ |
112 | #define CHCR_TE 0x00000002 /* Transfer End Flag */ | ||
113 | #define CHCR_IE 0x00000004 /* Interrupt Enable */ | ||
112 | 114 | ||
113 | #endif | 115 | #endif |
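With these names in place, the CHCR values composed elsewhere in the series read straight off this table instead of through magic numbers. The two common configurations, with the transfer-size bits (TS_INDEX2VAL()/log2size_to_chcr()) omitted:

    /* Memory-to-memory default, as in dmae_init(): both addresses
     * increment and the transfer is auto-requested. */
    u32 chcr_mem = DM_INC | SM_INC | RS_AUTO;

    /* Peripheral TX, as in CHCR_TX(): destination fixed on the device
     * FIFO, source walking through memory, request routed via the
     * extended resource selector. */
    u32 chcr_tx = DM_FIX | SM_INC | RS_ERS;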