 drivers/dma/Makefile       |    2 +-
 drivers/dma/sh/Kconfig     |   14 +++++++++++++-
 drivers/dma/sh/Makefile    |    1 +
 drivers/dma/sh/rcar-dmac.c | 1503 ++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 1518 insertions(+), 2 deletions(-)
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 2022b5451377..b290e6a611d0 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -19,7 +19,7 @@ obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
 obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
 obj-$(CONFIG_MX3_IPU) += ipu/
 obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
-obj-$(CONFIG_SH_DMAE_BASE) += sh/
+obj-$(CONFIG_RENESAS_DMA) += sh/
 obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
 obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
 obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 0349125a2e20..8190ad225a1b 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -2,6 +2,10 @@
 # DMA engine configuration for sh
 #
 
+config RENESAS_DMA
+	bool
+	select DMA_ENGINE
+
 #
 # DMA Engine Helpers
 #
@@ -12,7 +16,7 @@ config SH_DMAE_BASE
 	depends on !SUPERH || SH_DMA
 	depends on !SH_DMA_API
 	default y
-	select DMA_ENGINE
+	select RENESAS_DMA
 	help
 	  Enable support for the Renesas SuperH DMA controllers.
 
@@ -52,3 +56,11 @@ config RCAR_AUDMAC_PP
 	depends on SH_DMAE_BASE
 	help
 	  Enable support for the Renesas R-Car Audio DMAC Peripheral Peripheral controllers.
+
+config RCAR_DMAC
+	tristate "Renesas R-Car Gen2 DMA Controller"
+	depends on ARCH_SHMOBILE || COMPILE_TEST
+	select RENESAS_DMA
+	help
+	  This driver supports the general purpose DMA controller found in the
+	  Renesas R-Car second generation SoCs.
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index 0a5cfdb76e45..2852f9db61a4 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -16,3 +16,4 @@ obj-$(CONFIG_SH_DMAE) += shdma.o
 obj-$(CONFIG_SUDMAC) += sudmac.o
 obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
 obj-$(CONFIG_RCAR_AUDMAC_PP) += rcar-audmapp.o
+obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
new file mode 100644
index 000000000000..89d40f9730ba
--- /dev/null
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -0,0 +1,1503 @@
+/*
+ * Renesas R-Car Gen2 DMA Controller Driver
+ *
+ * Copyright (C) 2014 Renesas Electronics Inc.
+ *
+ * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "../dmaengine.h"
+
+/*
+ * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer
+ * @node: entry in the parent's chunks list
+ * @src_addr: device source address
+ * @dst_addr: device destination address
+ * @size: transfer size in bytes
+ */
+struct rcar_dmac_xfer_chunk {
+	struct list_head node;
+
+	dma_addr_t src_addr;
+	dma_addr_t dst_addr;
+	u32 size;
+};
+
+/*
+ * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor
+ * @async_tx: base DMA asynchronous transaction descriptor
+ * @direction: direction of the DMA transfer
+ * @xfer_shift: log2 of the transfer size
+ * @chcr: value of the channel configuration register for this transfer
+ * @node: entry in the channel's descriptors lists
+ * @chunks: list of transfer chunks for this transfer
+ * @running: the transfer chunk being currently processed
+ * @size: transfer size in bytes
+ * @cyclic: when set indicates that the DMA transfer is cyclic
+ */
+struct rcar_dmac_desc {
+	struct dma_async_tx_descriptor async_tx;
+	enum dma_transfer_direction direction;
+	unsigned int xfer_shift;
+	u32 chcr;
+
+	struct list_head node;
+	struct list_head chunks;
+	struct rcar_dmac_xfer_chunk *running;
+
+	unsigned int size;
+	bool cyclic;
+};
+
+#define to_rcar_dmac_desc(d)	container_of(d, struct rcar_dmac_desc, async_tx)
+
+/*
+ * struct rcar_dmac_desc_page - One page worth of descriptors
+ * @node: entry in the channel's pages list
+ * @descs: array of DMA descriptors
+ * @chunks: array of transfer chunk descriptors
+ */
+struct rcar_dmac_desc_page {
+	struct list_head node;
+
+	union {
+		struct rcar_dmac_desc descs[0];
+		struct rcar_dmac_xfer_chunk chunks[0];
+	};
+};
+
+#define RCAR_DMAC_DESCS_PER_PAGE					\
+	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) /	\
+	sizeof(struct rcar_dmac_desc))
+#define RCAR_DMAC_XFER_CHUNKS_PER_PAGE					\
+	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) /	\
+	sizeof(struct rcar_dmac_xfer_chunk))
+
+/*
+ * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
+ * @chan: base DMA channel object
+ * @iomem: channel I/O memory base
+ * @index: index of this channel in the controller
+ * @src_xfer_size: size (in bytes) of hardware transfers on the source side
+ * @dst_xfer_size: size (in bytes) of hardware transfers on the destination side
+ * @src_slave_addr: slave source memory address
+ * @dst_slave_addr: slave destination memory address
+ * @mid_rid: hardware MID/RID for the DMA client using this channel
+ * @lock: protects the channel CHCR register and the desc members
+ * @desc.free: list of free descriptors
+ * @desc.pending: list of pending descriptors (submitted with tx_submit)
+ * @desc.active: list of active descriptors (activated with issue_pending)
+ * @desc.done: list of completed descriptors
+ * @desc.wait: list of descriptors waiting for an ack
+ * @desc.running: the descriptor being processed (a member of the active list)
+ * @desc.chunks_free: list of free transfer chunk descriptors
+ * @desc.pages: list of pages used by allocated descriptors
+ */
+struct rcar_dmac_chan {
+	struct dma_chan chan;
+	void __iomem *iomem;
+	unsigned int index;
+
+	unsigned int src_xfer_size;
+	unsigned int dst_xfer_size;
+	dma_addr_t src_slave_addr;
+	dma_addr_t dst_slave_addr;
+	int mid_rid;
+
+	spinlock_t lock;
+
+	struct {
+		struct list_head free;
+		struct list_head pending;
+		struct list_head active;
+		struct list_head done;
+		struct list_head wait;
+		struct rcar_dmac_desc *running;
+
+		struct list_head chunks_free;
+
+		struct list_head pages;
+	} desc;
+};
+
+#define to_rcar_dmac_chan(c)	container_of(c, struct rcar_dmac_chan, chan)
+
+/*
+ * struct rcar_dmac - R-Car Gen2 DMA Controller
+ * @engine: base DMA engine object
+ * @dev: the hardware device
+ * @iomem: remapped I/O memory base
+ * @n_channels: number of available channels
+ * @channels: array of DMAC channels
+ * @modules: bitmask of client modules in use
+ */
+struct rcar_dmac {
+	struct dma_device engine;
+	struct device *dev;
+	void __iomem *iomem;
+
+	unsigned int n_channels;
+	struct rcar_dmac_chan *channels;
+
+	unsigned long modules[256 / BITS_PER_LONG];
+};
+
+#define to_rcar_dmac(d)		container_of(d, struct rcar_dmac, engine)
+
+/* -----------------------------------------------------------------------------
+ * Registers
+ */
+
+#define RCAR_DMAC_CHAN_OFFSET(i)	(0x8000 + 0x80 * (i))
+
+#define RCAR_DMAISTA			0x0020
+#define RCAR_DMASEC			0x0030
+#define RCAR_DMAOR			0x0060
+#define RCAR_DMAOR_PRI_FIXED		(0 << 8)
+#define RCAR_DMAOR_PRI_ROUND_ROBIN	(3 << 8)
+#define RCAR_DMAOR_AE			(1 << 2)
+#define RCAR_DMAOR_DME			(1 << 0)
+#define RCAR_DMACHCLR			0x0080
+#define RCAR_DMADPSEC			0x00a0
+
+#define RCAR_DMASAR			0x0000
+#define RCAR_DMADAR			0x0004
+#define RCAR_DMATCR			0x0008
+#define RCAR_DMATCR_MASK		0x00ffffff
+#define RCAR_DMATSR			0x0028
+#define RCAR_DMACHCR			0x000c
+#define RCAR_DMACHCR_CAE		(1 << 31)
+#define RCAR_DMACHCR_CAIE		(1 << 30)
+#define RCAR_DMACHCR_DPM_DISABLED	(0 << 28)
+#define RCAR_DMACHCR_DPM_ENABLED	(1 << 28)
+#define RCAR_DMACHCR_DPM_REPEAT		(2 << 28)
+#define RCAR_DMACHCR_DPM_INFINITE	(3 << 28)
+#define RCAR_DMACHCR_RPT_SAR		(1 << 27)
+#define RCAR_DMACHCR_RPT_DAR		(1 << 26)
+#define RCAR_DMACHCR_RPT_TCR		(1 << 25)
+#define RCAR_DMACHCR_DPB		(1 << 22)
+#define RCAR_DMACHCR_DSE		(1 << 19)
+#define RCAR_DMACHCR_DSIE		(1 << 18)
+#define RCAR_DMACHCR_TS_1B		((0 << 20) | (0 << 3))
+#define RCAR_DMACHCR_TS_2B		((0 << 20) | (1 << 3))
+#define RCAR_DMACHCR_TS_4B		((0 << 20) | (2 << 3))
+#define RCAR_DMACHCR_TS_16B		((0 << 20) | (3 << 3))
+#define RCAR_DMACHCR_TS_32B		((1 << 20) | (0 << 3))
+#define RCAR_DMACHCR_TS_64B		((1 << 20) | (1 << 3))
+#define RCAR_DMACHCR_TS_8B		((1 << 20) | (3 << 3))
+#define RCAR_DMACHCR_DM_FIXED		(0 << 14)
+#define RCAR_DMACHCR_DM_INC		(1 << 14)
+#define RCAR_DMACHCR_DM_DEC		(2 << 14)
+#define RCAR_DMACHCR_SM_FIXED		(0 << 12)
+#define RCAR_DMACHCR_SM_INC		(1 << 12)
+#define RCAR_DMACHCR_SM_DEC		(2 << 12)
+#define RCAR_DMACHCR_RS_AUTO		(4 << 8)
+#define RCAR_DMACHCR_RS_DMARS		(8 << 8)
+#define RCAR_DMACHCR_IE			(1 << 2)
+#define RCAR_DMACHCR_TE			(1 << 1)
+#define RCAR_DMACHCR_DE			(1 << 0)
+#define RCAR_DMATCRB			0x0018
+#define RCAR_DMATSRB			0x0038
+#define RCAR_DMACHCRB			0x001c
+#define RCAR_DMACHCRB_DCNT(n)		((n) << 24)
+#define RCAR_DMACHCRB_DPTR(n)		((n) << 16)
+#define RCAR_DMACHCRB_DRST		(1 << 15)
+#define RCAR_DMACHCRB_DTS		(1 << 8)
+#define RCAR_DMACHCRB_SLM_NORMAL	(0 << 4)
+#define RCAR_DMACHCRB_SLM_CLK(n)	((8 | (n)) << 4)
+#define RCAR_DMACHCRB_PRI(n)		((n) << 0)
+#define RCAR_DMARS			0x0040
+#define RCAR_DMABUFCR			0x0048
+#define RCAR_DMABUFCR_MBU(n)		((n) << 16)
+#define RCAR_DMABUFCR_ULB(n)		((n) << 0)
+#define RCAR_DMADPBASE			0x0050
+#define RCAR_DMADPBASE_MASK		0xfffffff0
+#define RCAR_DMADPBASE_SEL		(1 << 0)
+#define RCAR_DMADPCR			0x0054
+#define RCAR_DMADPCR_DIPT(n)		((n) << 24)
+#define RCAR_DMAFIXSAR			0x0010
+#define RCAR_DMAFIXDAR			0x0014
+#define RCAR_DMAFIXDPBASE		0x0060
+
+/* Hardcode the MEMCPY transfer size to 4 bytes. */
+#define RCAR_DMAC_MEMCPY_XFER_SIZE	4
+
+/* -----------------------------------------------------------------------------
+ * Device access
+ */
+
+static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data)
+{
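+	/* DMAOR is accessed as a 16-bit register, everything else as 32-bit. */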
+	if (reg == RCAR_DMAOR)
+		writew(data, dmac->iomem + reg);
+	else
+		writel(data, dmac->iomem + reg);
+}
+
+static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg)
+{
+	if (reg == RCAR_DMAOR)
+		return readw(dmac->iomem + reg);
+	else
+		return readl(dmac->iomem + reg);
+}
+
+static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg)
+{
+	if (reg == RCAR_DMARS)
+		return readw(chan->iomem + reg);
+	else
+		return readl(chan->iomem + reg);
+}
+
+static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data)
+{
+	if (reg == RCAR_DMARS)
+		writew(data, chan->iomem + reg);
+	else
+		writel(data, chan->iomem + reg);
+}
+
+/* -----------------------------------------------------------------------------
+ * Initialization and configuration
+ */
+
+static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan)
+{
+	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
+
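+	/* The channel is busy while DE is set and TE hasn't been raised yet. */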
+	return (chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE)) == RCAR_DMACHCR_DE;
+}
+
+static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
+{
+	struct rcar_dmac_desc *desc = chan->desc.running;
+	struct rcar_dmac_xfer_chunk *chunk = desc->running;
+
+	dev_dbg(chan->chan.device->dev,
+		"chan%u: queue chunk %p: %u@%pad -> %pad\n",
+		chan->index, chunk, chunk->size, &chunk->src_addr,
+		&chunk->dst_addr);
+
+	WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan));
+
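+	/*
+	 * With a 64-bit dma_addr_t the upper 32 bits of the source and
+	 * destination addresses go to the fixed address registers.
+	 */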
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR, chunk->src_addr >> 32);
+	rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR, chunk->dst_addr >> 32);
+#endif
+	rcar_dmac_chan_write(chan, RCAR_DMASAR, chunk->src_addr & 0xffffffff);
+	rcar_dmac_chan_write(chan, RCAR_DMADAR, chunk->dst_addr & 0xffffffff);
+
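+	/* A non-negative MID/RID selects the peripheral request line in DMARS. */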
+	if (chan->mid_rid >= 0)
+		rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);
+
+	rcar_dmac_chan_write(chan, RCAR_DMATCR,
+			     chunk->size >> desc->xfer_shift);
+
+	rcar_dmac_chan_write(chan, RCAR_DMACHCR, desc->chcr | RCAR_DMACHCR_DE |
+			     RCAR_DMACHCR_IE);
+}
+
+static int rcar_dmac_init(struct rcar_dmac *dmac)
+{
+	u16 dmaor;
+
+	/* Clear all channels and enable the DMAC globally. */
+	rcar_dmac_write(dmac, RCAR_DMACHCLR, 0x7fff);
+	rcar_dmac_write(dmac, RCAR_DMAOR,
+			RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);
+
+	dmaor = rcar_dmac_read(dmac, RCAR_DMAOR);
+	if ((dmaor & (RCAR_DMAOR_AE | RCAR_DMAOR_DME)) != RCAR_DMAOR_DME) {
+		dev_warn(dmac->dev, "DMAOR initialization failed.\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Descriptors submission
+ */
+
+static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan);
+	struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx);
+	unsigned long flags;
+	dma_cookie_t cookie;
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	cookie = dma_cookie_assign(tx);
+
+	dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n",
+		chan->index, tx->cookie, desc);
+
+	list_add_tail(&desc->node, &chan->desc.pending);
+	desc->running = list_first_entry(&desc->chunks,
+					 struct rcar_dmac_xfer_chunk, node);
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	return cookie;
+}
+
+/* -----------------------------------------------------------------------------
+ * Descriptors allocation and free
+ */
+
+/*
+ * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors
+ * @chan: the DMA channel
+ * @gfp: allocation flags
+ */
+static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
+{
+	struct rcar_dmac_desc_page *page;
+	LIST_HEAD(list);
+	unsigned int i;
+
+	page = (void *)get_zeroed_page(gfp);
+	if (!page)
+		return -ENOMEM;
+
+	for (i = 0; i < RCAR_DMAC_DESCS_PER_PAGE; ++i) {
+		struct rcar_dmac_desc *desc = &page->descs[i];
+
+		dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
+		desc->async_tx.tx_submit = rcar_dmac_tx_submit;
+		INIT_LIST_HEAD(&desc->chunks);
+
+		list_add_tail(&desc->node, &list);
+	}
+
+	spin_lock_irq(&chan->lock);
+	list_splice_tail(&list, &chan->desc.free);
+	list_add_tail(&page->node, &chan->desc.pages);
+	spin_unlock_irq(&chan->lock);
+
+	return 0;
+}
+
+/*
+ * rcar_dmac_desc_put - Release a DMA transfer descriptor
+ * @chan: the DMA channel
+ * @desc: the descriptor
+ *
+ * Put the descriptor and its transfer chunk descriptors back in the channel's
+ * free descriptors lists. The descriptor's chunks list will be reinitialized
+ * to an empty list as a result.
+ *
+ * The descriptor must have been removed from the channel's done list before
+ * calling this function.
+ *
+ * Locking: Must be called with the channel lock held.
+ */
+static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
+			       struct rcar_dmac_desc *desc)
+{
+	list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);
+	list_add_tail(&desc->node, &chan->desc.free);
+}
+
+static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
+{
+	struct rcar_dmac_desc *desc, *_desc;
+
+	list_for_each_entry_safe(desc, _desc, &chan->desc.wait, node) {
+		if (async_tx_test_ack(&desc->async_tx)) {
+			list_del(&desc->node);
+			rcar_dmac_desc_put(chan, desc);
+		}
+	}
+}
+
+/*
+ * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer
+ * @chan: the DMA channel
+ *
+ * Locking: This function must be called in a non-atomic context.
+ *
+ * Return: A pointer to the allocated descriptor or NULL if no descriptor can
+ * be allocated.
+ */
+static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
+{
+	struct rcar_dmac_desc *desc;
+	int ret;
+
+	spin_lock_irq(&chan->lock);
+
+	/* Recycle acked descriptors before attempting allocation. */
+	rcar_dmac_desc_recycle_acked(chan);
+
+	do {
+		if (list_empty(&chan->desc.free)) {
+			/*
+			 * No free descriptors, allocate a page worth of them
+			 * and try again, as someone else could race us to get
+			 * the newly allocated descriptors. If the allocation
+			 * fails return an error.
+			 */
+			spin_unlock_irq(&chan->lock);
+			ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT);
+			if (ret < 0)
+				return NULL;
+			spin_lock_irq(&chan->lock);
+			continue;
+		}
+
+		desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc,
+					node);
+		list_del(&desc->node);
+	} while (!desc);
+
+	spin_unlock_irq(&chan->lock);
+
+	return desc;
+}
+
+/*
+ * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks
+ * @chan: the DMA channel
+ * @gfp: allocation flags
+ */
+static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
+{
+	struct rcar_dmac_desc_page *page;
+	LIST_HEAD(list);
+	unsigned int i;
+
+	page = (void *)get_zeroed_page(gfp);
+	if (!page)
+		return -ENOMEM;
+
+	for (i = 0; i < RCAR_DMAC_XFER_CHUNKS_PER_PAGE; ++i) {
+		struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i];
+
+		list_add_tail(&chunk->node, &list);
+	}
+
+	spin_lock_irq(&chan->lock);
+	list_splice_tail(&list, &chan->desc.chunks_free);
+	list_add_tail(&page->node, &chan->desc.pages);
+	spin_unlock_irq(&chan->lock);
+
+	return 0;
+}
+
+/*
+ * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer
+ * @chan: the DMA channel
+ *
+ * Locking: This function must be called in a non-atomic context.
+ *
+ * Return: A pointer to the allocated transfer chunk descriptor or NULL if no
+ * descriptor can be allocated.
+ */
+static struct rcar_dmac_xfer_chunk *
+rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
+{
+	struct rcar_dmac_xfer_chunk *chunk;
+	int ret;
+
+	spin_lock_irq(&chan->lock);
+
+	do {
+		if (list_empty(&chan->desc.chunks_free)) {
+			/*
+			 * No free descriptors, allocate a page worth of them
+			 * and try again, as someone else could race us to get
+			 * the newly allocated descriptors. If the allocation
+			 * fails return an error.
+			 */
+			spin_unlock_irq(&chan->lock);
+			ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT);
+			if (ret < 0)
+				return NULL;
+			spin_lock_irq(&chan->lock);
+			continue;
+		}
+
+		chunk = list_first_entry(&chan->desc.chunks_free,
+					 struct rcar_dmac_xfer_chunk, node);
+		list_del(&chunk->node);
+	} while (!chunk);
+
+	spin_unlock_irq(&chan->lock);
+
+	return chunk;
+}
+
+/* -----------------------------------------------------------------------------
+ * Stop and reset
+ */
+
+static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
+{
+	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
+
+	chcr &= ~(RCAR_DMACHCR_IE | RCAR_DMACHCR_TE | RCAR_DMACHCR_DE);
+	rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
+}
+
+static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
+{
+	struct rcar_dmac_desc *desc, *_desc;
+	unsigned long flags;
+	LIST_HEAD(descs);
+
+	spin_lock_irqsave(&chan->lock, flags);
+
+	/* Move all non-free descriptors to the local lists. */
+	list_splice_init(&chan->desc.pending, &descs);
+	list_splice_init(&chan->desc.active, &descs);
+	list_splice_init(&chan->desc.done, &descs);
+	list_splice_init(&chan->desc.wait, &descs);
+
+	chan->desc.running = NULL;
+
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	list_for_each_entry_safe(desc, _desc, &descs, node) {
+		list_del(&desc->node);
+		rcar_dmac_desc_put(chan, desc);
+	}
+}
+
+static void rcar_dmac_stop(struct rcar_dmac *dmac)
+{
+	rcar_dmac_write(dmac, RCAR_DMAOR, 0);
+}
+
+static void rcar_dmac_abort(struct rcar_dmac *dmac)
+{
+	unsigned int i;
+
+	/* Stop all channels. */
+	for (i = 0; i < dmac->n_channels; ++i) {
+		struct rcar_dmac_chan *chan = &dmac->channels[i];
+
+		/* Stop and reinitialize the channel. */
+		spin_lock(&chan->lock);
+		rcar_dmac_chan_halt(chan);
+		spin_unlock(&chan->lock);
+
+		rcar_dmac_chan_reinit(chan);
+	}
+}
+
+/* -----------------------------------------------------------------------------
+ * Descriptors preparation
+ */
+
+static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan,
+					  struct rcar_dmac_desc *desc)
+{
+	static const u32 chcr_ts[] = {
+		RCAR_DMACHCR_TS_1B, RCAR_DMACHCR_TS_2B,
+		RCAR_DMACHCR_TS_4B, RCAR_DMACHCR_TS_8B,
+		RCAR_DMACHCR_TS_16B, RCAR_DMACHCR_TS_32B,
+		RCAR_DMACHCR_TS_64B,
+	};
+
+	unsigned int xfer_size;
+	u32 chcr;
+
+	switch (desc->direction) {
+	case DMA_DEV_TO_MEM:
+		chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED
+		     | RCAR_DMACHCR_RS_DMARS;
+		xfer_size = chan->src_xfer_size;
+		break;
+
+	case DMA_MEM_TO_DEV:
+		chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC
+		     | RCAR_DMACHCR_RS_DMARS;
+		xfer_size = chan->dst_xfer_size;
+		break;
+
+	case DMA_MEM_TO_MEM:
+	default:
+		chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_INC
+		     | RCAR_DMACHCR_RS_AUTO;
+		xfer_size = RCAR_DMAC_MEMCPY_XFER_SIZE;
+		break;
+	}
+
+	desc->xfer_shift = ilog2(xfer_size);
+	desc->chcr = chcr | chcr_ts[desc->xfer_shift];
+}
+
+/*
+ * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list
+ *
+ * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
+ * converted to scatter-gather to guarantee consistent locking and correct
+ * list manipulation. For slave DMA, direction carries the usual meaning, and,
+ * logically, the SG list is RAM and the addr variable contains the slave
+ * address, e.g. the FIFO I/O register. For MEMCPY, direction equals
+ * DMA_MEM_TO_MEM and the SG list contains only one element pointing at the
+ * source buffer.
+ */
+static struct dma_async_tx_descriptor *
+rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
+		       unsigned int sg_len, dma_addr_t dev_addr,
+		       enum dma_transfer_direction dir, unsigned long dma_flags,
+		       bool cyclic)
+{
+	struct rcar_dmac_xfer_chunk *chunk;
+	struct rcar_dmac_desc *desc;
+	struct scatterlist *sg;
+	unsigned int max_chunk_size;
+	unsigned int full_size = 0;
+	unsigned int i;
+
+	desc = rcar_dmac_desc_get(chan);
+	if (!desc)
+		return NULL;
+
+	desc->async_tx.flags = dma_flags;
+	desc->async_tx.cookie = -EBUSY;
+
+	desc->cyclic = cyclic;
+	desc->direction = dir;
+
+	rcar_dmac_chan_configure_desc(chan, desc);
+
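+	/* The 24-bit TCR counts transfer units of (1 << xfer_shift) bytes. */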
+	max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift;
+
+	/*
+	 * Allocate and fill the transfer chunk descriptors. We own the only
+	 * reference to the DMA descriptor, there's no need for locking.
+	 */
+	for_each_sg(sgl, sg, sg_len, i) {
+		dma_addr_t mem_addr = sg_dma_address(sg);
+		unsigned int len = sg_dma_len(sg);
+
+		full_size += len;
+
+		while (len) {
+			unsigned int size = min(len, max_chunk_size);
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+			/*
+			 * Prevent individual transfers from crossing 4GB
+			 * boundaries.
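+			 * For example, a 64 KiB chunk starting at 0xffffff00
+			 * is clipped to 256 bytes so that it ends exactly at
+			 * the boundary.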
+			 */
+			if (dev_addr >> 32 != (dev_addr + size - 1) >> 32)
+				size = ALIGN(dev_addr, 1ULL << 32) - dev_addr;
+			if (mem_addr >> 32 != (mem_addr + size - 1) >> 32)
+				size = ALIGN(mem_addr, 1ULL << 32) - mem_addr;
+#endif
+
+			chunk = rcar_dmac_xfer_chunk_get(chan);
+			if (!chunk) {
+				rcar_dmac_desc_put(chan, desc);
+				return NULL;
+			}
+
+			if (dir == DMA_DEV_TO_MEM) {
+				chunk->src_addr = dev_addr;
+				chunk->dst_addr = mem_addr;
+			} else {
+				chunk->src_addr = mem_addr;
+				chunk->dst_addr = dev_addr;
+			}
+
+			chunk->size = size;
+
+			dev_dbg(chan->chan.device->dev,
+				"chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n",
+				chan->index, chunk, desc, i, sg, size, len,
+				&chunk->src_addr, &chunk->dst_addr);
+
+			mem_addr += size;
+			if (dir == DMA_MEM_TO_MEM)
+				dev_addr += size;
+
+			len -= size;
+
+			list_add_tail(&chunk->node, &desc->chunks);
+		}
+	}
+
+	desc->size = full_size;
+
+	return &desc->async_tx;
+}
+
+/* -----------------------------------------------------------------------------
+ * DMA engine operations
+ */
+
+static int rcar_dmac_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+	int ret;
+
+	INIT_LIST_HEAD(&rchan->desc.free);
+	INIT_LIST_HEAD(&rchan->desc.pending);
+	INIT_LIST_HEAD(&rchan->desc.active);
+	INIT_LIST_HEAD(&rchan->desc.done);
+	INIT_LIST_HEAD(&rchan->desc.wait);
+	INIT_LIST_HEAD(&rchan->desc.chunks_free);
+	INIT_LIST_HEAD(&rchan->desc.pages);
+
+	/* Preallocate descriptors. */
+	ret = rcar_dmac_xfer_chunk_alloc(rchan, GFP_KERNEL);
+	if (ret < 0)
+		return -ENOMEM;
+
+	ret = rcar_dmac_desc_alloc(rchan, GFP_KERNEL);
+	if (ret < 0)
+		return -ENOMEM;
+
+	return pm_runtime_get_sync(chan->device->dev);
+}
+
+static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
+{
+	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+	struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
+	struct rcar_dmac_desc_page *page, *_page;
+
+	/* Protect against ISR */
+	spin_lock_irq(&rchan->lock);
+	rcar_dmac_chan_halt(rchan);
+	spin_unlock_irq(&rchan->lock);
+
+	/* Now no new interrupts will occur */
+
+	if (rchan->mid_rid >= 0) {
+		/* The caller is holding dma_list_mutex */
+		clear_bit(rchan->mid_rid, dmac->modules);
+		rchan->mid_rid = -EINVAL;
+	}
+
+	list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) {
+		list_del(&page->node);
+		free_page((unsigned long)page);
+	}
+
+	pm_runtime_put(chan->device->dev);
+}
+
+static struct dma_async_tx_descriptor *
+rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
+			  dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+	struct scatterlist sgl;
+
+	if (!len)
+		return NULL;
+
+	sg_init_table(&sgl, 1);
+	sg_set_page(&sgl, pfn_to_page(PFN_DOWN(dma_src)), len,
+		    offset_in_page(dma_src));
+	sg_dma_address(&sgl) = dma_src;
+	sg_dma_len(&sgl) = len;
+
+	return rcar_dmac_chan_prep_sg(rchan, &sgl, 1, dma_dest,
+				      DMA_MEM_TO_MEM, flags, false);
+}
+
+static struct dma_async_tx_descriptor *
+rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+			unsigned int sg_len, enum dma_transfer_direction dir,
+			unsigned long flags, void *context)
+{
+	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+	dma_addr_t dev_addr;
+
+	/* Someone calling slave DMA on a generic channel? */
+	if (rchan->mid_rid < 0 || !sg_len) {
+		dev_warn(chan->device->dev,
+			 "%s: bad parameter: len=%d, id=%d\n",
+			 __func__, sg_len, rchan->mid_rid);
+		return NULL;
+	}
+
+	dev_addr = dir == DMA_DEV_TO_MEM
+		 ? rchan->src_slave_addr : rchan->dst_slave_addr;
+	return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr,
+				      dir, flags, false);
+}
+
+#define RCAR_DMAC_MAX_SG_LEN	32
+
+static struct dma_async_tx_descriptor *
+rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
+			  size_t buf_len, size_t period_len,
+			  enum dma_transfer_direction dir, unsigned long flags)
+{
+	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+	struct dma_async_tx_descriptor *desc;
+	struct scatterlist *sgl;
+	dma_addr_t dev_addr;
+	unsigned int sg_len;
+	unsigned int i;
+
+	/* Someone calling slave DMA on a generic channel? */
+	if (rchan->mid_rid < 0 || buf_len < period_len) {
+		dev_warn(chan->device->dev,
+			 "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
+			 __func__, buf_len, period_len, rchan->mid_rid);
+		return NULL;
+	}
+
+	sg_len = buf_len / period_len;
+	if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
+		dev_err(chan->device->dev,
+			"chan%u: sg length %d exceeds limit %d",
+			rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN);
+		return NULL;
+	}
+
+	/*
+	 * Allocate the sg list dynamically as it would consume too much stack
+	 * space.
+	 */
+	sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT);
+	if (!sgl)
+		return NULL;
+
+	sg_init_table(sgl, sg_len);
+
+	for (i = 0; i < sg_len; ++i) {
+		dma_addr_t src = buf_addr + (period_len * i);
+
+		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
+			    offset_in_page(src));
+		sg_dma_address(&sgl[i]) = src;
+		sg_dma_len(&sgl[i]) = period_len;
+	}
+
+	dev_addr = dir == DMA_DEV_TO_MEM
+		 ? rchan->src_slave_addr : rchan->dst_slave_addr;
+	desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr,
+				      dir, flags, true);
+
+	kfree(sgl);
+	return desc;
+}
+
+static int rcar_dmac_device_config(struct dma_chan *chan,
+				   struct dma_slave_config *cfg)
+{
+	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+
+	/*
+	 * We could lock this, but you shouldn't be configuring the
+	 * channel while using it...
+	 */
+	rchan->src_slave_addr = cfg->src_addr;
+	rchan->dst_slave_addr = cfg->dst_addr;
+	rchan->src_xfer_size = cfg->src_addr_width;
+	rchan->dst_xfer_size = cfg->dst_addr_width;
+
+	return 0;
+}
+
+static int rcar_dmac_chan_terminate_all(struct dma_chan *chan)
+{
+	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&rchan->lock, flags);
+	rcar_dmac_chan_halt(rchan);
+	spin_unlock_irqrestore(&rchan->lock, flags);
+
+	/*
+	 * FIXME: No new interrupt can occur now, but the IRQ thread might still
+	 * be running.
+	 */
+
+	rcar_dmac_chan_reinit(rchan);
+
+	return 0;
+}
+
+static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
+					       dma_cookie_t cookie)
+{
+	struct rcar_dmac_desc *desc = chan->desc.running;
+	struct rcar_dmac_xfer_chunk *chunk;
+	unsigned int residue = 0;
+
+	if (!desc)
+		return 0;
+
+	/*
+	 * If the cookie doesn't correspond to the currently running transfer
+	 * then the descriptor hasn't been processed yet, and the residue is
+	 * equal to the full descriptor size.
+	 */
+	if (cookie != desc->async_tx.cookie)
+		return desc->size;
+
+	/* Compute the size of all chunks still to be transferred. */
+	list_for_each_entry_reverse(chunk, &desc->chunks, node) {
+		if (chunk == desc->running)
+			break;
+
+		residue += chunk->size;
+	}
+
+	/* Add the residue for the current chunk. */
+	residue += rcar_dmac_chan_read(chan, RCAR_DMATCR) << desc->xfer_shift;
+
+	return residue;
+}
+
+static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
+					   dma_cookie_t cookie,
+					   struct dma_tx_state *txstate)
+{
+	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+	enum dma_status status;
+	unsigned long flags;
+	unsigned int residue;
+
+	status = dma_cookie_status(chan, cookie, txstate);
+	if (status == DMA_COMPLETE || !txstate)
+		return status;
+
+	spin_lock_irqsave(&rchan->lock, flags);
+	residue = rcar_dmac_chan_get_residue(rchan, cookie);
+	spin_unlock_irqrestore(&rchan->lock, flags);
+
+	dma_set_residue(txstate, residue);
+
+	return status;
+}
+
+static void rcar_dmac_issue_pending(struct dma_chan *chan)
+{
+	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&rchan->lock, flags);
+
+	if (list_empty(&rchan->desc.pending))
+		goto done;
+
+	/* Append the pending list to the active list. */
+	list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active);
+
+	/*
+	 * If no transfer is running pick the first descriptor from the active
+	 * list and start the transfer.
+	 */
+	if (!rchan->desc.running) {
+		struct rcar_dmac_desc *desc;
+
+		desc = list_first_entry(&rchan->desc.active,
+					struct rcar_dmac_desc, node);
+		rchan->desc.running = desc;
+
+		rcar_dmac_chan_start_xfer(rchan);
+	}
+
+done:
+	spin_unlock_irqrestore(&rchan->lock, flags);
+}
+
+/* -----------------------------------------------------------------------------
+ * IRQ handling
+ */
+
+static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan)
+{
+	struct rcar_dmac_desc *desc = chan->desc.running;
+	struct rcar_dmac_xfer_chunk *chunk;
+	irqreturn_t ret = IRQ_WAKE_THREAD;
+
+	if (WARN_ON_ONCE(!desc)) {
+		/*
+		 * This should never happen, there should always be
+		 * a running descriptor when a transfer ends. Warn and
+		 * return.
+		 */
+		return IRQ_NONE;
+	}
+
+	/*
+	 * If we haven't completed the last transfer chunk simply move to the
+	 * next one. Only wake the IRQ thread if the transfer is cyclic.
+	 */
+	chunk = desc->running;
+	if (!list_is_last(&chunk->node, &desc->chunks)) {
+		desc->running = list_next_entry(chunk, node);
+		if (!desc->cyclic)
+			ret = IRQ_HANDLED;
+		goto done;
+	}
+
+	/*
+	 * We've completed the last transfer chunk. If the transfer is cyclic,
+	 * move back to the first one.
+	 */
+	if (desc->cyclic) {
+		desc->running = list_first_entry(&desc->chunks,
+						 struct rcar_dmac_xfer_chunk,
+						 node);
+		goto done;
+	}
+
+	/* The descriptor is complete, move it to the done list. */
+	list_move_tail(&desc->node, &chan->desc.done);
+
+	/* Queue the next descriptor, if any. */
+	if (!list_empty(&chan->desc.active))
+		chan->desc.running = list_first_entry(&chan->desc.active,
+						      struct rcar_dmac_desc,
+						      node);
+	else
+		chan->desc.running = NULL;
+
+done:
+	if (chan->desc.running)
+		rcar_dmac_chan_start_xfer(chan);
+
+	return ret;
+}
+
+static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
+{
+	struct rcar_dmac_chan *chan = dev;
+	irqreturn_t ret = IRQ_NONE;
+	u32 chcr;
+
+	spin_lock(&chan->lock);
+
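+	/* Clear TE to acknowledge the interrupt and DE to allow reprogramming. */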
+	chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
+	rcar_dmac_chan_write(chan, RCAR_DMACHCR,
+			     chcr & ~(RCAR_DMACHCR_TE | RCAR_DMACHCR_DE));
+
+	if (chcr & RCAR_DMACHCR_TE)
+		ret |= rcar_dmac_isr_transfer_end(chan);
+
+	spin_unlock(&chan->lock);
+
+	return ret;
+}
+
+static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
+{
+	struct rcar_dmac_chan *chan = dev;
+	struct rcar_dmac_desc *desc;
+
+	spin_lock_irq(&chan->lock);
+
+	/* For cyclic transfers notify the user after every chunk. */
+	if (chan->desc.running && chan->desc.running->cyclic) {
+		dma_async_tx_callback callback;
+		void *callback_param;
+
+		desc = chan->desc.running;
+		callback = desc->async_tx.callback;
+		callback_param = desc->async_tx.callback_param;
+
+		if (callback) {
+			spin_unlock_irq(&chan->lock);
+			callback(callback_param);
+			spin_lock_irq(&chan->lock);
+		}
+	}
+
+	/*
+	 * Call the callback function for all descriptors on the done list and
+	 * move them to the ack wait list.
+	 */
+	while (!list_empty(&chan->desc.done)) {
+		desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc,
+					node);
+		dma_cookie_complete(&desc->async_tx);
+		list_del(&desc->node);
+
+		if (desc->async_tx.callback) {
+			spin_unlock_irq(&chan->lock);
+			/*
+			 * We own the only reference to this descriptor, we can
+			 * safely dereference it without holding the channel
+			 * lock.
+			 */
+			desc->async_tx.callback(desc->async_tx.callback_param);
+			spin_lock_irq(&chan->lock);
+		}
+
+		list_add_tail(&desc->node, &chan->desc.wait);
+	}
+
+	/* Recycle all acked descriptors. */
+	rcar_dmac_desc_recycle_acked(chan);
+
+	spin_unlock_irq(&chan->lock);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t rcar_dmac_isr_error(int irq, void *data)
+{
+	struct rcar_dmac *dmac = data;
+
+	if (!(rcar_dmac_read(dmac, RCAR_DMAOR) & RCAR_DMAOR_AE))
+		return IRQ_NONE;
+
+	/*
+	 * An unrecoverable error occurred on an unknown channel. Halt the DMAC,
+	 * abort transfers on all channels, and reinitialize the DMAC.
+	 */
+	rcar_dmac_stop(dmac);
+	rcar_dmac_abort(dmac);
+	rcar_dmac_init(dmac);
+
+	return IRQ_HANDLED;
+}
+
+/* -----------------------------------------------------------------------------
+ * OF xlate and channel filter
+ */
+
+static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg)
+{
+	struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
+	struct of_phandle_args *dma_spec = arg;
+
+	/*
+	 * FIXME: Using a filter on OF platforms is nonsense. The OF xlate
+	 * function knows which device it wants to allocate a channel from,
+	 * and would be perfectly capable of selecting the channel it wants.
+	 * Forcing it to call dma_request_channel() and iterate through all
+	 * channels from all controllers is just pointless.
+	 */
+	if (chan->device->device_config != rcar_dmac_device_config ||
+	    dma_spec->np != chan->device->dev->of_node)
+		return false;
+
+	return !test_and_set_bit(dma_spec->args[0], dmac->modules);
+}
+
+static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
+					   struct of_dma *ofdma)
+{
+	struct rcar_dmac_chan *rchan;
+	struct dma_chan *chan;
+	dma_cap_mask_t mask;
+
+	if (dma_spec->args_count != 1)
+		return NULL;
+
+	/* Only slave DMA channels can be allocated via DT */
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	chan = dma_request_channel(mask, rcar_dmac_chan_filter, dma_spec);
+	if (!chan)
+		return NULL;
+
+	rchan = to_rcar_dmac_chan(chan);
+	rchan->mid_rid = dma_spec->args[0];
+
+	return chan;
+}
+
+/* -----------------------------------------------------------------------------
+ * Power management
+ */
+
+#ifdef CONFIG_PM_SLEEP
+static int rcar_dmac_sleep_suspend(struct device *dev)
+{
+	/*
+	 * TODO: Wait for the current transfer to complete and stop the device.
+	 */
+	return 0;
+}
+
+static int rcar_dmac_sleep_resume(struct device *dev)
+{
+	/* TODO: Resume transfers, if any. */
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int rcar_dmac_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int rcar_dmac_runtime_resume(struct device *dev)
+{
+	struct rcar_dmac *dmac = dev_get_drvdata(dev);
+
+	return rcar_dmac_init(dmac);
+}
+#endif
+
+static const struct dev_pm_ops rcar_dmac_pm = {
+	SET_SYSTEM_SLEEP_PM_OPS(rcar_dmac_sleep_suspend, rcar_dmac_sleep_resume)
+	SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
+			   NULL)
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe and remove
+ */
+
+static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
+				struct rcar_dmac_chan *rchan,
+				unsigned int index)
+{
+	struct platform_device *pdev = to_platform_device(dmac->dev);
+	struct dma_chan *chan = &rchan->chan;
+	char pdev_irqname[5];
+	char *irqname;
+	int irq;
+	int ret;
+
+	rchan->index = index;
+	rchan->iomem = dmac->iomem + RCAR_DMAC_CHAN_OFFSET(index);
+	rchan->mid_rid = -EINVAL;
+
+	spin_lock_init(&rchan->lock);
+
+	/* Request the channel interrupt. */
+	sprintf(pdev_irqname, "ch%u", index);
+	irq = platform_get_irq_byname(pdev, pdev_irqname);
+	if (irq < 0) {
+		dev_err(dmac->dev, "no IRQ specified for channel %u\n", index);
+		return -ENODEV;
+	}
+
+	irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
+				 dev_name(dmac->dev), index);
+	if (!irqname)
+		return -ENOMEM;
+
+	ret = devm_request_threaded_irq(dmac->dev, irq, rcar_dmac_isr_channel,
+					rcar_dmac_isr_channel_thread, 0,
+					irqname, rchan);
+	if (ret) {
+		dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", irq, ret);
+		return ret;
+	}
+
+	/*
+	 * Initialize the DMA engine channel and add it to the DMA engine
+	 * channels list.
+	 */
+	chan->device = &dmac->engine;
+	dma_cookie_init(chan);
+
+	list_add_tail(&chan->device_node, &dmac->engine.channels);
+
+	return 0;
+}
+
+static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
+{
+	struct device_node *np = dev->of_node;
+	int ret;
+
+	ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
+	if (ret < 0) {
+		dev_err(dev, "unable to read dma-channels property\n");
+		return ret;
+	}
+
+	if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
+		dev_err(dev, "invalid number of channels %u\n",
+			dmac->n_channels);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int rcar_dmac_probe(struct platform_device *pdev)
+{
+	const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
+		DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
+		DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
+		DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
+	struct dma_device *engine;
+	struct rcar_dmac *dmac;
+	struct resource *mem;
+	unsigned int i;
+	char *irqname;
+	int irq;
+	int ret;
+
+	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
+	if (!dmac)
+		return -ENOMEM;
+
+	dmac->dev = &pdev->dev;
+	platform_set_drvdata(pdev, dmac);
+
+	ret = rcar_dmac_parse_of(&pdev->dev, dmac);
+	if (ret < 0)
+		return ret;
+
+	dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
+				      sizeof(*dmac->channels), GFP_KERNEL);
+	if (!dmac->channels)
+		return -ENOMEM;
+
+	/* Request resources. */
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(dmac->iomem))
+		return PTR_ERR(dmac->iomem);
+
+	irq = platform_get_irq_byname(pdev, "error");
+	if (irq < 0) {
+		dev_err(&pdev->dev, "no error IRQ specified\n");
+		return -ENODEV;
+	}
+
+	irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:error",
+				 dev_name(dmac->dev));
+	if (!irqname)
+		return -ENOMEM;
+
+	ret = devm_request_irq(&pdev->dev, irq, rcar_dmac_isr_error, 0,
+			       irqname, dmac);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
+			irq, ret);
+		return ret;
+	}
+
+	/* Enable runtime PM and initialize the device. */
+	pm_runtime_enable(&pdev->dev);
+	ret = pm_runtime_get_sync(&pdev->dev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
+		return ret;
+	}
+
+	ret = rcar_dmac_init(dmac);
+	pm_runtime_put(&pdev->dev);
+
+	if (ret) {
+		dev_err(&pdev->dev, "failed to reset device\n");
+		goto error;
+	}
+
+	/* Initialize the channels. */
+	INIT_LIST_HEAD(&dmac->engine.channels);
+
+	for (i = 0; i < dmac->n_channels; ++i) {
+		ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], i);
+		if (ret < 0)
+			goto error;
+	}
+
+	/* Register the DMAC as a DMA provider for DT. */
+	ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
+					 NULL);
+	if (ret < 0)
+		goto error;
+
+	/*
+	 * Register the DMA engine device.
+	 *
+	 * Default transfer size of 32 bytes requires 32-byte alignment.
+	 */
+	engine = &dmac->engine;
+	dma_cap_set(DMA_MEMCPY, engine->cap_mask);
+	dma_cap_set(DMA_SLAVE, engine->cap_mask);
+
+	engine->dev = &pdev->dev;
+	engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE);
+
+	engine->src_addr_widths = widths;
+	engine->dst_addr_widths = widths;
+	engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+	engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+	engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources;
+	engine->device_free_chan_resources = rcar_dmac_free_chan_resources;
+	engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy;
+	engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg;
+	engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic;
+	engine->device_config = rcar_dmac_device_config;
+	engine->device_terminate_all = rcar_dmac_chan_terminate_all;
+	engine->device_tx_status = rcar_dmac_tx_status;
+	engine->device_issue_pending = rcar_dmac_issue_pending;
+
+	ret = dma_async_device_register(engine);
+	if (ret < 0)
+		goto error;
+
+	return 0;
+
+error:
+	of_dma_controller_free(pdev->dev.of_node);
+	pm_runtime_disable(&pdev->dev);
+	return ret;
+}
+
+static int rcar_dmac_remove(struct platform_device *pdev)
+{
+	struct rcar_dmac *dmac = platform_get_drvdata(pdev);
+
+	of_dma_controller_free(pdev->dev.of_node);
+	dma_async_device_unregister(&dmac->engine);
+
+	pm_runtime_disable(&pdev->dev);
+
+	return 0;
+}
+
+static void rcar_dmac_shutdown(struct platform_device *pdev)
+{
+	struct rcar_dmac *dmac = platform_get_drvdata(pdev);
+
+	rcar_dmac_stop(dmac);
+}
+
+static const struct of_device_id rcar_dmac_of_ids[] = {
+	{ .compatible = "renesas,rcar-dmac", },
+	{ /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);
+
+static struct platform_driver rcar_dmac_driver = {
+	.driver		= {
+		.pm	= &rcar_dmac_pm,
+		.name	= "rcar-dmac",
+		.of_match_table = rcar_dmac_of_ids,
+	},
+	.probe		= rcar_dmac_probe,
+	.remove		= rcar_dmac_remove,
+	.shutdown	= rcar_dmac_shutdown,
+};
+
+module_platform_driver(rcar_dmac_driver);
+
+MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver");
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_LICENSE("GPL v2");
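
For illustration only, not part of the patch: a minimal sketch of how a slave driver could use one of these channels through the standard dmaengine client API. The device, the "tx" channel name and the FIFO address are hypothetical; the dmaengine_* calls are the generic client entry points that land in the rcar_dmac_* operations registered above (dmaengine_submit() ends up in rcar_dmac_tx_submit(), dma_async_issue_pending() in rcar_dmac_issue_pending()).

#include <linux/dmaengine.h>

static void xmit_done(void *param)
{
	/* Runs from the channel's threaded interrupt handler on completion. */
}

static int xmit_buffer(struct device *dev, dma_addr_t buf, size_t len)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= 0xe6e60034,	/* hypothetical FIFO address */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;

	/* "tx" is a hypothetical dma-names entry in the consumer's DT node. */
	chan = dma_request_slave_channel(dev, "tx");
	if (!chan)
		return -ENODEV;

	dmaengine_slave_config(chan, &cfg);

	tx = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	tx->callback = xmit_done;
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	return 0;
}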