author		Andrew Bresticker <abrestic@chromium.org>	2014-12-11 17:59:17 -0500
committer	Vinod Koul <vinod.koul@intel.com>		2015-02-04 21:13:32 -0500
commit		5689ba7fd9f1118bc6b9e4020c116e0cfebc4654
tree		2b4e17da67a9b608b4ff8cbbecf31abd639cc436
parent		91d457dd50ea2ea35fe5b6e069169491ad45bffb
dmaengine: Add driver for IMG MDC
Add support for the IMG Multi-threaded DMA Controller (MDC) found on
certain IMG SoCs. Currently this driver supports the variant present
on the MIPS-based Pistachio SoC.
Signed-off-by: Andrew Bresticker <abrestic@chromium.org>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r--	drivers/dma/Kconfig		9
-rw-r--r--	drivers/dma/Makefile		1
-rw-r--r--	drivers/dma/img-mdc-dma.c	1011
3 files changed, 1021 insertions, 0 deletions
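For context on how these channels are consumed: peripheral drivers use the standard dmaengine slave API, and the controller hands out channels through a three-cell DT specifier (peripheral number, channel mask, thread) parsed by mdc_of_xlate() in the new file below. The following sketch is illustrative only, not part of this patch; the "tx" channel name, device pointer and FIFO address are placeholders. It shows the config/prep/submit sequence that reaches mdc_slave_config(), mdc_prep_slave_sg() and mdc_issue_pending().

/* Illustrative dmaengine client sketch -- not part of this patch. */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static int example_start_tx(struct device *dev, dma_addr_t buf, size_t len,
			    dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.dst_addr = fifo_addr,		/* peripheral FIFO (placeholder) */
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 16,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, "tx");	/* "tx" is a placeholder name */
	if (!chan)
		return -ENODEV;

	dmaengine_slave_config(chan, &cfg);		/* ends up in mdc_slave_config() */
	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	dmaengine_submit(desc);			/* queue on the virtual channel */
	dma_async_issue_pending(chan);		/* kicks mdc_issue_pending() */
	return 0;
}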
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index f2b2c4e87aef..8990d483988b 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -416,6 +416,15 @@ config NBPFAXI_DMA
 	help
 	  Support for "Type-AXI" NBPF DMA IPs from Renesas
 
+config IMG_MDC_DMA
+	tristate "IMG MDC support"
+	depends on MIPS || COMPILE_TEST
+	depends on MFD_SYSCON
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the IMG multi-threaded DMA controller (MDC).
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index b290e6a611d0..f915f61ec574 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -50,3 +50,4 @@ obj-y += xilinx/
 obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
 obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
 obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
+obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
new file mode 100644
index 000000000000..ed045a9ad634
--- /dev/null
+++ b/drivers/dma/img-mdc-dma.c
@@ -0,0 +1,1011 @@
/*
 * IMG Multi-threaded DMA Controller (MDC)
 *
 * Copyright (C) 2009,2012,2013 Imagination Technologies Ltd.
 * Copyright (C) 2014 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define MDC_MAX_DMA_CHANNELS			32

#define MDC_GENERAL_CONFIG			0x000
#define MDC_GENERAL_CONFIG_LIST_IEN		BIT(31)
#define MDC_GENERAL_CONFIG_IEN			BIT(29)
#define MDC_GENERAL_CONFIG_LEVEL_INT		BIT(28)
#define MDC_GENERAL_CONFIG_INC_W		BIT(12)
#define MDC_GENERAL_CONFIG_INC_R		BIT(8)
#define MDC_GENERAL_CONFIG_PHYSICAL_W		BIT(7)
#define MDC_GENERAL_CONFIG_WIDTH_W_SHIFT	4
#define MDC_GENERAL_CONFIG_WIDTH_W_MASK		0x7
#define MDC_GENERAL_CONFIG_PHYSICAL_R		BIT(3)
#define MDC_GENERAL_CONFIG_WIDTH_R_SHIFT	0
#define MDC_GENERAL_CONFIG_WIDTH_R_MASK		0x7

#define MDC_READ_PORT_CONFIG			0x004
#define MDC_READ_PORT_CONFIG_STHREAD_SHIFT	28
#define MDC_READ_PORT_CONFIG_STHREAD_MASK	0xf
#define MDC_READ_PORT_CONFIG_RTHREAD_SHIFT	24
#define MDC_READ_PORT_CONFIG_RTHREAD_MASK	0xf
#define MDC_READ_PORT_CONFIG_WTHREAD_SHIFT	16
#define MDC_READ_PORT_CONFIG_WTHREAD_MASK	0xf
#define MDC_READ_PORT_CONFIG_BURST_SIZE_SHIFT	4
#define MDC_READ_PORT_CONFIG_BURST_SIZE_MASK	0xff
#define MDC_READ_PORT_CONFIG_DREQ_ENABLE	BIT(1)

#define MDC_READ_ADDRESS			0x008

#define MDC_WRITE_ADDRESS			0x00c

#define MDC_TRANSFER_SIZE			0x010
#define MDC_TRANSFER_SIZE_MASK			0xffffff

#define MDC_LIST_NODE_ADDRESS			0x014

#define MDC_CMDS_PROCESSED			0x018
#define MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT	16
#define MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK	0x3f
#define MDC_CMDS_PROCESSED_INT_ACTIVE		BIT(8)
#define MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT	0
#define MDC_CMDS_PROCESSED_CMDS_DONE_MASK	0x3f

#define MDC_CONTROL_AND_STATUS			0x01c
#define MDC_CONTROL_AND_STATUS_CANCEL		BIT(20)
#define MDC_CONTROL_AND_STATUS_LIST_EN		BIT(4)
#define MDC_CONTROL_AND_STATUS_EN		BIT(0)

#define MDC_ACTIVE_TRANSFER_SIZE		0x030

#define MDC_GLOBAL_CONFIG_A				0x900
#define MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_SHIFT	16
#define MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_MASK	0xff
#define MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT		8
#define MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK		0xff
#define MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_SHIFT		0
#define MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_MASK		0xff

struct mdc_hw_list_desc {
	u32 gen_conf;
	u32 readport_conf;
	u32 read_addr;
	u32 write_addr;
	u32 xfer_size;
	u32 node_addr;
	u32 cmds_done;
	u32 ctrl_status;
	/*
	 * Not part of the list descriptor, but instead used by the CPU to
	 * traverse the list.
	 */
	struct mdc_hw_list_desc *next_desc;
};

struct mdc_tx_desc {
	struct mdc_chan *chan;
	struct virt_dma_desc vd;
	dma_addr_t list_phys;
	struct mdc_hw_list_desc *list;
	bool cyclic;
	bool cmd_loaded;
	unsigned int list_len;
	unsigned int list_period_len;
	size_t list_xfer_size;
	unsigned int list_cmds_done;
};

struct mdc_chan {
	struct mdc_dma *mdma;
	struct virt_dma_chan vc;
	struct dma_slave_config config;
	struct mdc_tx_desc *desc;
	int irq;
	unsigned int periph;
	unsigned int thread;
	unsigned int chan_nr;
};

struct mdc_dma_soc_data {
	void (*enable_chan)(struct mdc_chan *mchan);
	void (*disable_chan)(struct mdc_chan *mchan);
};

struct mdc_dma {
	struct dma_device dma_dev;
	void __iomem *regs;
	struct clk *clk;
	struct dma_pool *desc_pool;
	struct regmap *periph_regs;
	spinlock_t lock;
	unsigned int nr_threads;
	unsigned int nr_channels;
	unsigned int bus_width;
	unsigned int max_burst_mult;
	unsigned int max_xfer_size;
	const struct mdc_dma_soc_data *soc;
	struct mdc_chan channels[MDC_MAX_DMA_CHANNELS];
};

static inline u32 mdc_readl(struct mdc_dma *mdma, u32 reg)
{
	return readl(mdma->regs + reg);
}

static inline void mdc_writel(struct mdc_dma *mdma, u32 val, u32 reg)
{
	writel(val, mdma->regs + reg);
}

static inline u32 mdc_chan_readl(struct mdc_chan *mchan, u32 reg)
{
	return mdc_readl(mchan->mdma, mchan->chan_nr * 0x040 + reg);
}

static inline void mdc_chan_writel(struct mdc_chan *mchan, u32 val, u32 reg)
{
	mdc_writel(mchan->mdma, val, mchan->chan_nr * 0x040 + reg);
}

static inline struct mdc_chan *to_mdc_chan(struct dma_chan *c)
{
	return container_of(to_virt_chan(c), struct mdc_chan, vc);
}

static inline struct mdc_tx_desc *to_mdc_desc(struct dma_async_tx_descriptor *t)
{
	struct virt_dma_desc *vdesc = container_of(t, struct virt_dma_desc, tx);

	return container_of(vdesc, struct mdc_tx_desc, vd);
}

static inline struct device *mdma2dev(struct mdc_dma *mdma)
{
	return mdma->dma_dev.dev;
}

static inline unsigned int to_mdc_width(unsigned int bytes)
{
	return ffs(bytes) - 1;
}

static inline void mdc_set_read_width(struct mdc_hw_list_desc *ldesc,
				      unsigned int bytes)
{
	ldesc->gen_conf |= to_mdc_width(bytes) <<
		MDC_GENERAL_CONFIG_WIDTH_R_SHIFT;
}

static inline void mdc_set_write_width(struct mdc_hw_list_desc *ldesc,
				       unsigned int bytes)
{
	ldesc->gen_conf |= to_mdc_width(bytes) <<
		MDC_GENERAL_CONFIG_WIDTH_W_SHIFT;
}

static void mdc_list_desc_config(struct mdc_chan *mchan,
				 struct mdc_hw_list_desc *ldesc,
				 enum dma_transfer_direction dir,
				 dma_addr_t src, dma_addr_t dst, size_t len)
{
	struct mdc_dma *mdma = mchan->mdma;
	unsigned int max_burst, burst_size;

	ldesc->gen_conf = MDC_GENERAL_CONFIG_IEN | MDC_GENERAL_CONFIG_LIST_IEN |
		MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W |
		MDC_GENERAL_CONFIG_PHYSICAL_R;
	ldesc->readport_conf =
		(mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
		(mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
		(mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
	ldesc->read_addr = src;
	ldesc->write_addr = dst;
	ldesc->xfer_size = len - 1;
	ldesc->node_addr = 0;
	ldesc->cmds_done = 0;
	ldesc->ctrl_status = MDC_CONTROL_AND_STATUS_LIST_EN |
		MDC_CONTROL_AND_STATUS_EN;
	ldesc->next_desc = NULL;

	if (IS_ALIGNED(dst, mdma->bus_width) &&
	    IS_ALIGNED(src, mdma->bus_width))
		max_burst = mdma->bus_width * mdma->max_burst_mult;
	else
		max_burst = mdma->bus_width * (mdma->max_burst_mult - 1);

	if (dir == DMA_MEM_TO_DEV) {
		ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R;
		ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE;
		mdc_set_read_width(ldesc, mdma->bus_width);
		mdc_set_write_width(ldesc, mchan->config.dst_addr_width);
		burst_size = min(max_burst, mchan->config.dst_maxburst *
				 mchan->config.dst_addr_width);
	} else if (dir == DMA_DEV_TO_MEM) {
		ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_W;
		ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE;
		mdc_set_read_width(ldesc, mchan->config.src_addr_width);
		mdc_set_write_width(ldesc, mdma->bus_width);
		burst_size = min(max_burst, mchan->config.src_maxburst *
				 mchan->config.src_addr_width);
	} else {
		ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R |
			MDC_GENERAL_CONFIG_INC_W;
		mdc_set_read_width(ldesc, mdma->bus_width);
		mdc_set_write_width(ldesc, mdma->bus_width);
		burst_size = max_burst;
	}
	ldesc->readport_conf |= (burst_size - 1) <<
		MDC_READ_PORT_CONFIG_BURST_SIZE_SHIFT;
}

static void mdc_list_desc_free(struct mdc_tx_desc *mdesc)
{
	struct mdc_dma *mdma = mdesc->chan->mdma;
	struct mdc_hw_list_desc *curr, *next;
	dma_addr_t curr_phys, next_phys;

	curr = mdesc->list;
	curr_phys = mdesc->list_phys;
	while (curr) {
		next = curr->next_desc;
		next_phys = curr->node_addr;
		dma_pool_free(mdma->desc_pool, curr, curr_phys);
		curr = next;
		curr_phys = next_phys;
	}
}

static void mdc_desc_free(struct virt_dma_desc *vd)
{
	struct mdc_tx_desc *mdesc = to_mdc_desc(&vd->tx);

	mdc_list_desc_free(mdesc);
	kfree(mdesc);
}

static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len,
	unsigned long flags)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct mdc_tx_desc *mdesc;
	struct mdc_hw_list_desc *curr, *prev = NULL;
	dma_addr_t curr_phys, prev_phys;

	if (!len)
		return NULL;

	mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
	if (!mdesc)
		return NULL;
	mdesc->chan = mchan;
	mdesc->list_xfer_size = len;

	while (len > 0) {
		size_t xfer_size;

		curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT, &curr_phys);
		if (!curr)
			goto free_desc;

		if (prev) {
			prev->node_addr = curr_phys;
			prev->next_desc = curr;
		} else {
			mdesc->list_phys = curr_phys;
			mdesc->list = curr;
		}

		xfer_size = min_t(size_t, mdma->max_xfer_size, len);

		mdc_list_desc_config(mchan, curr, DMA_MEM_TO_MEM, src, dest,
				     xfer_size);

		prev = curr;
		prev_phys = curr_phys;

		mdesc->list_len++;
		src += xfer_size;
		dest += xfer_size;
		len -= xfer_size;
	}

	return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
	mdc_desc_free(&mdesc->vd);

	return NULL;
}

static int mdc_check_slave_width(struct mdc_chan *mchan,
				 enum dma_transfer_direction dir)
{
	enum dma_slave_buswidth width;

	if (dir == DMA_MEM_TO_DEV)
		width = mchan->config.dst_addr_width;
	else
		width = mchan->config.src_addr_width;

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		break;
	default:
		return -EINVAL;
	}

	if (width > mchan->mdma->bus_width)
		return -EINVAL;

	return 0;
}

static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir,
	unsigned long flags)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct mdc_tx_desc *mdesc;
	struct mdc_hw_list_desc *curr, *prev = NULL;
	dma_addr_t curr_phys, prev_phys;

	if (!buf_len && !period_len)
		return NULL;

	if (!is_slave_direction(dir))
		return NULL;

	if (mdc_check_slave_width(mchan, dir) < 0)
		return NULL;

	mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
	if (!mdesc)
		return NULL;
	mdesc->chan = mchan;
	mdesc->cyclic = true;
	mdesc->list_xfer_size = buf_len;
	mdesc->list_period_len = DIV_ROUND_UP(period_len,
					      mdma->max_xfer_size);

	while (buf_len > 0) {
		size_t remainder = min(period_len, buf_len);

		while (remainder > 0) {
			size_t xfer_size;

			curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
					      &curr_phys);
			if (!curr)
				goto free_desc;

			if (!prev) {
				mdesc->list_phys = curr_phys;
				mdesc->list = curr;
			} else {
				prev->node_addr = curr_phys;
				prev->next_desc = curr;
			}

			xfer_size = min_t(size_t, mdma->max_xfer_size,
					  remainder);

			if (dir == DMA_MEM_TO_DEV) {
				mdc_list_desc_config(mchan, curr, dir,
						     buf_addr,
						     mchan->config.dst_addr,
						     xfer_size);
			} else {
				mdc_list_desc_config(mchan, curr, dir,
						     mchan->config.src_addr,
						     buf_addr,
						     xfer_size);
			}

			prev = curr;
			prev_phys = curr_phys;

			mdesc->list_len++;
			buf_addr += xfer_size;
			buf_len -= xfer_size;
			remainder -= xfer_size;
		}
	}
	prev->node_addr = mdesc->list_phys;

	return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
	mdc_desc_free(&mdesc->vd);

	return NULL;
}

static struct dma_async_tx_descriptor *mdc_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction dir,
	unsigned long flags, void *context)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;
	struct mdc_tx_desc *mdesc;
	struct scatterlist *sg;
	struct mdc_hw_list_desc *curr, *prev = NULL;
	dma_addr_t curr_phys, prev_phys;
	unsigned int i;

	if (!sgl)
		return NULL;

	if (!is_slave_direction(dir))
		return NULL;

	if (mdc_check_slave_width(mchan, dir) < 0)
		return NULL;

	mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT);
	if (!mdesc)
		return NULL;
	mdesc->chan = mchan;

	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t buf = sg_dma_address(sg);
		size_t buf_len = sg_dma_len(sg);

		while (buf_len > 0) {
			size_t xfer_size;

			curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT,
					      &curr_phys);
			if (!curr)
				goto free_desc;

			if (!prev) {
				mdesc->list_phys = curr_phys;
				mdesc->list = curr;
			} else {
				prev->node_addr = curr_phys;
				prev->next_desc = curr;
			}

			xfer_size = min_t(size_t, mdma->max_xfer_size,
					  buf_len);

			if (dir == DMA_MEM_TO_DEV) {
				mdc_list_desc_config(mchan, curr, dir, buf,
						     mchan->config.dst_addr,
						     xfer_size);
			} else {
				mdc_list_desc_config(mchan, curr, dir,
						     mchan->config.src_addr,
						     buf, xfer_size);
			}

			prev = curr;
			prev_phys = curr_phys;

			mdesc->list_len++;
			mdesc->list_xfer_size += xfer_size;
			buf += xfer_size;
			buf_len -= xfer_size;
		}
	}

	return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags);

free_desc:
	mdc_desc_free(&mdesc->vd);

	return NULL;
}

static void mdc_issue_desc(struct mdc_chan *mchan)
{
	struct mdc_dma *mdma = mchan->mdma;
	struct virt_dma_desc *vd;
	struct mdc_tx_desc *mdesc;
	u32 val;

	vd = vchan_next_desc(&mchan->vc);
	if (!vd)
		return;

	list_del(&vd->node);

	mdesc = to_mdc_desc(&vd->tx);
	mchan->desc = mdesc;

	dev_dbg(mdma2dev(mdma), "Issuing descriptor on channel %d\n",
		mchan->chan_nr);

	mdma->soc->enable_chan(mchan);

	val = mdc_chan_readl(mchan, MDC_GENERAL_CONFIG);
	val |= MDC_GENERAL_CONFIG_LIST_IEN | MDC_GENERAL_CONFIG_IEN |
		MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W |
		MDC_GENERAL_CONFIG_PHYSICAL_R;
	mdc_chan_writel(mchan, val, MDC_GENERAL_CONFIG);
	val = (mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) |
		(mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) |
		(mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT);
	mdc_chan_writel(mchan, val, MDC_READ_PORT_CONFIG);
	mdc_chan_writel(mchan, mdesc->list_phys, MDC_LIST_NODE_ADDRESS);
	val = mdc_chan_readl(mchan, MDC_CONTROL_AND_STATUS);
	val |= MDC_CONTROL_AND_STATUS_LIST_EN;
	mdc_chan_writel(mchan, val, MDC_CONTROL_AND_STATUS);
}

static void mdc_issue_pending(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&mchan->vc.lock, flags);
	if (vchan_issue_pending(&mchan->vc) && !mchan->desc)
		mdc_issue_desc(mchan);
	spin_unlock_irqrestore(&mchan->vc.lock, flags);
}

static enum dma_status mdc_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_tx_desc *mdesc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	size_t bytes = 0;
	int ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (!txstate)
		return ret;

	spin_lock_irqsave(&mchan->vc.lock, flags);
	vd = vchan_find_desc(&mchan->vc, cookie);
	if (vd) {
		mdesc = to_mdc_desc(&vd->tx);
		bytes = mdesc->list_xfer_size;
	} else if (mchan->desc && mchan->desc->vd.tx.cookie == cookie) {
		struct mdc_hw_list_desc *ldesc;
		u32 val1, val2, done, processed, residue;
		int i, cmds;

		mdesc = mchan->desc;

		/*
		 * Determine the number of commands that haven't been
		 * processed (handled by the IRQ handler) yet.
		 */
		do {
			val1 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) &
				~MDC_CMDS_PROCESSED_INT_ACTIVE;
			residue = mdc_chan_readl(mchan,
						 MDC_ACTIVE_TRANSFER_SIZE);
			val2 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) &
				~MDC_CMDS_PROCESSED_INT_ACTIVE;
		} while (val1 != val2);

		done = (val1 >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
			MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
		processed = (val1 >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
			MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
		cmds = (done - processed) %
			(MDC_CMDS_PROCESSED_CMDS_DONE_MASK + 1);

		/*
		 * If the command loaded event hasn't been processed yet, then
		 * the difference above includes an extra command.
		 */
		if (!mdesc->cmd_loaded)
			cmds--;
		else
			cmds += mdesc->list_cmds_done;

		bytes = mdesc->list_xfer_size;
		ldesc = mdesc->list;
		for (i = 0; i < cmds; i++) {
			bytes -= ldesc->xfer_size + 1;
			ldesc = ldesc->next_desc;
		}
		if (ldesc) {
			if (residue != MDC_TRANSFER_SIZE_MASK)
				bytes -= ldesc->xfer_size - residue;
			else
				bytes -= ldesc->xfer_size + 1;
		}
	}
	spin_unlock_irqrestore(&mchan->vc.lock, flags);

	dma_set_residue(txstate, bytes);

	return ret;
}

static int mdc_terminate_all(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_tx_desc *mdesc;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&mchan->vc.lock, flags);

	mdc_chan_writel(mchan, MDC_CONTROL_AND_STATUS_CANCEL,
			MDC_CONTROL_AND_STATUS);

	mdesc = mchan->desc;
	mchan->desc = NULL;
	vchan_get_all_descriptors(&mchan->vc, &head);

	spin_unlock_irqrestore(&mchan->vc.lock, flags);

	if (mdesc)
		mdc_desc_free(&mdesc->vd);
	vchan_dma_desc_free_list(&mchan->vc, &head);

	return 0;
}

static int mdc_slave_config(struct dma_chan *chan,
			    struct dma_slave_config *config)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&mchan->vc.lock, flags);
	mchan->config = *config;
	spin_unlock_irqrestore(&mchan->vc.lock, flags);

	return 0;
}

static int mdc_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

static void mdc_free_chan_resources(struct dma_chan *chan)
{
	struct mdc_chan *mchan = to_mdc_chan(chan);
	struct mdc_dma *mdma = mchan->mdma;

	mdc_terminate_all(chan);

	mdma->soc->disable_chan(mchan);
}

static irqreturn_t mdc_chan_irq(int irq, void *dev_id)
{
	struct mdc_chan *mchan = (struct mdc_chan *)dev_id;
	struct mdc_tx_desc *mdesc;
	u32 val, processed, done1, done2;
	unsigned int i;

	spin_lock(&mchan->vc.lock);

	val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
	processed = (val >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) &
		MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK;
	/*
	 * CMDS_DONE may have incremented between reading CMDS_PROCESSED
	 * and clearing INT_ACTIVE.  Re-read CMDS_PROCESSED to ensure we
	 * didn't miss a command completion.
	 */
	do {
		val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
		done1 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
			MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
		val &= ~((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK <<
			  MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) |
			 MDC_CMDS_PROCESSED_INT_ACTIVE);
		val |= done1 << MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT;
		mdc_chan_writel(mchan, val, MDC_CMDS_PROCESSED);
		val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED);
		done2 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) &
			MDC_CMDS_PROCESSED_CMDS_DONE_MASK;
	} while (done1 != done2);

	dev_dbg(mdma2dev(mchan->mdma), "IRQ on channel %d\n", mchan->chan_nr);

	mdesc = mchan->desc;
	if (!mdesc) {
		dev_warn(mdma2dev(mchan->mdma),
			 "IRQ with no active descriptor on channel %d\n",
			 mchan->chan_nr);
		goto out;
	}

	for (i = processed; i != done1;
	     i = (i + 1) % (MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK + 1)) {
		/*
		 * The first interrupt in a transfer indicates that the
		 * command list has been loaded, not that a command has
		 * been completed.
		 */
		if (!mdesc->cmd_loaded) {
			mdesc->cmd_loaded = true;
			continue;
		}

		mdesc->list_cmds_done++;
		if (mdesc->cyclic) {
			mdesc->list_cmds_done %= mdesc->list_len;
			if (mdesc->list_cmds_done % mdesc->list_period_len == 0)
				vchan_cyclic_callback(&mdesc->vd);
		} else if (mdesc->list_cmds_done == mdesc->list_len) {
			mchan->desc = NULL;
			vchan_cookie_complete(&mdesc->vd);
			mdc_issue_desc(mchan);
			break;
		}
	}
out:
	spin_unlock(&mchan->vc.lock);

	return IRQ_HANDLED;
}

static struct dma_chan *mdc_of_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *ofdma)
{
	struct mdc_dma *mdma = ofdma->of_dma_data;
	struct dma_chan *chan;

	if (dma_spec->args_count != 3)
		return NULL;

	list_for_each_entry(chan, &mdma->dma_dev.channels, device_node) {
		struct mdc_chan *mchan = to_mdc_chan(chan);

		if (!(dma_spec->args[1] & BIT(mchan->chan_nr)))
			continue;
		if (dma_get_slave_channel(chan)) {
			mchan->periph = dma_spec->args[0];
			mchan->thread = dma_spec->args[2];
			return chan;
		}
	}

	return NULL;
}

#define PISTACHIO_CR_PERIPH_DMA_ROUTE(ch)	(0x120 + 0x4 * ((ch) / 4))
#define PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(ch)	(8 * ((ch) % 4))
#define PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK	0x3f

static void pistachio_mdc_enable_chan(struct mdc_chan *mchan)
{
	struct mdc_dma *mdma = mchan->mdma;

	regmap_update_bits(mdma->periph_regs,
			   PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK <<
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
			   mchan->periph <<
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr));
}

static void pistachio_mdc_disable_chan(struct mdc_chan *mchan)
{
	struct mdc_dma *mdma = mchan->mdma;

	regmap_update_bits(mdma->periph_regs,
			   PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr),
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK <<
			   PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr),
			   0);
}

static const struct mdc_dma_soc_data pistachio_mdc_data = {
	.enable_chan = pistachio_mdc_enable_chan,
	.disable_chan = pistachio_mdc_disable_chan,
};

static const struct of_device_id mdc_dma_of_match[] = {
	{ .compatible = "img,pistachio-mdc-dma", .data = &pistachio_mdc_data, },
	{ },
};
MODULE_DEVICE_TABLE(of, mdc_dma_of_match);

static int mdc_dma_probe(struct platform_device *pdev)
{
	struct mdc_dma *mdma;
	struct resource *res;
	const struct of_device_id *match;
	unsigned int i;
	u32 val;
	int ret;

	mdma = devm_kzalloc(&pdev->dev, sizeof(*mdma), GFP_KERNEL);
	if (!mdma)
		return -ENOMEM;
	platform_set_drvdata(pdev, mdma);

	match = of_match_device(mdc_dma_of_match, &pdev->dev);
	mdma->soc = match->data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mdma->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mdma->regs))
		return PTR_ERR(mdma->regs);

	mdma->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							    "img,cr-periph");
	if (IS_ERR(mdma->periph_regs))
		return PTR_ERR(mdma->periph_regs);

	mdma->clk = devm_clk_get(&pdev->dev, "sys");
	if (IS_ERR(mdma->clk))
		return PTR_ERR(mdma->clk);

	ret = clk_prepare_enable(mdma->clk);
	if (ret)
		return ret;

	dma_cap_zero(mdma->dma_dev.cap_mask);
	dma_cap_set(DMA_SLAVE, mdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, mdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, mdma->dma_dev.cap_mask);
	dma_cap_set(DMA_MEMCPY, mdma->dma_dev.cap_mask);

	val = mdc_readl(mdma, MDC_GLOBAL_CONFIG_A);
	mdma->nr_channels = (val >> MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT) &
		MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK;
	mdma->nr_threads =
		1 << ((val >> MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_SHIFT) &
		      MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_MASK);
	mdma->bus_width =
		(1 << ((val >> MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_SHIFT) &
		       MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_MASK)) / 8;
	/*
	 * Although transfer sizes of up to MDC_TRANSFER_SIZE_MASK + 1 bytes
	 * are supported, this makes it possible for the value reported in
	 * MDC_ACTIVE_TRANSFER_SIZE to be ambiguous - an active transfer size
	 * of MDC_TRANSFER_SIZE_MASK may indicate either that 0 bytes or
	 * MDC_TRANSFER_SIZE_MASK + 1 bytes are remaining.  To eliminate this
	 * ambiguity, restrict transfer sizes to one bus-width less than the
	 * actual maximum.
	 */
	mdma->max_xfer_size = MDC_TRANSFER_SIZE_MASK + 1 - mdma->bus_width;
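	/*
	 * Editorial example, not in the original patch: with a 4-byte
	 * (32-bit) system data port, this works out to
	 * 0xffffff + 1 - 4 = 16777212 bytes per list command.
	 */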

	of_property_read_u32(pdev->dev.of_node, "dma-channels",
			     &mdma->nr_channels);
	ret = of_property_read_u32(pdev->dev.of_node,
				   "img,max-burst-multiplier",
				   &mdma->max_burst_mult);
	if (ret)
		goto disable_clk;

	mdma->dma_dev.dev = &pdev->dev;
	mdma->dma_dev.device_prep_slave_sg = mdc_prep_slave_sg;
	mdma->dma_dev.device_prep_dma_cyclic = mdc_prep_dma_cyclic;
	mdma->dma_dev.device_prep_dma_memcpy = mdc_prep_dma_memcpy;
	mdma->dma_dev.device_alloc_chan_resources = mdc_alloc_chan_resources;
	mdma->dma_dev.device_free_chan_resources = mdc_free_chan_resources;
	mdma->dma_dev.device_tx_status = mdc_tx_status;
	mdma->dma_dev.device_issue_pending = mdc_issue_pending;
	mdma->dma_dev.device_terminate_all = mdc_terminate_all;
	mdma->dma_dev.device_config = mdc_slave_config;

	mdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	mdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	for (i = 1; i <= mdma->bus_width; i <<= 1) {
		mdma->dma_dev.src_addr_widths |= BIT(i);
		mdma->dma_dev.dst_addr_widths |= BIT(i);
	}

	INIT_LIST_HEAD(&mdma->dma_dev.channels);
	for (i = 0; i < mdma->nr_channels; i++) {
		struct mdc_chan *mchan = &mdma->channels[i];

		mchan->mdma = mdma;
		mchan->chan_nr = i;
		mchan->irq = platform_get_irq(pdev, i);
		if (mchan->irq < 0) {
			ret = mchan->irq;
			goto disable_clk;
		}
		ret = devm_request_irq(&pdev->dev, mchan->irq, mdc_chan_irq,
				       IRQ_TYPE_LEVEL_HIGH,
				       dev_name(&pdev->dev), mchan);
		if (ret < 0)
			goto disable_clk;

		mchan->vc.desc_free = mdc_desc_free;
		vchan_init(&mchan->vc, &mdma->dma_dev);
	}

	mdma->desc_pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
					   sizeof(struct mdc_hw_list_desc),
					   4, 0);
	if (!mdma->desc_pool) {
		ret = -ENOMEM;
		goto disable_clk;
	}

	ret = dma_async_device_register(&mdma->dma_dev);
	if (ret)
		goto disable_clk;

	ret = of_dma_controller_register(pdev->dev.of_node, mdc_of_xlate, mdma);
	if (ret)
		goto unregister;

	dev_info(&pdev->dev, "MDC with %u channels and %u threads\n",
		 mdma->nr_channels, mdma->nr_threads);

	return 0;

unregister:
	dma_async_device_unregister(&mdma->dma_dev);
disable_clk:
	clk_disable_unprepare(mdma->clk);
	return ret;
}

static int mdc_dma_remove(struct platform_device *pdev)
{
	struct mdc_dma *mdma = platform_get_drvdata(pdev);
	struct mdc_chan *mchan, *next;

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&mdma->dma_dev);

	list_for_each_entry_safe(mchan, next, &mdma->dma_dev.channels,
				 vc.chan.device_node) {
		list_del(&mchan->vc.chan.device_node);

		synchronize_irq(mchan->irq);
		devm_free_irq(&pdev->dev, mchan->irq, mchan);

		tasklet_kill(&mchan->vc.task);
	}

	clk_disable_unprepare(mdma->clk);

	return 0;
}

static struct platform_driver mdc_dma_driver = {
	.driver = {
		.name = "img-mdc-dma",
		.of_match_table = of_match_ptr(mdc_dma_of_match),
	},
	.probe = mdc_dma_probe,
	.remove = mdc_dma_remove,
};
module_platform_driver(mdc_dma_driver);

MODULE_DESCRIPTION("IMG Multi-threaded DMA Controller (MDC) driver");
MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
MODULE_LICENSE("GPL v2");
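The cyclic path (mdc_prep_dma_cyclic(), with period callbacks delivered from mdc_chan_irq() via vchan_cyclic_callback()) is typically driven by a client as in the sketch below. Again this is illustrative only and not part of this patch; the buffer, period size and callback are placeholders.

/* Illustrative cyclic (audio-style) client sketch -- not part of this patch. */
static void example_period_done(void *param)
{
	/* Called once per completed period (via vchan_cyclic_callback()). */
}

static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = example_period_done;
	desc->callback_param = NULL;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}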