author    Alex Smith <alex.smith@imgtec.com>    2015-03-18 12:16:36 -0400
committer Vinod Koul <vinod.koul@intel.com>     2015-03-31 23:13:49 -0400
commit    d894fc6046fecd66b0d8ec35c7d2515781cc030b (patch)
tree      cb5809e6ace6c1ce30d7a80b73ae5cda8afa10c4 /drivers/dma
parent    c8307106f5fa53b8fe8763b488d629e3cce9fae3 (diff)
dmaengine: jz4780: add driver for the Ingenic JZ4780 DMA controller
This patch adds a driver for the DMA controller found in the Ingenic
JZ4780. It currently does not implement any support for the programmable
firmware feature of the controller, as this is not necessary for most
uses. It also does not take priority into account when allocating
channels; it simply allocates the first available channel. This can be
implemented later.

Signed-off-by: Alex Smith <alex.smith@imgtec.com>
Signed-off-by: Zubair Lutfullah Kakakhel <Zubair.Kakakhel@imgtec.com>
[Updated for dmaengine API changes, added residue support, couple of
minor fixes]
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
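For illustration only (not part of this patch): a peripheral driver would
obtain one of these channels through the standard dmaengine consumer API,
with a two-cell specifier matching jz4780_of_dma_xlate() below - the first
cell is the request type, the second a reserved channel number or -1 for
any. A minimal sketch, with made-up device tree values:

#include <linux/dmaengine.h>

/*
 * Hypothetical consumer sketch. Assumes a DT entry along the lines of:
 *     dmas = <&dma 0x14 0xffffffff>;
 *     dma-names = "rx";
 * where 0x14 is a made-up request type and 0xffffffff (-1) means any
 * unreserved channel.
 */
static struct dma_chan *example_request_rx_chan(struct device *dev)
{
        /* "rx" must match an entry in the consumer's dma-names property. */
        return dma_request_slave_channel(dev, "rx");
}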
Diffstat (limited to 'drivers/dma')
 drivers/dma/Kconfig      |  10 +
 drivers/dma/Makefile     |   1 +
 drivers/dma/dma-jz4780.c | 877 ++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 888 insertions(+), 0 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index a874b6ec6650..ce09734248da 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -358,6 +358,16 @@ config DMA_JZ4740
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 
+config DMA_JZ4780
+	tristate "JZ4780 DMA support"
+	depends on MACH_JZ4780
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  This selects support for the DMA controller in Ingenic JZ4780 SoCs.
+	  If you have a board based on such a SoC and wish to use DMA for
+	  devices which can use the DMA controller, say Y or M here.
+
 config K3_DMA
 	tristate "Hisilicon K3 DMA support"
 	depends on ARCH_HI3xxx
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index f915f61ec574..af239e765cbb 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_DMA_OMAP) += omap-dma.o
 obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_DMA_JZ4780) += dma-jz4780.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
 obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
new file mode 100644
index 000000000000..26d2f0e09ea3
--- /dev/null
+++ b/drivers/dma/dma-jz4780.c
@@ -0,0 +1,877 @@
/*
 * Ingenic JZ4780 DMA controller
 *
 * Copyright (c) 2015 Imagination Technologies
 * Author: Alex Smith <alex@alex-smith.me.uk>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define JZ_DMA_NR_CHANNELS	32

/* Global registers. */
#define JZ_DMA_REG_DMAC		0x1000
#define JZ_DMA_REG_DIRQP	0x1004
#define JZ_DMA_REG_DDR		0x1008
#define JZ_DMA_REG_DDRS		0x100c
#define JZ_DMA_REG_DMACP	0x101c
#define JZ_DMA_REG_DSIRQP	0x1020
#define JZ_DMA_REG_DSIRQM	0x1024
#define JZ_DMA_REG_DCIRQP	0x1028
#define JZ_DMA_REG_DCIRQM	0x102c

/* Per-channel registers. */
#define JZ_DMA_REG_CHAN(n)	((n) * 0x20)
#define JZ_DMA_REG_DSA(n)	(0x00 + JZ_DMA_REG_CHAN(n))
#define JZ_DMA_REG_DTA(n)	(0x04 + JZ_DMA_REG_CHAN(n))
#define JZ_DMA_REG_DTC(n)	(0x08 + JZ_DMA_REG_CHAN(n))
#define JZ_DMA_REG_DRT(n)	(0x0c + JZ_DMA_REG_CHAN(n))
#define JZ_DMA_REG_DCS(n)	(0x10 + JZ_DMA_REG_CHAN(n))
#define JZ_DMA_REG_DCM(n)	(0x14 + JZ_DMA_REG_CHAN(n))
#define JZ_DMA_REG_DDA(n)	(0x18 + JZ_DMA_REG_CHAN(n))
#define JZ_DMA_REG_DSD(n)	(0x1c + JZ_DMA_REG_CHAN(n))

#define JZ_DMA_DMAC_DMAE	BIT(0)
#define JZ_DMA_DMAC_AR		BIT(2)
#define JZ_DMA_DMAC_HLT		BIT(3)
#define JZ_DMA_DMAC_FMSC	BIT(31)

#define JZ_DMA_DRT_AUTO		0x8

#define JZ_DMA_DCS_CTE		BIT(0)
#define JZ_DMA_DCS_HLT		BIT(2)
#define JZ_DMA_DCS_TT		BIT(3)
#define JZ_DMA_DCS_AR		BIT(4)
#define JZ_DMA_DCS_DES8		BIT(30)

#define JZ_DMA_DCM_LINK		BIT(0)
#define JZ_DMA_DCM_TIE		BIT(1)
#define JZ_DMA_DCM_STDE		BIT(2)
#define JZ_DMA_DCM_TSZ_SHIFT	8
#define JZ_DMA_DCM_TSZ_MASK	(0x7 << JZ_DMA_DCM_TSZ_SHIFT)
#define JZ_DMA_DCM_DP_SHIFT	12
#define JZ_DMA_DCM_SP_SHIFT	14
#define JZ_DMA_DCM_DAI		BIT(22)
#define JZ_DMA_DCM_SAI		BIT(23)

#define JZ_DMA_SIZE_4_BYTE	0x0
#define JZ_DMA_SIZE_1_BYTE	0x1
#define JZ_DMA_SIZE_2_BYTE	0x2
#define JZ_DMA_SIZE_16_BYTE	0x3
#define JZ_DMA_SIZE_32_BYTE	0x4
#define JZ_DMA_SIZE_64_BYTE	0x5
#define JZ_DMA_SIZE_128_BYTE	0x6

#define JZ_DMA_WIDTH_32_BIT	0x0
#define JZ_DMA_WIDTH_8_BIT	0x1
#define JZ_DMA_WIDTH_16_BIT	0x2

#define JZ_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)	 | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

/**
 * struct jz4780_dma_hwdesc - descriptor structure read by the DMA controller.
 * @dcm: value for the DCM (channel command) register
 * @dsa: source address
 * @dta: target address
 * @dtc: transfer count (number of blocks of the transfer size specified in DCM
 * to transfer) in the low 24 bits, offset of the next descriptor from the
 * descriptor base address in the upper 8 bits.
 * @sd: target/source stride difference (in stride transfer mode).
 * @drt: request type
 */
struct jz4780_dma_hwdesc {
	uint32_t dcm;
	uint32_t dsa;
	uint32_t dta;
	uint32_t dtc;
	uint32_t sd;
	uint32_t drt;
	uint32_t reserved[2];
};

/* Size of allocations for hardware descriptor blocks. */
#define JZ_DMA_DESC_BLOCK_SIZE	PAGE_SIZE
#define JZ_DMA_MAX_DESC		\
	(JZ_DMA_DESC_BLOCK_SIZE / sizeof(struct jz4780_dma_hwdesc))

struct jz4780_dma_desc {
	struct virt_dma_desc vdesc;

	struct jz4780_dma_hwdesc *desc;
	dma_addr_t desc_phys;
	unsigned int count;
	enum dma_transaction_type type;
	uint32_t status;
};

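/*
 * Per-channel driver state: the embedded virt-dma channel, the hardware
 * channel index, a pool for hardware descriptor blocks, the slave
 * configuration, and the transfer currently running on the hardware.
 */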
struct jz4780_dma_chan {
	struct virt_dma_chan vchan;
	unsigned int id;
	struct dma_pool *desc_pool;

	uint32_t transfer_type;
	uint32_t transfer_shift;
	struct dma_slave_config config;

	struct jz4780_dma_desc *desc;
	unsigned int curr_hwdesc;
};

struct jz4780_dma_dev {
	struct dma_device dma_device;
	void __iomem *base;
	struct clk *clk;
	unsigned int irq;

	uint32_t chan_reserved;
	struct jz4780_dma_chan chan[JZ_DMA_NR_CHANNELS];
};

struct jz4780_dma_data {
	uint32_t transfer_type;
	int channel;
};

static inline struct jz4780_dma_chan *to_jz4780_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct jz4780_dma_chan, vchan.chan);
}

static inline struct jz4780_dma_desc *to_jz4780_dma_desc(
	struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct jz4780_dma_desc, vdesc);
}

static inline struct jz4780_dma_dev *jz4780_dma_chan_parent(
	struct jz4780_dma_chan *jzchan)
{
	return container_of(jzchan->vchan.chan.device, struct jz4780_dma_dev,
			    dma_device);
}

static inline uint32_t jz4780_dma_readl(struct jz4780_dma_dev *jzdma,
	unsigned int reg)
{
	return readl(jzdma->base + reg);
}

static inline void jz4780_dma_writel(struct jz4780_dma_dev *jzdma,
	unsigned int reg, uint32_t val)
{
	writel(val, jzdma->base + reg);
}

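/*
 * Allocate a transfer descriptor along with a block of up to
 * JZ_DMA_MAX_DESC hardware descriptors from the channel's DMA pool.
 * Called from the prep callbacks, hence GFP_NOWAIT.
 */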
static struct jz4780_dma_desc *jz4780_dma_desc_alloc(
	struct jz4780_dma_chan *jzchan, unsigned int count,
	enum dma_transaction_type type)
{
	struct jz4780_dma_desc *desc;

	if (count > JZ_DMA_MAX_DESC)
		return NULL;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->desc = dma_pool_alloc(jzchan->desc_pool, GFP_NOWAIT,
				    &desc->desc_phys);
	if (!desc->desc) {
		kfree(desc);
		return NULL;
	}

	desc->count = count;
	desc->type = type;
	return desc;
}

static void jz4780_dma_desc_free(struct virt_dma_desc *vdesc)
{
	struct jz4780_dma_desc *desc = to_jz4780_dma_desc(vdesc);
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(vdesc->tx.chan);

	dma_pool_free(jzchan->desc_pool, desc->desc, desc->desc_phys);
	kfree(desc);
}

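/*
 * Pick the transfer size encoding for the largest power of two that
 * divides val; the corresponding order (log2 of the size in bytes) is
 * returned through *ord so callers can convert lengths to block counts.
 */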
static int jz4780_dma_transfer_size(unsigned long val, int *ord)
{
	*ord = ffs(val) - 1;

	switch (*ord) {
	case 0:
		return JZ_DMA_SIZE_1_BYTE;
	case 1:
		return JZ_DMA_SIZE_2_BYTE;
	case 2:
		return JZ_DMA_SIZE_4_BYTE;
	case 4:
		return JZ_DMA_SIZE_16_BYTE;
	case 5:
		return JZ_DMA_SIZE_32_BYTE;
	case 6:
		return JZ_DMA_SIZE_64_BYTE;
	case 7:
		return JZ_DMA_SIZE_128_BYTE;
	default:
		return -EINVAL;
	}
}

static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
	struct jz4780_dma_hwdesc *desc, dma_addr_t addr, size_t len,
	enum dma_transfer_direction direction)
{
	struct dma_slave_config *config = &jzchan->config;
	uint32_t width, maxburst;
	int tsz, ord;

	if (direction == DMA_MEM_TO_DEV) {
		desc->dcm = JZ_DMA_DCM_SAI;
		desc->dsa = addr;
		desc->dta = config->dst_addr;
		desc->drt = jzchan->transfer_type;

		width = config->dst_addr_width;
		maxburst = config->dst_maxburst;
	} else {
		desc->dcm = JZ_DMA_DCM_DAI;
		desc->dsa = config->src_addr;
		desc->dta = addr;
		desc->drt = jzchan->transfer_type;

		width = config->src_addr_width;
		maxburst = config->src_maxburst;
	}

	/*
	 * This calculates the maximum transfer size that can be used with the
	 * given address, length, width and maximum burst size. The address
	 * must be aligned to the transfer size, the total length must be
	 * divisible by the transfer size, and we must not use more than the
	 * maximum burst specified by the user.
	 */
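	/*
	 * Example: addr = 0x1010, len = 0x100, width = 2 and maxburst = 8
	 * give 0x1010 | 0x100 | 0x10 = 0x1110; the lowest set bit is bit 4,
	 * so ord is 4 and a 16-byte transfer size is selected.
	 */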
	tsz = jz4780_dma_transfer_size(addr | len | (width * maxburst), &ord);
	if (tsz < 0)
		return tsz;

	jzchan->transfer_shift = ord;

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		width = JZ_DMA_WIDTH_32_BIT;
		break;
	default:
		return -EINVAL;
	}

	desc->dcm |= tsz << JZ_DMA_DCM_TSZ_SHIFT;
	desc->dcm |= width << JZ_DMA_DCM_SP_SHIFT;
	desc->dcm |= width << JZ_DMA_DCM_DP_SHIFT;

	desc->dtc = len >> ord;
	return 0;
}

static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_desc *desc;
	unsigned int i;
	int err;

	desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE);
	if (!desc)
		return NULL;

	for (i = 0; i < sg_len; i++) {
		err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i],
					      sg_dma_address(&sgl[i]),
					      sg_dma_len(&sgl[i]),
					      direction);
		if (err < 0) {
			/* Not yet submitted to virt-dma, so free directly. */
			dma_pool_free(jzchan->desc_pool, desc->desc,
				      desc->desc_phys);
			kfree(desc);
			return ERR_PTR(err);
		}

		desc->desc[i].dcm |= JZ_DMA_DCM_TIE;

		if (i != (sg_len - 1)) {
			/* Automatically proceed to the next descriptor. */
			desc->desc[i].dcm |= JZ_DMA_DCM_LINK;

			/*
			 * The upper 8 bits of the DTC field in the descriptor
			 * must be set to (offset from descriptor base of next
			 * descriptor >> 4).
			 */
			desc->desc[i].dtc |=
				(((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
		}
	}

	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_desc *desc;
	unsigned int periods, i;
	int err;

	if (buf_len % period_len)
		return NULL;

	periods = buf_len / period_len;

	desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC);
	if (!desc)
		return NULL;

	for (i = 0; i < periods; i++) {
		err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], buf_addr,
					      period_len, direction);
		if (err < 0) {
			/* Not yet submitted to virt-dma, so free directly. */
			dma_pool_free(jzchan->desc_pool, desc->desc,
				      desc->desc_phys);
			kfree(desc);
			return ERR_PTR(err);
		}

		buf_addr += period_len;

		/*
		 * Set the link bit to indicate that the controller should
		 * automatically proceed to the next descriptor. In
		 * jz4780_dma_begin(), this will be cleared if we need to issue
		 * an interrupt after each period.
		 */
		desc->desc[i].dcm |= JZ_DMA_DCM_TIE | JZ_DMA_DCM_LINK;

		/*
		 * The upper 8 bits of the DTC field in the descriptor must be
		 * set to (offset from descriptor base of next descriptor >> 4).
		 * If this is the last descriptor, link it back to the first,
		 * i.e. leave offset set to 0, otherwise point to the next one.
		 */
		if (i != (periods - 1)) {
			desc->desc[i].dtc |=
				(((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
		}
	}

	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_desc *desc;
	int tsz, ord;

	desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY);
	if (!desc)
		return NULL;

	tsz = jz4780_dma_transfer_size(dest | src | len, &ord);
	if (tsz < 0) {
		/* Not yet submitted to virt-dma, so free directly. */
		dma_pool_free(jzchan->desc_pool, desc->desc, desc->desc_phys);
		kfree(desc);
		return ERR_PTR(tsz);
	}

	desc->desc[0].dsa = src;
	desc->desc[0].dta = dest;
	desc->desc[0].drt = JZ_DMA_DRT_AUTO;
	desc->desc[0].dcm = JZ_DMA_DCM_TIE | JZ_DMA_DCM_SAI | JZ_DMA_DCM_DAI |
			    tsz << JZ_DMA_DCM_TSZ_SHIFT |
			    JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_SP_SHIFT |
			    JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_DP_SHIFT;
	desc->desc[0].dtc = len >> ord;

	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}

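/*
 * Start transferring on the hardware: fetch the next issued descriptor
 * list, or, for an unlinked cyclic transfer, advance to the next hardware
 * descriptor of the current one. Called with the channel's vchan lock
 * held.
 */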
static void jz4780_dma_begin(struct jz4780_dma_chan *jzchan)
{
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	struct virt_dma_desc *vdesc;
	unsigned int i;
	dma_addr_t desc_phys;

	if (!jzchan->desc) {
		vdesc = vchan_next_desc(&jzchan->vchan);
		if (!vdesc)
			return;

		list_del(&vdesc->node);

		jzchan->desc = to_jz4780_dma_desc(vdesc);
		jzchan->curr_hwdesc = 0;

		if (jzchan->desc->type == DMA_CYCLIC && vdesc->tx.callback) {
			/*
			 * The DMA controller doesn't support triggering an
			 * interrupt after processing each descriptor, only
			 * after processing an entire terminated list of
			 * descriptors. For a cyclic DMA setup the list of
			 * descriptors is not terminated so we can never get an
			 * interrupt.
			 *
			 * If the user requested a callback for a cyclic DMA
			 * setup then we work around this hardware limitation
			 * here by degrading to a set of unlinked descriptors
			 * which we will submit in sequence in response to the
			 * completion of processing the previous descriptor.
			 */
			for (i = 0; i < jzchan->desc->count; i++)
				jzchan->desc->desc[i].dcm &= ~JZ_DMA_DCM_LINK;
		}
	} else {
		/*
		 * There is an existing transfer, therefore this must be one
		 * for which we unlinked the descriptors above. Advance to the
		 * next one in the list.
		 */
		jzchan->curr_hwdesc =
			(jzchan->curr_hwdesc + 1) % jzchan->desc->count;
	}

	/* Use 8-word descriptors. */
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), JZ_DMA_DCS_DES8);

	/* Write descriptor address and initiate descriptor fetch. */
	desc_phys = jzchan->desc->desc_phys +
		    (jzchan->curr_hwdesc * sizeof(*jzchan->desc->desc));
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DDA(jzchan->id), desc_phys);
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DDRS, BIT(jzchan->id));

	/* Enable the channel. */
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id),
			  JZ_DMA_DCS_DES8 | JZ_DMA_DCS_CTE);
}

static void jz4780_dma_issue_pending(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&jzchan->vchan.lock, flags);

	if (vchan_issue_pending(&jzchan->vchan) && !jzchan->desc)
		jz4780_dma_begin(jzchan);

	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
}

static int jz4780_dma_terminate_all(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&jzchan->vchan.lock, flags);

	/* Clear the DMA status and stop the transfer. */
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0);
	if (jzchan->desc) {
		jz4780_dma_desc_free(&jzchan->desc->vdesc);
		jzchan->desc = NULL;
	}

	vchan_get_all_descriptors(&jzchan->vchan, &head);

	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);

	vchan_dma_desc_free_list(&jzchan->vchan, &head);
	return 0;
}

static int jz4780_dma_slave_config(struct dma_chan *chan,
	struct dma_slave_config *config)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

	if ((config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
	   || (config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES))
		return -EINVAL;

	/* Copy the rest of the slave configuration, it is used later. */
	memcpy(&jzchan->config, config, sizeof(jzchan->config));

	return 0;
}

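/*
 * Residue is the sum of the byte counts of all hardware descriptors that
 * have not started yet, plus, for an in-progress transfer, whatever the
 * controller still reports in DTC for the descriptor it is processing.
 */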
static size_t jz4780_dma_desc_residue(struct jz4780_dma_chan *jzchan,
	struct jz4780_dma_desc *desc, unsigned int next_sg)
{
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	unsigned int residue, count;
	unsigned int i;

	residue = 0;

	for (i = next_sg; i < desc->count; i++)
		residue += desc->desc[i].dtc << jzchan->transfer_shift;

	if (next_sg != 0) {
		count = jz4780_dma_readl(jzdma,
					 JZ_DMA_REG_DTC(jzchan->id));
		residue += count << jzchan->transfer_shift;
	}

	return residue;
}

static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, txstate);
	if ((status == DMA_COMPLETE) || (txstate == NULL))
		return status;

	spin_lock_irqsave(&jzchan->vchan.lock, flags);

	vdesc = vchan_find_desc(&jzchan->vchan, cookie);
	if (vdesc) {
		/* On the issued list, so hasn't been processed yet */
		txstate->residue = jz4780_dma_desc_residue(jzchan,
					to_jz4780_dma_desc(vdesc), 0);
	} else if (jzchan->desc && cookie == jzchan->desc->vdesc.tx.cookie) {
		txstate->residue = jz4780_dma_desc_residue(jzchan,
					jzchan->desc,
					(jzchan->curr_hwdesc + 1) %
						jzchan->desc->count);
	} else {
		txstate->residue = 0;
	}

	if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc
	    && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
		status = DMA_ERROR;

	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
	return status;
}

static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
	struct jz4780_dma_chan *jzchan)
{
	uint32_t dcs;

	spin_lock(&jzchan->vchan.lock);

	dcs = jz4780_dma_readl(jzdma, JZ_DMA_REG_DCS(jzchan->id));
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0);

	if (dcs & JZ_DMA_DCS_AR) {
		dev_warn(&jzchan->vchan.chan.dev->device,
			 "address error (DCS=0x%x)\n", dcs);
	}

	if (dcs & JZ_DMA_DCS_HLT) {
		dev_warn(&jzchan->vchan.chan.dev->device,
			 "channel halt (DCS=0x%x)\n", dcs);
	}

	if (jzchan->desc) {
		jzchan->desc->status = dcs;

		if ((dcs & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) == 0) {
			if (jzchan->desc->type == DMA_CYCLIC) {
				vchan_cyclic_callback(&jzchan->desc->vdesc);
			} else {
				vchan_cookie_complete(&jzchan->desc->vdesc);
				jzchan->desc = NULL;
			}

			jz4780_dma_begin(jzchan);
		}
	} else {
		dev_err(&jzchan->vchan.chan.dev->device,
			"channel IRQ with no active transfer\n");
	}

	spin_unlock(&jzchan->vchan.lock);
}

static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
{
	struct jz4780_dma_dev *jzdma = data;
	uint32_t pending, dmac;
	int i;

	pending = jz4780_dma_readl(jzdma, JZ_DMA_REG_DIRQP);

	for (i = 0; i < JZ_DMA_NR_CHANNELS; i++) {
		if (!(pending & (1 << i)))
			continue;

		jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]);
	}

	/* Clear halt and address error status of all channels. */
	dmac = jz4780_dma_readl(jzdma, JZ_DMA_REG_DMAC);
	dmac &= ~(JZ_DMA_DMAC_HLT | JZ_DMA_DMAC_AR);
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DMAC, dmac);

	/* Clear interrupt pending status. */
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DIRQP, 0);

	return IRQ_HANDLED;
}

static int jz4780_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

	jzchan->desc_pool = dma_pool_create(dev_name(&chan->dev->device),
					    chan->device->dev,
					    JZ_DMA_DESC_BLOCK_SIZE,
					    PAGE_SIZE, 0);
	if (!jzchan->desc_pool) {
		dev_err(&chan->dev->device,
			"failed to allocate descriptor pool\n");
		return -ENOMEM;
	}

	return 0;
}

static void jz4780_dma_free_chan_resources(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

	vchan_free_chan_resources(&jzchan->vchan);
	dma_pool_destroy(jzchan->desc_pool);
	jzchan->desc_pool = NULL;
}

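/*
 * Filter callback used by jz4780_of_dma_xlate(): match the exact channel
 * a consumer asked for, or, if any channel will do, any channel that is
 * not reserved; the request type is recorded for later programming into
 * the DRT register.
 */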
static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	struct jz4780_dma_data *data = param;

	if (data->channel > -1) {
		if (data->channel != jzchan->id)
			return false;
	} else if (jzdma->chan_reserved & BIT(jzchan->id)) {
		return false;
	}

	jzchan->transfer_type = data->transfer_type;

	return true;
}

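/*
 * The DT specifier uses two cells: cell 0 is the request type written to
 * DRT, cell 1 is a specific (reserved) channel number, or -1 to let the
 * driver pick any unreserved channel.
 */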
static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
	struct of_dma *ofdma)
{
	struct jz4780_dma_dev *jzdma = ofdma->of_dma_data;
	dma_cap_mask_t mask = jzdma->dma_device.cap_mask;
	struct jz4780_dma_data data;

	if (dma_spec->args_count != 2)
		return NULL;

	data.transfer_type = dma_spec->args[0];
	data.channel = dma_spec->args[1];

	if (data.channel > -1) {
		if (data.channel >= JZ_DMA_NR_CHANNELS) {
			dev_err(jzdma->dma_device.dev,
				"device requested non-existent channel %u\n",
				data.channel);
			return NULL;
		}

		/* Can only select a channel marked as reserved. */
		if (!(jzdma->chan_reserved & BIT(data.channel))) {
			dev_err(jzdma->dma_device.dev,
				"device requested unreserved channel %u\n",
				data.channel);
			return NULL;
		}
	}

	return dma_request_channel(mask, jz4780_dma_filter_fn, &data);
}

static int jz4780_dma_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct jz4780_dma_dev *jzdma;
	struct jz4780_dma_chan *jzchan;
	struct dma_device *dd;
	struct resource *res;
	int i, ret;

	jzdma = devm_kzalloc(dev, sizeof(*jzdma), GFP_KERNEL);
	if (!jzdma)
		return -ENOMEM;

	platform_set_drvdata(pdev, jzdma);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "failed to get I/O memory\n");
		return -EINVAL;
	}

	jzdma->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(jzdma->base))
		return PTR_ERR(jzdma->base);

	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		dev_err(dev, "failed to get IRQ: %d\n", ret);
		return ret;
	}

	jzdma->irq = ret;

	ret = devm_request_irq(dev, jzdma->irq, jz4780_dma_irq_handler, 0,
			       dev_name(dev), jzdma);
	if (ret) {
		dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
		return ret;
	}

	jzdma->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(jzdma->clk)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(jzdma->clk);
	}

	clk_prepare_enable(jzdma->clk);

	/* Property is optional; if it doesn't exist the value will remain 0. */
	of_property_read_u32_index(dev->of_node, "ingenic,reserved-channels",
				   0, &jzdma->chan_reserved);

	dd = &jzdma->dma_device;

	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);

	dd->dev = dev;
	dd->copy_align = 2; /* 2^2 = 4 byte alignment */
	dd->device_alloc_chan_resources = jz4780_dma_alloc_chan_resources;
	dd->device_free_chan_resources = jz4780_dma_free_chan_resources;
	dd->device_prep_slave_sg = jz4780_dma_prep_slave_sg;
	dd->device_prep_dma_cyclic = jz4780_dma_prep_dma_cyclic;
	dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy;
	dd->device_config = jz4780_dma_slave_config;
	dd->device_terminate_all = jz4780_dma_terminate_all;
	dd->device_tx_status = jz4780_dma_tx_status;
	dd->device_issue_pending = jz4780_dma_issue_pending;
	dd->src_addr_widths = JZ_DMA_BUSWIDTHS;
	dd->dst_addr_widths = JZ_DMA_BUSWIDTHS;
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	/*
	 * Enable DMA controller, mark all channels as not programmable.
	 * Also set the FMSC bit - it increases MSC performance, so it makes
	 * little sense not to enable it.
	 */
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DMAC,
			  JZ_DMA_DMAC_DMAE | JZ_DMA_DMAC_FMSC);
	jz4780_dma_writel(jzdma, JZ_DMA_REG_DMACP, 0);

	INIT_LIST_HEAD(&dd->channels);

	for (i = 0; i < JZ_DMA_NR_CHANNELS; i++) {
		jzchan = &jzdma->chan[i];
		jzchan->id = i;

		vchan_init(&jzchan->vchan, dd);
		jzchan->vchan.desc_free = jz4780_dma_desc_free;
	}

	ret = dma_async_device_register(dd);
	if (ret) {
		dev_err(dev, "failed to register device\n");
		goto err_disable_clk;
	}

	/* Register with OF DMA helpers. */
	ret = of_dma_controller_register(dev->of_node, jz4780_of_dma_xlate,
					 jzdma);
	if (ret) {
		dev_err(dev, "failed to register OF DMA controller\n");
		goto err_unregister_dev;
	}

	dev_info(dev, "JZ4780 DMA controller initialised\n");
	return 0;

err_unregister_dev:
	dma_async_device_unregister(dd);

err_disable_clk:
	clk_disable_unprepare(jzdma->clk);
	return ret;
}

static int jz4780_dma_remove(struct platform_device *pdev)
{
	struct jz4780_dma_dev *jzdma = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	devm_free_irq(&pdev->dev, jzdma->irq, jzdma);
	dma_async_device_unregister(&jzdma->dma_device);
	clk_disable_unprepare(jzdma->clk);
	return 0;
}

static const struct of_device_id jz4780_dma_dt_match[] = {
	{ .compatible = "ingenic,jz4780-dma", .data = NULL },
	{},
};
MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);

static struct platform_driver jz4780_dma_driver = {
	.probe		= jz4780_dma_probe,
	.remove		= jz4780_dma_remove,
	.driver	= {
		.name	= "jz4780-dma",
		.of_match_table = of_match_ptr(jz4780_dma_dt_match),
	},
};

static int __init jz4780_dma_init(void)
{
	return platform_driver_register(&jz4780_dma_driver);
}
subsys_initcall(jz4780_dma_init);

static void __exit jz4780_dma_exit(void)
{
	platform_driver_unregister(&jz4780_dma_driver);
}
module_exit(jz4780_dma_exit);

MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
MODULE_DESCRIPTION("Ingenic JZ4780 DMA controller driver");
MODULE_LICENSE("GPL");