Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/Kconfig      |    8
-rw-r--r--  drivers/dma/Makefile     |    1
-rw-r--r--  drivers/dma/sun6i-dma.c  | 1059
3 files changed, 1068 insertions, 0 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 1eca7b9760e6..4b439270fb11 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -375,6 +375,14 @@ config XILINX_VDMA
 	  channels, Memory Mapped to Stream (MM2S) and Stream to
 	  Memory Mapped (S2MM) for the data transfers.
 
+config DMA_SUN6I
+	tristate "Allwinner A31 SoCs DMA support"
+	depends on MACH_SUN6I || COMPILE_TEST
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Support for the DMA engine for Allwinner A31 SoCs.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index aca5eb577d44..d08bd966da13 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -47,3 +47,4 @@ obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
 obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
 obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
 obj-y += xilinx/
+obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
new file mode 100644
index 000000000000..ce8d5d1b0ff4
--- /dev/null
+++ b/drivers/dma/sun6i-dma.c
@@ -0,0 +1,1059 @@
/*
 * Copyright (C) 2013-2014 Allwinner Tech Co., Ltd
 * Author: Sugar <shuge@allwinnertech.com>
 *
 * Copyright (C) 2014 Maxime Ripard
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "virt-dma.h"

/*
 * There are 16 physical channels that can work in parallel.
 *
 * However, we have 30 different endpoints for our requests.
 *
 * Since the channels can only handle a unidirectional transfer, we
 * need to allocate more virtual channels so that everyone can grab
 * one.
 *
 * Some devices can't work in both directions (mostly because it
 * wouldn't make sense), so we have a bit fewer virtual channels than
 * 2 channels per endpoint.
 */

#define NR_MAX_CHANNELS		16
#define NR_MAX_REQUESTS		30
#define NR_MAX_VCHANS		53

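/*
 * A worked count, inferred from the comment above rather than stated
 * anywhere in this patch: 30 endpoints x 2 directions would give 60
 * virtual channels in the fully bidirectional case; NR_MAX_VCHANS = 53
 * therefore presumably means 7 endpoint directions are never used.
 */
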
/*
 * Common registers
 */
#define DMA_IRQ_EN(x)		((x) * 0x04)
#define DMA_IRQ_HALF		BIT(0)
#define DMA_IRQ_PKG		BIT(1)
#define DMA_IRQ_QUEUE		BIT(2)

#define DMA_IRQ_CHAN_NR		8
#define DMA_IRQ_CHAN_WIDTH	4

#define DMA_IRQ_STAT(x)		((x) * 0x04 + 0x10)

#define DMA_STAT		0x30

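/*
 * Editorial note on the layout, inferred from the defines above and
 * not from a datasheet: each of the two IRQ enable/status registers
 * packs eight channels, one 4-bit field per channel (HALF/PKG/QUEUE
 * plus a reserved bit). Channel n is therefore serviced through:
 *
 *	reg  = DMA_IRQ_EN(n / DMA_IRQ_CHAN_NR);
 *	bits = (n % DMA_IRQ_CHAN_NR) * DMA_IRQ_CHAN_WIDTH;
 */
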
/*
 * Channel-specific registers
 */
#define DMA_CHAN_ENABLE		0x00
#define DMA_CHAN_ENABLE_START	BIT(0)
#define DMA_CHAN_ENABLE_STOP	0

#define DMA_CHAN_PAUSE		0x04
#define DMA_CHAN_PAUSE_PAUSE	BIT(1)
#define DMA_CHAN_PAUSE_RESUME	0

#define DMA_CHAN_LLI_ADDR	0x08

#define DMA_CHAN_CUR_CFG	0x0c
#define DMA_CHAN_CFG_SRC_DRQ(x)		((x) & 0x1f)
#define DMA_CHAN_CFG_SRC_IO_MODE	BIT(5)
#define DMA_CHAN_CFG_SRC_LINEAR_MODE	(0 << 5)
#define DMA_CHAN_CFG_SRC_BURST(x)	(((x) & 0x3) << 7)
#define DMA_CHAN_CFG_SRC_WIDTH(x)	(((x) & 0x3) << 9)

#define DMA_CHAN_CFG_DST_DRQ(x)		(DMA_CHAN_CFG_SRC_DRQ(x) << 16)
#define DMA_CHAN_CFG_DST_IO_MODE	(DMA_CHAN_CFG_SRC_IO_MODE << 16)
#define DMA_CHAN_CFG_DST_LINEAR_MODE	(DMA_CHAN_CFG_SRC_LINEAR_MODE << 16)
#define DMA_CHAN_CFG_DST_BURST(x)	(DMA_CHAN_CFG_SRC_BURST(x) << 16)
#define DMA_CHAN_CFG_DST_WIDTH(x)	(DMA_CHAN_CFG_SRC_WIDTH(x) << 16)
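
/*
 * Illustrative composition of a channel configuration word; this exact
 * expression does not appear in the driver and the values are made-up
 * examples. A device-to-memory transfer from DRQ 23 with 4-byte
 * accesses (width code 2) and 8-beat bursts (burst code 2) on both
 * sides would be programmed as:
 *
 *	cfg = DMA_CHAN_CFG_SRC_DRQ(23) | DMA_CHAN_CFG_SRC_IO_MODE |
 *	      DMA_CHAN_CFG_SRC_BURST(2) | DMA_CHAN_CFG_SRC_WIDTH(2) |
 *	      DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
 *	      DMA_CHAN_CFG_DST_LINEAR_MODE |
 *	      DMA_CHAN_CFG_DST_BURST(2) | DMA_CHAN_CFG_DST_WIDTH(2);
 */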

#define DMA_CHAN_CUR_SRC	0x10

#define DMA_CHAN_CUR_DST	0x14

#define DMA_CHAN_CUR_CNT	0x18

#define DMA_CHAN_CUR_PARA	0x1c

/*
 * Various hardware-related defines
 */
#define LLI_LAST_ITEM	0xfffff800
#define NORMAL_WAIT	8
#define DRQ_SDRAM	1

/*
 * Hardware representation of the LLI
 *
 * The hardware will be fed the physical address of this structure,
 * and read its content in order to start the transfer.
 */
struct sun6i_dma_lli {
	u32			cfg;
	u32			src;
	u32			dst;
	u32			len;
	u32			para;
	u32			p_lli_next;

	/*
	 * This field is not used by the DMA controller, but will be
	 * used by the CPU to go through the list (mostly for dumping
	 * or freeing it).
	 */
	struct sun6i_dma_lli	*v_lli_next;
};

struct sun6i_desc {
	struct virt_dma_desc	vd;
	dma_addr_t		p_lli;
	struct sun6i_dma_lli	*v_lli;
};

struct sun6i_pchan {
	u32			idx;
	void __iomem		*base;
	struct sun6i_vchan	*vchan;
	struct sun6i_desc	*desc;
	struct sun6i_desc	*done;
};

struct sun6i_vchan {
	struct virt_dma_chan	vc;
	struct list_head	node;
	struct dma_slave_config	cfg;
	struct sun6i_pchan	*phy;
	u8			port;
};

struct sun6i_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	struct clk		*clk;
	int			irq;
	spinlock_t		lock;
	struct reset_control	*rstc;
	struct tasklet_struct	task;
	atomic_t		tasklet_shutdown;
	struct list_head	pending;
	struct dma_pool		*pool;
	struct sun6i_pchan	*pchans;
	struct sun6i_vchan	*vchans;
};

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct sun6i_dma_dev *to_sun6i_dma_dev(struct dma_device *d)
{
	return container_of(d, struct sun6i_dma_dev, slave);
}

static inline struct sun6i_vchan *to_sun6i_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct sun6i_vchan, vc.chan);
}

static inline struct sun6i_desc *
to_sun6i_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct sun6i_desc, vd.tx);
}

static inline void sun6i_dma_dump_com_regs(struct sun6i_dma_dev *sdev)
{
	dev_dbg(sdev->slave.dev, "Common register:\n"
		"\tmask0(%04x): 0x%08x\n"
		"\tmask1(%04x): 0x%08x\n"
		"\tpend0(%04x): 0x%08x\n"
		"\tpend1(%04x): 0x%08x\n"
		"\tstats(%04x): 0x%08x\n",
		DMA_IRQ_EN(0), readl(sdev->base + DMA_IRQ_EN(0)),
		DMA_IRQ_EN(1), readl(sdev->base + DMA_IRQ_EN(1)),
		DMA_IRQ_STAT(0), readl(sdev->base + DMA_IRQ_STAT(0)),
		DMA_IRQ_STAT(1), readl(sdev->base + DMA_IRQ_STAT(1)),
		DMA_STAT, readl(sdev->base + DMA_STAT));
}

static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev,
					    struct sun6i_pchan *pchan)
{
	phys_addr_t reg = __virt_to_phys((unsigned long)pchan->base);

	dev_dbg(sdev->slave.dev, "Chan %d reg: %pa\n"
		"\t___en(%04x): \t0x%08x\n"
		"\tpause(%04x): \t0x%08x\n"
		"\tstart(%04x): \t0x%08x\n"
		"\t__cfg(%04x): \t0x%08x\n"
		"\t__src(%04x): \t0x%08x\n"
		"\t__dst(%04x): \t0x%08x\n"
		"\tcount(%04x): \t0x%08x\n"
		"\t_para(%04x): \t0x%08x\n\n",
		pchan->idx, &reg,
		DMA_CHAN_ENABLE,
		readl(pchan->base + DMA_CHAN_ENABLE),
		DMA_CHAN_PAUSE,
		readl(pchan->base + DMA_CHAN_PAUSE),
		DMA_CHAN_LLI_ADDR,
		readl(pchan->base + DMA_CHAN_LLI_ADDR),
		DMA_CHAN_CUR_CFG,
		readl(pchan->base + DMA_CHAN_CUR_CFG),
		DMA_CHAN_CUR_SRC,
		readl(pchan->base + DMA_CHAN_CUR_SRC),
		DMA_CHAN_CUR_DST,
		readl(pchan->base + DMA_CHAN_CUR_DST),
		DMA_CHAN_CUR_CNT,
		readl(pchan->base + DMA_CHAN_CUR_CNT),
		DMA_CHAN_CUR_PARA,
		readl(pchan->base + DMA_CHAN_CUR_PARA));
}

static inline int convert_burst(u32 maxburst, u8 *burst)
{
	switch (maxburst) {
	case 1:
		*burst = 0;
		break;
	case 8:
		*burst = 2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static inline int convert_buswidth(enum dma_slave_buswidth addr_width, u8 *width)
{
	switch (addr_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		*width = 0;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		*width = 1;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		*width = 2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev,
			       struct sun6i_dma_lli *next,
			       dma_addr_t next_phy,
			       struct sun6i_desc *txd)
{
	if ((!prev && !txd) || !next)
		return NULL;

	if (!prev) {
		txd->p_lli = next_phy;
		txd->v_lli = next;
	} else {
		prev->p_lli_next = next_phy;
		prev->v_lli_next = next;
	}

	next->p_lli_next = LLI_LAST_ITEM;
	next->v_lli_next = NULL;

	return next;
}

static inline int sun6i_dma_cfg_lli(struct sun6i_dma_lli *lli,
				    dma_addr_t src,
				    dma_addr_t dst, u32 len,
				    struct dma_slave_config *config)
{
	u8 src_width, dst_width, src_burst, dst_burst;
	int ret;

	if (!config)
		return -EINVAL;

	ret = convert_burst(config->src_maxburst, &src_burst);
	if (ret)
		return ret;

	ret = convert_burst(config->dst_maxburst, &dst_burst);
	if (ret)
		return ret;

	ret = convert_buswidth(config->src_addr_width, &src_width);
	if (ret)
		return ret;

	ret = convert_buswidth(config->dst_addr_width, &dst_width);
	if (ret)
		return ret;

	lli->cfg = DMA_CHAN_CFG_SRC_BURST(src_burst) |
		DMA_CHAN_CFG_SRC_WIDTH(src_width) |
		DMA_CHAN_CFG_DST_BURST(dst_burst) |
		DMA_CHAN_CFG_DST_WIDTH(dst_width);

	lli->src = src;
	lli->dst = dst;
	lli->len = len;
	lli->para = NORMAL_WAIT;

	return 0;
}

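/*
 * For context, a minimal sketch of how a client hands this driver the
 * parameters consumed above; this is standard dmaengine API usage, not
 * code from this patch, and the FIFO address and widths are made-up
 * example values. Note that both source and destination burst/width
 * must be valid, since sun6i_dma_cfg_lli() converts both sides:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 8,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */
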
static inline void sun6i_dma_dump_lli(struct sun6i_vchan *vchan,
				      struct sun6i_dma_lli *lli)
{
	phys_addr_t p_lli = __virt_to_phys((unsigned long)lli);

	dev_dbg(chan2dev(&vchan->vc.chan),
		"\n\tdesc: p - %pa v - 0x%p\n"
		"\t\tc - 0x%08x s - 0x%08x d - 0x%08x\n"
		"\t\tl - 0x%08x p - 0x%08x n - 0x%08x\n",
		&p_lli, lli,
		lli->cfg, lli->src, lli->dst,
		lli->len, lli->para, lli->p_lli_next);
}

static void sun6i_dma_free_desc(struct virt_dma_desc *vd)
{
	struct sun6i_desc *txd = to_sun6i_desc(&vd->tx);
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vd->tx.chan->device);
	struct sun6i_dma_lli *v_lli, *v_next;
	dma_addr_t p_lli, p_next;

	if (unlikely(!txd))
		return;

	p_lli = txd->p_lli;
	v_lli = txd->v_lli;

	while (v_lli) {
		v_next = v_lli->v_lli_next;
		p_next = v_lli->p_lli_next;

		dma_pool_free(sdev->pool, v_lli, p_lli);

		v_lli = v_next;
		p_lli = p_next;
	}

	kfree(txd);
}

static int sun6i_dma_terminate_all(struct sun6i_vchan *vchan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device);
	struct sun6i_pchan *pchan = vchan->phy;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock(&sdev->lock);
	list_del_init(&vchan->node);
	spin_unlock(&sdev->lock);

	spin_lock_irqsave(&vchan->vc.lock, flags);

	vchan_get_all_descriptors(&vchan->vc, &head);

	if (pchan) {
		writel(DMA_CHAN_ENABLE_STOP, pchan->base + DMA_CHAN_ENABLE);
		writel(DMA_CHAN_PAUSE_RESUME, pchan->base + DMA_CHAN_PAUSE);

		vchan->phy = NULL;
		pchan->vchan = NULL;
		pchan->desc = NULL;
		pchan->done = NULL;
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	vchan_dma_desc_free_list(&vchan->vc, &head);

	return 0;
}

static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device);
	struct virt_dma_desc *desc = vchan_next_desc(&vchan->vc);
	struct sun6i_pchan *pchan = vchan->phy;
	u32 irq_val, irq_reg, irq_offset;

	if (!pchan)
		return -EAGAIN;

	if (!desc) {
		pchan->desc = NULL;
		pchan->done = NULL;
		return -EAGAIN;
	}

	list_del(&desc->node);

	pchan->desc = to_sun6i_desc(&desc->tx);
	pchan->done = NULL;

	sun6i_dma_dump_lli(vchan, pchan->desc->v_lli);

	irq_reg = pchan->idx / DMA_IRQ_CHAN_NR;
	irq_offset = pchan->idx % DMA_IRQ_CHAN_NR;

	/* Enable the queue-end interrupt in this channel's 4-bit field */
	irq_val = readl(sdev->base + DMA_IRQ_EN(irq_reg));
	irq_val |= DMA_IRQ_QUEUE << (irq_offset * DMA_IRQ_CHAN_WIDTH);
	writel(irq_val, sdev->base + DMA_IRQ_EN(irq_reg));

	writel(pchan->desc->p_lli, pchan->base + DMA_CHAN_LLI_ADDR);
	writel(DMA_CHAN_ENABLE_START, pchan->base + DMA_CHAN_ENABLE);

	sun6i_dma_dump_com_regs(sdev);
	sun6i_dma_dump_chan_regs(sdev, pchan);

	return 0;
}

static void sun6i_dma_tasklet(unsigned long data)
{
	struct sun6i_dma_dev *sdev = (struct sun6i_dma_dev *)data;
	struct sun6i_vchan *vchan;
	struct sun6i_pchan *pchan;
	unsigned int pchan_alloc = 0;
	unsigned int pchan_idx;

	list_for_each_entry(vchan, &sdev->slave.channels, vc.chan.device_node) {
		spin_lock_irq(&vchan->vc.lock);

		pchan = vchan->phy;

		if (pchan && pchan->done) {
			if (sun6i_dma_start_desc(vchan)) {
				/*
				 * No current txd associated with this channel
				 */
				dev_dbg(sdev->slave.dev, "pchan %u: free\n",
					pchan->idx);

				/* Mark this channel free */
				vchan->phy = NULL;
				pchan->vchan = NULL;
			}
		}
		spin_unlock_irq(&vchan->vc.lock);
	}

	spin_lock_irq(&sdev->lock);
	for (pchan_idx = 0; pchan_idx < NR_MAX_CHANNELS; pchan_idx++) {
		pchan = &sdev->pchans[pchan_idx];

		if (pchan->vchan || list_empty(&sdev->pending))
			continue;

		vchan = list_first_entry(&sdev->pending,
					 struct sun6i_vchan, node);

		/* Remove from pending channels */
		list_del_init(&vchan->node);
		pchan_alloc |= BIT(pchan_idx);

		/* Mark this channel allocated */
		pchan->vchan = vchan;
		vchan->phy = pchan;
		dev_dbg(sdev->slave.dev, "pchan %u: alloc vchan %p\n",
			pchan->idx, &vchan->vc);
	}
	spin_unlock_irq(&sdev->lock);

	for (pchan_idx = 0; pchan_idx < NR_MAX_CHANNELS; pchan_idx++) {
		if (!(pchan_alloc & BIT(pchan_idx)))
			continue;

		pchan = sdev->pchans + pchan_idx;
		vchan = pchan->vchan;
		if (vchan) {
			spin_lock_irq(&vchan->vc.lock);
			sun6i_dma_start_desc(vchan);
			spin_unlock_irq(&vchan->vc.lock);
		}
	}
}

static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
{
	struct sun6i_dma_dev *sdev = dev_id;
	struct sun6i_vchan *vchan;
	struct sun6i_pchan *pchan;
	int i, j, ret = IRQ_NONE;
	u32 status;

	for (i = 0; i < 2; i++) {
		status = readl(sdev->base + DMA_IRQ_STAT(i));
		if (!status)
			continue;

		dev_dbg(sdev->slave.dev, "DMA irq status %s: 0x%x\n",
			i ? "high" : "low", status);

		writel(status, sdev->base + DMA_IRQ_STAT(i));

		for (j = 0; (j < DMA_IRQ_CHAN_NR) && status; j++) {
			if (status & DMA_IRQ_QUEUE) {
				/* The second register covers channels 8-15 */
				pchan = sdev->pchans + i * DMA_IRQ_CHAN_NR + j;
				vchan = pchan->vchan;

				if (vchan) {
					spin_lock(&vchan->vc.lock);
					vchan_cookie_complete(&pchan->desc->vd);
					pchan->done = pchan->desc;
					spin_unlock(&vchan->vc.lock);
				}
			}

			status = status >> DMA_IRQ_CHAN_WIDTH;
		}

		if (!atomic_read(&sdev->tasklet_shutdown))
			tasklet_schedule(&sdev->task);
		ret = IRQ_HANDLED;
	}

	return ret;
}

static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun6i_dma_lli *v_lli;
	struct sun6i_desc *txd;
	dma_addr_t p_lli;
	int ret;

	dev_dbg(chan2dev(chan),
		"%s; chan: %d, dest: %pad, src: %pad, len: %zu. flags: 0x%08lx\n",
		__func__, vchan->vc.chan.chan_id, &dest, &src, len, flags);

	if (!len)
		return NULL;

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
	if (!v_lli) {
		dev_err(sdev->slave.dev, "Failed to alloc lli memory\n");
		kfree(txd);
		return NULL;
	}

	ret = sun6i_dma_cfg_lli(v_lli, src, dest, len, sconfig);
	if (ret)
		goto err_dma_free;

	v_lli->cfg |= DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
		DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
		DMA_CHAN_CFG_DST_LINEAR_MODE |
		DMA_CHAN_CFG_SRC_LINEAR_MODE;

	sun6i_dma_lli_add(NULL, v_lli, p_lli, txd);

	sun6i_dma_dump_lli(vchan, v_lli);

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_dma_free:
	dma_pool_free(sdev->pool, v_lli, p_lli);
	return NULL;
}

static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction dir,
		unsigned long flags, void *context)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun6i_dma_lli *v_lli, *prev = NULL;
	struct sun6i_desc *txd;
	struct scatterlist *sg;
	dma_addr_t p_lli;
	int i, ret;

	if (!sgl)
		return NULL;

	if (!is_slave_direction(dir)) {
		dev_err(chan2dev(chan), "Invalid DMA direction\n");
		return NULL;
	}

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
		if (!v_lli) {
			kfree(txd);
			return NULL;
		}

		if (dir == DMA_MEM_TO_DEV) {
			ret = sun6i_dma_cfg_lli(v_lli, sg_dma_address(sg),
						sconfig->dst_addr, sg_dma_len(sg),
						sconfig);
			if (ret)
				goto err_dma_free;

			v_lli->cfg |= DMA_CHAN_CFG_DST_IO_MODE |
				DMA_CHAN_CFG_SRC_LINEAR_MODE |
				DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
				DMA_CHAN_CFG_DST_DRQ(vchan->port);

			dev_dbg(chan2dev(chan),
				"%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n",
				__func__, vchan->vc.chan.chan_id,
				&sconfig->dst_addr, &sg_dma_address(sg),
				sg_dma_len(sg), flags);

		} else {
			ret = sun6i_dma_cfg_lli(v_lli, sconfig->src_addr,
						sg_dma_address(sg), sg_dma_len(sg),
						sconfig);
			if (ret)
				goto err_dma_free;

			v_lli->cfg |= DMA_CHAN_CFG_DST_LINEAR_MODE |
				DMA_CHAN_CFG_SRC_IO_MODE |
				DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
				DMA_CHAN_CFG_SRC_DRQ(vchan->port);

			dev_dbg(chan2dev(chan),
				"%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n",
				__func__, vchan->vc.chan.chan_id,
				&sg_dma_address(sg), &sconfig->src_addr,
				sg_dma_len(sg), flags);
		}

		prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd);
	}

	dev_dbg(chan2dev(chan), "First: %pad\n", &txd->p_lli);
	for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
		sun6i_dma_dump_lli(vchan, prev);

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_dma_free:
	dma_pool_free(sdev->pool, v_lli, p_lli);
	return NULL;
}

static int sun6i_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			     unsigned long arg)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct sun6i_pchan *pchan = vchan->phy;
	unsigned long flags;
	int ret = 0;

	switch (cmd) {
	case DMA_RESUME:
		dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);

		spin_lock_irqsave(&vchan->vc.lock, flags);

		if (pchan) {
			writel(DMA_CHAN_PAUSE_RESUME,
			       pchan->base + DMA_CHAN_PAUSE);
		} else if (!list_empty(&vchan->vc.desc_issued)) {
			spin_lock(&sdev->lock);
			list_add_tail(&vchan->node, &sdev->pending);
			spin_unlock(&sdev->lock);
		}

		spin_unlock_irqrestore(&vchan->vc.lock, flags);
		break;

	case DMA_PAUSE:
		dev_dbg(chan2dev(chan), "vchan %p: pause\n", &vchan->vc);

		if (pchan) {
			writel(DMA_CHAN_PAUSE_PAUSE,
			       pchan->base + DMA_CHAN_PAUSE);
		} else {
			spin_lock(&sdev->lock);
			list_del_init(&vchan->node);
			spin_unlock(&sdev->lock);
		}
		break;

	case DMA_TERMINATE_ALL:
		ret = sun6i_dma_terminate_all(vchan);
		break;
	case DMA_SLAVE_CONFIG:
		memcpy(&vchan->cfg, (void *)arg, sizeof(struct dma_slave_config));
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan,
					   dma_cookie_t cookie,
					   struct dma_tx_state *state)
{
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct sun6i_pchan *pchan = vchan->phy;
	struct sun6i_dma_lli *lli;
	struct virt_dma_desc *vd;
	struct sun6i_desc *txd;
	enum dma_status ret;
	unsigned long flags;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	vd = vchan_find_desc(&vchan->vc, cookie);

	if (vd) {
		/* Not started yet: sum up the whole descriptor chain */
		txd = to_sun6i_desc(&vd->tx);
		for (lli = txd->v_lli; lli != NULL; lli = lli->v_lli_next)
			bytes += lli->len;
	} else if (!pchan || !pchan->desc) {
		bytes = 0;
	} else {
		bytes = readl(pchan->base + DMA_CHAN_CUR_CNT);
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	dma_set_residue(state, bytes);

	return ret;
}

static void sun6i_dma_issue_pending(struct dma_chan *chan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	if (vchan_issue_pending(&vchan->vc)) {
		spin_lock(&sdev->lock);

		if (!vchan->phy && list_empty(&vchan->node)) {
			list_add_tail(&vchan->node, &sdev->pending);
			tasklet_schedule(&sdev->task);
			dev_dbg(chan2dev(chan), "vchan %p: issued\n",
				&vchan->vc);
		}

		spin_unlock(&sdev->lock);
	} else {
		dev_dbg(chan2dev(chan), "vchan %p: nothing to issue\n",
			&vchan->vc);
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);
}

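/*
 * For orientation, the standard dmaengine client sequence that ends up
 * in the callbacks above; this is generic API usage, not code from this
 * driver:
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len,
 *				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(desc);	// descriptor queued on the vchan
 *	dma_async_issue_pending(chan);		// -> sun6i_dma_issue_pending()
 */
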
static int sun6i_dma_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

static void sun6i_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&sdev->lock, flags);
	list_del_init(&vchan->node);
	spin_unlock_irqrestore(&sdev->lock, flags);

	vchan_free_chan_resources(&vchan->vc);
}

static struct dma_chan *sun6i_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct sun6i_dma_dev *sdev = ofdma->of_dma_data;
	struct sun6i_vchan *vchan;
	struct dma_chan *chan;
	u8 port = dma_spec->args[0];

	if (port > NR_MAX_REQUESTS)
		return NULL;

	chan = dma_get_any_slave_channel(&sdev->slave);
	if (!chan)
		return NULL;

	vchan = to_sun6i_vchan(chan);
	vchan->port = port;

	return chan;
}

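/*
 * The translation above consumes a single cell holding the port (DRQ)
 * number. An illustrative consumer node; the device and DRQ number are
 * made-up examples, not taken from a specific board or binding:
 *
 *	spi1: spi@01c69000 {
 *		...
 *		dmas = <&dma 24>, <&dma 24>;
 *		dma-names = "rx", "tx";
 *	};
 */
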
static inline void sun6i_kill_tasklet(struct sun6i_dma_dev *sdev)
{
	/* Disable all interrupts from DMA */
	writel(0, sdev->base + DMA_IRQ_EN(0));
	writel(0, sdev->base + DMA_IRQ_EN(1));

	/* Prevent spurious interrupts from scheduling the tasklet */
	atomic_inc(&sdev->tasklet_shutdown);

	/* Make sure all interrupts are handled */
	synchronize_irq(sdev->irq);

	/* Actually prevent the tasklet from being scheduled */
	tasklet_kill(&sdev->task);
}

static inline void sun6i_dma_free(struct sun6i_dma_dev *sdev)
{
	int i;

	for (i = 0; i < NR_MAX_VCHANS; i++) {
		struct sun6i_vchan *vchan = &sdev->vchans[i];

		list_del(&vchan->vc.chan.device_node);
		tasklet_kill(&vchan->vc.task);
	}
}

static int sun6i_dma_probe(struct platform_device *pdev)
{
	struct sun6i_dma_dev *sdc;
	struct resource *res;
	struct clk *mux, *pll6;
	int ret, i;

	sdc = devm_kzalloc(&pdev->dev, sizeof(*sdc), GFP_KERNEL);
	if (!sdc)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sdc->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(sdc->base))
		return PTR_ERR(sdc->base);

	sdc->irq = platform_get_irq(pdev, 0);
	if (sdc->irq < 0) {
		dev_err(&pdev->dev, "Cannot claim IRQ\n");
		return sdc->irq;
	}

	sdc->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(sdc->clk)) {
		dev_err(&pdev->dev, "No clock specified\n");
		return PTR_ERR(sdc->clk);
	}

	mux = clk_get(NULL, "ahb1_mux");
	if (IS_ERR(mux)) {
		dev_err(&pdev->dev, "Couldn't get AHB1 Mux\n");
		return PTR_ERR(mux);
	}

	pll6 = clk_get(NULL, "pll6");
	if (IS_ERR(pll6)) {
		dev_err(&pdev->dev, "Couldn't get PLL6\n");
		clk_put(mux);
		return PTR_ERR(pll6);
	}

	ret = clk_set_parent(mux, pll6);
	clk_put(pll6);
	clk_put(mux);

	if (ret) {
		dev_err(&pdev->dev, "Couldn't reparent AHB1 on PLL6\n");
		return ret;
	}

	sdc->rstc = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(sdc->rstc)) {
		dev_err(&pdev->dev, "No reset controller specified\n");
		return PTR_ERR(sdc->rstc);
	}

	sdc->pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
				     sizeof(struct sun6i_dma_lli), 4, 0);
	if (!sdc->pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, sdc);
	INIT_LIST_HEAD(&sdc->pending);
	spin_lock_init(&sdc->lock);

	dma_cap_set(DMA_PRIVATE, sdc->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, sdc->slave.cap_mask);
	dma_cap_set(DMA_SLAVE, sdc->slave.cap_mask);

	INIT_LIST_HEAD(&sdc->slave.channels);
	sdc->slave.device_alloc_chan_resources = sun6i_dma_alloc_chan_resources;
	sdc->slave.device_free_chan_resources = sun6i_dma_free_chan_resources;
	sdc->slave.device_tx_status = sun6i_dma_tx_status;
	sdc->slave.device_issue_pending = sun6i_dma_issue_pending;
	sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg;
	sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy;
	sdc->slave.device_control = sun6i_dma_control;
	sdc->slave.chancnt = NR_MAX_VCHANS;

	sdc->slave.dev = &pdev->dev;

	sdc->pchans = devm_kcalloc(&pdev->dev, NR_MAX_CHANNELS,
				   sizeof(struct sun6i_pchan), GFP_KERNEL);
	if (!sdc->pchans)
		return -ENOMEM;

	sdc->vchans = devm_kcalloc(&pdev->dev, NR_MAX_VCHANS,
				   sizeof(struct sun6i_vchan), GFP_KERNEL);
	if (!sdc->vchans)
		return -ENOMEM;

	tasklet_init(&sdc->task, sun6i_dma_tasklet, (unsigned long)sdc);

	for (i = 0; i < NR_MAX_CHANNELS; i++) {
		struct sun6i_pchan *pchan = &sdc->pchans[i];

		pchan->idx = i;
		pchan->base = sdc->base + 0x100 + i * 0x40;
	}

	for (i = 0; i < NR_MAX_VCHANS; i++) {
		struct sun6i_vchan *vchan = &sdc->vchans[i];

		INIT_LIST_HEAD(&vchan->node);
		vchan->vc.desc_free = sun6i_dma_free_desc;
		vchan_init(&vchan->vc, &sdc->slave);
	}

	ret = reset_control_deassert(sdc->rstc);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't deassert the device from reset\n");
		goto err_chan_free;
	}

	ret = clk_prepare_enable(sdc->clk);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't enable the clock\n");
		goto err_reset_assert;
	}

	ret = devm_request_irq(&pdev->dev, sdc->irq, sun6i_dma_interrupt, 0,
			       dev_name(&pdev->dev), sdc);
	if (ret) {
		dev_err(&pdev->dev, "Cannot request IRQ\n");
		goto err_clk_disable;
	}

	ret = dma_async_device_register(&sdc->slave);
	if (ret) {
		dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
		goto err_irq_disable;
	}

	ret = of_dma_controller_register(pdev->dev.of_node, sun6i_dma_of_xlate,
					 sdc);
	if (ret) {
		dev_err(&pdev->dev, "of_dma_controller_register failed\n");
		goto err_dma_unregister;
	}

	return 0;

err_dma_unregister:
	dma_async_device_unregister(&sdc->slave);
err_irq_disable:
	sun6i_kill_tasklet(sdc);
err_clk_disable:
	clk_disable_unprepare(sdc->clk);
err_reset_assert:
	reset_control_assert(sdc->rstc);
err_chan_free:
	sun6i_dma_free(sdc);
	return ret;
}

static int sun6i_dma_remove(struct platform_device *pdev)
{
	struct sun6i_dma_dev *sdc = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&sdc->slave);

	sun6i_kill_tasklet(sdc);

	clk_disable_unprepare(sdc->clk);
	reset_control_assert(sdc->rstc);

	sun6i_dma_free(sdc);

	return 0;
}

static const struct of_device_id sun6i_dma_match[] = {
	{ .compatible = "allwinner,sun6i-a31-dma" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sun6i_dma_match);

static struct platform_driver sun6i_dma_driver = {
	.probe		= sun6i_dma_probe,
	.remove		= sun6i_dma_remove,
	.driver = {
		.name		= "sun6i-dma",
		.of_match_table	= sun6i_dma_match,
	},
};
module_platform_driver(sun6i_dma_driver);

MODULE_DESCRIPTION("Allwinner A31 DMA Controller Driver");
MODULE_AUTHOR("Sugar <shuge@allwinnertech.com>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_LICENSE("GPL");