-rw-r--r--  drivers/dma/Kconfig                |   2 +
-rw-r--r--  drivers/dma/Makefile               |   1 +
-rw-r--r--  drivers/dma/dw-edma/Kconfig        |   9 +
-rw-r--r--  drivers/dma/dw-edma/Makefile       |   4 +
-rw-r--r--  drivers/dma/dw-edma/dw-edma-core.c | 936 +++++++++++++++++++++++++
-rw-r--r--  drivers/dma/dw-edma/dw-edma-core.h | 165 +++++
-rw-r--r--  include/linux/dma/edma.h           |  47 ++
7 files changed, 1164 insertions(+), 0 deletions(-)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index eaf78f4e07ce..76859aa2688c 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -665,6 +665,8 @@ source "drivers/dma/qcom/Kconfig"
 
 source "drivers/dma/dw/Kconfig"
 
+source "drivers/dma/dw-edma/Kconfig"
+
 source "drivers/dma/hsu/Kconfig"
 
 source "drivers/dma/sh/Kconfig"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 6126e1c3a875..5bddf6f8790f 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_DMA_SUN4I) += sun4i-dma.o
 obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
 obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac/
 obj-$(CONFIG_DW_DMAC_CORE) += dw/
+obj-$(CONFIG_DW_EDMA) += dw-edma/
 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
 obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o
diff --git a/drivers/dma/dw-edma/Kconfig b/drivers/dma/dw-edma/Kconfig
new file mode 100644
index 000000000000..3016bed63589
--- /dev/null
+++ b/drivers/dma/dw-edma/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config DW_EDMA
+	tristate "Synopsys DesignWare eDMA controller driver"
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Support the Synopsys DesignWare eDMA controller, normally
+	  implemented on endpoint SoCs.
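
For reference, a kernel configuration fragment that enables this driver as a
module might look like the following (CONFIG_DMADEVICES is the existing gate
for the drivers/dma menu; building as a module is just one possible choice):

CONFIG_DMADEVICES=y
CONFIG_DW_EDMA=m
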
diff --git a/drivers/dma/dw-edma/Makefile b/drivers/dma/dw-edma/Makefile
new file mode 100644
index 000000000000..322401089891
--- /dev/null
+++ b/drivers/dma/dw-edma/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_DW_EDMA) += dw-edma.o
+dw-edma-objs := dw-edma-core.o
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
new file mode 100644
index 000000000000..c9d032f49dc3
--- /dev/null
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -0,0 +1,936 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare eDMA core driver
+ *
+ * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/pm_runtime.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/dma/edma.h>
+#include <linux/pci.h>
+
+#include "dw-edma-core.h"
+#include "dw-edma-v0-core.h"	/* declares the dw_edma_v0_core_*() calls used below */
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
+static inline
+struct device *dchan2dev(struct dma_chan *dchan)
+{
+	return &dchan->dev->device;
+}
+
+static inline
+struct device *chan2dev(struct dw_edma_chan *chan)
+{
+	return &chan->vc.chan.dev->device;
+}
+
+static inline
+struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd)
+{
+	return container_of(vd, struct dw_edma_desc, vd);
+}
+
+static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk)
+{
+	struct dw_edma_burst *burst;
+
+	burst = kzalloc(sizeof(*burst), GFP_NOWAIT);
+	if (unlikely(!burst))
+		return NULL;
+
+	INIT_LIST_HEAD(&burst->list);
+	if (chunk->burst) {
+		/* Create and add new element into the linked list */
+		chunk->bursts_alloc++;
+		list_add_tail(&burst->list, &chunk->burst->list);
+	} else {
+		/* List head */
+		chunk->bursts_alloc = 0;
+		chunk->burst = burst;
+	}
+
+	return burst;
+}
+
+static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
+{
+	struct dw_edma_chan *chan = desc->chan;
+	struct dw_edma *dw = chan->chip->dw;
+	struct dw_edma_chunk *chunk;
+
+	chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
+	if (unlikely(!chunk))
+		return NULL;
+
+	INIT_LIST_HEAD(&chunk->list);
+	chunk->chan = chan;
+	/*
+	 * Toggle the change bit (CB) on each chunk; this is the mechanism
+	 * that informs the eDMA HW block that a new linked list is ready
+	 * to be consumed.
+	 *  - Odd chunks originate CB equal to 0
+	 *  - Even chunks originate CB equal to 1
+	 */
+	chunk->cb = !(desc->chunks_alloc % 2);
+	chunk->ll_region.paddr = dw->ll_region.paddr + chan->ll_off;
+	chunk->ll_region.vaddr = dw->ll_region.vaddr + chan->ll_off;
+
+	if (desc->chunk) {
+		/* Create and add new element into the linked list */
+		desc->chunks_alloc++;
+		list_add_tail(&chunk->list, &desc->chunk->list);
+		if (!dw_edma_alloc_burst(chunk)) {
+			kfree(chunk);
+			return NULL;
+		}
+	} else {
+		/* List head */
+		chunk->burst = NULL;
+		desc->chunks_alloc = 0;
+		desc->chunk = chunk;
+	}
+
+	return chunk;
+}
+
+static struct dw_edma_desc *dw_edma_alloc_desc(struct dw_edma_chan *chan)
+{
+	struct dw_edma_desc *desc;
+
+	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+	if (unlikely(!desc))
+		return NULL;
+
+	desc->chan = chan;
+	if (!dw_edma_alloc_chunk(desc)) {
+		kfree(desc);
+		return NULL;
+	}
+
+	return desc;
+}
+
+static void dw_edma_free_burst(struct dw_edma_chunk *chunk)
+{
+	struct dw_edma_burst *child, *_next;
+
+	/* Remove all the list elements */
+	list_for_each_entry_safe(child, _next, &chunk->burst->list, list) {
+		list_del(&child->list);
+		kfree(child);
+		chunk->bursts_alloc--;
+	}
+
+	/* Remove the list head (after the loop, child points at chunk->burst) */
+	kfree(child);
+	chunk->burst = NULL;
+}
+
+static void dw_edma_free_chunk(struct dw_edma_desc *desc)
+{
+	struct dw_edma_chunk *child, *_next;
+
+	if (!desc->chunk)
+		return;
+
+	/* Remove all the list elements */
+	list_for_each_entry_safe(child, _next, &desc->chunk->list, list) {
+		dw_edma_free_burst(child);
+		list_del(&child->list);
+		kfree(child);
+		desc->chunks_alloc--;
+	}
+
+	/* Remove the list head (after the loop, child points at desc->chunk) */
+	kfree(child);
+	desc->chunk = NULL;
+}
+
+static void dw_edma_free_desc(struct dw_edma_desc *desc)
+{
+	dw_edma_free_chunk(desc);
+	kfree(desc);
+}
+
+static void vchan_free_desc(struct virt_dma_desc *vdesc)
+{
+	dw_edma_free_desc(vd2dw_edma_desc(vdesc));
+}
+
+static void dw_edma_start_transfer(struct dw_edma_chan *chan)
+{
+	struct dw_edma_chunk *child;
+	struct dw_edma_desc *desc;
+	struct virt_dma_desc *vd;
+
+	vd = vchan_next_desc(&chan->vc);
+	if (!vd)
+		return;
+
+	desc = vd2dw_edma_desc(vd);
+	if (!desc)
+		return;
+
+	child = list_first_entry_or_null(&desc->chunk->list,
+					 struct dw_edma_chunk, list);
+	if (!child)
+		return;
+
+	dw_edma_v0_core_start(child, !desc->xfer_sz);
+	desc->xfer_sz += child->ll_region.sz;
+	dw_edma_free_burst(child);
+	list_del(&child->list);
+	kfree(child);
+	desc->chunks_alloc--;
+}
+
+static int dw_edma_device_config(struct dma_chan *dchan,
+				 struct dma_slave_config *config)
+{
+	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
+
+	memcpy(&chan->config, config, sizeof(*config));
+	chan->configured = true;
+
+	return 0;
+}
+
+static int dw_edma_device_pause(struct dma_chan *dchan)
+{
+	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
+	int err = 0;
+
+	if (!chan->configured)
+		err = -EPERM;
+	else if (chan->status != EDMA_ST_BUSY)
+		err = -EPERM;
+	else if (chan->request != EDMA_REQ_NONE)
+		err = -EPERM;
+	else
+		chan->request = EDMA_REQ_PAUSE;
+
+	return err;
+}
+
+static int dw_edma_device_resume(struct dma_chan *dchan)
+{
+	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
+	int err = 0;
+
+	if (!chan->configured) {
+		err = -EPERM;
+	} else if (chan->status != EDMA_ST_PAUSE) {
+		err = -EPERM;
+	} else if (chan->request != EDMA_REQ_NONE) {
+		err = -EPERM;
+	} else {
+		chan->status = EDMA_ST_BUSY;
+		dw_edma_start_transfer(chan);
+	}
+
+	return err;
+}
+
+static int dw_edma_device_terminate_all(struct dma_chan *dchan)
+{
+	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
+	int err = 0;
+	LIST_HEAD(head);
+
+	if (!chan->configured) {
+		/* Do nothing */
+	} else if (chan->status == EDMA_ST_PAUSE) {
+		chan->status = EDMA_ST_IDLE;
+		chan->configured = false;
+	} else if (chan->status == EDMA_ST_IDLE) {
+		chan->configured = false;
+	} else if (dw_edma_v0_core_ch_status(chan) == DMA_COMPLETE) {
+		/*
+		 * The channel is in a false BUSY state; it probably missed
+		 * an interrupt or the interrupt was lost
+		 */
+		chan->status = EDMA_ST_IDLE;
+		chan->configured = false;
+	} else if (chan->request > EDMA_REQ_PAUSE) {
+		err = -EPERM;
+	} else {
+		chan->request = EDMA_REQ_STOP;
+	}
+
+	return err;
+}
+
+static void dw_edma_device_issue_pending(struct dma_chan *dchan)
+{
+	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->vc.lock, flags);
+	if (chan->configured && chan->request == EDMA_REQ_NONE &&
+	    chan->status == EDMA_ST_IDLE && vchan_issue_pending(&chan->vc)) {
+		chan->status = EDMA_ST_BUSY;
+		dw_edma_start_transfer(chan);
+	}
+	spin_unlock_irqrestore(&chan->vc.lock, flags);
+}
+
+static enum dma_status
+dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
+			 struct dma_tx_state *txstate)
+{
+	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
+	struct dw_edma_desc *desc;
+	struct virt_dma_desc *vd;
+	unsigned long flags;
+	enum dma_status ret;
+	u32 residue = 0;
+
+	ret = dma_cookie_status(dchan, cookie, txstate);
+	if (ret == DMA_COMPLETE)
+		return ret;
+
+	if (ret == DMA_IN_PROGRESS && chan->status == EDMA_ST_PAUSE)
+		ret = DMA_PAUSED;
+
+	if (!txstate)
+		goto ret_residue;
+
+	spin_lock_irqsave(&chan->vc.lock, flags);
+	vd = vchan_find_desc(&chan->vc, cookie);
+	if (vd) {
+		desc = vd2dw_edma_desc(vd);
+		if (desc)
+			residue = desc->alloc_sz - desc->xfer_sz;
+	}
+	spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ret_residue:
+	dma_set_residue(txstate, residue);
+
+	return ret;
+}
+
+static struct dma_async_tx_descriptor *
+dw_edma_device_transfer(struct dw_edma_transfer *xfer)
+{
+	struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
+	enum dma_transfer_direction direction = xfer->direction;
+	phys_addr_t src_addr, dst_addr;
+	struct scatterlist *sg = NULL;
+	struct dw_edma_chunk *chunk;
+	struct dw_edma_burst *burst;
+	struct dw_edma_desc *desc;
+	u32 cnt;
+	int i;
+
+	if ((direction == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE) ||
+	    (direction == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ))
+		return NULL;
+
+	if (xfer->cyclic) {
+		if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt)
+			return NULL;
+	} else {
+		if (xfer->xfer.sg.len < 1)
+			return NULL;
+	}
+
+	if (!chan->configured)
+		return NULL;
+
+	desc = dw_edma_alloc_desc(chan);
+	if (unlikely(!desc))
+		goto err_alloc;
+
+	chunk = dw_edma_alloc_chunk(desc);
+	if (unlikely(!chunk))
+		goto err_alloc;
+
+	src_addr = chan->config.src_addr;
+	dst_addr = chan->config.dst_addr;
+
+	if (xfer->cyclic) {
+		cnt = xfer->xfer.cyclic.cnt;
+	} else {
+		cnt = xfer->xfer.sg.len;
+		sg = xfer->xfer.sg.sgl;
+	}
+
+	for (i = 0; i < cnt; i++) {
+		if (!xfer->cyclic && !sg)
+			break;
+
+		if (chunk->bursts_alloc == chan->ll_max) {
+			chunk = dw_edma_alloc_chunk(desc);
+			if (unlikely(!chunk))
+				goto err_alloc;
+		}
+
+		burst = dw_edma_alloc_burst(chunk);
+		if (unlikely(!burst))
+			goto err_alloc;
+
+		if (xfer->cyclic)
+			burst->sz = xfer->xfer.cyclic.len;
+		else
+			burst->sz = sg_dma_len(sg);
+
+		chunk->ll_region.sz += burst->sz;
+		desc->alloc_sz += burst->sz;
+
+		if (direction == DMA_DEV_TO_MEM) {
+			burst->sar = src_addr;
+			if (xfer->cyclic) {
+				burst->dar = xfer->xfer.cyclic.paddr;
+			} else {
+				burst->dar = sg_dma_address(sg);
+				/*
+				 * Unlike the typical assumption by other
+				 * drivers/IPs, the peripheral memory is not
+				 * a FIFO memory; in this case, it is linear
+				 * memory, which is why the source and
+				 * destination addresses are advanced by the
+				 * same amount (the data length)
+				 */
+				src_addr += sg_dma_len(sg);
+			}
+		} else {
+			burst->dar = dst_addr;
+			if (xfer->cyclic) {
+				burst->sar = xfer->xfer.cyclic.paddr;
+			} else {
+				burst->sar = sg_dma_address(sg);
+				/*
+				 * Unlike the typical assumption by other
+				 * drivers/IPs, the peripheral memory is not
+				 * a FIFO memory; in this case, it is linear
+				 * memory, which is why the source and
+				 * destination addresses are advanced by the
+				 * same amount (the data length)
+				 */
+				dst_addr += sg_dma_len(sg);
+			}
+		}
+
+		if (!xfer->cyclic)
+			sg = sg_next(sg);
+	}
+
+	return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags);
+
+err_alloc:
+	if (desc)
+		dw_edma_free_desc(desc);
+
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor *
+dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
+			     unsigned int len,
+			     enum dma_transfer_direction direction,
+			     unsigned long flags, void *context)
+{
+	struct dw_edma_transfer xfer;
+
+	xfer.dchan = dchan;
+	xfer.direction = direction;
+	xfer.xfer.sg.sgl = sgl;
+	xfer.xfer.sg.len = len;
+	xfer.flags = flags;
+	xfer.cyclic = false;
+
+	return dw_edma_device_transfer(&xfer);
+}
+
+static struct dma_async_tx_descriptor *
+dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr,
+			       size_t len, size_t count,
+			       enum dma_transfer_direction direction,
+			       unsigned long flags)
+{
+	struct dw_edma_transfer xfer;
+
+	xfer.dchan = dchan;
+	xfer.direction = direction;
+	xfer.xfer.cyclic.paddr = paddr;
+	xfer.xfer.cyclic.len = len;
+	xfer.xfer.cyclic.cnt = count;
+	xfer.flags = flags;
+	xfer.cyclic = true;
+
+	return dw_edma_device_transfer(&xfer);
+}
+
+static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
+{
+	struct dw_edma_desc *desc;
+	struct virt_dma_desc *vd;
+	unsigned long flags;
+
+	dw_edma_v0_core_clear_done_int(chan);
+
+	spin_lock_irqsave(&chan->vc.lock, flags);
+	vd = vchan_next_desc(&chan->vc);
+	if (vd) {
+		switch (chan->request) {
+		case EDMA_REQ_NONE:
+			desc = vd2dw_edma_desc(vd);
+			if (desc->chunks_alloc) {
+				chan->status = EDMA_ST_BUSY;
+				dw_edma_start_transfer(chan);
+			} else {
+				list_del(&vd->node);
+				vchan_cookie_complete(vd);
+				chan->status = EDMA_ST_IDLE;
+			}
+			break;
+
+		case EDMA_REQ_STOP:
+			list_del(&vd->node);
+			vchan_cookie_complete(vd);
+			chan->request = EDMA_REQ_NONE;
+			chan->status = EDMA_ST_IDLE;
+			break;
+
+		case EDMA_REQ_PAUSE:
+			chan->request = EDMA_REQ_NONE;
+			chan->status = EDMA_ST_PAUSE;
+			break;
+
+		default:
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&chan->vc.lock, flags);
+}
+
+static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
+{
+	struct virt_dma_desc *vd;
+	unsigned long flags;
+
+	dw_edma_v0_core_clear_abort_int(chan);
+
+	spin_lock_irqsave(&chan->vc.lock, flags);
+	vd = vchan_next_desc(&chan->vc);
+	if (vd) {
+		list_del(&vd->node);
+		vchan_cookie_complete(vd);
+	}
+	spin_unlock_irqrestore(&chan->vc.lock, flags);
+	chan->request = EDMA_REQ_NONE;
+	chan->status = EDMA_ST_IDLE;
+}
+
+static irqreturn_t dw_edma_interrupt(int irq, void *data, bool write)
+{
+	struct dw_edma_irq *dw_irq = data;
+	struct dw_edma *dw = dw_irq->dw;
+	unsigned long total, pos, val;
+	unsigned long off;
+	u32 mask;
+
+	if (write) {
+		total = dw->wr_ch_cnt;
+		off = 0;
+		mask = dw_irq->wr_mask;
+	} else {
+		total = dw->rd_ch_cnt;
+		off = dw->wr_ch_cnt;
+		mask = dw_irq->rd_mask;
+	}
+
+	val = dw_edma_v0_core_status_done_int(dw, write ?
+					      EDMA_DIR_WRITE :
+					      EDMA_DIR_READ);
+	val &= mask;
+	for_each_set_bit(pos, &val, total) {
+		struct dw_edma_chan *chan = &dw->chan[pos + off];
+
+		dw_edma_done_interrupt(chan);
+	}
+
+	val = dw_edma_v0_core_status_abort_int(dw, write ?
+					       EDMA_DIR_WRITE :
+					       EDMA_DIR_READ);
+	val &= mask;
+	for_each_set_bit(pos, &val, total) {
+		struct dw_edma_chan *chan = &dw->chan[pos + off];
+
+		dw_edma_abort_interrupt(chan);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data)
+{
+	return dw_edma_interrupt(irq, data, true);
+}
+
+static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data)
+{
+	return dw_edma_interrupt(irq, data, false);
+}
+
+static irqreturn_t dw_edma_interrupt_common(int irq, void *data)
+{
+	dw_edma_interrupt(irq, data, true);
+	dw_edma_interrupt(irq, data, false);
+
+	return IRQ_HANDLED;
+}
+
+static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)
+{
+	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
+
+	if (chan->status != EDMA_ST_IDLE)
+		return -EBUSY;
+
+	pm_runtime_get(chan->chip->dev);
+
+	return 0;
+}
+
+static void dw_edma_free_chan_resources(struct dma_chan *dchan)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
+	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
+	int ret;
+
+	while (time_before(jiffies, timeout)) {
+		ret = dw_edma_device_terminate_all(dchan);
+		if (!ret)
+			break;
+
+		if (time_after_eq(jiffies, timeout))
+			return;
+
+		cpu_relax();
+	}
+
+	pm_runtime_put(chan->chip->dev);
+}
+
+static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
+				 u32 wr_alloc, u32 rd_alloc)
+{
+	struct dw_edma_region *dt_region;
+	struct device *dev = chip->dev;
+	struct dw_edma *dw = chip->dw;
+	struct dw_edma_chan *chan;
+	size_t ll_chunk, dt_chunk;
+	struct dw_edma_irq *irq;
+	struct dma_device *dma;
+	u32 i, j, cnt, ch_cnt;
+	u32 alloc, off_alloc;
+	int err = 0;
+	u32 pos;
+
+	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
+	ll_chunk = dw->ll_region.sz;
+	dt_chunk = dw->dt_region.sz;
+
+	/* Calculate linked list chunk for each channel */
+	ll_chunk /= roundup_pow_of_two(ch_cnt);
+
+	/* Calculate data chunk for each channel */
+	dt_chunk /= roundup_pow_of_two(ch_cnt);
+
+	if (write) {
+		i = 0;
+		cnt = dw->wr_ch_cnt;
+		dma = &dw->wr_edma;
+		alloc = wr_alloc;
+		off_alloc = 0;
+	} else {
+		i = dw->wr_ch_cnt;
+		cnt = dw->rd_ch_cnt;
+		dma = &dw->rd_edma;
+		alloc = rd_alloc;
+		off_alloc = wr_alloc;
+	}
+
+	INIT_LIST_HEAD(&dma->channels);
+	for (j = 0; (alloc || dw->nr_irqs == 1) && j < cnt; j++, i++) {
+		chan = &dw->chan[i];
+
+		dt_region = devm_kzalloc(dev, sizeof(*dt_region), GFP_KERNEL);
+		if (!dt_region)
+			return -ENOMEM;
+
+		chan->vc.chan.private = dt_region;
+
+		chan->chip = chip;
+		chan->id = j;
+		chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ;
+		chan->configured = false;
+		chan->request = EDMA_REQ_NONE;
+		chan->status = EDMA_ST_IDLE;
+
+		chan->ll_off = (ll_chunk * i);
+		chan->ll_max = (ll_chunk / EDMA_LL_SZ) - 1;
+
+		chan->dt_off = (dt_chunk * i);
+
+		dev_vdbg(dev, "L. List:\tChannel %s[%u] off=0x%.8lx, max_cnt=%u\n",
+			 write ? "write" : "read", j,
+			 chan->ll_off, chan->ll_max);
+
+		if (dw->nr_irqs == 1)
+			pos = 0;
+		else
+			pos = off_alloc + (j % alloc);
+
+		irq = &dw->irq[pos];
+
+		if (write)
+			irq->wr_mask |= BIT(j);
+		else
+			irq->rd_mask |= BIT(j);
+
+		irq->dw = dw;
+		memcpy(&chan->msi, &irq->msi, sizeof(chan->msi));
+
+		dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n",
+			 write ? "write" : "read", j,
+			 chan->msi.address_hi, chan->msi.address_lo,
+			 chan->msi.data);
+
+		chan->vc.desc_free = vchan_free_desc;
+		vchan_init(&chan->vc, dma);
+
+		dt_region->paddr = dw->dt_region.paddr + chan->dt_off;
+		dt_region->vaddr = dw->dt_region.vaddr + chan->dt_off;
+		dt_region->sz = dt_chunk;
+
+		dev_vdbg(dev, "Data:\tChannel %s[%u] off=0x%.8lx\n",
+			 write ? "write" : "read", j, chan->dt_off);
+
+		dw_edma_v0_core_device_config(chan);
+	}
+
+	/* Set DMA channel capabilities */
+	dma_cap_zero(dma->cap_mask);
+	dma_cap_set(DMA_SLAVE, dma->cap_mask);
+	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
+	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
+	dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV);
+	dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+	dma->chancnt = cnt;
+
+	/* Set DMA channel callbacks */
+	dma->dev = chip->dev;
+	dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources;
+	dma->device_free_chan_resources = dw_edma_free_chan_resources;
+	dma->device_config = dw_edma_device_config;
+	dma->device_pause = dw_edma_device_pause;
+	dma->device_resume = dw_edma_device_resume;
+	dma->device_terminate_all = dw_edma_device_terminate_all;
+	dma->device_issue_pending = dw_edma_device_issue_pending;
+	dma->device_tx_status = dw_edma_device_tx_status;
+	dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg;
+	dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic;
+
+	dma_set_max_seg_size(dma->dev, U32_MAX);
+
+	/* Register DMA device */
+	err = dma_async_device_register(dma);
+
+	return err;
+}
+
+static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt)
+{
+	if (*nr_irqs && *alloc < cnt) {
+		(*alloc)++;
+		(*nr_irqs)--;
+	}
+}
+
+static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt)
+{
+	while (*mask * alloc < cnt)
+		(*mask)++;
+}
+
+static int dw_edma_irq_request(struct dw_edma_chip *chip,
+			       u32 *wr_alloc, u32 *rd_alloc)
+{
+	struct device *dev = chip->dev;
+	struct dw_edma *dw = chip->dw;
+	u32 wr_mask = 1;
+	u32 rd_mask = 1;
+	int i, err = 0;
+	u32 ch_cnt;
+
+	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
+
+	if (dw->nr_irqs < 1)
+		return -EINVAL;
+
+	if (dw->nr_irqs == 1) {
+		/* Common IRQ shared among all channels */
+		err = request_irq(pci_irq_vector(to_pci_dev(dev), 0),
+				  dw_edma_interrupt_common,
+				  IRQF_SHARED, dw->name, &dw->irq[0]);
+		if (err) {
+			dw->nr_irqs = 0;
+			return err;
+		}
+
+		get_cached_msi_msg(pci_irq_vector(to_pci_dev(dev), 0),
+				   &dw->irq[0].msi);
+	} else {
+		/* Distribute IRQs equally among all channels */
+		int tmp = dw->nr_irqs;
+
+		while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) {
+			dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt);
+			dw_edma_dec_irq_alloc(&tmp, rd_alloc, dw->rd_ch_cnt);
+		}
+
+		dw_edma_add_irq_mask(&wr_mask, *wr_alloc, dw->wr_ch_cnt);
+		dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);
+
+		for (i = 0; i < (*wr_alloc + *rd_alloc); i++) {
+			err = request_irq(pci_irq_vector(to_pci_dev(dev), i),
+					  i < *wr_alloc ?
+					  dw_edma_interrupt_write :
+					  dw_edma_interrupt_read,
+					  IRQF_SHARED, dw->name,
+					  &dw->irq[i]);
+			if (err) {
+				dw->nr_irqs = i;
+				return err;
+			}
+
+			get_cached_msi_msg(pci_irq_vector(to_pci_dev(dev), i),
+					   &dw->irq[i].msi);
+		}
+
+		dw->nr_irqs = i;
+	}
+
+	return err;
+}
+
+int dw_edma_probe(struct dw_edma_chip *chip)
+{
+	struct device *dev = chip->dev;
+	struct dw_edma *dw = chip->dw;
+	u32 wr_alloc = 0;
+	u32 rd_alloc = 0;
+	int i, err;
+
+	raw_spin_lock_init(&dw->lock);
+
+	/* Find out how many write channels are supported by hardware */
+	dw->wr_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE);
+	if (!dw->wr_ch_cnt)
+		return -EINVAL;
+
+	/* Find out how many read channels are supported by hardware */
+	dw->rd_ch_cnt = dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ);
+	if (!dw->rd_ch_cnt)
+		return -EINVAL;
+
+	dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n",
+		 dw->wr_ch_cnt, dw->rd_ch_cnt);
+
+	/* Allocate channels */
+	dw->chan = devm_kcalloc(dev, dw->wr_ch_cnt + dw->rd_ch_cnt,
+				sizeof(*dw->chan), GFP_KERNEL);
+	if (!dw->chan)
+		return -ENOMEM;
+
+	snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%d", chip->id);
+
+	/* Disable eDMA, only to establish the ideal initial conditions */
+	dw_edma_v0_core_off(dw);
+
+	/* Request IRQs */
+	err = dw_edma_irq_request(chip, &wr_alloc, &rd_alloc);
+	if (err)
+		return err;
+
+	/* Set up write channels */
+	err = dw_edma_channel_setup(chip, true, wr_alloc, rd_alloc);
+	if (err)
+		goto err_irq_free;
+
+	/* Set up read channels */
+	err = dw_edma_channel_setup(chip, false, wr_alloc, rd_alloc);
+	if (err)
+		goto err_irq_free;
+
+	/* Power management */
+	pm_runtime_enable(dev);
+
+	/* Turn debugfs on */
+	dw_edma_v0_core_debugfs_on(chip);
+
+	return 0;
+
+err_irq_free:
+	for (i = (dw->nr_irqs - 1); i >= 0; i--)
+		free_irq(pci_irq_vector(to_pci_dev(dev), i), &dw->irq[i]);
+
+	dw->nr_irqs = 0;
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(dw_edma_probe);
+
+int dw_edma_remove(struct dw_edma_chip *chip)
+{
+	struct dw_edma_chan *chan, *_chan;
+	struct device *dev = chip->dev;
+	struct dw_edma *dw = chip->dw;
+	int i;
+
+	/* Disable eDMA */
+	dw_edma_v0_core_off(dw);
+
+	/* Free IRQs */
+	for (i = (dw->nr_irqs - 1); i >= 0; i--)
+		free_irq(pci_irq_vector(to_pci_dev(dev), i), &dw->irq[i]);
+
+	/* Power management */
+	pm_runtime_disable(dev);
+
+	list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
+				 vc.chan.device_node) {
+		list_del(&chan->vc.chan.device_node);
+		tasklet_kill(&chan->vc.task);
+	}
+
+	list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
+				 vc.chan.device_node) {
+		list_del(&chan->vc.chan.device_node);
+		tasklet_kill(&chan->vc.task);
+	}
+
+	/* Deregister eDMA device */
+	dma_async_device_unregister(&dw->wr_edma);
+	dma_async_device_unregister(&dw->rd_edma);
+
+	/* Turn debugfs off */
+	dw_edma_v0_core_debugfs_off();
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dw_edma_remove);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Synopsys DesignWare eDMA controller core driver");
+MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>");
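
The callbacks wired up in dw_edma_channel_setup() are reached through the
generic dmaengine client API. Below is a minimal sketch of that flow from a
hypothetical client; the channel, buffer, and device address are assumptions
for illustration, while the dmaengine calls are the standard kernel API
(dmaengine_prep_slave_single() ends up in dw_edma_device_prep_slave_sg() via
a one-entry scatterlist). Note the direction check in
dw_edma_device_transfer() above: in this initial version a DMA_MEM_TO_DEV
transfer is accepted on a read channel (and DMA_DEV_TO_MEM on a write
channel), i.e. the direction is seen from the remote initiator's point of
view.

#include <linux/completion.h>
#include <linux/dmaengine.h>

static void example_xfer_done(void *arg)
{
	complete(arg);	/* wake up the waiter below */
}

static int example_xfer(struct dma_chan *chan, dma_addr_t buf, size_t len,
			dma_addr_t dev_addr)
{
	struct dma_slave_config cfg = {
		.dst_addr = dev_addr,	/* hypothetical device address */
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	struct dma_async_tx_descriptor *txd;
	DECLARE_COMPLETION_ONSTACK(done);
	dma_cookie_t cookie;
	int ret;

	/* Lands in dw_edma_device_config() */
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	/* Lands in dw_edma_device_prep_slave_sg() */
	txd = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					  DMA_PREP_INTERRUPT);
	if (!txd)
		return -EIO;

	txd->callback = example_xfer_done;
	txd->callback_param = &done;
	cookie = dmaengine_submit(txd);

	/* Lands in dw_edma_device_issue_pending() */
	dma_async_issue_pending(chan);
	wait_for_completion(&done);

	/* Lands in dw_edma_device_tx_status() */
	return dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
	       DMA_COMPLETE ? 0 : -EIO;
}
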
diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h
new file mode 100644
index 000000000000..b6cc90cbc9dc
--- /dev/null
+++ b/drivers/dma/dw-edma/dw-edma-core.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare eDMA core driver
+ *
+ * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+ */
+
+#ifndef _DW_EDMA_CORE_H
+#define _DW_EDMA_CORE_H
+
+#include <linux/msi.h>
+#include <linux/dma/edma.h>
+
+#include "../virt-dma.h"
+
+#define EDMA_LL_SZ		24
+
+enum dw_edma_dir {
+	EDMA_DIR_WRITE = 0,
+	EDMA_DIR_READ
+};
+
+enum dw_edma_mode {
+	EDMA_MODE_LEGACY = 0,
+	EDMA_MODE_UNROLL
+};
+
+enum dw_edma_request {
+	EDMA_REQ_NONE = 0,
+	EDMA_REQ_STOP,
+	EDMA_REQ_PAUSE
+};
+
+enum dw_edma_status {
+	EDMA_ST_IDLE = 0,
+	EDMA_ST_PAUSE,
+	EDMA_ST_BUSY
+};
+
+struct dw_edma_chan;
+struct dw_edma_chunk;
+
+struct dw_edma_burst {
+	struct list_head	list;
+	u64			sar;
+	u64			dar;
+	u32			sz;
+};
+
+struct dw_edma_region {
+	phys_addr_t		paddr;
+	dma_addr_t		vaddr;
+	size_t			sz;
+};
+
+struct dw_edma_chunk {
+	struct list_head	list;
+	struct dw_edma_chan	*chan;
+	struct dw_edma_burst	*burst;
+
+	u32			bursts_alloc;
+
+	u8			cb;
+	struct dw_edma_region	ll_region;	/* Linked list */
+};
+
+struct dw_edma_desc {
+	struct virt_dma_desc	vd;
+	struct dw_edma_chan	*chan;
+	struct dw_edma_chunk	*chunk;
+
+	u32			chunks_alloc;
+
+	u32			alloc_sz;
+	u32			xfer_sz;
+};
+
+struct dw_edma_chan {
+	struct virt_dma_chan	vc;
+	struct dw_edma_chip	*chip;
+	int			id;
+	enum dw_edma_dir	dir;
+
+	off_t			ll_off;
+	u32			ll_max;
+
+	off_t			dt_off;
+
+	struct msi_msg		msi;
+
+	enum dw_edma_request	request;
+	enum dw_edma_status	status;
+	u8			configured;
+
+	struct dma_slave_config	config;
+};
+
+struct dw_edma_irq {
+	struct msi_msg		msi;
+	u32			wr_mask;
+	u32			rd_mask;
+	struct dw_edma		*dw;
+};
+
+struct dw_edma {
+	char			name[20];
+
+	struct dma_device	wr_edma;
+	u16			wr_ch_cnt;
+
+	struct dma_device	rd_edma;
+	u16			rd_ch_cnt;
+
+	struct dw_edma_region	rg_region;	/* Registers */
+	struct dw_edma_region	ll_region;	/* Linked list */
+	struct dw_edma_region	dt_region;	/* Data */
+
+	struct dw_edma_irq	*irq;
+	int			nr_irqs;
+
+	u32			version;
+	enum dw_edma_mode	mode;
+
+	struct dw_edma_chan	*chan;
+	const struct dw_edma_core_ops	*ops;
+
+	raw_spinlock_t		lock;		/* Only for legacy */
+};
+
+struct dw_edma_sg {
+	struct scatterlist	*sgl;
+	unsigned int		len;
+};
+
+struct dw_edma_cyclic {
+	dma_addr_t		paddr;
+	size_t			len;
+	size_t			cnt;
+};
+
+struct dw_edma_transfer {
+	struct dma_chan		*dchan;
+	union dw_edma_xfer {
+		struct dw_edma_sg	sg;
+		struct dw_edma_cyclic	cyclic;
+	} xfer;
+	enum dma_transfer_direction	direction;
+	unsigned long		flags;
+	bool			cyclic;
+};
+
+static inline
+struct dw_edma_chan *vc2dw_edma_chan(struct virt_dma_chan *vc)
+{
+	return container_of(vc, struct dw_edma_chan, vc);
+}
+
+static inline
+struct dw_edma_chan *dchan2dw_edma_chan(struct dma_chan *dchan)
+{
+	return vc2dw_edma_chan(to_virt_chan(dchan));
+}
+
+#endif /* _DW_EDMA_CORE_H */
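
Returning to the IRQ handling in dw-edma-core.c: the vector distribution done
by dw_edma_dec_irq_alloc() and dw_edma_add_irq_mask() can be hard to follow
from the code alone. The standalone userspace sketch below mirrors the same
arithmetic with hypothetical counts (4 MSI vectors, 8 write plus 8 read
channels); it is an illustration, not driver code.

#include <stdio.h>

/* Mirrors dw_edma_dec_irq_alloc(): hand one more vector to a side while
 * vectors remain and that side still has fewer vectors than channels.
 */
static void dec_irq_alloc(int *nr_irqs, unsigned int *alloc, unsigned int cnt)
{
	if (*nr_irqs && *alloc < cnt) {
		(*alloc)++;
		(*nr_irqs)--;
	}
}

/* Mirrors dw_edma_add_irq_mask(): compute the channels served per vector,
 * i.e. the smallest value with mask * alloc >= cnt.
 */
static void add_irq_mask(unsigned int *mask, unsigned int alloc,
			 unsigned int cnt)
{
	while (*mask * alloc < cnt)
		(*mask)++;
}

int main(void)
{
	int nr_irqs = 4;				/* hypothetical MSI vectors */
	unsigned int wr_ch_cnt = 8, rd_ch_cnt = 8;	/* hypothetical channels */
	unsigned int wr_alloc = 0, rd_alloc = 0;
	unsigned int wr_mask = 1, rd_mask = 1;

	while (nr_irqs && (wr_alloc + rd_alloc) < wr_ch_cnt + rd_ch_cnt) {
		dec_irq_alloc(&nr_irqs, &wr_alloc, wr_ch_cnt);
		dec_irq_alloc(&nr_irqs, &rd_alloc, rd_ch_cnt);
	}

	add_irq_mask(&wr_mask, wr_alloc, wr_ch_cnt);
	add_irq_mask(&rd_mask, rd_alloc, rd_ch_cnt);

	/* Prints: write: 2 vectors x 4 channels each (and the same for read) */
	printf("write: %u vectors x %u channels each\n", wr_alloc, wr_mask);
	printf("read: %u vectors x %u channels each\n", rd_alloc, rd_mask);
	return 0;
}
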
diff --git a/include/linux/dma/edma.h b/include/linux/dma/edma.h
new file mode 100644
index 000000000000..cab6e18773da
--- /dev/null
+++ b/include/linux/dma/edma.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare eDMA core driver
+ *
+ * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
+ */
+
+#ifndef _DW_EDMA_H
+#define _DW_EDMA_H
+
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+
+struct dw_edma;
+
+/**
+ * struct dw_edma_chip - representation of DesignWare eDMA controller hardware
+ * @dev:	struct device of the eDMA controller
+ * @id:		instance ID
+ * @irq:	IRQ line
+ * @dw:		struct dw_edma that is filled by dw_edma_probe()
+ */
+struct dw_edma_chip {
+	struct device		*dev;
+	int			id;
+	int			irq;
+	struct dw_edma		*dw;
+};
+
+/* Export to the platform drivers */
+#if IS_ENABLED(CONFIG_DW_EDMA)
+int dw_edma_probe(struct dw_edma_chip *chip);
+int dw_edma_remove(struct dw_edma_chip *chip);
+#else
+static inline int dw_edma_probe(struct dw_edma_chip *chip)
+{
+	return -ENODEV;
+}
+
+static inline int dw_edma_remove(struct dw_edma_chip *chip)
+{
+	return 0;
+}
+#endif /* CONFIG_DW_EDMA */
+
+#endif /* _DW_EDMA_H */
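
This header is the interface a platform or PCIe glue driver consumes. No glue
driver is part of this patch, so the following is only a rough sketch of how
one might fill in struct dw_edma_chip before calling dw_edma_probe(): the BAR
assignments, region sizes, MSI vector count, and mode are all invented for
illustration; only the two exported calls and the struct fields come from
this patch.

#include <linux/device.h>
#include <linux/dma/edma.h>
#include <linux/pci.h>
#include <linux/sizes.h>

#include "dw-edma-core.h"	/* struct dw_edma and its region fields */

static int example_glue_probe(struct pci_dev *pdev,
			      const struct pci_device_id *id)
{
	struct dw_edma_chip *chip;
	struct dw_edma *dw;
	int nr_irqs, err;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	/* Hypothetical BAR map: BAR 0 = registers, BAR 2 = ll/data space */
	err = pcim_iomap_regions(pdev, BIT(0) | BIT(2), "dw-edma-glue");
	if (err)
		return err;

	nr_irqs = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_MSI);
	if (nr_irqs < 1)
		return nr_irqs < 0 ? nr_irqs : -ENOSPC;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
	if (!chip || !dw)
		return -ENOMEM;

	chip->dev = &pdev->dev;
	chip->id = pdev->devfn;
	chip->irq = pdev->irq;
	chip->dw = dw;

	dw->mode = EDMA_MODE_UNROLL;
	dw->nr_irqs = nr_irqs;
	dw->irq = devm_kcalloc(&pdev->dev, nr_irqs, sizeof(*dw->irq),
			       GFP_KERNEL);
	if (!dw->irq)
		return -ENOMEM;

	/* vaddr is dma_addr_t in this patch, hence the cast of the iomap */
	dw->rg_region.vaddr = (dma_addr_t)(uintptr_t)pcim_iomap_table(pdev)[0];
	dw->rg_region.paddr = pci_resource_start(pdev, 0);
	dw->rg_region.sz = pci_resource_len(pdev, 0);

	dw->ll_region.vaddr = (dma_addr_t)(uintptr_t)pcim_iomap_table(pdev)[2];
	dw->ll_region.paddr = pci_resource_start(pdev, 2);
	dw->ll_region.sz = SZ_1M;	/* made-up linked-list space */

	dw->dt_region.vaddr = dw->ll_region.vaddr + SZ_1M;
	dw->dt_region.paddr = dw->ll_region.paddr + SZ_1M;
	dw->dt_region.sz = SZ_1M;	/* made-up data space */

	return dw_edma_probe(chip);
}
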