author		Peter Griffin <peter.griffin@linaro.org>	2016-10-18 05:39:11 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2016-10-18 10:42:06 -0400
commit		6b4cd727eaf15ed225b9a3a96ec1d64761ee728a (patch)
tree		60e98605d16aedff9cb754d2b9b29e4f835bd511
parent		812ab065ea95f42a51fee4d40e436468199bb87d (diff)
dmaengine: st_fdma: Add STMicroelectronics FDMA engine driver support
This patch adds support for the Flexible Direct Memory Access (FDMA) core driver. The FDMA is a slim core CPU with dedicated firmware. It is a general purpose DMA controller capable of supporting 16 independent DMA channels. Data moves may be from memory to memory or between memory and paced, latency-critical real-time targets, and it is found on all STi based chipsets.

Signed-off-by: Ludovic Barre <ludovic.barre@st.com>
Signed-off-by: Peter Griffin <peter.griffin@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
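For orientation, a client driver would drive one of the paced (request-line) channels through the generic dmaengine slave API once this controller is registered. The sketch below is illustrative only and not part of the patch: the device, the "tx" channel name and the FIFO address are hypothetical, and error handling is minimal.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* Hypothetical client: push one mapped buffer to a peripheral FIFO. */
static int example_start_tx(struct device *dev, dma_addr_t buf, size_t len,
			    dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = fifo_addr,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 4,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;

	/* Resolved via the client's "dmas"/"dma-names" DT properties */
	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* Lands in st_fdma_slave_config(), consumed by config_reqctrl() */
	dmaengine_slave_config(chan, &cfg);

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);	/* triggers st_fdma_issue_pending() */
	return 0;
}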
-rw-r--r--	drivers/dma/Kconfig	|  13
-rw-r--r--	drivers/dma/Makefile	|   1
-rw-r--r--	drivers/dma/st_fdma.c	| 899
3 files changed, 913 insertions(+), 0 deletions(-)
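The driver also advertises DMA_MEMCPY, for which no request line is needed; a free-running channel is taken straight from the capability mask. Again a hedged, illustrative sketch: dmaengine_prep_dma_memcpy() is the generic wrapper from <linux/dmaengine.h>, and on kernels of this patch's vintage the chan->device->device_prep_dma_memcpy hook may need to be called directly instead.

#include <linux/dmaengine.h>

/* Hypothetical: offload a memory-to-memory copy to a free-running channel. */
static int example_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	/* Serviced by st_fdma_prep_dma_memcpy(): one FREE_RUN hw node */
	desc = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					 DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	/* Busy-wait for completion; a real client would use a callback. */
	dma_sync_wait(chan, cookie);
	dma_release_channel(chan);
	return 0;
}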
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index af63a6bcf564..661f21791fee 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -435,6 +435,19 @@ config STE_DMA40
 	help
 	  Support for ST-Ericsson DMA40 controller
 
+config ST_FDMA
+	tristate "ST FDMA dmaengine support"
+	depends on ARCH_STI
+	select ST_SLIM_REMOTEPROC
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for ST FDMA controller.
+	  It supports 16 independent DMA channels, accepts up to 32 DMA requests
+
+	  Say Y here if you have such a chipset.
+	  If unsure, say N.
+
 config STM32_DMA
 	bool "STMicroelectronics STM32 DMA support"
 	depends on ARCH_STM32 || COMPILE_TEST
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index e4dc9cac7ee8..a4fa3360e609 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -67,6 +67,7 @@ obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o
 obj-$(CONFIG_TI_EDMA) += edma.o
 obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
 obj-$(CONFIG_ZX_DMA) += zx296702_dma.o
+obj-$(CONFIG_ST_FDMA) += st_fdma.o
 
 obj-y += qcom/
 obj-y += xilinx/
diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c
new file mode 100644
index 000000000000..515e1d4c43e8
--- /dev/null
+++ b/drivers/dma/st_fdma.c
@@ -0,0 +1,899 @@
+/*
+ * DMA driver for STMicroelectronics STi FDMA controller
+ *
+ * Copyright (C) 2014 STMicroelectronics
+ *
+ * Author: Ludovic Barre <Ludovic.barre@st.com>
+ *	   Peter Griffin <peter.griffin@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/remoteproc.h>
+
+#include "st_fdma.h"
+
+static inline struct st_fdma_chan *to_st_fdma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct st_fdma_chan, vchan.chan);
+}
+
+static struct st_fdma_desc *to_st_fdma_desc(struct virt_dma_desc *vd)
+{
+	return container_of(vd, struct st_fdma_desc, vdesc);
+}
+
+static int st_fdma_dreq_get(struct st_fdma_chan *fchan)
+{
+	struct st_fdma_dev *fdev = fchan->fdev;
+	u32 req_line_cfg = fchan->cfg.req_line;
+	u32 dreq_line;
+	int try = 0;
+
+	/*
+	 * dreq_mask is shared for n channels of fdma, so all accesses must be
+	 * atomic. if the dreq_mask is changed between ffz and set_bit,
+	 * we retry
+	 */
+	do {
+		if (fdev->dreq_mask == ~0L) {
+			dev_err(fdev->dev, "No req lines available\n");
+			return -EINVAL;
+		}
+
+		if (try || req_line_cfg >= ST_FDMA_NR_DREQS) {
+			dev_err(fdev->dev, "Invalid or used req line\n");
+			return -EINVAL;
+		} else {
+			dreq_line = req_line_cfg;
+		}
+
+		try++;
+	} while (test_and_set_bit(dreq_line, &fdev->dreq_mask));
+
+	dev_dbg(fdev->dev, "get dreq_line:%d mask:%#lx\n",
+		dreq_line, fdev->dreq_mask);
+
+	return dreq_line;
+}
+
+static void st_fdma_dreq_put(struct st_fdma_chan *fchan)
+{
+	struct st_fdma_dev *fdev = fchan->fdev;
+
+	dev_dbg(fdev->dev, "put dreq_line:%#x\n", fchan->dreq_line);
+	clear_bit(fchan->dreq_line, &fdev->dreq_mask);
+}
+
+static void st_fdma_xfer_desc(struct st_fdma_chan *fchan)
+{
+	struct virt_dma_desc *vdesc;
+	unsigned long nbytes, ch_cmd, cmd;
+
+	vdesc = vchan_next_desc(&fchan->vchan);
+	if (!vdesc)
+		return;
+
+	fchan->fdesc = to_st_fdma_desc(vdesc);
+	nbytes = fchan->fdesc->node[0].desc->nbytes;
+	cmd = FDMA_CMD_START(fchan->vchan.chan.chan_id);
+	ch_cmd = fchan->fdesc->node[0].pdesc | FDMA_CH_CMD_STA_START;
+
+	/* start the channel for the descriptor */
+	fnode_write(fchan, nbytes, FDMA_CNTN_OFST);
+	fchan_write(fchan, ch_cmd, FDMA_CH_CMD_OFST);
+	writel(cmd,
+		fchan->fdev->slim_rproc->peri + FDMA_CMD_SET_OFST);
+
+	dev_dbg(fchan->fdev->dev, "start chan:%d\n", fchan->vchan.chan.chan_id);
+}
+
+static void st_fdma_ch_sta_update(struct st_fdma_chan *fchan,
+				  unsigned long int_sta)
+{
+	unsigned long ch_sta, ch_err;
+	int ch_id = fchan->vchan.chan.chan_id;
+	struct st_fdma_dev *fdev = fchan->fdev;
+
+	ch_sta = fchan_read(fchan, FDMA_CH_CMD_OFST);
+	ch_err = ch_sta & FDMA_CH_CMD_ERR_MASK;
+	ch_sta &= FDMA_CH_CMD_STA_MASK;
+
+	if (int_sta & FDMA_INT_STA_ERR) {
+		dev_warn(fdev->dev, "chan:%d, error:%ld\n", ch_id, ch_err);
+		fchan->status = DMA_ERROR;
+		return;
+	}
+
+	switch (ch_sta) {
+	case FDMA_CH_CMD_STA_PAUSED:
+		fchan->status = DMA_PAUSED;
+		break;
+
+	case FDMA_CH_CMD_STA_RUNNING:
+		fchan->status = DMA_IN_PROGRESS;
+		break;
+	}
+}
+
+static irqreturn_t st_fdma_irq_handler(int irq, void *dev_id)
+{
+	struct st_fdma_dev *fdev = dev_id;
+	irqreturn_t ret = IRQ_NONE;
+	struct st_fdma_chan *fchan = &fdev->chans[0];
+	unsigned long int_sta, clr;
+
+	int_sta = fdma_read(fdev, FDMA_INT_STA_OFST);
+	clr = int_sta;
+
+	for (; int_sta != 0; int_sta >>= 2, fchan++) {
+		if (!(int_sta & (FDMA_INT_STA_CH | FDMA_INT_STA_ERR)))
+			continue;
+
+		spin_lock(&fchan->vchan.lock);
+		st_fdma_ch_sta_update(fchan, int_sta);
+
+		if (fchan->fdesc) {
+			if (!fchan->fdesc->iscyclic) {
+				list_del(&fchan->fdesc->vdesc.node);
+				vchan_cookie_complete(&fchan->fdesc->vdesc);
+				fchan->fdesc = NULL;
+				fchan->status = DMA_COMPLETE;
+			} else {
+				vchan_cyclic_callback(&fchan->fdesc->vdesc);
+			}
+
+			/* Start the next descriptor (if available) */
+			if (!fchan->fdesc)
+				st_fdma_xfer_desc(fchan);
+		}
+
+		spin_unlock(&fchan->vchan.lock);
+		ret = IRQ_HANDLED;
+	}
+
+	fdma_write(fdev, clr, FDMA_INT_CLR_OFST);
+
+	return ret;
+}
+
+static struct dma_chan *st_fdma_of_xlate(struct of_phandle_args *dma_spec,
+					 struct of_dma *ofdma)
+{
+	struct st_fdma_dev *fdev = ofdma->of_dma_data;
+	struct dma_chan *chan;
+	struct st_fdma_chan *fchan;
+	int ret;
+
+	if (dma_spec->args_count < 1)
+		return ERR_PTR(-EINVAL);
+
+	if (fdev->dma_device.dev->of_node != dma_spec->np)
+		return ERR_PTR(-EINVAL);
+
+	ret = rproc_boot(fdev->slim_rproc->rproc);
+	if (ret == -ENOENT)
+		return ERR_PTR(-EPROBE_DEFER);
+	else if (ret)
+		return ERR_PTR(ret);
+
+	chan = dma_get_any_slave_channel(&fdev->dma_device);
+	if (!chan)
+		goto err_chan;
+
+	fchan = to_st_fdma_chan(chan);
+
+	fchan->cfg.of_node = dma_spec->np;
+	fchan->cfg.req_line = dma_spec->args[0];
+	fchan->cfg.req_ctrl = 0;
+	fchan->cfg.type = ST_FDMA_TYPE_FREE_RUN;
+
+	if (dma_spec->args_count > 1)
+		fchan->cfg.req_ctrl = dma_spec->args[1]
+			& FDMA_REQ_CTRL_CFG_MASK;
+
+	if (dma_spec->args_count > 2)
+		fchan->cfg.type = dma_spec->args[2];
+
+	if (fchan->cfg.type == ST_FDMA_TYPE_FREE_RUN) {
+		fchan->dreq_line = 0;
+	} else {
+		fchan->dreq_line = st_fdma_dreq_get(fchan);
+		if (IS_ERR_VALUE(fchan->dreq_line)) {
+			chan = ERR_PTR(fchan->dreq_line);
+			goto err_chan;
+		}
+	}
+
+	dev_dbg(fdev->dev, "xlate req_line:%d type:%d req_ctrl:%#lx\n",
+		fchan->cfg.req_line, fchan->cfg.type, fchan->cfg.req_ctrl);
+
+	return chan;
+
+err_chan:
+	rproc_shutdown(fdev->slim_rproc->rproc);
+	return chan;
+
+}
+
+static void st_fdma_free_desc(struct virt_dma_desc *vdesc)
+{
+	struct st_fdma_desc *fdesc;
+	int i;
+
+	fdesc = to_st_fdma_desc(vdesc);
+	for (i = 0; i < fdesc->n_nodes; i++)
+		dma_pool_free(fdesc->fchan->node_pool, fdesc->node[i].desc,
+			      fdesc->node[i].pdesc);
+	kfree(fdesc);
+}
+
+static struct st_fdma_desc *st_fdma_alloc_desc(struct st_fdma_chan *fchan,
+					       int sg_len)
+{
+	struct st_fdma_desc *fdesc;
+	int i;
+
+	fdesc = kzalloc(sizeof(*fdesc) +
+			sizeof(struct st_fdma_sw_node) * sg_len, GFP_NOWAIT);
+	if (!fdesc)
+		return NULL;
+
+	fdesc->fchan = fchan;
+	fdesc->n_nodes = sg_len;
+	for (i = 0; i < sg_len; i++) {
+		fdesc->node[i].desc = dma_pool_alloc(fchan->node_pool,
+				GFP_NOWAIT, &fdesc->node[i].pdesc);
+		if (!fdesc->node[i].desc)
+			goto err;
+	}
+	return fdesc;
+
+err:
+	while (--i >= 0)
+		dma_pool_free(fchan->node_pool, fdesc->node[i].desc,
+			      fdesc->node[i].pdesc);
+	kfree(fdesc);
+	return NULL;
+}
+
+static int st_fdma_alloc_chan_res(struct dma_chan *chan)
+{
+	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+
+	/* Create the dma pool for descriptor allocation */
+	fchan->node_pool = dma_pool_create(dev_name(&chan->dev->device),
+					   fchan->fdev->dev,
+					   sizeof(struct st_fdma_hw_node),
+					   __alignof__(struct st_fdma_hw_node),
+					   0);
+
+	if (!fchan->node_pool) {
+		dev_err(fchan->fdev->dev, "unable to allocate desc pool\n");
+		return -ENOMEM;
+	}
+
+	dev_dbg(fchan->fdev->dev, "alloc ch_id:%d type:%d\n",
+		fchan->vchan.chan.chan_id, fchan->cfg.type);
+
+	return 0;
+}
+
+static void st_fdma_free_chan_res(struct dma_chan *chan)
+{
+	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+	struct rproc *rproc = fchan->fdev->slim_rproc->rproc;
+	unsigned long flags;
+
+	LIST_HEAD(head);
+
+	dev_dbg(fchan->fdev->dev, "%s: freeing chan:%d\n",
+		__func__, fchan->vchan.chan.chan_id);
+
+	if (fchan->cfg.type != ST_FDMA_TYPE_FREE_RUN)
+		st_fdma_dreq_put(fchan);
+
+	spin_lock_irqsave(&fchan->vchan.lock, flags);
+	fchan->fdesc = NULL;
+	spin_unlock_irqrestore(&fchan->vchan.lock, flags);
+
+	dma_pool_destroy(fchan->node_pool);
+	fchan->node_pool = NULL;
+	memset(&fchan->cfg, 0, sizeof(struct st_fdma_cfg));
+
+	rproc_shutdown(rproc);
+}
+
+static struct dma_async_tx_descriptor *st_fdma_prep_dma_memcpy(
+	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
+	size_t len, unsigned long flags)
+{
+	struct st_fdma_chan *fchan;
+	struct st_fdma_desc *fdesc;
+	struct st_fdma_hw_node *hw_node;
+
+	if (!len)
+		return NULL;
+
+	fchan = to_st_fdma_chan(chan);
+
+	/* We only require a single descriptor */
+	fdesc = st_fdma_alloc_desc(fchan, 1);
+	if (!fdesc) {
+		dev_err(fchan->fdev->dev, "no memory for desc\n");
+		return NULL;
+	}
+
+	hw_node = fdesc->node[0].desc;
+	hw_node->next = 0;
+	hw_node->control = FDMA_NODE_CTRL_REQ_MAP_FREE_RUN;
+	hw_node->control |= FDMA_NODE_CTRL_SRC_INCR;
+	hw_node->control |= FDMA_NODE_CTRL_DST_INCR;
+	hw_node->control |= FDMA_NODE_CTRL_INT_EON;
+	hw_node->nbytes = len;
+	hw_node->saddr = src;
+	hw_node->daddr = dst;
+	hw_node->generic.length = len;
+	hw_node->generic.sstride = 0;
+	hw_node->generic.dstride = 0;
+
+	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
+}
+
+static int config_reqctrl(struct st_fdma_chan *fchan,
+			  enum dma_transfer_direction direction)
+{
+	u32 maxburst = 0, addr = 0;
+	enum dma_slave_buswidth width;
+	int ch_id = fchan->vchan.chan.chan_id;
+	struct st_fdma_dev *fdev = fchan->fdev;
+
+	switch (direction) {
+
+	case DMA_DEV_TO_MEM:
+		fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_WNR;
+		maxburst = fchan->scfg.src_maxburst;
+		width = fchan->scfg.src_addr_width;
+		addr = fchan->scfg.src_addr;
+		break;
+
+	case DMA_MEM_TO_DEV:
+		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_WNR;
+		maxburst = fchan->scfg.dst_maxburst;
+		width = fchan->scfg.dst_addr_width;
+		addr = fchan->scfg.dst_addr;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_OPCODE_MASK;
+
+	switch (width) {
+
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST1;
+		break;
+
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST2;
+		break;
+
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST4;
+		break;
+
+	case DMA_SLAVE_BUSWIDTH_8_BYTES:
+		fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST8;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_NUM_OPS_MASK;
+	fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_NUM_OPS(maxburst - 1);
+	dreq_write(fchan, fchan->cfg.req_ctrl, FDMA_REQ_CTRL_OFST);
+
+	fchan->cfg.dev_addr = addr;
+	fchan->cfg.dir = direction;
+
+	dev_dbg(fdev->dev, "chan:%d config_reqctrl:%#x req_ctrl:%#lx\n",
+		ch_id, addr, fchan->cfg.req_ctrl);
+
+	return 0;
+}
+
+static void fill_hw_node(struct st_fdma_hw_node *hw_node,
+			 struct st_fdma_chan *fchan,
+			 enum dma_transfer_direction direction)
+{
+	if (direction == DMA_MEM_TO_DEV) {
+		hw_node->control |= FDMA_NODE_CTRL_SRC_INCR;
+		hw_node->control |= FDMA_NODE_CTRL_DST_STATIC;
+		hw_node->daddr = fchan->cfg.dev_addr;
+	} else {
+		hw_node->control |= FDMA_NODE_CTRL_SRC_STATIC;
+		hw_node->control |= FDMA_NODE_CTRL_DST_INCR;
+		hw_node->saddr = fchan->cfg.dev_addr;
+	}
+
+	hw_node->generic.sstride = 0;
+	hw_node->generic.dstride = 0;
+}
+
+static inline struct st_fdma_chan *st_fdma_prep_common(struct dma_chan *chan,
+		size_t len, enum dma_transfer_direction direction)
+{
+	struct st_fdma_chan *fchan;
+
+	if (!chan || !len)
+		return NULL;
+
+	fchan = to_st_fdma_chan(chan);
+
+	if (!is_slave_direction(direction)) {
+		dev_err(fchan->fdev->dev, "bad direction?\n");
+		return NULL;
+	}
+
+	return fchan;
+}
+
+static struct dma_async_tx_descriptor *st_fdma_prep_dma_cyclic(
+		struct dma_chan *chan, dma_addr_t buf_addr, size_t len,
+		size_t period_len, enum dma_transfer_direction direction,
+		unsigned long flags)
+{
+	struct st_fdma_chan *fchan;
+	struct st_fdma_desc *fdesc;
+	int sg_len, i;
+
+	fchan = st_fdma_prep_common(chan, len, direction);
+	if (!fchan)
+		return NULL;
+
+	if (!period_len)
+		return NULL;
+
+	if (config_reqctrl(fchan, direction)) {
+		dev_err(fchan->fdev->dev, "bad width or direction\n");
+		return NULL;
+	}
+
+	/* the buffer length must be a multiple of period_len */
+	if (len % period_len != 0) {
+		dev_err(fchan->fdev->dev, "len is not multiple of period\n");
+		return NULL;
+	}
+
+	sg_len = len / period_len;
+	fdesc = st_fdma_alloc_desc(fchan, sg_len);
+	if (!fdesc) {
+		dev_err(fchan->fdev->dev, "no memory for desc\n");
+		return NULL;
+	}
+
+	fdesc->iscyclic = true;
+
+	for (i = 0; i < sg_len; i++) {
+		struct st_fdma_hw_node *hw_node = fdesc->node[i].desc;
+
+		hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;
+
+		hw_node->control =
+			FDMA_NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);
+		hw_node->control |= FDMA_NODE_CTRL_INT_EON;
+
+		fill_hw_node(hw_node, fchan, direction);
+
+		if (direction == DMA_MEM_TO_DEV)
+			hw_node->saddr = buf_addr + (i * period_len);
+		else
+			hw_node->daddr = buf_addr + (i * period_len);
+
+		hw_node->nbytes = period_len;
+		hw_node->generic.length = period_len;
+	}
+
+	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
+}
+
+static struct dma_async_tx_descriptor *st_fdma_prep_slave_sg(
+		struct dma_chan *chan, struct scatterlist *sgl,
+		unsigned int sg_len, enum dma_transfer_direction direction,
+		unsigned long flags, void *context)
+{
+	struct st_fdma_chan *fchan;
+	struct st_fdma_desc *fdesc;
+	struct st_fdma_hw_node *hw_node;
+	struct scatterlist *sg;
+	int i;
+
+	fchan = st_fdma_prep_common(chan, sg_len, direction);
+	if (!fchan)
+		return NULL;
+
+	if (!sgl)
+		return NULL;
+
+	fdesc = st_fdma_alloc_desc(fchan, sg_len);
+	if (!fdesc) {
+		dev_err(fchan->fdev->dev, "no memory for desc\n");
+		return NULL;
+	}
+
+	fdesc->iscyclic = false;
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		hw_node = fdesc->node[i].desc;
+
+		hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;
+		hw_node->control = FDMA_NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);
+
+		fill_hw_node(hw_node, fchan, direction);
+
+		if (direction == DMA_MEM_TO_DEV)
+			hw_node->saddr = sg_dma_address(sg);
+		else
+			hw_node->daddr = sg_dma_address(sg);
+
+		hw_node->nbytes = sg_dma_len(sg);
+		hw_node->generic.length = sg_dma_len(sg);
+	}
+
+	/* interrupt at end of last node */
+	hw_node->control |= FDMA_NODE_CTRL_INT_EON;
+
+	return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
+}
+
+static size_t st_fdma_desc_residue(struct st_fdma_chan *fchan,
+				   struct virt_dma_desc *vdesc,
+				   bool in_progress)
+{
+	struct st_fdma_desc *fdesc = fchan->fdesc;
+	size_t residue = 0;
+	dma_addr_t cur_addr = 0;
+	int i;
+
+	if (in_progress) {
+		cur_addr = fchan_read(fchan, FDMA_CH_CMD_OFST);
+		cur_addr &= FDMA_CH_CMD_DATA_MASK;
+	}
+
+	for (i = fchan->fdesc->n_nodes - 1; i >= 0; i--) {
+		if (cur_addr == fdesc->node[i].pdesc) {
+			residue += fnode_read(fchan, FDMA_CNTN_OFST);
+			break;
+		}
+		residue += fdesc->node[i].desc->nbytes;
+	}
+
+	return residue;
+}
+
+static enum dma_status st_fdma_tx_status(struct dma_chan *chan,
+					 dma_cookie_t cookie,
+					 struct dma_tx_state *txstate)
+{
+	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+	struct virt_dma_desc *vd;
+	enum dma_status ret;
+	unsigned long flags;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_COMPLETE || !txstate)
+		return ret;
+
+	spin_lock_irqsave(&fchan->vchan.lock, flags);
+	vd = vchan_find_desc(&fchan->vchan, cookie);
+	if (fchan->fdesc && cookie == fchan->fdesc->vdesc.tx.cookie)
+		txstate->residue = st_fdma_desc_residue(fchan, vd, true);
+	else if (vd)
+		txstate->residue = st_fdma_desc_residue(fchan, vd, false);
+	else
+		txstate->residue = 0;
+
+	spin_unlock_irqrestore(&fchan->vchan.lock, flags);
+
+	return ret;
+}
+
+static void st_fdma_issue_pending(struct dma_chan *chan)
+{
+	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&fchan->vchan.lock, flags);
+
+	if (vchan_issue_pending(&fchan->vchan) && !fchan->fdesc)
+		st_fdma_xfer_desc(fchan);
+
+	spin_unlock_irqrestore(&fchan->vchan.lock, flags);
+}
+
+static int st_fdma_pause(struct dma_chan *chan)
+{
+	unsigned long flags;
+	LIST_HEAD(head);
+	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+	int ch_id = fchan->vchan.chan.chan_id;
+	unsigned long cmd = FDMA_CMD_PAUSE(ch_id);
+
+	dev_dbg(fchan->fdev->dev, "pause chan:%d\n", ch_id);
+
+	spin_lock_irqsave(&fchan->vchan.lock, flags);
+	if (fchan->fdesc)
+		fdma_write(fchan->fdev, cmd, FDMA_CMD_SET_OFST);
+	spin_unlock_irqrestore(&fchan->vchan.lock, flags);
+
+	return 0;
+}
+
+static int st_fdma_resume(struct dma_chan *chan)
+{
+	unsigned long flags;
+	unsigned long val;
+	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+	int ch_id = fchan->vchan.chan.chan_id;
+
+	dev_dbg(fchan->fdev->dev, "resume chan:%d\n", ch_id);
+
+	spin_lock_irqsave(&fchan->vchan.lock, flags);
+	if (fchan->fdesc) {
+		val = fchan_read(fchan, FDMA_CH_CMD_OFST);
+		val &= FDMA_CH_CMD_DATA_MASK;
+		fchan_write(fchan, val, FDMA_CH_CMD_OFST);
+	}
+	spin_unlock_irqrestore(&fchan->vchan.lock, flags);
+
+	return 0;
+}
+
+static int st_fdma_terminate_all(struct dma_chan *chan)
+{
+	unsigned long flags;
+	LIST_HEAD(head);
+	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+	int ch_id = fchan->vchan.chan.chan_id;
+	unsigned long cmd = FDMA_CMD_PAUSE(ch_id);
+
+	dev_dbg(fchan->fdev->dev, "terminate chan:%d\n", ch_id);
+
+	spin_lock_irqsave(&fchan->vchan.lock, flags);
+	fdma_write(fchan->fdev, cmd, FDMA_CMD_SET_OFST);
+	fchan->fdesc = NULL;
+	vchan_get_all_descriptors(&fchan->vchan, &head);
+	spin_unlock_irqrestore(&fchan->vchan.lock, flags);
+	vchan_dma_desc_free_list(&fchan->vchan, &head);
+
+	return 0;
+}
+
+static int st_fdma_slave_config(struct dma_chan *chan,
+				struct dma_slave_config *slave_cfg)
+{
+	struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
+
+	memcpy(&fchan->scfg, slave_cfg, sizeof(fchan->scfg));
+	return 0;
+}
+
+static const struct st_fdma_driverdata fdma_mpe31_stih407_11 = {
+	.name = "STiH407",
+	.id = 0,
+};
+
+static const struct st_fdma_driverdata fdma_mpe31_stih407_12 = {
+	.name = "STiH407",
+	.id = 1,
+};
+
+static const struct st_fdma_driverdata fdma_mpe31_stih407_13 = {
+	.name = "STiH407",
+	.id = 2,
+};
+
+static const struct of_device_id st_fdma_match[] = {
+	{ .compatible = "st,stih407-fdma-mpe31-11",
+	  .data = &fdma_mpe31_stih407_11 },
+	{ .compatible = "st,stih407-fdma-mpe31-12",
+	  .data = &fdma_mpe31_stih407_12 },
+	{ .compatible = "st,stih407-fdma-mpe31-13",
+	  .data = &fdma_mpe31_stih407_13 },
+	{},
+};
+MODULE_DEVICE_TABLE(of, st_fdma_match);
+
+static int st_fdma_parse_dt(struct platform_device *pdev,
+			const struct st_fdma_driverdata *drvdata,
+			struct st_fdma_dev *fdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	int ret = -EINVAL;
+
+	if (!np)
+		goto err;
+
+	ret = of_property_read_u32(np, "dma-channels", &fdev->nr_channels);
+	if (ret)
+		goto err;
+
+	snprintf(fdev->fw_name, FW_NAME_SIZE, "fdma_%s_%d.elf",
+		drvdata->name, drvdata->id);
+
+err:
+	return ret;
+}
+#define FDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+				BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+				BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
+				BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+static void st_fdma_free(struct st_fdma_dev *fdev)
+{
+	struct st_fdma_chan *fchan;
+	int i;
+
+	for (i = 0; i < fdev->nr_channels; i++) {
+		fchan = &fdev->chans[i];
+		list_del(&fchan->vchan.chan.device_node);
+		tasklet_kill(&fchan->vchan.task);
+	}
+}
+
+static int st_fdma_probe(struct platform_device *pdev)
+{
+	struct st_fdma_dev *fdev;
+	const struct of_device_id *match;
+	struct device_node *np = pdev->dev.of_node;
+	const struct st_fdma_driverdata *drvdata;
+	int ret, i;
+
+	match = of_match_device(st_fdma_match, &pdev->dev);
+	if (!match || !match->data) {
+		dev_err(&pdev->dev, "No device match found\n");
+		return -ENODEV;
+	}
+
+	drvdata = match->data;
+
+	fdev = devm_kzalloc(&pdev->dev, sizeof(*fdev), GFP_KERNEL);
+	if (!fdev)
+		return -ENOMEM;
+
+	ret = st_fdma_parse_dt(pdev, drvdata, fdev);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to find platform data\n");
+		goto err;
+	}
+
+	fdev->chans = devm_kcalloc(&pdev->dev, fdev->nr_channels,
+				   sizeof(struct st_fdma_chan), GFP_KERNEL);
+	if (!fdev->chans)
+		return -ENOMEM;
+
+	fdev->dev = &pdev->dev;
+	fdev->drvdata = drvdata;
+	platform_set_drvdata(pdev, fdev);
+
+	fdev->irq = platform_get_irq(pdev, 0);
+	if (fdev->irq < 0) {
+		dev_err(&pdev->dev, "Failed to get irq resource\n");
+		return -EINVAL;
+	}
+
+	ret = devm_request_irq(&pdev->dev, fdev->irq, st_fdma_irq_handler, 0,
+			       dev_name(&pdev->dev), fdev);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to request irq (%d)\n", ret);
+		goto err;
+	}
+
+	fdev->slim_rproc = st_slim_rproc_alloc(pdev, fdev->fw_name);
+	if (IS_ERR(fdev->slim_rproc)) {
+		ret = PTR_ERR(fdev->slim_rproc);
+		dev_err(&pdev->dev, "slim_rproc_alloc failed (%d)\n", ret);
+		goto err;
+	}
+
+	/* Initialise list of FDMA channels */
+	INIT_LIST_HEAD(&fdev->dma_device.channels);
+	for (i = 0; i < fdev->nr_channels; i++) {
+		struct st_fdma_chan *fchan = &fdev->chans[i];
+
+		fchan->fdev = fdev;
+		fchan->vchan.desc_free = st_fdma_free_desc;
+		vchan_init(&fchan->vchan, &fdev->dma_device);
+	}
+
+	/* Initialise the FDMA dreq (reserve 0 & 31 for FDMA use) */
+	fdev->dreq_mask = BIT(0) | BIT(31);
+
+	dma_cap_set(DMA_SLAVE, fdev->dma_device.cap_mask);
+	dma_cap_set(DMA_CYCLIC, fdev->dma_device.cap_mask);
+	dma_cap_set(DMA_MEMCPY, fdev->dma_device.cap_mask);
+
+	fdev->dma_device.dev = &pdev->dev;
+	fdev->dma_device.device_alloc_chan_resources = st_fdma_alloc_chan_res;
+	fdev->dma_device.device_free_chan_resources = st_fdma_free_chan_res;
+	fdev->dma_device.device_prep_dma_cyclic = st_fdma_prep_dma_cyclic;
+	fdev->dma_device.device_prep_slave_sg = st_fdma_prep_slave_sg;
+	fdev->dma_device.device_prep_dma_memcpy = st_fdma_prep_dma_memcpy;
+	fdev->dma_device.device_tx_status = st_fdma_tx_status;
+	fdev->dma_device.device_issue_pending = st_fdma_issue_pending;
+	fdev->dma_device.device_terminate_all = st_fdma_terminate_all;
+	fdev->dma_device.device_config = st_fdma_slave_config;
+	fdev->dma_device.device_pause = st_fdma_pause;
+	fdev->dma_device.device_resume = st_fdma_resume;
+
+	fdev->dma_device.src_addr_widths = FDMA_DMA_BUSWIDTHS;
+	fdev->dma_device.dst_addr_widths = FDMA_DMA_BUSWIDTHS;
+	fdev->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	fdev->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+	ret = dma_async_device_register(&fdev->dma_device);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"Failed to register DMA device (%d)\n", ret);
+		goto err_rproc;
+	}
+
+	ret = of_dma_controller_register(np, st_fdma_of_xlate, fdev);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"Failed to register controller (%d)\n", ret);
+		goto err_dma_dev;
+	}
+
+	dev_info(&pdev->dev, "ST FDMA engine driver, irq:%d\n", fdev->irq);
+
+	return 0;
+
+err_dma_dev:
+	dma_async_device_unregister(&fdev->dma_device);
+err_rproc:
+	st_fdma_free(fdev);
+	st_slim_rproc_put(fdev->slim_rproc);
+err:
+	return ret;
+}
+
+static int st_fdma_remove(struct platform_device *pdev)
+{
+	struct st_fdma_dev *fdev = platform_get_drvdata(pdev);
+
+	devm_free_irq(&pdev->dev, fdev->irq, fdev);
+	st_slim_rproc_put(fdev->slim_rproc);
+	of_dma_controller_free(pdev->dev.of_node);
+	dma_async_device_unregister(&fdev->dma_device);
+
+	return 0;
+}
+
+static struct platform_driver st_fdma_platform_driver = {
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = st_fdma_match,
+	},
+	.probe = st_fdma_probe,
+	.remove = st_fdma_remove,
+};
+module_platform_driver(st_fdma_platform_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver");
+MODULE_AUTHOR("Ludovic Barre <Ludovic.barre@st.com>");
+MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
+MODULE_ALIAS("platform:" DRIVER_NAME);