 Documentation/devicetree/bindings/dma/fsl-edma.txt |  76 ++
 drivers/dma/Kconfig                                 |  10 +
 drivers/dma/Makefile                                |   1 +
 drivers/dma/fsl-edma.c                              | 975 ++++++++++++++++++++
 4 files changed, 1062 insertions(+), 0 deletions(-)
diff --git a/Documentation/devicetree/bindings/dma/fsl-edma.txt b/Documentation/devicetree/bindings/dma/fsl-edma.txt
new file mode 100644
index 000000000000..191d7bd8a6fe
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/fsl-edma.txt
@@ -0,0 +1,76 @@
* Freescale enhanced Direct Memory Access (eDMA) Controller

The eDMA channels have multiplexing capability through programmable
memory-mapped registers. Channels are split into two groups, called
DMAMUX0 and DMAMUX1. A specific DMA request source can only be multiplexed
by a channel of one particular group, DMAMUX0 or DMAMUX1, but not both.

* eDMA Controller
Required properties:
- compatible :
	- "fsl,vf610-edma" for the eDMA as found on the Vybrid VF610 SoC
- reg : Specifies base physical address(es) and size of the eDMA registers.
	The 1st region is the eDMA control register's address and size.
	The 2nd and the 3rd regions are the programmable channel multiplexing
	control register's address and size.
- interrupts : A list of interrupt-specifiers, one for each entry in
	interrupt-names.
- interrupt-names : Should contain:
	"edma-tx" - the transmission interrupt
	"edma-err" - the error interrupt
- #dma-cells : Must be <2>.
	The 1st cell specifies the DMAMUX (0 for DMAMUX0 and 1 for DMAMUX1).
	A specific request source can only be multiplexed by the channel
	group belonging to that DMAMUX.
	The 2nd cell specifies the request source (slot) ID.
	See the SoC's reference manual for all the supported request sources.
- dma-channels : Number of channels supported by the controller.
- clock-names : A list of channel group clock names. Should contain:
	"dmamux0" - clock name of the mux0 group
	"dmamux1" - clock name of the mux1 group
- clocks : A list of phandle and clock-specifier pairs, one for each entry in
	clock-names.

Optional properties:
- big-endian : If present, the eDMA registers and the hardware scatter/gather
	descriptors are implemented in big-endian mode; otherwise they are
	little-endian.

Examples:

edma0: dma-controller@40018000 {
	#dma-cells = <2>;
	compatible = "fsl,vf610-edma";
	reg = <0x40018000 0x2000>,
		<0x40024000 0x1000>,
		<0x40025000 0x1000>;
	interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>,
		<0 9 IRQ_TYPE_LEVEL_HIGH>;
	interrupt-names = "edma-tx", "edma-err";
	dma-channels = <32>;
	clock-names = "dmamux0", "dmamux1";
	clocks = <&clks VF610_CLK_DMAMUX0>,
		<&clks VF610_CLK_DMAMUX1>;
};

* DMA clients
DMA client drivers that use the DMA function must follow the format described
in dma.txt, using a two-cell specifier for each channel: the 1st cell
specifies the channel group (DMAMUX) in which this request can be multiplexed,
and the 2nd cell specifies the request source.

Examples:

sai2: sai@40031000 {
	compatible = "fsl,vf610-sai";
	reg = <0x40031000 0x1000>;
	interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>;
	clock-names = "sai";
	clocks = <&clks VF610_CLK_SAI2>;
	dma-names = "tx", "rx";
	dmas = <&edma0 0 21>,
		<&edma0 0 20>;
	status = "disabled";
};
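
For reference, below is a minimal sketch of how a client driver might claim
and drive the "tx" channel described above through the generic dmaengine API.
The helper name, FIFO address, and transfer parameters are hypothetical
placeholders, not part of this binding:

#include <linux/dmaengine.h>

/* Hypothetical client helper: claim the "tx" channel from the dmas/
 * dma-names properties above and submit a single buffer to it. */
static int sai_sketch_start_tx(struct device *dev, dma_addr_t buf, size_t len)
{
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = 0x40031020,	/* hypothetical SAI TX FIFO address */
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 1,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	int ret;

	/* Resolved via the "dmas"/"dma-names" properties, e.g. <&edma0 0 21> */
	chan = dma_request_slave_channel(dev, "tx");
	if (!chan)
		return -ENODEV;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto release;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		ret = -EINVAL;
		goto release;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;

release:
	dma_release_channel(chan);
	return ret;
}
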
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index e4382ecc22a2..830b88d8744c 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -349,6 +349,16 @@ config MOXART_DMA
	select DMA_VIRTUAL_CHANNELS
	help
	  Enable support for the MOXA ART SoC DMA controller.

config FSL_EDMA
	tristate "Freescale eDMA engine support"
	depends on OF
	select DMA_ENGINE
	select DMA_VIRTUAL_CHANNELS
	help
	  Support the Freescale eDMA engine with programmable channel
	  multiplexing capability for DMA request sources (slots).
	  This module can be found on Freescale Vybrid and LS-1 SoCs.

config DMA_ENGINE
	bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index a029d0f4a1be..995946283f8d 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -44,3 +44,4 @@ obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
obj-$(CONFIG_TI_CPPI41) += cppi41.o
obj-$(CONFIG_K3_DMA) += k3dma.o
obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
new file mode 100644
index 000000000000..9025300a1670
--- /dev/null
+++ b/drivers/dma/fsl-edma.c
@@ -0,0 +1,975 @@
/*
 * drivers/dma/fsl-edma.c
 *
 * Copyright 2013-2014 Freescale Semiconductor, Inc.
 *
 * Driver for the Freescale eDMA engine with flexible channel multiplexing
 * capability for DMA request sources. The eDMA block can be found on some
 * Vybrid and Layerscape SoCs.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

#define EDMA_CR			0x00
#define EDMA_ES			0x04
#define EDMA_ERQ		0x0C
#define EDMA_EEI		0x14
#define EDMA_SERQ		0x1B
#define EDMA_CERQ		0x1A
#define EDMA_SEEI		0x19
#define EDMA_CEEI		0x18
#define EDMA_CINT		0x1F
#define EDMA_CERR		0x1E
#define EDMA_SSRT		0x1D
#define EDMA_CDNE		0x1C
#define EDMA_INTR		0x24
#define EDMA_ERR		0x2C

#define EDMA_TCD_SADDR(x)	(0x1000 + 32 * (x))
#define EDMA_TCD_SOFF(x)	(0x1004 + 32 * (x))
#define EDMA_TCD_ATTR(x)	(0x1006 + 32 * (x))
#define EDMA_TCD_NBYTES(x)	(0x1008 + 32 * (x))
#define EDMA_TCD_SLAST(x)	(0x100C + 32 * (x))
#define EDMA_TCD_DADDR(x)	(0x1010 + 32 * (x))
#define EDMA_TCD_DOFF(x)	(0x1014 + 32 * (x))
#define EDMA_TCD_CITER_ELINK(x)	(0x1016 + 32 * (x))
#define EDMA_TCD_CITER(x)	(0x1016 + 32 * (x))
#define EDMA_TCD_DLAST_SGA(x)	(0x1018 + 32 * (x))
#define EDMA_TCD_CSR(x)		(0x101C + 32 * (x))
#define EDMA_TCD_BITER_ELINK(x)	(0x101E + 32 * (x))
#define EDMA_TCD_BITER(x)	(0x101E + 32 * (x))

#define EDMA_CR_EDBG		BIT(1)
#define EDMA_CR_ERCA		BIT(2)
#define EDMA_CR_ERGA		BIT(3)
#define EDMA_CR_HOE		BIT(4)
#define EDMA_CR_HALT		BIT(5)
#define EDMA_CR_CLM		BIT(6)
#define EDMA_CR_EMLM		BIT(7)
#define EDMA_CR_ECX		BIT(16)
#define EDMA_CR_CX		BIT(17)

#define EDMA_SEEI_SEEI(x)	((x) & 0x1F)
#define EDMA_CEEI_CEEI(x)	((x) & 0x1F)
#define EDMA_CINT_CINT(x)	((x) & 0x1F)
#define EDMA_CERR_CERR(x)	((x) & 0x1F)

#define EDMA_TCD_ATTR_DSIZE(x)		(((x) & 0x0007))
#define EDMA_TCD_ATTR_DMOD(x)		(((x) & 0x001F) << 3)
#define EDMA_TCD_ATTR_SSIZE(x)		(((x) & 0x0007) << 8)
#define EDMA_TCD_ATTR_SMOD(x)		(((x) & 0x001F) << 11)
#define EDMA_TCD_ATTR_SSIZE_8BIT	(0x0000)
#define EDMA_TCD_ATTR_SSIZE_16BIT	(0x0100)
#define EDMA_TCD_ATTR_SSIZE_32BIT	(0x0200)
#define EDMA_TCD_ATTR_SSIZE_64BIT	(0x0300)
#define EDMA_TCD_ATTR_SSIZE_32BYTE	(0x0500)
#define EDMA_TCD_ATTR_DSIZE_8BIT	(0x0000)
#define EDMA_TCD_ATTR_DSIZE_16BIT	(0x0001)
#define EDMA_TCD_ATTR_DSIZE_32BIT	(0x0002)
#define EDMA_TCD_ATTR_DSIZE_64BIT	(0x0003)
#define EDMA_TCD_ATTR_DSIZE_32BYTE	(0x0005)

#define EDMA_TCD_SOFF_SOFF(x)		(x)
#define EDMA_TCD_NBYTES_NBYTES(x)	(x)
#define EDMA_TCD_SLAST_SLAST(x)		(x)
#define EDMA_TCD_DADDR_DADDR(x)		(x)
#define EDMA_TCD_CITER_CITER(x)		((x) & 0x7FFF)
#define EDMA_TCD_DOFF_DOFF(x)		(x)
#define EDMA_TCD_DLAST_SGA_DLAST_SGA(x)	(x)
#define EDMA_TCD_BITER_BITER(x)		((x) & 0x7FFF)

#define EDMA_TCD_CSR_START		BIT(0)
#define EDMA_TCD_CSR_INT_MAJOR		BIT(1)
#define EDMA_TCD_CSR_INT_HALF		BIT(2)
#define EDMA_TCD_CSR_D_REQ		BIT(3)
#define EDMA_TCD_CSR_E_SG		BIT(4)
#define EDMA_TCD_CSR_E_LINK		BIT(5)
#define EDMA_TCD_CSR_ACTIVE		BIT(6)
#define EDMA_TCD_CSR_DONE		BIT(7)

#define EDMAMUX_CHCFG_DIS		0x0
#define EDMAMUX_CHCFG_ENBL		0x80
#define EDMAMUX_CHCFG_SOURCE(n)		((n) & 0x3F)

#define DMAMUX_NR	2

#define FSL_EDMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
				BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

struct fsl_edma_hw_tcd {
	u32	saddr;
	u16	soff;
	u16	attr;
	u32	nbytes;
	u32	slast;
	u32	daddr;
	u16	doff;
	u16	citer;
	u32	dlast_sga;
	u16	csr;
	u16	biter;
};

struct fsl_edma_sw_tcd {
	dma_addr_t			ptcd;
	struct fsl_edma_hw_tcd		*vtcd;
};

struct fsl_edma_slave_config {
	enum dma_transfer_direction	dir;
	enum dma_slave_buswidth		addr_width;
	u32				dev_addr;
	u32				burst;
	u32				attr;
};

struct fsl_edma_chan {
	struct virt_dma_chan		vchan;
	enum dma_status			status;
	struct fsl_edma_engine		*edma;
	struct fsl_edma_desc		*edesc;
	struct fsl_edma_slave_config	fsc;
	struct dma_pool			*tcd_pool;
};

struct fsl_edma_desc {
	struct virt_dma_desc		vdesc;
	struct fsl_edma_chan		*echan;
	bool				iscyclic;
	unsigned int			n_tcds;
	struct fsl_edma_sw_tcd		tcd[];
};

struct fsl_edma_engine {
	struct dma_device	dma_dev;
	void __iomem		*membase;
	void __iomem		*muxbase[DMAMUX_NR];
	struct clk		*muxclk[DMAMUX_NR];
	struct mutex		fsl_edma_mutex;
	u32			n_chans;
	int			txirq;
	int			errirq;
	bool			big_endian;
	struct fsl_edma_chan	chans[];
};

/*
 * Register read/write helpers for big- or little-endian operation; the
 * eDMA controller's endianness is independent of the CPU core's endianness.
 */

static u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr)
{
	if (edma->big_endian)
		return ioread16be(addr);
	else
		return ioread16(addr);
}

static u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
{
	if (edma->big_endian)
		return ioread32be(addr);
	else
		return ioread32(addr);
}

static void edma_writeb(struct fsl_edma_engine *edma, u8 val, void __iomem *addr)
{
	iowrite8(val, addr);
}

static void edma_writew(struct fsl_edma_engine *edma, u16 val, void __iomem *addr)
{
	if (edma->big_endian)
		iowrite16be(val, addr);
	else
		iowrite16(val, addr);
}

static void edma_writel(struct fsl_edma_engine *edma, u32 val, void __iomem *addr)
{
	if (edma->big_endian)
		iowrite32be(val, addr);
	else
		iowrite32(val, addr);
}

static struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct fsl_edma_chan, vchan.chan);
}

static struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct fsl_edma_desc, vdesc);
}
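
/*
 * Enable the channel's hardware service request: SEEI enables the error
 * interrupt for the channel, then SERQ sets the channel's bit in ERQ so
 * that peripheral DMA requests start being serviced.
 */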
static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
	void __iomem *addr = fsl_chan->edma->membase;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), addr + EDMA_SEEI);
	edma_writeb(fsl_chan->edma, ch, addr + EDMA_SERQ);
}

static void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
	void __iomem *addr = fsl_chan->edma->membase;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	edma_writeb(fsl_chan->edma, ch, addr + EDMA_CERQ);
	edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), addr + EDMA_CEEI);
}
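
/*
 * Program the DMAMUX router for this channel. Each of the DMAMUX_NR muxes
 * serves n_chans / DMAMUX_NR consecutive channels; writing ENBL plus the
 * request source (slot) number routes that slot onto the channel, and
 * writing DIS disconnects it.
 */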
static void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
		unsigned int slot, bool enable)
{
	u32 ch = fsl_chan->vchan.chan.chan_id;
	void __iomem *muxaddr;
	unsigned chans_per_mux, ch_off;

	chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
	ch_off = ch % chans_per_mux;
	/* index by mux group, not DMAMUX_NR, to stay within muxbase[] */
	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];

	if (enable)
		edma_writeb(fsl_chan->edma,
			EDMAMUX_CHCFG_ENBL | EDMAMUX_CHCFG_SOURCE(slot),
			muxaddr + ch_off);
	else
		edma_writeb(fsl_chan->edma, EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
}
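
/*
 * Map a dma_slave_buswidth onto the hardware TCD ATTR transfer-size
 * encoding; unsupported widths fall back to 32-bit.
 */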
static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
	switch (addr_width) {
	case 1:
		return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
	case 2:
		return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
	case 4:
		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
	case 8:
		return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
	default:
		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
	}
}

static void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = to_fsl_edma_desc(vdesc);
	for (i = 0; i < fsl_desc->n_tcds; i++)
		dma_pool_free(fsl_desc->echan->tcd_pool,
			      fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
}

static int fsl_edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct dma_slave_config *cfg = (void *)arg;
	unsigned long flags;
	LIST_HEAD(head);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
		fsl_edma_disable_request(fsl_chan);
		fsl_chan->edesc = NULL;
		vchan_get_all_descriptors(&fsl_chan->vchan, &head);
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
		vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
		return 0;

	case DMA_SLAVE_CONFIG:
		fsl_chan->fsc.dir = cfg->direction;
		if (cfg->direction == DMA_DEV_TO_MEM) {
			fsl_chan->fsc.dev_addr = cfg->src_addr;
			fsl_chan->fsc.addr_width = cfg->src_addr_width;
			fsl_chan->fsc.burst = cfg->src_maxburst;
			fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width);
		} else if (cfg->direction == DMA_MEM_TO_DEV) {
			fsl_chan->fsc.dev_addr = cfg->dst_addr;
			fsl_chan->fsc.addr_width = cfg->dst_addr_width;
			fsl_chan->fsc.burst = cfg->dst_maxburst;
			fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width);
		} else {
			return -EINVAL;
		}
		return 0;

	case DMA_PAUSE:
		spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
		if (fsl_chan->edesc) {
			fsl_edma_disable_request(fsl_chan);
			fsl_chan->status = DMA_PAUSED;
		}
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
		return 0;

	case DMA_RESUME:
		spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
		if (fsl_chan->edesc) {
			fsl_edma_enable_request(fsl_chan);
			fsl_chan->status = DMA_IN_PROGRESS;
		}
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
		return 0;

	default:
		return -ENXIO;
	}
}
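
/*
 * Residue = the sum of nbytes * biter over all TCDs in the descriptor,
 * minus everything already transferred; for the in-flight TCD the engine's
 * current source (or destination) address shows how far it has progressed.
 */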
static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
		struct virt_dma_desc *vdesc, bool in_progress)
{
	struct fsl_edma_desc *edesc = fsl_chan->edesc;
	void __iomem *addr = fsl_chan->edma->membase;
	u32 ch = fsl_chan->vchan.chan.chan_id;
	enum dma_transfer_direction dir = fsl_chan->fsc.dir;
	dma_addr_t cur_addr, dma_addr;
	size_t len, size;
	int i;

	/* calculate the total size in this desc */
	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
		len += edma_readl(fsl_chan->edma, &(edesc->tcd[i].vtcd->nbytes))
			* edma_readw(fsl_chan->edma, &(edesc->tcd[i].vtcd->biter));

	if (!in_progress)
		return len;

	if (dir == DMA_MEM_TO_DEV)
		cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_SADDR(ch));
	else
		cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_DADDR(ch));

	/* figure out which TCDs are finished and calculate the residue */
	for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
		size = edma_readl(fsl_chan->edma, &(edesc->tcd[i].vtcd->nbytes))
			* edma_readw(fsl_chan->edma, &(edesc->tcd[i].vtcd->biter));
		if (dir == DMA_MEM_TO_DEV)
			dma_addr = edma_readl(fsl_chan->edma,
					&(edesc->tcd[i].vtcd->saddr));
		else
			dma_addr = edma_readl(fsl_chan->edma,
					&(edesc->tcd[i].vtcd->daddr));

		len -= size;
		if (cur_addr > dma_addr && cur_addr < dma_addr + size) {
			len += dma_addr + size - cur_addr;
			break;
		}
	}

	return len;
}

static enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE)
		return status;

	if (!txstate)
		return fsl_chan->status;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
	if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
		txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, true);
	else if (vdesc)
		txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, false);
	else
		txstate->residue = 0;

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	return fsl_chan->status;
}
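
/*
 * Load one TCD into the channel's hardware TCD registers. CSR is cleared
 * first so the channel cannot start or auto-load while the other fields
 * are being updated.
 */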
static void fsl_edma_set_tcd_params(struct fsl_edma_chan *fsl_chan,
		u32 src, u32 dst, u16 attr, u16 soff, u32 nbytes,
		u32 slast, u16 citer, u16 biter, u32 doff, u32 dlast_sga,
		u16 csr)
{
	void __iomem *addr = fsl_chan->edma->membase;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	/*
	 * The TCD parameters were already byte-swapped as needed in
	 * fill_tcd_params(), so write them to the registers in the CPU's
	 * native endianness here.
	 */
	writew(0, addr + EDMA_TCD_CSR(ch));
	writel(src, addr + EDMA_TCD_SADDR(ch));
	writel(dst, addr + EDMA_TCD_DADDR(ch));
	writew(attr, addr + EDMA_TCD_ATTR(ch));
	writew(soff, addr + EDMA_TCD_SOFF(ch));
	writel(nbytes, addr + EDMA_TCD_NBYTES(ch));
	writel(slast, addr + EDMA_TCD_SLAST(ch));
	writew(citer, addr + EDMA_TCD_CITER(ch));
	writew(biter, addr + EDMA_TCD_BITER(ch));
	writew(doff, addr + EDMA_TCD_DOFF(ch));
	writel(dlast_sga, addr + EDMA_TCD_DLAST_SGA(ch));
	writew(csr, addr + EDMA_TCD_CSR(ch));
}

static void fill_tcd_params(struct fsl_edma_engine *edma,
		struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
		u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
		u16 biter, u16 doff, u32 dlast_sga, bool major_int,
		bool disable_req, bool enable_sg)
{
	u16 csr = 0;

	/*
	 * The eDMA hardware scatter/gather facility requires the TCD
	 * parameters to be stored in memory in the same endianness as the
	 * eDMA module, so that the engine can load them automatically.
	 */
	edma_writel(edma, src, &(tcd->saddr));
	edma_writel(edma, dst, &(tcd->daddr));
	edma_writew(edma, attr, &(tcd->attr));
	edma_writew(edma, EDMA_TCD_SOFF_SOFF(soff), &(tcd->soff));
	edma_writel(edma, EDMA_TCD_NBYTES_NBYTES(nbytes), &(tcd->nbytes));
	edma_writel(edma, EDMA_TCD_SLAST_SLAST(slast), &(tcd->slast));
	edma_writew(edma, EDMA_TCD_CITER_CITER(citer), &(tcd->citer));
	edma_writew(edma, EDMA_TCD_DOFF_DOFF(doff), &(tcd->doff));
	edma_writel(edma, EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga), &(tcd->dlast_sga));
	edma_writew(edma, EDMA_TCD_BITER_BITER(biter), &(tcd->biter));
	if (major_int)
		csr |= EDMA_TCD_CSR_INT_MAJOR;

	if (disable_req)
		csr |= EDMA_TCD_CSR_D_REQ;

	if (enable_sg)
		csr |= EDMA_TCD_CSR_E_SG;

	edma_writew(edma, csr, &(tcd->csr));
}
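
/*
 * Allocate a software descriptor together with one hardware TCD per
 * scatter/gather entry, carved from the channel's DMA pool.
 */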
static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
		int sg_len)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = kzalloc(sizeof(*fsl_desc) + sizeof(struct fsl_edma_sw_tcd) * sg_len,
				GFP_NOWAIT);
	if (!fsl_desc)
		return NULL;

	fsl_desc->echan = fsl_chan;
	fsl_desc->n_tcds = sg_len;
	for (i = 0; i < sg_len; i++) {
		fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
					GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
		if (!fsl_desc->tcd[i].vtcd)
			goto err;
	}
	return fsl_desc;

err:
	while (--i >= 0)
		dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
				fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
	return NULL;
}
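
/*
 * Cyclic transfer: one TCD per period, with each TCD's dlast_sga pointing
 * at the next TCD (wrapping at the end) so the engine loops over the ring
 * and raises a major-loop interrupt after every period.
 */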
static struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	dma_addr_t dma_buf_next;
	int sg_len, i;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;

	if (!is_slave_direction(fsl_chan->fsc.dir))
		return NULL;

	sg_len = buf_len / period_len;
	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = true;

	dma_buf_next = dma_addr;
	nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
	iter = period_len / nbytes;

	for (i = 0; i < sg_len; i++) {
		if (dma_buf_next >= dma_addr + buf_len)
			dma_buf_next = dma_addr;

		/* get next sg's physical address */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
			src_addr = dma_buf_next;
			dst_addr = fsl_chan->fsc.dev_addr;
			soff = fsl_chan->fsc.addr_width;
			doff = 0;
		} else {
			src_addr = fsl_chan->fsc.dev_addr;
			dst_addr = dma_buf_next;
			soff = 0;
			doff = fsl_chan->fsc.addr_width;
		}

		fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd, src_addr,
				dst_addr, fsl_chan->fsc.attr, soff, nbytes, 0,
				iter, iter, doff, last_sg, true, false, true);
		dma_buf_next += period_len;
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
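
/*
 * Scatter/gather transfer: chain the TCDs through dlast_sga; only the last
 * TCD raises the major-loop interrupt and clears its own hardware request
 * (D_REQ) so the transfer stops once the list is done.
 */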
static struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	struct scatterlist *sg;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;
	int i;

	if (!is_slave_direction(fsl_chan->fsc.dir))
		return NULL;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;

	nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
	for_each_sg(sgl, sg, sg_len, i) {
		/* get next sg's physical address */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
			src_addr = sg_dma_address(sg);
			dst_addr = fsl_chan->fsc.dev_addr;
			soff = fsl_chan->fsc.addr_width;
			doff = 0;
		} else {
			src_addr = fsl_chan->fsc.dev_addr;
			dst_addr = sg_dma_address(sg);
			soff = 0;
			doff = fsl_chan->fsc.addr_width;
		}

		iter = sg_dma_len(sg) / nbytes;
		if (i < sg_len - 1) {
			last_sg = fsl_desc->tcd[(i + 1)].ptcd;
			fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd,
					src_addr, dst_addr, fsl_chan->fsc.attr,
					soff, nbytes, 0, iter, iter, doff, last_sg,
					false, false, true);
		} else {
			last_sg = 0;
			fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd,
					src_addr, dst_addr, fsl_chan->fsc.attr,
					soff, nbytes, 0, iter, iter, doff, last_sg,
					true, true, false);
		}
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
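
/*
 * Start the next queued descriptor: write its first TCD into the channel
 * registers (the remaining TCDs are fetched by the engine via hardware
 * scatter/gather) and enable the channel's request line.
 */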
static void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
	struct fsl_edma_hw_tcd *tcd;
	struct virt_dma_desc *vdesc;

	vdesc = vchan_next_desc(&fsl_chan->vchan);
	if (!vdesc)
		return;
	fsl_chan->edesc = to_fsl_edma_desc(vdesc);
	tcd = fsl_chan->edesc->tcd[0].vtcd;
	fsl_edma_set_tcd_params(fsl_chan, tcd->saddr, tcd->daddr, tcd->attr,
			tcd->soff, tcd->nbytes, tcd->slast, tcd->citer,
			tcd->biter, tcd->doff, tcd->dlast_sga, tcd->csr);
	fsl_edma_enable_request(fsl_chan);
	fsl_chan->status = DMA_IN_PROGRESS;
}
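
/*
 * Transfer-complete interrupt: INTR has one bit per channel; acknowledge
 * via CINT, complete or recycle the descriptor, then kick the next one.
 */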
static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *fsl_edma = dev_id;
	unsigned int intr, ch;
	void __iomem *base_addr;
	struct fsl_edma_chan *fsl_chan;

	base_addr = fsl_edma->membase;

	intr = edma_readl(fsl_edma, base_addr + EDMA_INTR);
	if (!intr)
		return IRQ_NONE;

	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
		if (intr & (0x1 << ch)) {
			edma_writeb(fsl_edma, EDMA_CINT_CINT(ch),
				base_addr + EDMA_CINT);

			fsl_chan = &fsl_edma->chans[ch];

			spin_lock(&fsl_chan->vchan.lock);
			if (!fsl_chan->edesc->iscyclic) {
				list_del(&fsl_chan->edesc->vdesc.node);
				vchan_cookie_complete(&fsl_chan->edesc->vdesc);
				fsl_chan->edesc = NULL;
				fsl_chan->status = DMA_COMPLETE;
			} else {
				vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
			}

			if (!fsl_chan->edesc)
				fsl_edma_xfer_desc(fsl_chan);

			spin_unlock(&fsl_chan->vchan.lock);
		}
	}
	return IRQ_HANDLED;
}

static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *fsl_edma = dev_id;
	unsigned int err, ch;

	err = edma_readl(fsl_edma, fsl_edma->membase + EDMA_ERR);
	if (!err)
		return IRQ_NONE;

	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
		if (err & (0x1 << ch)) {
			fsl_edma_disable_request(&fsl_edma->chans[ch]);
			edma_writeb(fsl_edma, EDMA_CERR_CERR(ch),
				fsl_edma->membase + EDMA_CERR);
			fsl_edma->chans[ch].status = DMA_ERROR;
		}
	}
	return IRQ_HANDLED;
}

static irqreturn_t fsl_edma_irq_handler(int irq, void *dev_id)
{
	if (fsl_edma_tx_handler(irq, dev_id) == IRQ_HANDLED)
		return IRQ_HANDLED;

	return fsl_edma_err_handler(irq, dev_id);
}

static void fsl_edma_issue_pending(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);

	if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}
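
/*
 * Translate a two-cell DT specifier: cell 0 selects the DMAMUX group the
 * channel must belong to, cell 1 is the request slot to be routed onto
 * the channel via the mux.
 */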
static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
		struct of_dma *ofdma)
{
	struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
	struct dma_chan *chan;
	u32 chans_per_mux = fsl_edma->n_chans / DMAMUX_NR;

	if (dma_spec->args_count != 2)
		return NULL;

	mutex_lock(&fsl_edma->fsl_edma_mutex);
	list_for_each_entry(chan, &fsl_edma->dma_dev.channels, device_node) {
		if (chan->client_count)
			continue;
		/* match by mux group, i.e. chan_id / channels-per-mux */
		if ((chan->chan_id / chans_per_mux) == dma_spec->args[0]) {
			chan = dma_get_slave_channel(chan);
			if (chan) {
				chan->device->privatecnt++;
				fsl_edma_chan_mux(to_fsl_edma_chan(chan),
					dma_spec->args[1], true);
				mutex_unlock(&fsl_edma->fsl_edma_mutex);
				return chan;
			}
		}
	}
	mutex_unlock(&fsl_edma->fsl_edma_mutex);
	return NULL;
}

static int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
				sizeof(struct fsl_edma_hw_tcd),
				32, 0);
	return 0;
}

static void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	fsl_edma_chan_mux(fsl_chan, 0, false);
	fsl_chan->edesc = NULL;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	dma_pool_destroy(fsl_chan->tcd_pool);
	fsl_chan->tcd_pool = NULL;
}

static int fsl_dma_device_slave_caps(struct dma_chan *dchan,
		struct dma_slave_caps *caps)
{
	caps->src_addr_widths = FSL_EDMA_BUSWIDTHS;
	caps->dstn_addr_widths = FSL_EDMA_BUSWIDTHS;
	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	caps->cmd_pause = true;
	caps->cmd_terminate = true;

	return 0;
}
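
/*
 * Some SoCs mux the transfer and error interrupts onto a single line; in
 * that case register one combined handler, otherwise one handler each.
 */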
static int
fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
	int ret;

	fsl_edma->txirq = platform_get_irq_byname(pdev, "edma-tx");
	if (fsl_edma->txirq < 0) {
		dev_err(&pdev->dev, "Can't get edma-tx irq.\n");
		return fsl_edma->txirq;
	}

	fsl_edma->errirq = platform_get_irq_byname(pdev, "edma-err");
	if (fsl_edma->errirq < 0) {
		dev_err(&pdev->dev, "Can't get edma-err irq.\n");
		return fsl_edma->errirq;
	}

	if (fsl_edma->txirq == fsl_edma->errirq) {
		ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
				fsl_edma_irq_handler, 0, "eDMA", fsl_edma);
		if (ret) {
			dev_err(&pdev->dev, "Can't register eDMA IRQ.\n");
			return ret;
		}
	} else {
		ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
				fsl_edma_tx_handler, 0, "eDMA tx", fsl_edma);
		if (ret) {
			dev_err(&pdev->dev, "Can't register eDMA tx IRQ.\n");
			return ret;
		}

		ret = devm_request_irq(&pdev->dev, fsl_edma->errirq,
				fsl_edma_err_handler, 0, "eDMA err", fsl_edma);
		if (ret) {
			dev_err(&pdev->dev, "Can't register eDMA err IRQ.\n");
			return ret;
		}
	}

	return 0;
}

static int fsl_edma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct fsl_edma_engine *fsl_edma;
	struct fsl_edma_chan *fsl_chan;
	struct resource *res;
	int len, chans;
	int ret, i;

	ret = of_property_read_u32(np, "dma-channels", &chans);
	if (ret) {
		dev_err(&pdev->dev, "Can't get dma-channels.\n");
		return ret;
	}

	len = sizeof(*fsl_edma) + sizeof(*fsl_chan) * chans;
	fsl_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!fsl_edma)
		return -ENOMEM;

	fsl_edma->n_chans = chans;
	mutex_init(&fsl_edma->fsl_edma_mutex);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	fsl_edma->membase = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(fsl_edma->membase))
		return PTR_ERR(fsl_edma->membase);

	for (i = 0; i < DMAMUX_NR; i++) {
		char clkname[32];

		res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
		fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(fsl_edma->muxbase[i]))
			return PTR_ERR(fsl_edma->muxbase[i]);

		sprintf(clkname, "dmamux%d", i);
		fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname);
		if (IS_ERR(fsl_edma->muxclk[i])) {
			dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
			return PTR_ERR(fsl_edma->muxclk[i]);
		}

		ret = clk_prepare_enable(fsl_edma->muxclk[i]);
		if (ret) {
			dev_err(&pdev->dev, "Can't enable DMAMUX clock.\n");
			return ret;
		}
	}

	ret = fsl_edma_irq_init(pdev, fsl_edma);
	if (ret)
		return ret;

	fsl_edma->big_endian = of_property_read_bool(np, "big-endian");

	INIT_LIST_HEAD(&fsl_edma->dma_dev.channels);
	for (i = 0; i < fsl_edma->n_chans; i++) {
		struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];

		fsl_chan->edma = fsl_edma;

		fsl_chan->vchan.desc_free = fsl_edma_free_desc;
		vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);

		edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
		fsl_edma_chan_mux(fsl_chan, 0, false);
	}

	dma_cap_set(DMA_PRIVATE, fsl_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_SLAVE, fsl_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, fsl_edma->dma_dev.cap_mask);

	fsl_edma->dma_dev.dev = &pdev->dev;
	fsl_edma->dma_dev.device_alloc_chan_resources
		= fsl_edma_alloc_chan_resources;
	fsl_edma->dma_dev.device_free_chan_resources
		= fsl_edma_free_chan_resources;
	fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
	fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
	fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
	fsl_edma->dma_dev.device_control = fsl_edma_control;
	fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;
	fsl_edma->dma_dev.device_slave_caps = fsl_dma_device_slave_caps;

	platform_set_drvdata(pdev, fsl_edma);

	ret = dma_async_device_register(&fsl_edma->dma_dev);
	if (ret) {
		dev_err(&pdev->dev, "Can't register Freescale eDMA engine.\n");
		return ret;
	}

	ret = of_dma_controller_register(np, fsl_edma_xlate, fsl_edma);
	if (ret) {
		dev_err(&pdev->dev, "Can't register Freescale eDMA of_dma.\n");
		dma_async_device_unregister(&fsl_edma->dma_dev);
		return ret;
	}

	/* enable round-robin arbitration */
	edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, fsl_edma->membase + EDMA_CR);

	return 0;
}

static int fsl_edma_remove(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(np);
	dma_async_device_unregister(&fsl_edma->dma_dev);

	for (i = 0; i < DMAMUX_NR; i++)
		clk_disable_unprepare(fsl_edma->muxclk[i]);

	return 0;
}

static const struct of_device_id fsl_edma_dt_ids[] = {
	{ .compatible = "fsl,vf610-edma", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);

static struct platform_driver fsl_edma_driver = {
	.driver		= {
		.name	= "fsl-edma",
		.owner	= THIS_MODULE,
		.of_match_table = fsl_edma_dt_ids,
	},
	.probe		= fsl_edma_probe,
	.remove		= fsl_edma_remove,
};

module_platform_driver(fsl_edma_driver);

MODULE_ALIAS("platform:fsl-edma");
MODULE_DESCRIPTION("Freescale eDMA engine driver");
MODULE_LICENSE("GPL v2");