author		Kevin Hilman <khilman@deeprootsystems.com>	2009-04-14 08:18:14 -0400
committer	Kevin Hilman <khilman@deeprootsystems.com>	2009-04-27 12:49:42 -0400
commit		a4768d2275cb7c0d2a665b9ad4de07834be0714b (patch)
tree		d6c756b06e11d78f22d69fe275b37be2d10ad6a4 /arch
parent		e60990023cac11bc6185e7a7f1007fbbb8d30e4d (diff)
davinci: add EDMA driver
Original code for 2.6.10 and 2.6.28 series done by Texas Instruments
and MontaVista, but major updates and rework done by Troy Kisky and
David Brownell.
Cc: Sergei Shtylyov <sshtylyov@ru.mvista.com>
Cc: Sudhakar Rajashekhara <sudhakar.raj@ti.com>
Cc: Troy Kisky <troy.kisky@boundarydevices.com>
Cc: David Brownell <dbrownell@users.sourceforge.net>
Signed-off-by: Kevin Hilman <khilman@deeprootsystems.com>
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/mach-davinci/Makefile			   2
-rw-r--r--	arch/arm/mach-davinci/dma.c			1135
-rw-r--r--	arch/arm/mach-davinci/include/mach/edma.h	 228
3 files changed, 1364 insertions(+), 1 deletion(-)
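
As context for the diff below, here is a minimal, hypothetical sketch of how a client driver might use the API this patch exports (edma_alloc_channel(), edma_set_src()/edma_set_dest(), edma_set_transfer_params(), edma_start() and the completion callback). The event number and FIFO address are placeholders, not values from this patch, and @done is assumed to be an initialized completion; real drivers are expected to take their channel number from an IORESOURCE_DMA resource rather than hard-coding it.

/* Hypothetical usage sketch -- not part of this patch. */
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <mach/edma.h>

#define EXAMPLE_TX_EVENT	19		/* placeholder; real drivers use an IORESOURCE_DMA resource */
#define EXAMPLE_FIFO_PHYS	0x01c20000	/* placeholder peripheral FIFO address */

static void example_dma_cb(unsigned channel, u16 ch_status, void *data)
{
	if (ch_status == DMA_COMPLETE)
		complete(data);			/* wake up the submitter */
	else					/* DMA_CC_ERROR */
		pr_err("edma error on channel %u\n", channel);
}

static int example_tx(dma_addr_t buf, u16 len, struct completion *done)
{
	struct edmacc_param p;
	int ch;

	ch = edma_alloc_channel(EXAMPLE_TX_EVENT, example_dma_cb,
				done, EVENTQ_DEFAULT);
	if (ch < 0)
		return ch;

	/* the channel number doubles as its dedicated PaRAM slot */
	edma_set_src(ch, buf, INCR, W8BIT);
	edma_set_src_index(ch, 1, 0);		/* walk the buffer one byte at a time */
	edma_set_dest(ch, EXAMPLE_FIFO_PHYS, FIFO, W8BIT);
	edma_set_dest_index(ch, 0, 0);		/* FIFO address stays fixed */

	/* one byte per hardware event: acnt=1, bcnt=len, ccnt=1, A-synchronized */
	edma_set_transfer_params(ch, 1, len, 1, 0, ASYNC);

	/* request a completion interrupt on this channel (TCC + TCINTEN) */
	edma_read_slot(ch, &p);
	p.opt |= TCINTEN | EDMA_TCC(ch);
	edma_write_slot(ch, &p);

	edma_start(ch);				/* enable the hardware event */
	wait_for_completion(done);

	edma_stop(ch);
	edma_free_channel(ch);
	return 0;
}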
diff --git a/arch/arm/mach-davinci/Makefile b/arch/arm/mach-davinci/Makefile
index 4dc458597f40..3ee3d73d9566 100644
--- a/arch/arm/mach-davinci/Makefile
+++ b/arch/arm/mach-davinci/Makefile
@@ -5,7 +5,7 @@
 
 # Common objects
 obj-y 			:= time.o irq.o clock.o serial.o io.o id.o psc.o \
-			   gpio.o mux.o devices.o usb.o
+			   gpio.o mux.o devices.o dma.o usb.o
 
 # Board specific
 obj-$(CONFIG_MACH_DAVINCI_EVM) 		+= board-evm.o
diff --git a/arch/arm/mach-davinci/dma.c b/arch/arm/mach-davinci/dma.c
new file mode 100644
index 000000000000..15e9eb158bb7
--- /dev/null
+++ b/arch/arm/mach-davinci/dma.c
@@ -0,0 +1,1135 @@
1 | /* | ||
2 | * EDMA3 support for DaVinci | ||
3 | * | ||
4 | * Copyright (C) 2006-2009 Texas Instruments. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
19 | */ | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/platform_device.h> | ||
26 | #include <linux/spinlock.h> | ||
27 | #include <linux/compiler.h> | ||
28 | #include <linux/io.h> | ||
29 | |||
30 | #include <mach/cputype.h> | ||
31 | #include <mach/memory.h> | ||
32 | #include <mach/hardware.h> | ||
33 | #include <mach/irqs.h> | ||
34 | #include <mach/edma.h> | ||
35 | #include <mach/mux.h> | ||
36 | |||
37 | |||
38 | /* Offsets matching "struct edmacc_param" */ | ||
39 | #define PARM_OPT 0x00 | ||
40 | #define PARM_SRC 0x04 | ||
41 | #define PARM_A_B_CNT 0x08 | ||
42 | #define PARM_DST 0x0c | ||
43 | #define PARM_SRC_DST_BIDX 0x10 | ||
44 | #define PARM_LINK_BCNTRLD 0x14 | ||
45 | #define PARM_SRC_DST_CIDX 0x18 | ||
46 | #define PARM_CCNT 0x1c | ||
47 | |||
48 | #define PARM_SIZE 0x20 | ||
49 | |||
50 | /* Offsets for EDMA CC global channel registers and their shadows */ | ||
51 | #define SH_ER 0x00 /* 64 bits */ | ||
52 | #define SH_ECR 0x08 /* 64 bits */ | ||
53 | #define SH_ESR 0x10 /* 64 bits */ | ||
54 | #define SH_CER 0x18 /* 64 bits */ | ||
55 | #define SH_EER 0x20 /* 64 bits */ | ||
56 | #define SH_EECR 0x28 /* 64 bits */ | ||
57 | #define SH_EESR 0x30 /* 64 bits */ | ||
58 | #define SH_SER 0x38 /* 64 bits */ | ||
59 | #define SH_SECR 0x40 /* 64 bits */ | ||
60 | #define SH_IER 0x50 /* 64 bits */ | ||
61 | #define SH_IECR 0x58 /* 64 bits */ | ||
62 | #define SH_IESR 0x60 /* 64 bits */ | ||
63 | #define SH_IPR 0x68 /* 64 bits */ | ||
64 | #define SH_ICR 0x70 /* 64 bits */ | ||
65 | #define SH_IEVAL 0x78 | ||
66 | #define SH_QER 0x80 | ||
67 | #define SH_QEER 0x84 | ||
68 | #define SH_QEECR 0x88 | ||
69 | #define SH_QEESR 0x8c | ||
70 | #define SH_QSER 0x90 | ||
71 | #define SH_QSECR 0x94 | ||
72 | #define SH_SIZE 0x200 | ||
73 | |||
74 | /* Offsets for EDMA CC global registers */ | ||
75 | #define EDMA_REV 0x0000 | ||
76 | #define EDMA_CCCFG 0x0004 | ||
77 | #define EDMA_QCHMAP 0x0200 /* 8 registers */ | ||
78 | #define EDMA_DMAQNUM 0x0240 /* 8 registers (4 on OMAP-L1xx) */ | ||
79 | #define EDMA_QDMAQNUM 0x0260 | ||
80 | #define EDMA_QUETCMAP 0x0280 | ||
81 | #define EDMA_QUEPRI 0x0284 | ||
82 | #define EDMA_EMR 0x0300 /* 64 bits */ | ||
83 | #define EDMA_EMCR 0x0308 /* 64 bits */ | ||
84 | #define EDMA_QEMR 0x0310 | ||
85 | #define EDMA_QEMCR 0x0314 | ||
86 | #define EDMA_CCERR 0x0318 | ||
87 | #define EDMA_CCERRCLR 0x031c | ||
88 | #define EDMA_EEVAL 0x0320 | ||
89 | #define EDMA_DRAE 0x0340 /* 4 x 64 bits*/ | ||
90 | #define EDMA_QRAE 0x0380 /* 4 registers */ | ||
91 | #define EDMA_QUEEVTENTRY 0x0400 /* 2 x 16 registers */ | ||
92 | #define EDMA_QSTAT 0x0600 /* 2 registers */ | ||
93 | #define EDMA_QWMTHRA 0x0620 | ||
94 | #define EDMA_QWMTHRB 0x0624 | ||
95 | #define EDMA_CCSTAT 0x0640 | ||
96 | |||
97 | #define EDMA_M 0x1000 /* global channel registers */ | ||
98 | #define EDMA_ECR 0x1008 | ||
99 | #define EDMA_ECRH 0x100C | ||
100 | #define EDMA_SHADOW0 0x2000 /* 4 regions shadowing global channels */ | ||
101 | #define EDMA_PARM 0x4000 /* 128 param entries */ | ||
102 | |||
103 | #define DAVINCI_DMA_3PCC_BASE 0x01C00000 | ||
104 | |||
105 | #define PARM_OFFSET(param_no) (EDMA_PARM + ((param_no) << 5)) | ||
106 | |||
107 | #define EDMA_MAX_DMACH 64 | ||
108 | #define EDMA_MAX_PARAMENTRY 512 | ||
109 | #define EDMA_MAX_EVQUE 2 /* FIXME too small */ | ||
110 | |||
111 | |||
112 | /*****************************************************************************/ | ||
113 | |||
114 | static void __iomem *edmacc_regs_base; | ||
115 | |||
116 | static inline unsigned int edma_read(int offset) | ||
117 | { | ||
118 | return (unsigned int)__raw_readl(edmacc_regs_base + offset); | ||
119 | } | ||
120 | |||
121 | static inline void edma_write(int offset, int val) | ||
122 | { | ||
123 | __raw_writel(val, edmacc_regs_base + offset); | ||
124 | } | ||
125 | static inline void edma_modify(int offset, unsigned and, unsigned or) | ||
126 | { | ||
127 | unsigned val = edma_read(offset); | ||
128 | val &= and; | ||
129 | val |= or; | ||
130 | edma_write(offset, val); | ||
131 | } | ||
132 | static inline void edma_and(int offset, unsigned and) | ||
133 | { | ||
134 | unsigned val = edma_read(offset); | ||
135 | val &= and; | ||
136 | edma_write(offset, val); | ||
137 | } | ||
138 | static inline void edma_or(int offset, unsigned or) | ||
139 | { | ||
140 | unsigned val = edma_read(offset); | ||
141 | val |= or; | ||
142 | edma_write(offset, val); | ||
143 | } | ||
144 | static inline unsigned int edma_read_array(int offset, int i) | ||
145 | { | ||
146 | return edma_read(offset + (i << 2)); | ||
147 | } | ||
148 | static inline void edma_write_array(int offset, int i, unsigned val) | ||
149 | { | ||
150 | edma_write(offset + (i << 2), val); | ||
151 | } | ||
152 | static inline void edma_modify_array(int offset, int i, | ||
153 | unsigned and, unsigned or) | ||
154 | { | ||
155 | edma_modify(offset + (i << 2), and, or); | ||
156 | } | ||
157 | static inline void edma_or_array(int offset, int i, unsigned or) | ||
158 | { | ||
159 | edma_or(offset + (i << 2), or); | ||
160 | } | ||
161 | static inline void edma_or_array2(int offset, int i, int j, unsigned or) | ||
162 | { | ||
163 | edma_or(offset + ((i*2 + j) << 2), or); | ||
164 | } | ||
165 | static inline void edma_write_array2(int offset, int i, int j, unsigned val) | ||
166 | { | ||
167 | edma_write(offset + ((i*2 + j) << 2), val); | ||
168 | } | ||
169 | static inline unsigned int edma_shadow0_read(int offset) | ||
170 | { | ||
171 | return edma_read(EDMA_SHADOW0 + offset); | ||
172 | } | ||
173 | static inline unsigned int edma_shadow0_read_array(int offset, int i) | ||
174 | { | ||
175 | return edma_read(EDMA_SHADOW0 + offset + (i << 2)); | ||
176 | } | ||
177 | static inline void edma_shadow0_write(int offset, unsigned val) | ||
178 | { | ||
179 | edma_write(EDMA_SHADOW0 + offset, val); | ||
180 | } | ||
181 | static inline void edma_shadow0_write_array(int offset, int i, unsigned val) | ||
182 | { | ||
183 | edma_write(EDMA_SHADOW0 + offset + (i << 2), val); | ||
184 | } | ||
185 | static inline unsigned int edma_parm_read(int offset, int param_no) | ||
186 | { | ||
187 | return edma_read(EDMA_PARM + offset + (param_no << 5)); | ||
188 | } | ||
189 | static inline void edma_parm_write(int offset, int param_no, unsigned val) | ||
190 | { | ||
191 | edma_write(EDMA_PARM + offset + (param_no << 5), val); | ||
192 | } | ||
193 | static inline void edma_parm_modify(int offset, int param_no, | ||
194 | unsigned and, unsigned or) | ||
195 | { | ||
196 | edma_modify(EDMA_PARM + offset + (param_no << 5), and, or); | ||
197 | } | ||
198 | static inline void edma_parm_and(int offset, int param_no, unsigned and) | ||
199 | { | ||
200 | edma_and(EDMA_PARM + offset + (param_no << 5), and); | ||
201 | } | ||
202 | static inline void edma_parm_or(int offset, int param_no, unsigned or) | ||
203 | { | ||
204 | edma_or(EDMA_PARM + offset + (param_no << 5), or); | ||
205 | } | ||
206 | |||
207 | /*****************************************************************************/ | ||
208 | |||
209 | /* actual number of DMA channels and slots on this silicon */ | ||
210 | static unsigned num_channels; | ||
211 | static unsigned num_slots; | ||
212 | |||
213 | static struct dma_interrupt_data { | ||
214 | void (*callback)(unsigned channel, unsigned short ch_status, | ||
215 | void *data); | ||
216 | void *data; | ||
217 | } intr_data[EDMA_MAX_DMACH]; | ||
218 | |||
219 | /* The edma_inuse bit for each PaRAM slot is clear unless the | ||
220 | * channel is in use ... by ARM or DSP, for QDMA, or whatever. | ||
221 | */ | ||
222 | static DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY); | ||
223 | |||
224 | /* The edma_noevent bit for each channel is clear unless | ||
225 | * it doesn't trigger DMA events on this platform. It uses a | ||
226 | * bit of SOC-specific initialization code. | ||
227 | */ | ||
228 | static DECLARE_BITMAP(edma_noevent, EDMA_MAX_DMACH); | ||
229 | |||
230 | /* dummy param set used to (re)initialize parameter RAM slots */ | ||
231 | static const struct edmacc_param dummy_paramset = { | ||
232 | .link_bcntrld = 0xffff, | ||
233 | .ccnt = 1, | ||
234 | }; | ||
235 | |||
236 | static const int __initconst | ||
237 | queue_tc_mapping[EDMA_MAX_EVQUE + 1][2] = { | ||
238 | /* {event queue no, TC no} */ | ||
239 | {0, 0}, | ||
240 | {1, 1}, | ||
241 | {-1, -1} | ||
242 | }; | ||
243 | |||
244 | static const int __initconst | ||
245 | queue_priority_mapping[EDMA_MAX_EVQUE + 1][2] = { | ||
246 | /* {event queue no, Priority} */ | ||
247 | {0, 3}, | ||
248 | {1, 7}, | ||
249 | {-1, -1} | ||
250 | }; | ||
251 | |||
252 | /*****************************************************************************/ | ||
253 | |||
254 | static void map_dmach_queue(unsigned ch_no, enum dma_event_q queue_no) | ||
255 | { | ||
256 | int bit = (ch_no & 0x7) * 4; | ||
257 | |||
258 | /* default to low priority queue */ | ||
259 | if (queue_no == EVENTQ_DEFAULT) | ||
260 | queue_no = EVENTQ_1; | ||
261 | |||
262 | queue_no &= 7; | ||
263 | edma_modify_array(EDMA_DMAQNUM, (ch_no >> 3), | ||
264 | ~(0x7 << bit), queue_no << bit); | ||
265 | } | ||
266 | |||
267 | static void __init map_queue_tc(int queue_no, int tc_no) | ||
268 | { | ||
269 | int bit = queue_no * 4; | ||
270 | edma_modify(EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit)); | ||
271 | } | ||
272 | |||
273 | static void __init assign_priority_to_queue(int queue_no, int priority) | ||
274 | { | ||
275 | int bit = queue_no * 4; | ||
276 | edma_modify(EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit)); | ||
277 | } | ||
278 | |||
279 | static inline void | ||
280 | setup_dma_interrupt(unsigned lch, | ||
281 | void (*callback)(unsigned channel, u16 ch_status, void *data), | ||
282 | void *data) | ||
283 | { | ||
284 | if (!callback) { | ||
285 | edma_shadow0_write_array(SH_IECR, lch >> 5, | ||
286 | (1 << (lch & 0x1f))); | ||
287 | } | ||
288 | |||
289 | intr_data[lch].callback = callback; | ||
290 | intr_data[lch].data = data; | ||
291 | |||
292 | if (callback) { | ||
293 | edma_shadow0_write_array(SH_ICR, lch >> 5, | ||
294 | (1 << (lch & 0x1f))); | ||
295 | edma_shadow0_write_array(SH_IESR, lch >> 5, | ||
296 | (1 << (lch & 0x1f))); | ||
297 | } | ||
298 | } | ||
299 | |||
300 | /****************************************************************************** | ||
301 | * | ||
302 | * DMA interrupt handler | ||
303 | * | ||
304 | *****************************************************************************/ | ||
305 | static irqreturn_t dma_irq_handler(int irq, void *data) | ||
306 | { | ||
307 | int i; | ||
308 | unsigned int cnt = 0; | ||
309 | |||
310 | dev_dbg(data, "dma_irq_handler\n"); | ||
311 | |||
312 | if ((edma_shadow0_read_array(SH_IPR, 0) == 0) | ||
313 | && (edma_shadow0_read_array(SH_IPR, 1) == 0)) | ||
314 | return IRQ_NONE; | ||
315 | |||
316 | while (1) { | ||
317 | int j; | ||
318 | if (edma_shadow0_read_array(SH_IPR, 0)) | ||
319 | j = 0; | ||
320 | else if (edma_shadow0_read_array(SH_IPR, 1)) | ||
321 | j = 1; | ||
322 | else | ||
323 | break; | ||
324 | dev_dbg(data, "IPR%d %08x\n", j, | ||
325 | edma_shadow0_read_array(SH_IPR, j)); | ||
326 | for (i = 0; i < 32; i++) { | ||
327 | int k = (j << 5) + i; | ||
328 | if (edma_shadow0_read_array(SH_IPR, j) & (1 << i)) { | ||
329 | /* Clear the corresponding IPR bits */ | ||
330 | edma_shadow0_write_array(SH_ICR, j, (1 << i)); | ||
331 | if (intr_data[k].callback) { | ||
332 | intr_data[k].callback(k, DMA_COMPLETE, | ||
333 | intr_data[k].data); | ||
334 | } | ||
335 | } | ||
336 | } | ||
337 | cnt++; | ||
338 | if (cnt > 10) | ||
339 | break; | ||
340 | } | ||
341 | edma_shadow0_write(SH_IEVAL, 1); | ||
342 | return IRQ_HANDLED; | ||
343 | } | ||
344 | |||
345 | /****************************************************************************** | ||
346 | * | ||
347 | * DMA error interrupt handler | ||
348 | * | ||
349 | *****************************************************************************/ | ||
350 | static irqreturn_t dma_ccerr_handler(int irq, void *data) | ||
351 | { | ||
352 | int i; | ||
353 | unsigned int cnt = 0; | ||
354 | |||
355 | dev_dbg(data, "dma_ccerr_handler\n"); | ||
356 | |||
357 | if ((edma_read_array(EDMA_EMR, 0) == 0) && | ||
358 | (edma_read_array(EDMA_EMR, 1) == 0) && | ||
359 | (edma_read(EDMA_QEMR) == 0) && (edma_read(EDMA_CCERR) == 0)) | ||
360 | return IRQ_NONE; | ||
361 | |||
362 | while (1) { | ||
363 | int j = -1; | ||
364 | if (edma_read_array(EDMA_EMR, 0)) | ||
365 | j = 0; | ||
366 | else if (edma_read_array(EDMA_EMR, 1)) | ||
367 | j = 1; | ||
368 | if (j >= 0) { | ||
369 | dev_dbg(data, "EMR%d %08x\n", j, | ||
370 | edma_read_array(EDMA_EMR, j)); | ||
371 | for (i = 0; i < 32; i++) { | ||
372 | int k = (j << 5) + i; | ||
373 | if (edma_read_array(EDMA_EMR, j) & (1 << i)) { | ||
374 | /* Clear the corresponding EMR bits */ | ||
375 | edma_write_array(EDMA_EMCR, j, 1 << i); | ||
376 | /* Clear any SER */ | ||
377 | edma_shadow0_write_array(SH_SECR, j, | ||
378 | (1 << i)); | ||
379 | if (intr_data[k].callback) { | ||
380 | intr_data[k].callback(k, | ||
381 | DMA_CC_ERROR, | ||
382 | intr_data | ||
383 | [k].data); | ||
384 | } | ||
385 | } | ||
386 | } | ||
387 | } else if (edma_read(EDMA_QEMR)) { | ||
388 | dev_dbg(data, "QEMR %02x\n", | ||
389 | edma_read(EDMA_QEMR)); | ||
390 | for (i = 0; i < 8; i++) { | ||
391 | if (edma_read(EDMA_QEMR) & (1 << i)) { | ||
392 | /* Clear the corresponding IPR bits */ | ||
393 | edma_write(EDMA_QEMCR, 1 << i); | ||
394 | edma_shadow0_write(SH_QSECR, (1 << i)); | ||
395 | |||
396 | /* NOTE: not reported!! */ | ||
397 | } | ||
398 | } | ||
399 | } else if (edma_read(EDMA_CCERR)) { | ||
400 | dev_dbg(data, "CCERR %08x\n", | ||
401 | edma_read(EDMA_CCERR)); | ||
402 | /* FIXME: CCERR.BIT(16) ignored! much better | ||
403 | * to just write CCERRCLR with CCERR value... | ||
404 | */ | ||
405 | for (i = 0; i < 8; i++) { | ||
406 | if (edma_read(EDMA_CCERR) & (1 << i)) { | ||
407 | /* Clear the corresponding IPR bits */ | ||
408 | edma_write(EDMA_CCERRCLR, 1 << i); | ||
409 | |||
410 | /* NOTE: not reported!! */ | ||
411 | } | ||
412 | } | ||
413 | } | ||
414 | if ((edma_read_array(EDMA_EMR, 0) == 0) | ||
415 | && (edma_read_array(EDMA_EMR, 1) == 0) | ||
416 | && (edma_read(EDMA_QEMR) == 0) | ||
417 | && (edma_read(EDMA_CCERR) == 0)) { | ||
418 | break; | ||
419 | } | ||
420 | cnt++; | ||
421 | if (cnt > 10) | ||
422 | break; | ||
423 | } | ||
424 | edma_write(EDMA_EEVAL, 1); | ||
425 | return IRQ_HANDLED; | ||
426 | } | ||
427 | |||
428 | /****************************************************************************** | ||
429 | * | ||
430 | * Transfer controller error interrupt handlers | ||
431 | * | ||
432 | *****************************************************************************/ | ||
433 | |||
434 | #define tc_errs_handled false /* disabled as long as they're NOPs */ | ||
435 | |||
436 | static irqreturn_t dma_tc0err_handler(int irq, void *data) | ||
437 | { | ||
438 | dev_dbg(data, "dma_tc0err_handler\n"); | ||
439 | return IRQ_HANDLED; | ||
440 | } | ||
441 | |||
442 | static irqreturn_t dma_tc1err_handler(int irq, void *data) | ||
443 | { | ||
444 | dev_dbg(data, "dma_tc1err_handler\n"); | ||
445 | return IRQ_HANDLED; | ||
446 | } | ||
447 | |||
448 | /*-----------------------------------------------------------------------*/ | ||
449 | |||
450 | /* Resource alloc/free: dma channels, parameter RAM slots */ | ||
451 | |||
452 | /** | ||
453 | * edma_alloc_channel - allocate DMA channel and paired parameter RAM | ||
454 | * @channel: specific channel to allocate; negative for "any unmapped channel" | ||
455 | * @callback: optional; to be issued on DMA completion or errors | ||
456 | * @data: passed to callback | ||
457 | * @eventq_no: an EVENTQ_* constant, used to choose which Transfer | ||
458 | * Controller (TC) executes requests using this channel. Use | ||
459 | * EVENTQ_DEFAULT unless you really need a high priority queue. | ||
460 | * | ||
461 | * This allocates a DMA channel and its associated parameter RAM slot. | ||
462 | * The parameter RAM is initialized to hold a dummy transfer. | ||
463 | * | ||
464 | * Normal use is to pass a specific channel number as @channel, to make | ||
465 | * use of hardware events mapped to that channel. When the channel will | ||
466 | * be used only for software triggering or event chaining, channels not | ||
467 | * mapped to hardware events (or mapped to unused events) are preferable. | ||
468 | * | ||
469 | * DMA transfers start from a channel using edma_start(), or by | ||
470 | * chaining. When the transfer described in that channel's parameter RAM | ||
471 | * slot completes, that slot's data may be reloaded through a link. | ||
472 | * | ||
473 | * DMA errors are only reported to the @callback associated with the | ||
474 | * channel driving that transfer, but transfer completion callbacks can | ||
475 | * be sent to another channel under control of the TCC field in | ||
476 | * the option word of the transfer's parameter RAM set. Drivers must not | ||
477 | * use DMA transfer completion callbacks for channels they did not allocate. | ||
478 | * (The same applies to TCC codes used in transfer chaining.) | ||
479 | * | ||
480 | * Returns the number of the channel, else negative errno. | ||
481 | */ | ||
482 | int edma_alloc_channel(int channel, | ||
483 | void (*callback)(unsigned channel, u16 ch_status, void *data), | ||
484 | void *data, | ||
485 | enum dma_event_q eventq_no) | ||
486 | { | ||
487 | if (channel < 0) { | ||
488 | channel = 0; | ||
489 | for (;;) { | ||
490 | channel = find_next_bit(edma_noevent, | ||
491 | num_channels, channel); | ||
492 | if (channel == num_channels) | ||
493 | return -ENOMEM; | ||
494 | if (!test_and_set_bit(channel, edma_inuse)) | ||
495 | break; | ||
496 | channel++; | ||
497 | } | ||
498 | } else if (channel >= num_channels) { | ||
499 | return -EINVAL; | ||
500 | } else if (test_and_set_bit(channel, edma_inuse)) { | ||
501 | return -EBUSY; | ||
502 | } | ||
503 | |||
504 | /* ensure access through shadow region 0 */ | ||
505 | edma_or_array2(EDMA_DRAE, 0, channel >> 5, 1 << (channel & 0x1f)); | ||
506 | |||
507 | /* ensure no events are pending */ | ||
508 | edma_stop(channel); | ||
509 | memcpy_toio(edmacc_regs_base + PARM_OFFSET(channel), | ||
510 | &dummy_paramset, PARM_SIZE); | ||
511 | |||
512 | if (callback) | ||
513 | setup_dma_interrupt(channel, callback, data); | ||
514 | |||
515 | map_dmach_queue(channel, eventq_no); | ||
516 | |||
517 | return channel; | ||
518 | } | ||
519 | EXPORT_SYMBOL(edma_alloc_channel); | ||
520 | |||
521 | |||
522 | /** | ||
523 | * edma_free_channel - deallocate DMA channel | ||
524 | * @channel: dma channel returned from edma_alloc_channel() | ||
525 | * | ||
526 | * This deallocates the DMA channel and associated parameter RAM slot | ||
527 | * allocated by edma_alloc_channel(). | ||
528 | * | ||
529 | * Callers are responsible for ensuring the channel is inactive, and | ||
530 | * will not be reactivated by linking, chaining, or software calls to | ||
531 | * edma_start(). | ||
532 | */ | ||
533 | void edma_free_channel(unsigned channel) | ||
534 | { | ||
535 | if (channel >= num_channels) | ||
536 | return; | ||
537 | |||
538 | setup_dma_interrupt(channel, NULL, NULL); | ||
539 | /* REVISIT should probably take out of shadow region 0 */ | ||
540 | |||
541 | memcpy_toio(edmacc_regs_base + PARM_OFFSET(channel), | ||
542 | &dummy_paramset, PARM_SIZE); | ||
543 | clear_bit(channel, edma_inuse); | ||
544 | } | ||
545 | EXPORT_SYMBOL(edma_free_channel); | ||
546 | |||
547 | /** | ||
548 | * edma_alloc_slot - allocate DMA parameter RAM | ||
549 | * @slot: specific slot to allocate; negative for "any unused slot" | ||
550 | * | ||
551 | * This allocates a parameter RAM slot, initializing it to hold a | ||
552 | * dummy transfer. Slots allocated using this routine have not been | ||
553 | * mapped to a hardware DMA channel, and will normally be used by | ||
554 | * linking to them from a slot associated with a DMA channel. | ||
555 | * | ||
556 | * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific | ||
557 | * slots may be allocated on behalf of DSP firmware. | ||
558 | * | ||
559 | * Returns the number of the slot, else negative errno. | ||
560 | */ | ||
561 | int edma_alloc_slot(int slot) | ||
562 | { | ||
563 | if (slot < 0) { | ||
564 | slot = num_channels; | ||
565 | for (;;) { | ||
566 | slot = find_next_zero_bit(edma_inuse, | ||
567 | num_slots, slot); | ||
568 | if (slot == num_slots) | ||
569 | return -ENOMEM; | ||
570 | if (!test_and_set_bit(slot, edma_inuse)) | ||
571 | break; | ||
572 | } | ||
573 | } else if (slot < num_channels || slot >= num_slots) { | ||
574 | return -EINVAL; | ||
575 | } else if (test_and_set_bit(slot, edma_inuse)) { | ||
576 | return -EBUSY; | ||
577 | } | ||
578 | |||
579 | memcpy_toio(edmacc_regs_base + PARM_OFFSET(slot), | ||
580 | &dummy_paramset, PARM_SIZE); | ||
581 | |||
582 | return slot; | ||
583 | } | ||
584 | EXPORT_SYMBOL(edma_alloc_slot); | ||
585 | |||
586 | /** | ||
587 | * edma_free_slot - deallocate DMA parameter RAM | ||
588 | * @slot: parameter RAM slot returned from edma_alloc_slot() | ||
589 | * | ||
590 | * This deallocates the parameter RAM slot allocated by edma_alloc_slot(). | ||
591 | * Callers are responsible for ensuring the slot is inactive, and will | ||
592 | * not be activated. | ||
593 | */ | ||
594 | void edma_free_slot(unsigned slot) | ||
595 | { | ||
596 | if (slot < num_channels || slot >= num_slots) | ||
597 | return; | ||
598 | |||
599 | memcpy_toio(edmacc_regs_base + PARM_OFFSET(slot), | ||
600 | &dummy_paramset, PARM_SIZE); | ||
601 | clear_bit(slot, edma_inuse); | ||
602 | } | ||
603 | EXPORT_SYMBOL(edma_free_slot); | ||
604 | |||
605 | /*-----------------------------------------------------------------------*/ | ||
606 | |||
607 | /* Parameter RAM operations (i) -- read/write partial slots */ | ||
608 | |||
609 | /** | ||
610 | * edma_set_src - set initial DMA source address in parameter RAM slot | ||
611 | * @slot: parameter RAM slot being configured | ||
612 | * @src_port: physical address of source (memory, controller FIFO, etc) | ||
613 | * @mode: INCR, except in very rare cases | ||
614 | * @width: ignored unless @mode is FIFO, else specifies the | ||
615 | * width to use when addressing the fifo (e.g. W8BIT, W32BIT) | ||
616 | * | ||
617 | * Note that the source address is modified during the DMA transfer | ||
618 | * according to edma_set_src_index(). | ||
619 | */ | ||
620 | void edma_set_src(unsigned slot, dma_addr_t src_port, | ||
621 | enum address_mode mode, enum fifo_width width) | ||
622 | { | ||
623 | if (slot < num_slots) { | ||
624 | unsigned int i = edma_parm_read(PARM_OPT, slot); | ||
625 | |||
626 | if (mode) { | ||
627 | /* set SAM and program FWID */ | ||
628 | i = (i & ~(EDMA_FWID)) | (SAM | ((width & 0x7) << 8)); | ||
629 | } else { | ||
630 | /* clear SAM */ | ||
631 | i &= ~SAM; | ||
632 | } | ||
633 | edma_parm_write(PARM_OPT, slot, i); | ||
634 | |||
635 | /* set the source port address | ||
636 | in source register of param structure */ | ||
637 | edma_parm_write(PARM_SRC, slot, src_port); | ||
638 | } | ||
639 | } | ||
640 | EXPORT_SYMBOL(edma_set_src); | ||
641 | |||
642 | /** | ||
643 | * edma_set_dest - set initial DMA destination address in parameter RAM slot | ||
644 | * @slot: parameter RAM slot being configured | ||
645 | * @dest_port: physical address of destination (memory, controller FIFO, etc) | ||
646 | * @mode: INCR, except in very rare cases | ||
647 | * @width: ignored unless @mode is FIFO, else specifies the | ||
648 | * width to use when addressing the fifo (e.g. W8BIT, W32BIT) | ||
649 | * | ||
650 | * Note that the destination address is modified during the DMA transfer | ||
651 | * according to edma_set_dest_index(). | ||
652 | */ | ||
653 | void edma_set_dest(unsigned slot, dma_addr_t dest_port, | ||
654 | enum address_mode mode, enum fifo_width width) | ||
655 | { | ||
656 | if (slot < num_slots) { | ||
657 | unsigned int i = edma_parm_read(PARM_OPT, slot); | ||
658 | |||
659 | if (mode) { | ||
660 | /* set DAM and program FWID */ | ||
661 | i = (i & ~(EDMA_FWID)) | (DAM | ((width & 0x7) << 8)); | ||
662 | } else { | ||
663 | /* clear DAM */ | ||
664 | i &= ~DAM; | ||
665 | } | ||
666 | edma_parm_write(PARM_OPT, slot, i); | ||
667 | /* set the destination port address | ||
668 | in dest register of param structure */ | ||
669 | edma_parm_write(PARM_DST, slot, dest_port); | ||
670 | } | ||
671 | } | ||
672 | EXPORT_SYMBOL(edma_set_dest); | ||
673 | |||
674 | /** | ||
675 | * edma_get_position - returns the current transfer points | ||
676 | * @slot: parameter RAM slot being examined | ||
677 | * @src: pointer to source port position | ||
678 | * @dst: pointer to destination port position | ||
679 | * | ||
680 | * Returns current source and destination addresses for a particular | ||
681 | * parameter RAM slot. Its channel should not be active when this is called. | ||
682 | */ | ||
683 | void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst) | ||
684 | { | ||
685 | struct edmacc_param temp; | ||
686 | |||
687 | edma_read_slot(slot, &temp); | ||
688 | if (src != NULL) | ||
689 | *src = temp.src; | ||
690 | if (dst != NULL) | ||
691 | *dst = temp.dst; | ||
692 | } | ||
693 | EXPORT_SYMBOL(edma_get_position); | ||
694 | |||
695 | /** | ||
696 | * edma_set_src_index - configure DMA source address indexing | ||
697 | * @slot: parameter RAM slot being configured | ||
698 | * @src_bidx: byte offset between source arrays in a frame | ||
699 | * @src_cidx: byte offset between source frames in a block | ||
700 | * | ||
701 | * Offsets are specified to support either contiguous or discontiguous | ||
702 | * memory transfers, or repeated access to a hardware register, as needed. | ||
703 | * When accessing hardware registers, both offsets are normally zero. | ||
704 | */ | ||
705 | void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx) | ||
706 | { | ||
707 | if (slot < num_slots) { | ||
708 | edma_parm_modify(PARM_SRC_DST_BIDX, slot, | ||
709 | 0xffff0000, src_bidx); | ||
710 | edma_parm_modify(PARM_SRC_DST_CIDX, slot, | ||
711 | 0xffff0000, src_cidx); | ||
712 | } | ||
713 | } | ||
714 | EXPORT_SYMBOL(edma_set_src_index); | ||
715 | |||
716 | /** | ||
717 | * edma_set_dest_index - configure DMA destination address indexing | ||
718 | * @slot: parameter RAM slot being configured | ||
719 | * @dest_bidx: byte offset between destination arrays in a frame | ||
720 | * @dest_cidx: byte offset between destination frames in a block | ||
721 | * | ||
722 | * Offsets are specified to support either contiguous or discontiguous | ||
723 | * memory transfers, or repeated access to a hardware register, as needed. | ||
724 | * When accessing hardware registers, both offsets are normally zero. | ||
725 | */ | ||
726 | void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx) | ||
727 | { | ||
728 | if (slot < num_slots) { | ||
729 | edma_parm_modify(PARM_SRC_DST_BIDX, slot, | ||
730 | 0x0000ffff, dest_bidx << 16); | ||
731 | edma_parm_modify(PARM_SRC_DST_CIDX, slot, | ||
732 | 0x0000ffff, dest_cidx << 16); | ||
733 | } | ||
734 | } | ||
735 | EXPORT_SYMBOL(edma_set_dest_index); | ||
736 | |||
737 | /** | ||
738 | * edma_set_transfer_params - configure DMA transfer parameters | ||
739 | * @slot: parameter RAM slot being configured | ||
740 | * @acnt: how many bytes per array (at least one) | ||
741 | * @bcnt: how many arrays per frame (at least one) | ||
742 | * @ccnt: how many frames per block (at least one) | ||
743 | * @bcnt_rld: used only for A-Synchronized transfers; this specifies | ||
744 | * the value to reload into bcnt when it decrements to zero | ||
745 | * @sync_mode: ASYNC or ABSYNC | ||
746 | * | ||
747 | * See the EDMA3 documentation to understand how to configure and link | ||
748 | * transfers using the fields in PaRAM slots. If you are not doing it | ||
749 | * all at once with edma_write_slot(), you will use this routine | ||
750 | * plus two calls each for source and destination, setting the initial | ||
751 | * address and saying how to index that address. | ||
752 | * | ||
753 | * An example of an A-Synchronized transfer is a serial link using a | ||
754 | * single word shift register. In that case, @acnt would be equal to | ||
755 | * that word size; the serial controller issues a DMA synchronization | ||
756 | * event to transfer each word, and memory access by the DMA transfer | ||
757 | * controller will be word-at-a-time. | ||
758 | * | ||
759 | * An example of an AB-Synchronized transfer is a device using a FIFO. | ||
760 | * In that case, @acnt equals the FIFO width and @bcnt equals its depth. | ||
761 | * The controller with the FIFO issues DMA synchronization events when | ||
762 | * the FIFO threshold is reached, and the DMA transfer controller will | ||
763 | * transfer one frame to (or from) the FIFO. It will probably use | ||
764 | * efficient burst modes to access memory. | ||
765 | */ | ||
766 | void edma_set_transfer_params(unsigned slot, | ||
767 | u16 acnt, u16 bcnt, u16 ccnt, | ||
768 | u16 bcnt_rld, enum sync_dimension sync_mode) | ||
769 | { | ||
770 | if (slot < num_slots) { | ||
771 | edma_parm_modify(PARM_LINK_BCNTRLD, slot, | ||
772 | 0x0000ffff, bcnt_rld << 16); | ||
773 | if (sync_mode == ASYNC) | ||
774 | edma_parm_and(PARM_OPT, slot, ~SYNCDIM); | ||
775 | else | ||
776 | edma_parm_or(PARM_OPT, slot, SYNCDIM); | ||
777 | /* Set the acount, bcount, ccount registers */ | ||
778 | edma_parm_write(PARM_A_B_CNT, slot, (bcnt << 16) | acnt); | ||
779 | edma_parm_write(PARM_CCNT, slot, ccnt); | ||
780 | } | ||
781 | } | ||
782 | EXPORT_SYMBOL(edma_set_transfer_params); | ||
783 | |||
784 | /** | ||
785 | * edma_link - link one parameter RAM slot to another | ||
786 | * @from: parameter RAM slot originating the link | ||
787 | * @to: parameter RAM slot which is the link target | ||
788 | * | ||
789 | * The originating slot should not be part of any active DMA transfer. | ||
790 | */ | ||
791 | void edma_link(unsigned from, unsigned to) | ||
792 | { | ||
793 | if (from >= num_slots) | ||
794 | return; | ||
795 | if (to >= num_slots) | ||
796 | return; | ||
797 | edma_parm_modify(PARM_LINK_BCNTRLD, from, 0xffff0000, PARM_OFFSET(to)); | ||
798 | } | ||
799 | EXPORT_SYMBOL(edma_link); | ||
800 | |||
801 | /** | ||
802 | * edma_unlink - cut link from one parameter RAM slot | ||
803 | * @from: parameter RAM slot originating the link | ||
804 | * | ||
805 | * The originating slot should not be part of any active DMA transfer. | ||
806 | * Its link is set to 0xffff. | ||
807 | */ | ||
808 | void edma_unlink(unsigned from) | ||
809 | { | ||
810 | if (from >= num_slots) | ||
811 | return; | ||
812 | edma_parm_or(PARM_LINK_BCNTRLD, from, 0xffff); | ||
813 | } | ||
814 | EXPORT_SYMBOL(edma_unlink); | ||
815 | |||
816 | /*-----------------------------------------------------------------------*/ | ||
817 | |||
818 | /* Parameter RAM operations (ii) -- read/write whole parameter sets */ | ||
819 | |||
820 | /** | ||
821 | * edma_write_slot - write parameter RAM data for slot | ||
822 | * @slot: number of parameter RAM slot being modified | ||
823 | * @param: data to be written into parameter RAM slot | ||
824 | * | ||
825 | * Use this to assign all parameters of a transfer at once. This | ||
826 | * allows more efficient setup of transfers than issuing multiple | ||
827 | * calls to set up those parameters in small pieces, and provides | ||
828 | * complete control over all transfer options. | ||
829 | */ | ||
830 | void edma_write_slot(unsigned slot, const struct edmacc_param *param) | ||
831 | { | ||
832 | if (slot >= num_slots) | ||
833 | return; | ||
834 | memcpy_toio(edmacc_regs_base + PARM_OFFSET(slot), param, PARM_SIZE); | ||
835 | } | ||
836 | EXPORT_SYMBOL(edma_write_slot); | ||
837 | |||
838 | /** | ||
839 | * edma_read_slot - read parameter RAM data from slot | ||
840 | * @slot: number of parameter RAM slot being copied | ||
841 | * @param: where to store copy of parameter RAM data | ||
842 | * | ||
843 | * Use this to read data from a parameter RAM slot, perhaps to | ||
844 | * save them as a template for later reuse. | ||
845 | */ | ||
846 | void edma_read_slot(unsigned slot, struct edmacc_param *param) | ||
847 | { | ||
848 | if (slot >= num_slots) | ||
849 | return; | ||
850 | memcpy_fromio(param, edmacc_regs_base + PARM_OFFSET(slot), PARM_SIZE); | ||
851 | } | ||
852 | EXPORT_SYMBOL(edma_read_slot); | ||
853 | |||
854 | /*-----------------------------------------------------------------------*/ | ||
855 | |||
856 | /* Various EDMA channel control operations */ | ||
857 | |||
858 | /** | ||
859 | * edma_pause - pause dma on a channel | ||
860 | * @channel: on which edma_start() has been called | ||
861 | * | ||
862 | * This temporarily disables EDMA hardware events on the specified channel, | ||
863 | * preventing them from triggering new transfers on its behalf | ||
864 | */ | ||
865 | void edma_pause(unsigned channel) | ||
866 | { | ||
867 | if (channel < num_channels) { | ||
868 | unsigned int mask = (1 << (channel & 0x1f)); | ||
869 | |||
870 | edma_shadow0_write_array(SH_EECR, channel >> 5, mask); | ||
871 | } | ||
872 | } | ||
873 | EXPORT_SYMBOL(edma_pause); | ||
874 | |||
875 | /** | ||
876 | * edma_resume - resumes dma on a paused channel | ||
877 | * @channel: on which edma_pause() has been called | ||
878 | * | ||
879 | * This re-enables EDMA hardware events on the specified channel. | ||
880 | */ | ||
881 | void edma_resume(unsigned channel) | ||
882 | { | ||
883 | if (channel < num_channels) { | ||
884 | unsigned int mask = (1 << (channel & 0x1f)); | ||
885 | |||
886 | edma_shadow0_write_array(SH_EESR, channel >> 5, mask); | ||
887 | } | ||
888 | } | ||
889 | EXPORT_SYMBOL(edma_resume); | ||
890 | |||
891 | /** | ||
892 | * edma_start - start dma on a channel | ||
893 | * @channel: channel being activated | ||
894 | * | ||
895 | * Channels with event associations will be triggered by their hardware | ||
896 | * events, and channels without such associations will be triggered by | ||
897 | * software. (At this writing there is no interface for using software | ||
898 | * triggers except with channels that don't support hardware triggers.) | ||
899 | * | ||
900 | * Returns zero on success, else negative errno. | ||
901 | */ | ||
902 | int edma_start(unsigned channel) | ||
903 | { | ||
904 | if (channel < num_channels) { | ||
905 | int j = channel >> 5; | ||
906 | unsigned int mask = (1 << (channel & 0x1f)); | ||
907 | |||
908 | /* EDMA channels without event association */ | ||
909 | if (test_bit(channel, edma_noevent)) { | ||
910 | pr_debug("EDMA: ESR%d %08x\n", j, | ||
911 | edma_shadow0_read_array(SH_ESR, j)); | ||
912 | edma_shadow0_write_array(SH_ESR, j, mask); | ||
913 | return 0; | ||
914 | } | ||
915 | |||
916 | /* EDMA channel with event association */ | ||
917 | pr_debug("EDMA: ER%d %08x\n", j, | ||
918 | edma_shadow0_read_array(SH_ER, j)); | ||
919 | /* Clear any pending error */ | ||
920 | edma_write_array(EDMA_EMCR, j, mask); | ||
921 | /* Clear any SER */ | ||
922 | edma_shadow0_write_array(SH_SECR, j, mask); | ||
923 | edma_shadow0_write_array(SH_EESR, j, mask); | ||
924 | pr_debug("EDMA: EER%d %08x\n", j, | ||
925 | edma_shadow0_read_array(SH_EER, j)); | ||
926 | return 0; | ||
927 | } | ||
928 | |||
929 | return -EINVAL; | ||
930 | } | ||
931 | EXPORT_SYMBOL(edma_start); | ||
932 | |||
933 | /** | ||
934 | * edma_stop - stops dma on the channel passed | ||
935 | * @channel: channel being deactivated | ||
936 | * | ||
937 | * Any active transfer on @channel is paused and | ||
938 | * all pending hardware events are cleared. The current transfer | ||
939 | * may not be resumed, and the channel's Parameter RAM should be | ||
940 | * reinitialized before being reused. | ||
941 | */ | ||
942 | void edma_stop(unsigned channel) | ||
943 | { | ||
944 | if (channel < num_channels) { | ||
945 | int j = channel >> 5; | ||
946 | unsigned int mask = (1 << (channel & 0x1f)); | ||
947 | |||
948 | edma_shadow0_write_array(SH_EECR, j, mask); | ||
949 | edma_shadow0_write_array(SH_ECR, j, mask); | ||
950 | edma_shadow0_write_array(SH_SECR, j, mask); | ||
951 | edma_write_array(EDMA_EMCR, j, mask); | ||
952 | |||
953 | pr_debug("EDMA: EER%d %08x\n", j, | ||
954 | edma_shadow0_read_array(SH_EER, j)); | ||
955 | |||
956 | /* REVISIT: consider guarding against inappropriate event | ||
957 | * chaining by overwriting with dummy_paramset. | ||
958 | */ | ||
959 | } | ||
960 | } | ||
961 | EXPORT_SYMBOL(edma_stop); | ||
962 | |||
963 | /****************************************************************************** | ||
964 | * | ||
965 | * It cleans the PaRAM entry and brings EDMA back to its initial state if the | ||
966 | * media has been removed before EDMA has finished. It is useful for removable media. | ||
967 | * Arguments: | ||
968 | * channel - channel number | ||
969 | * | ||
970 | * Return: zero on success, or corresponding error no on failure | ||
971 | * | ||
972 | * FIXME this should not be needed ... edma_stop() should suffice. | ||
973 | * | ||
974 | *****************************************************************************/ | ||
975 | |||
976 | void edma_clean_channel(unsigned channel) | ||
977 | { | ||
978 | if (channel < num_channels) { | ||
979 | int j = (channel >> 5); | ||
980 | unsigned int mask = 1 << (channel & 0x1f); | ||
981 | |||
982 | pr_debug("EDMA: EMR%d %08x\n", j, | ||
983 | edma_read_array(EDMA_EMR, j)); | ||
984 | edma_shadow0_write_array(SH_ECR, j, mask); | ||
985 | /* Clear the corresponding EMR bits */ | ||
986 | edma_write_array(EDMA_EMCR, j, mask); | ||
987 | /* Clear any SER */ | ||
988 | edma_shadow0_write_array(SH_SECR, j, mask); | ||
989 | edma_write(EDMA_CCERRCLR, (1 << 16) | 0x3); | ||
990 | } | ||
991 | } | ||
992 | EXPORT_SYMBOL(edma_clean_channel); | ||
993 | |||
994 | /* | ||
995 | * edma_clear_event - clear an outstanding event on the DMA channel | ||
996 | * Arguments: | ||
997 | * channel - channel number | ||
998 | */ | ||
999 | void edma_clear_event(unsigned channel) | ||
1000 | { | ||
1001 | if (channel >= num_channels) | ||
1002 | return; | ||
1003 | if (channel < 32) | ||
1004 | edma_write(EDMA_ECR, 1 << channel); | ||
1005 | else | ||
1006 | edma_write(EDMA_ECRH, 1 << (channel - 32)); | ||
1007 | } | ||
1008 | EXPORT_SYMBOL(edma_clear_event); | ||
1009 | |||
1010 | /*-----------------------------------------------------------------------*/ | ||
1011 | |||
1012 | static int __init edma_probe(struct platform_device *pdev) | ||
1013 | { | ||
1014 | struct edma_soc_info *info = pdev->dev.platform_data; | ||
1015 | int i; | ||
1016 | int status; | ||
1017 | const s8 *noevent; | ||
1018 | int irq = 0, err_irq = 0; | ||
1019 | struct resource *r; | ||
1020 | resource_size_t len; | ||
1021 | |||
1022 | if (!info) | ||
1023 | return -ENODEV; | ||
1024 | |||
1025 | r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma_cc"); | ||
1026 | if (!r) | ||
1027 | return -ENODEV; | ||
1028 | |||
1029 | len = r->end - r->start + 1; | ||
1030 | |||
1031 | r = request_mem_region(r->start, len, r->name); | ||
1032 | if (!r) | ||
1033 | return -EBUSY; | ||
1034 | |||
1035 | edmacc_regs_base = ioremap(r->start, len); | ||
1036 | if (!edmacc_regs_base) { | ||
1037 | status = -EBUSY; | ||
1038 | goto fail1; | ||
1039 | } | ||
1040 | |||
1041 | num_channels = min_t(unsigned, info->n_channel, EDMA_MAX_DMACH); | ||
1042 | num_slots = min_t(unsigned, info->n_slot, EDMA_MAX_PARAMENTRY); | ||
1043 | |||
1044 | dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n", edmacc_regs_base); | ||
1045 | |||
1046 | for (i = 0; i < num_slots; i++) | ||
1047 | memcpy_toio(edmacc_regs_base + PARM_OFFSET(i), | ||
1048 | &dummy_paramset, PARM_SIZE); | ||
1049 | |||
1050 | noevent = info->noevent; | ||
1051 | if (noevent) { | ||
1052 | while (*noevent != -1) | ||
1053 | set_bit(*noevent++, edma_noevent); | ||
1054 | } | ||
1055 | |||
1056 | irq = platform_get_irq(pdev, 0); | ||
1057 | status = request_irq(irq, dma_irq_handler, 0, "edma", &pdev->dev); | ||
1058 | if (status < 0) { | ||
1059 | dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n", | ||
1060 | irq, status); | ||
1061 | goto fail; | ||
1062 | } | ||
1063 | |||
1064 | err_irq = platform_get_irq(pdev, 1); | ||
1065 | status = request_irq(err_irq, dma_ccerr_handler, 0, | ||
1066 | "edma_error", &pdev->dev); | ||
1067 | if (status < 0) { | ||
1068 | dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n", | ||
1069 | err_irq, status); | ||
1070 | goto fail; | ||
1071 | } | ||
1072 | |||
1073 | if (tc_errs_handled) { | ||
1074 | status = request_irq(IRQ_TCERRINT0, dma_tc0err_handler, 0, | ||
1075 | "edma_tc0", &pdev->dev); | ||
1076 | if (status < 0) { | ||
1077 | dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n", | ||
1078 | IRQ_TCERRINT0, status); | ||
1079 | return status; | ||
1080 | } | ||
1081 | status = request_irq(IRQ_TCERRINT, dma_tc1err_handler, 0, | ||
1082 | "edma_tc1", &pdev->dev); | ||
1083 | if (status < 0) { | ||
1084 | dev_dbg(&pdev->dev, "request_irq %d --> %d\n", | ||
1085 | IRQ_TCERRINT, status); | ||
1086 | return status; | ||
1087 | } | ||
1088 | } | ||
1089 | |||
1090 | /* Everything lives on transfer controller 1 until otherwise specified. | ||
1091 | * This way, long transfers on the low priority queue | ||
1092 | * started by the codec engine will not cause audio defects. | ||
1093 | */ | ||
1094 | for (i = 0; i < num_channels; i++) | ||
1095 | map_dmach_queue(i, EVENTQ_1); | ||
1096 | |||
1097 | /* Event queue to TC mapping */ | ||
1098 | for (i = 0; queue_tc_mapping[i][0] != -1; i++) | ||
1099 | map_queue_tc(queue_tc_mapping[i][0], queue_tc_mapping[i][1]); | ||
1100 | |||
1101 | /* Event queue priority mapping */ | ||
1102 | for (i = 0; queue_priority_mapping[i][0] != -1; i++) | ||
1103 | assign_priority_to_queue(queue_priority_mapping[i][0], | ||
1104 | queue_priority_mapping[i][1]); | ||
1105 | |||
1106 | for (i = 0; i < info->n_region; i++) { | ||
1107 | edma_write_array2(EDMA_DRAE, i, 0, 0x0); | ||
1108 | edma_write_array2(EDMA_DRAE, i, 1, 0x0); | ||
1109 | edma_write_array(EDMA_QRAE, i, 0x0); | ||
1110 | } | ||
1111 | |||
1112 | return 0; | ||
1113 | |||
1114 | fail: | ||
1115 | if (err_irq) | ||
1116 | free_irq(err_irq, NULL); | ||
1117 | if (irq) | ||
1118 | free_irq(irq, NULL); | ||
1119 | iounmap(edmacc_regs_base); | ||
1120 | fail1: | ||
1121 | release_mem_region(r->start, len); | ||
1122 | return status; | ||
1123 | } | ||
1124 | |||
1125 | |||
1126 | static struct platform_driver edma_driver = { | ||
1127 | .driver.name = "edma", | ||
1128 | }; | ||
1129 | |||
1130 | static int __init edma_init(void) | ||
1131 | { | ||
1132 | return platform_driver_probe(&edma_driver, edma_probe); | ||
1133 | } | ||
1134 | arch_initcall(edma_init); | ||
1135 | |||
diff --git a/arch/arm/mach-davinci/include/mach/edma.h b/arch/arm/mach-davinci/include/mach/edma.h
new file mode 100644
index 000000000000..f6fc5396dafc
--- /dev/null
+++ b/arch/arm/mach-davinci/include/mach/edma.h
@@ -0,0 +1,228 @@
1 | /* | ||
2 | * TI DAVINCI dma definitions | ||
3 | * | ||
4 | * Copyright (C) 2006-2009 Texas Instruments. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; either version 2 of the License, or (at your | ||
9 | * option) any later version. | ||
10 | * | ||
11 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED | ||
12 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
13 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
14 | * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
15 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
16 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
17 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
18 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
19 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
20 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
21 | * | ||
22 | * You should have received a copy of the GNU General Public License along | ||
23 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
24 | * 675 Mass Ave, Cambridge, MA 02139, USA. | ||
25 | * | ||
26 | */ | ||
27 | |||
28 | /* | ||
29 | * This EDMA3 programming framework exposes two basic kinds of resource: | ||
30 | * | ||
31 | * Channel Triggers transfers, usually from a hardware event but | ||
32 | * also manually or by "chaining" from DMA completions. | ||
33 | * Each channel is coupled to a Parameter RAM (PaRAM) slot. | ||
34 | * | ||
35 | * Slot Each PaRAM slot holds a DMA transfer descriptor (PaRAM | ||
36 | * "set"), source and destination addresses, a link to a | ||
37 | * next PaRAM slot (if any), options for the transfer, and | ||
38 | * instructions for updating those addresses. There are | ||
39 | * more than twice as many slots as event channels. | ||
40 | * | ||
41 | * Each PaRAM set describes a sequence of transfers, either for one large | ||
42 | * buffer or for several discontiguous smaller buffers. An EDMA transfer | ||
43 | * is driven only from a channel, which performs the transfers specified | ||
44 | * in its PaRAM slot until there are no more transfers. When that last | ||
45 | * transfer completes, the "link" field may be used to reload the channel's | ||
46 | * PaRAM slot with a new transfer descriptor. | ||
47 | * | ||
48 | * The EDMA Channel Controller (CC) maps requests from channels into physical | ||
49 | * Transfer Controller (TC) requests when the channel triggers (by hardware | ||
50 | * or software events, or by chaining). The two physical DMA channels provided | ||
51 | * by the TCs are thus shared by many logical channels. | ||
52 | * | ||
53 | * DaVinci hardware also has a "QDMA" mechanism which is not currently | ||
54 | * supported through this interface. (DSP firmware uses it though.) | ||
55 | */ | ||
56 | |||
57 | #ifndef EDMA_H_ | ||
58 | #define EDMA_H_ | ||
59 | |||
60 | /* PaRAM slots are laid out like this */ | ||
61 | struct edmacc_param { | ||
62 | unsigned int opt; | ||
63 | unsigned int src; | ||
64 | unsigned int a_b_cnt; | ||
65 | unsigned int dst; | ||
66 | unsigned int src_dst_bidx; | ||
67 | unsigned int link_bcntrld; | ||
68 | unsigned int src_dst_cidx; | ||
69 | unsigned int ccnt; | ||
70 | }; | ||
71 | |||
72 | #define CCINT0_INTERRUPT 16 | ||
73 | #define CCERRINT_INTERRUPT 17 | ||
74 | #define TCERRINT0_INTERRUPT 18 | ||
75 | #define TCERRINT1_INTERRUPT 19 | ||
76 | |||
77 | /* fields in edmacc_param.opt */ | ||
78 | #define SAM BIT(0) | ||
79 | #define DAM BIT(1) | ||
80 | #define SYNCDIM BIT(2) | ||
81 | #define STATIC BIT(3) | ||
82 | #define EDMA_FWID (0x07 << 8) | ||
83 | #define TCCMODE BIT(11) | ||
84 | #define EDMA_TCC(t) ((t) << 12) | ||
85 | #define TCINTEN BIT(20) | ||
86 | #define ITCINTEN BIT(21) | ||
87 | #define TCCHEN BIT(22) | ||
88 | #define ITCCHEN BIT(23) | ||
89 | |||
90 | #define TRWORD (0x7<<2) | ||
91 | #define PAENTRY (0x1ff<<5) | ||
92 | |||
93 | /* Drivers should avoid using these symbolic names for dm644x | ||
94 | * channels, and use platform_device IORESOURCE_DMA resources | ||
95 | * instead. (Other DaVinci chips have different peripherals | ||
96 | * and thus have different DMA channel mappings.) | ||
97 | */ | ||
98 | #define DAVINCI_DMA_MCBSP_TX 2 | ||
99 | #define DAVINCI_DMA_MCBSP_RX 3 | ||
100 | #define DAVINCI_DMA_VPSS_HIST 4 | ||
101 | #define DAVINCI_DMA_VPSS_H3A 5 | ||
102 | #define DAVINCI_DMA_VPSS_PRVU 6 | ||
103 | #define DAVINCI_DMA_VPSS_RSZ 7 | ||
104 | #define DAVINCI_DMA_IMCOP_IMXINT 8 | ||
105 | #define DAVINCI_DMA_IMCOP_VLCDINT 9 | ||
106 | #define DAVINCI_DMA_IMCO_PASQINT 10 | ||
107 | #define DAVINCI_DMA_IMCOP_DSQINT 11 | ||
108 | #define DAVINCI_DMA_SPI_SPIX 16 | ||
109 | #define DAVINCI_DMA_SPI_SPIR 17 | ||
110 | #define DAVINCI_DMA_UART0_URXEVT0 18 | ||
111 | #define DAVINCI_DMA_UART0_UTXEVT0 19 | ||
112 | #define DAVINCI_DMA_UART1_URXEVT1 20 | ||
113 | #define DAVINCI_DMA_UART1_UTXEVT1 21 | ||
114 | #define DAVINCI_DMA_UART2_URXEVT2 22 | ||
115 | #define DAVINCI_DMA_UART2_UTXEVT2 23 | ||
116 | #define DAVINCI_DMA_MEMSTK_MSEVT 24 | ||
117 | #define DAVINCI_DMA_MMCRXEVT 26 | ||
118 | #define DAVINCI_DMA_MMCTXEVT 27 | ||
119 | #define DAVINCI_DMA_I2C_ICREVT 28 | ||
120 | #define DAVINCI_DMA_I2C_ICXEVT 29 | ||
121 | #define DAVINCI_DMA_GPIO_GPINT0 32 | ||
122 | #define DAVINCI_DMA_GPIO_GPINT1 33 | ||
123 | #define DAVINCI_DMA_GPIO_GPINT2 34 | ||
124 | #define DAVINCI_DMA_GPIO_GPINT3 35 | ||
125 | #define DAVINCI_DMA_GPIO_GPINT4 36 | ||
126 | #define DAVINCI_DMA_GPIO_GPINT5 37 | ||
127 | #define DAVINCI_DMA_GPIO_GPINT6 38 | ||
128 | #define DAVINCI_DMA_GPIO_GPINT7 39 | ||
129 | #define DAVINCI_DMA_GPIO_GPBNKINT0 40 | ||
130 | #define DAVINCI_DMA_GPIO_GPBNKINT1 41 | ||
131 | #define DAVINCI_DMA_GPIO_GPBNKINT2 42 | ||
132 | #define DAVINCI_DMA_GPIO_GPBNKINT3 43 | ||
133 | #define DAVINCI_DMA_GPIO_GPBNKINT4 44 | ||
134 | #define DAVINCI_DMA_TIMER0_TINT0 48 | ||
135 | #define DAVINCI_DMA_TIMER1_TINT1 49 | ||
136 | #define DAVINCI_DMA_TIMER2_TINT2 50 | ||
137 | #define DAVINCI_DMA_TIMER3_TINT3 51 | ||
138 | #define DAVINCI_DMA_PWM0 52 | ||
139 | #define DAVINCI_DMA_PWM1 53 | ||
140 | #define DAVINCI_DMA_PWM2 54 | ||
141 | |||
142 | /* ch_status parameter of callback function: possible values */ | ||
143 | #define DMA_COMPLETE 1 | ||
144 | #define DMA_CC_ERROR 2 | ||
145 | #define DMA_TC1_ERROR 3 | ||
146 | #define DMA_TC2_ERROR 4 | ||
147 | |||
148 | enum address_mode { | ||
149 | INCR = 0, | ||
150 | FIFO = 1 | ||
151 | }; | ||
152 | |||
153 | enum fifo_width { | ||
154 | W8BIT = 0, | ||
155 | W16BIT = 1, | ||
156 | W32BIT = 2, | ||
157 | W64BIT = 3, | ||
158 | W128BIT = 4, | ||
159 | W256BIT = 5 | ||
160 | }; | ||
161 | |||
162 | enum dma_event_q { | ||
163 | EVENTQ_0 = 0, | ||
164 | EVENTQ_1 = 1, | ||
165 | EVENTQ_DEFAULT = -1 | ||
166 | }; | ||
167 | |||
168 | enum sync_dimension { | ||
169 | ASYNC = 0, | ||
170 | ABSYNC = 1 | ||
171 | }; | ||
172 | |||
173 | #define EDMA_CHANNEL_ANY -1 /* for edma_alloc_channel() */ | ||
174 | #define EDMA_SLOT_ANY -1 /* for edma_alloc_slot() */ | ||
175 | |||
176 | /* alloc/free DMA channels and their dedicated parameter RAM slots */ | ||
177 | int edma_alloc_channel(int channel, | ||
178 | void (*callback)(unsigned channel, u16 ch_status, void *data), | ||
179 | void *data, enum dma_event_q); | ||
180 | void edma_free_channel(unsigned channel); | ||
181 | |||
182 | /* alloc/free parameter RAM slots */ | ||
183 | int edma_alloc_slot(int slot); | ||
184 | void edma_free_slot(unsigned slot); | ||
185 | |||
186 | /* calls that operate on part of a parameter RAM slot */ | ||
187 | void edma_set_src(unsigned slot, dma_addr_t src_port, | ||
188 | enum address_mode mode, enum fifo_width); | ||
189 | void edma_set_dest(unsigned slot, dma_addr_t dest_port, | ||
190 | enum address_mode mode, enum fifo_width); | ||
191 | void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst); | ||
192 | void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx); | ||
193 | void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx); | ||
194 | void edma_set_transfer_params(unsigned slot, u16 acnt, u16 bcnt, u16 ccnt, | ||
195 | u16 bcnt_rld, enum sync_dimension sync_mode); | ||
196 | void edma_link(unsigned from, unsigned to); | ||
197 | void edma_unlink(unsigned from); | ||
198 | |||
199 | /* calls that operate on an entire parameter RAM slot */ | ||
200 | void edma_write_slot(unsigned slot, const struct edmacc_param *params); | ||
201 | void edma_read_slot(unsigned slot, struct edmacc_param *params); | ||
202 | |||
203 | /* channel control operations */ | ||
204 | int edma_start(unsigned channel); | ||
205 | void edma_stop(unsigned channel); | ||
206 | void edma_clean_channel(unsigned channel); | ||
207 | void edma_clear_event(unsigned channel); | ||
208 | void edma_pause(unsigned channel); | ||
209 | void edma_resume(unsigned channel); | ||
210 | |||
211 | /* UNRELATED TO DMA */ | ||
212 | int davinci_alloc_iram(unsigned size); | ||
213 | void davinci_free_iram(unsigned addr, unsigned size); | ||
214 | |||
215 | /* platform_data for EDMA driver */ | ||
216 | struct edma_soc_info { | ||
217 | |||
218 | /* how many dma resources of each type */ | ||
219 | unsigned n_channel; | ||
220 | unsigned n_region; | ||
221 | unsigned n_slot; | ||
222 | unsigned n_tc; | ||
223 | |||
224 | /* list of channels with no event trigger; terminated by "-1" */ | ||
225 | const s8 *noevent; | ||
226 | }; | ||
227 | |||
228 | #endif | ||
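
Finally, a hypothetical sketch of the platform data and resources edma_probe() looks for: a memory resource named "edma_cc", IRQ 0 for transfer completion, IRQ 1 for CC errors, and an edma_soc_info whose noevent list is terminated by -1. The base address and IRQ numbers below come from this patch's own defines (DAVINCI_DMA_3PCC_BASE, CCINT0_INTERRUPT, CCERRINT_INTERRUPT); the channel/slot/region counts and noevent entries are placeholders, not a complete SoC description.

/* Hypothetical SoC registration sketch -- not part of this patch. */
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <mach/edma.h>

static const s8 example_noevent[] = { 56, 57, 58, 59, -1 };	/* placeholder channels */

static struct edma_soc_info example_edma_info = {
	.n_channel	= 64,
	.n_region	= 4,
	.n_slot		= 128,
	.n_tc		= 2,
	.noevent	= example_noevent,
};

static struct resource example_edma_resources[] = {
	{
		.name	= "edma_cc",			/* looked up by name in edma_probe() */
		.start	= 0x01c00000,			/* DAVINCI_DMA_3PCC_BASE */
		.end	= 0x01c00000 + 0x10000 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= CCINT0_INTERRUPT,		/* platform_get_irq(pdev, 0): completion */
		.flags	= IORESOURCE_IRQ,
	},
	{
		.start	= CCERRINT_INTERRUPT,		/* platform_get_irq(pdev, 1): CC error */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device example_edma_device = {
	.name		= "edma",			/* must match edma_driver.driver.name */
	.id		= -1,
	.dev		= {
		.platform_data = &example_edma_info,
	},
	.num_resources	= ARRAY_SIZE(example_edma_resources),
	.resource	= example_edma_resources,
};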