path: root/arch/arm/common/edma.c
Diffstat (limited to 'arch/arm/common/edma.c')
-rw-r--r--	arch/arm/common/edma.c	1554
1 file changed, 1554 insertions(+), 0 deletions(-)
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
new file mode 100644
index 000000000000..a1db6cd8cf79
--- /dev/null
+++ b/arch/arm/common/edma.c
@@ -0,0 +1,1554 @@
1/*
2 * EDMA3 support for DaVinci
3 *
4 * Copyright (C) 2006-2009 Texas Instruments.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20#include <linux/kernel.h>
21#include <linux/init.h>
22#include <linux/module.h>
23#include <linux/interrupt.h>
24#include <linux/platform_device.h>
25#include <linux/io.h>
26#include <linux/slab.h>
27
28#include <linux/platform_data/edma.h>
29
30/* Offsets matching "struct edmacc_param" */
31#define PARM_OPT 0x00
32#define PARM_SRC 0x04
33#define PARM_A_B_CNT 0x08
34#define PARM_DST 0x0c
35#define PARM_SRC_DST_BIDX 0x10
36#define PARM_LINK_BCNTRLD 0x14
37#define PARM_SRC_DST_CIDX 0x18
38#define PARM_CCNT 0x1c
39
40#define PARM_SIZE 0x20
41
42/* Offsets for EDMA CC global channel registers and their shadows */
43#define SH_ER 0x00 /* 64 bits */
44#define SH_ECR 0x08 /* 64 bits */
45#define SH_ESR 0x10 /* 64 bits */
46#define SH_CER 0x18 /* 64 bits */
47#define SH_EER 0x20 /* 64 bits */
48#define SH_EECR 0x28 /* 64 bits */
49#define SH_EESR 0x30 /* 64 bits */
50#define SH_SER 0x38 /* 64 bits */
51#define SH_SECR 0x40 /* 64 bits */
52#define SH_IER 0x50 /* 64 bits */
53#define SH_IECR 0x58 /* 64 bits */
54#define SH_IESR 0x60 /* 64 bits */
55#define SH_IPR 0x68 /* 64 bits */
56#define SH_ICR 0x70 /* 64 bits */
57#define SH_IEVAL 0x78
58#define SH_QER 0x80
59#define SH_QEER 0x84
60#define SH_QEECR 0x88
61#define SH_QEESR 0x8c
62#define SH_QSER 0x90
63#define SH_QSECR 0x94
64#define SH_SIZE 0x200
65
66/* Offsets for EDMA CC global registers */
67#define EDMA_REV 0x0000
68#define EDMA_CCCFG 0x0004
69#define EDMA_QCHMAP 0x0200 /* 8 registers */
70#define EDMA_DMAQNUM 0x0240 /* 8 registers (4 on OMAP-L1xx) */
71#define EDMA_QDMAQNUM 0x0260
72#define EDMA_QUETCMAP 0x0280
73#define EDMA_QUEPRI 0x0284
74#define EDMA_EMR 0x0300 /* 64 bits */
75#define EDMA_EMCR 0x0308 /* 64 bits */
76#define EDMA_QEMR 0x0310
77#define EDMA_QEMCR 0x0314
78#define EDMA_CCERR 0x0318
79#define EDMA_CCERRCLR 0x031c
80#define EDMA_EEVAL 0x0320
81#define EDMA_DRAE	0x0340	/* 4 x 64 bits */
82#define EDMA_QRAE 0x0380 /* 4 registers */
83#define EDMA_QUEEVTENTRY 0x0400 /* 2 x 16 registers */
84#define EDMA_QSTAT 0x0600 /* 2 registers */
85#define EDMA_QWMTHRA 0x0620
86#define EDMA_QWMTHRB 0x0624
87#define EDMA_CCSTAT 0x0640
88
89#define EDMA_M 0x1000 /* global channel registers */
90#define EDMA_ECR 0x1008
91#define EDMA_ECRH 0x100C
92#define EDMA_SHADOW0 0x2000 /* 4 regions shadowing global channels */
93#define EDMA_PARM 0x4000 /* 128 param entries */
94
95#define PARM_OFFSET(param_no) (EDMA_PARM + ((param_no) << 5))
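/* e.g. PARM_OFFSET(3) == 0x4000 + (3 << 5) == 0x4060 */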
96
97#define EDMA_DCHMAP 0x0100 /* 64 registers */
98#define CHMAP_EXIST BIT(24)
99
100#define EDMA_MAX_DMACH 64
101#define EDMA_MAX_PARAMENTRY 512
102
103/*****************************************************************************/
104
105static void __iomem *edmacc_regs_base[EDMA_MAX_CC];
106
107static inline unsigned int edma_read(unsigned ctlr, int offset)
108{
109 return (unsigned int)__raw_readl(edmacc_regs_base[ctlr] + offset);
110}
111
112static inline void edma_write(unsigned ctlr, int offset, int val)
113{
114 __raw_writel(val, edmacc_regs_base[ctlr] + offset);
115}
116static inline void edma_modify(unsigned ctlr, int offset, unsigned and,
117 unsigned or)
118{
119 unsigned val = edma_read(ctlr, offset);
120 val &= and;
121 val |= or;
122 edma_write(ctlr, offset, val);
123}
124static inline void edma_and(unsigned ctlr, int offset, unsigned and)
125{
126 unsigned val = edma_read(ctlr, offset);
127 val &= and;
128 edma_write(ctlr, offset, val);
129}
130static inline void edma_or(unsigned ctlr, int offset, unsigned or)
131{
132 unsigned val = edma_read(ctlr, offset);
133 val |= or;
134 edma_write(ctlr, offset, val);
135}
136static inline unsigned int edma_read_array(unsigned ctlr, int offset, int i)
137{
138 return edma_read(ctlr, offset + (i << 2));
139}
140static inline void edma_write_array(unsigned ctlr, int offset, int i,
141 unsigned val)
142{
143 edma_write(ctlr, offset + (i << 2), val);
144}
145static inline void edma_modify_array(unsigned ctlr, int offset, int i,
146 unsigned and, unsigned or)
147{
148 edma_modify(ctlr, offset + (i << 2), and, or);
149}
150static inline void edma_or_array(unsigned ctlr, int offset, int i, unsigned or)
151{
152 edma_or(ctlr, offset + (i << 2), or);
153}
154static inline void edma_or_array2(unsigned ctlr, int offset, int i, int j,
155 unsigned or)
156{
157 edma_or(ctlr, offset + ((i*2 + j) << 2), or);
158}
159static inline void edma_write_array2(unsigned ctlr, int offset, int i, int j,
160 unsigned val)
161{
162 edma_write(ctlr, offset + ((i*2 + j) << 2), val);
163}
164static inline unsigned int edma_shadow0_read(unsigned ctlr, int offset)
165{
166 return edma_read(ctlr, EDMA_SHADOW0 + offset);
167}
168static inline unsigned int edma_shadow0_read_array(unsigned ctlr, int offset,
169 int i)
170{
171 return edma_read(ctlr, EDMA_SHADOW0 + offset + (i << 2));
172}
173static inline void edma_shadow0_write(unsigned ctlr, int offset, unsigned val)
174{
175 edma_write(ctlr, EDMA_SHADOW0 + offset, val);
176}
177static inline void edma_shadow0_write_array(unsigned ctlr, int offset, int i,
178 unsigned val)
179{
180 edma_write(ctlr, EDMA_SHADOW0 + offset + (i << 2), val);
181}
182static inline unsigned int edma_parm_read(unsigned ctlr, int offset,
183 int param_no)
184{
185 return edma_read(ctlr, EDMA_PARM + offset + (param_no << 5));
186}
187static inline void edma_parm_write(unsigned ctlr, int offset, int param_no,
188 unsigned val)
189{
190 edma_write(ctlr, EDMA_PARM + offset + (param_no << 5), val);
191}
192static inline void edma_parm_modify(unsigned ctlr, int offset, int param_no,
193 unsigned and, unsigned or)
194{
195 edma_modify(ctlr, EDMA_PARM + offset + (param_no << 5), and, or);
196}
197static inline void edma_parm_and(unsigned ctlr, int offset, int param_no,
198 unsigned and)
199{
200 edma_and(ctlr, EDMA_PARM + offset + (param_no << 5), and);
201}
202static inline void edma_parm_or(unsigned ctlr, int offset, int param_no,
203 unsigned or)
204{
205 edma_or(ctlr, EDMA_PARM + offset + (param_no << 5), or);
206}
207
208static inline void set_bits(int offset, int len, unsigned long *p)
209{
210 for (; len > 0; len--)
211 set_bit(offset + (len - 1), p);
212}
213
214static inline void clear_bits(int offset, int len, unsigned long *p)
215{
216 for (; len > 0; len--)
217 clear_bit(offset + (len - 1), p);
218}
219
220/*****************************************************************************/
221
222/* actual number of DMA channels and slots on this silicon */
223struct edma {
224 /* how many dma resources of each type */
225 unsigned num_channels;
226 unsigned num_region;
227 unsigned num_slots;
228 unsigned num_tc;
229 unsigned num_cc;
230 enum dma_event_q default_queue;
231
232	/* list of channels with no event trigger; terminated by "-1" */
233 const s8 *noevent;
234
235 /* The edma_inuse bit for each PaRAM slot is clear unless the
236 * channel is in use ... by ARM or DSP, for QDMA, or whatever.
237 */
238 DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY);
239
240	/* The edma_unused bit for each channel is set unless the
241	 * channel is actually in use on this platform; the bitmap is
242	 * filled in with the help of SoC-specific initialization code.
243	 */
244 DECLARE_BITMAP(edma_unused, EDMA_MAX_DMACH);
245
246 unsigned irq_res_start;
247 unsigned irq_res_end;
248
249 struct dma_interrupt_data {
250 void (*callback)(unsigned channel, unsigned short ch_status,
251 void *data);
252 void *data;
253 } intr_data[EDMA_MAX_DMACH];
254};
255
256static struct edma *edma_cc[EDMA_MAX_CC];
257static int arch_num_cc;
258
259/* dummy param set used to (re)initialize parameter RAM slots */
260static const struct edmacc_param dummy_paramset = {
261 .link_bcntrld = 0xffff,
262 .ccnt = 1,
263};
264
265/*****************************************************************************/
266
267static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
268 enum dma_event_q queue_no)
269{
270 int bit = (ch_no & 0x7) * 4;
271
272 /* default to low priority queue */
273 if (queue_no == EVENTQ_DEFAULT)
274 queue_no = edma_cc[ctlr]->default_queue;
275
276 queue_no &= 7;
277 edma_modify_array(ctlr, EDMA_DMAQNUM, (ch_no >> 3),
278 ~(0x7 << bit), queue_no << bit);
279}
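
/*
 * Worked example (for illustration): channel 10 lives in DMAQNUM1
 * (10 >> 3 == 1) at nibble bits 11:8 ((10 & 7) * 4 == 8), so routing
 * it to queue 2 writes the value 0x2 into those four bits.
 */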
280
281static void __init map_queue_tc(unsigned ctlr, int queue_no, int tc_no)
282{
283 int bit = queue_no * 4;
284 edma_modify(ctlr, EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit));
285}
286
287static void __init assign_priority_to_queue(unsigned ctlr, int queue_no,
288 int priority)
289{
290 int bit = queue_no * 4;
291 edma_modify(ctlr, EDMA_QUEPRI, ~(0x7 << bit),
292 ((priority & 0x7) << bit));
293}
294
295/**
296 * map_dmach_param - Maps channel number to param entry number
297 *
298 * This maps the DMA channel number to a param entry number. In
299 * other words, using the DMA channel mapping registers, a param
300 * entry can be mapped to any channel.
301 *
302 * Callers are responsible for ensuring that channel mapping logic
303 * exists in the EDMA variant in use (e.g. dm646x).
304 *
305 */
306static void __init map_dmach_param(unsigned ctlr)
307{
308 int i;
309 for (i = 0; i < EDMA_MAX_DMACH; i++)
310		edma_write_array(ctlr, EDMA_DCHMAP, i, (i << 5));
311}
312
313static inline void
314setup_dma_interrupt(unsigned lch,
315 void (*callback)(unsigned channel, u16 ch_status, void *data),
316 void *data)
317{
318 unsigned ctlr;
319
320 ctlr = EDMA_CTLR(lch);
321 lch = EDMA_CHAN_SLOT(lch);
322
323 if (!callback)
324 edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5,
325 BIT(lch & 0x1f));
326
327 edma_cc[ctlr]->intr_data[lch].callback = callback;
328 edma_cc[ctlr]->intr_data[lch].data = data;
329
330 if (callback) {
331 edma_shadow0_write_array(ctlr, SH_ICR, lch >> 5,
332 BIT(lch & 0x1f));
333 edma_shadow0_write_array(ctlr, SH_IESR, lch >> 5,
334 BIT(lch & 0x1f));
335 }
336}
337
338static int irq2ctlr(int irq)
339{
340 if (irq >= edma_cc[0]->irq_res_start && irq <= edma_cc[0]->irq_res_end)
341 return 0;
342	else if (edma_cc[1] && irq >= edma_cc[1]->irq_res_start &&
343			irq <= edma_cc[1]->irq_res_end)
344 return 1;
345
346 return -1;
347}
348
349/******************************************************************************
350 *
351 * DMA interrupt handler
352 *
353 *****************************************************************************/
354static irqreturn_t dma_irq_handler(int irq, void *data)
355{
356 int ctlr;
357 u32 sh_ier;
358 u32 sh_ipr;
359 u32 bank;
360
361 ctlr = irq2ctlr(irq);
362 if (ctlr < 0)
363 return IRQ_NONE;
364
365 dev_dbg(data, "dma_irq_handler\n");
366
367 sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 0);
368 if (!sh_ipr) {
369 sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 1);
370 if (!sh_ipr)
371 return IRQ_NONE;
372 sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 1);
373 bank = 1;
374 } else {
375 sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 0);
376 bank = 0;
377 }
378
379 do {
380 u32 slot;
381 u32 channel;
382
383 dev_dbg(data, "IPR%d %08x\n", bank, sh_ipr);
384
385 slot = __ffs(sh_ipr);
386 sh_ipr &= ~(BIT(slot));
387
388 if (sh_ier & BIT(slot)) {
389 channel = (bank << 5) | slot;
390 /* Clear the corresponding IPR bits */
391 edma_shadow0_write_array(ctlr, SH_ICR, bank,
392 BIT(slot));
393 if (edma_cc[ctlr]->intr_data[channel].callback)
394 edma_cc[ctlr]->intr_data[channel].callback(
395 channel, DMA_COMPLETE,
396 edma_cc[ctlr]->intr_data[channel].data);
397 }
398 } while (sh_ipr);
399
400 edma_shadow0_write(ctlr, SH_IEVAL, 1);
401 return IRQ_HANDLED;
402}
403
404/******************************************************************************
405 *
406 * DMA error interrupt handler
407 *
408 *****************************************************************************/
409static irqreturn_t dma_ccerr_handler(int irq, void *data)
410{
411 int i;
412 int ctlr;
413 unsigned int cnt = 0;
414
415 ctlr = irq2ctlr(irq);
416 if (ctlr < 0)
417 return IRQ_NONE;
418
419 dev_dbg(data, "dma_ccerr_handler\n");
420
421 if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
422 (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
423 (edma_read(ctlr, EDMA_QEMR) == 0) &&
424 (edma_read(ctlr, EDMA_CCERR) == 0))
425 return IRQ_NONE;
426
427 while (1) {
428 int j = -1;
429 if (edma_read_array(ctlr, EDMA_EMR, 0))
430 j = 0;
431 else if (edma_read_array(ctlr, EDMA_EMR, 1))
432 j = 1;
433 if (j >= 0) {
434 dev_dbg(data, "EMR%d %08x\n", j,
435 edma_read_array(ctlr, EDMA_EMR, j));
436 for (i = 0; i < 32; i++) {
437 int k = (j << 5) + i;
438 if (edma_read_array(ctlr, EDMA_EMR, j) &
439 BIT(i)) {
440 /* Clear the corresponding EMR bits */
441 edma_write_array(ctlr, EDMA_EMCR, j,
442 BIT(i));
443 /* Clear any SER */
444 edma_shadow0_write_array(ctlr, SH_SECR,
445 j, BIT(i));
446				if (edma_cc[ctlr]->intr_data[k].callback) {
447					edma_cc[ctlr]->intr_data[k].callback(
448						k, DMA_CC_ERROR,
449						edma_cc[ctlr]->intr_data[k].data);
450				}
454 }
455 }
456 } else if (edma_read(ctlr, EDMA_QEMR)) {
457 dev_dbg(data, "QEMR %02x\n",
458 edma_read(ctlr, EDMA_QEMR));
459 for (i = 0; i < 8; i++) {
460 if (edma_read(ctlr, EDMA_QEMR) & BIT(i)) {
461 /* Clear the corresponding IPR bits */
462 edma_write(ctlr, EDMA_QEMCR, BIT(i));
463 edma_shadow0_write(ctlr, SH_QSECR,
464 BIT(i));
465
466 /* NOTE: not reported!! */
467 }
468 }
469 } else if (edma_read(ctlr, EDMA_CCERR)) {
470 dev_dbg(data, "CCERR %08x\n",
471 edma_read(ctlr, EDMA_CCERR));
472 /* FIXME: CCERR.BIT(16) ignored! much better
473 * to just write CCERRCLR with CCERR value...
474 */
475 for (i = 0; i < 8; i++) {
476 if (edma_read(ctlr, EDMA_CCERR) & BIT(i)) {
477 /* Clear the corresponding IPR bits */
478 edma_write(ctlr, EDMA_CCERRCLR, BIT(i));
479
480 /* NOTE: not reported!! */
481 }
482 }
483 }
484 if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
485 (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
486 (edma_read(ctlr, EDMA_QEMR) == 0) &&
487 (edma_read(ctlr, EDMA_CCERR) == 0))
488 break;
489 cnt++;
490 if (cnt > 10)
491 break;
492 }
493 edma_write(ctlr, EDMA_EEVAL, 1);
494 return IRQ_HANDLED;
495}
496
497static int reserve_contiguous_slots(int ctlr, unsigned int id,
498 unsigned int num_slots,
499 unsigned int start_slot)
500{
501 int i, j;
502 unsigned int count = num_slots;
503 int stop_slot = start_slot;
504 DECLARE_BITMAP(tmp_inuse, EDMA_MAX_PARAMENTRY);
505
506 for (i = start_slot; i < edma_cc[ctlr]->num_slots; ++i) {
507 j = EDMA_CHAN_SLOT(i);
508 if (!test_and_set_bit(j, edma_cc[ctlr]->edma_inuse)) {
509 /* Record our current beginning slot */
510 if (count == num_slots)
511 stop_slot = i;
512
513 count--;
514 set_bit(j, tmp_inuse);
515
516 if (count == 0)
517 break;
518 } else {
519 clear_bit(j, tmp_inuse);
520
521 if (id == EDMA_CONT_PARAMS_FIXED_EXACT) {
522 stop_slot = i;
523 break;
524 } else {
525 count = num_slots;
526 }
527 }
528 }
529
530	/*
531	 * We have to clear any bits that we set if we run out of
532	 * parameter RAM slots, i.e. we did find a set of contiguous
533	 * parameter RAM slots but not the exact number requested, and
534	 * reached the end of the parameter RAM before finding them.
535	 */
536 if (i == edma_cc[ctlr]->num_slots)
537 stop_slot = i;
538
539 j = start_slot;
540 for_each_set_bit_from(j, tmp_inuse, stop_slot)
541 clear_bit(j, edma_cc[ctlr]->edma_inuse);
542
543 if (count)
544 return -EBUSY;
545
546 for (j = i - num_slots + 1; j <= i; ++j)
547 memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(j),
548 &dummy_paramset, PARM_SIZE);
549
550 return EDMA_CTLR_CHAN(ctlr, i - num_slots + 1);
551}
552
553static int prepare_unused_channel_list(struct device *dev, void *data)
554{
555 struct platform_device *pdev = to_platform_device(dev);
556 int i, ctlr;
557
558 for (i = 0; i < pdev->num_resources; i++) {
559 if ((pdev->resource[i].flags & IORESOURCE_DMA) &&
560 (int)pdev->resource[i].start >= 0) {
561 ctlr = EDMA_CTLR(pdev->resource[i].start);
562 clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
563 edma_cc[ctlr]->edma_unused);
564 }
565 }
566
567 return 0;
568}
569
570/*-----------------------------------------------------------------------*/
571
572static bool unused_chan_list_done;
573
574/* Resource alloc/free: dma channels, parameter RAM slots */
575
576/**
577 * edma_alloc_channel - allocate DMA channel and paired parameter RAM
578 * @channel: specific channel to allocate; negative for "any unmapped channel"
579 * @callback: optional; to be issued on DMA completion or errors
580 * @data: passed to callback
581 * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
582 * Controller (TC) executes requests using this channel. Use
583 * EVENTQ_DEFAULT unless you really need a high priority queue.
584 *
585 * This allocates a DMA channel and its associated parameter RAM slot.
586 * The parameter RAM is initialized to hold a dummy transfer.
587 *
588 * Normal use is to pass a specific channel number as @channel, to make
589 * use of hardware events mapped to that channel. When the channel will
590 * be used only for software triggering or event chaining, channels not
591 * mapped to hardware events (or mapped to unused events) are preferable.
592 *
593 * DMA transfers start from a channel using edma_start(), or by
594 * chaining. When the transfer described in that channel's parameter RAM
595 * slot completes, that slot's data may be reloaded through a link.
596 *
597 * DMA errors are only reported to the @callback associated with the
598 * channel driving that transfer, but transfer completion callbacks can
599 * be sent to another channel under control of the TCC field in
600 * the option word of the transfer's parameter RAM set. Drivers must not
601 * use DMA transfer completion callbacks for channels they did not allocate.
602 * (The same applies to TCC codes used in transfer chaining.)
603 *
604 * Returns the number of the channel, else negative errno.
605 */
606int edma_alloc_channel(int channel,
607 void (*callback)(unsigned channel, u16 ch_status, void *data),
608 void *data,
609 enum dma_event_q eventq_no)
610{
611 unsigned i, done = 0, ctlr = 0;
612 int ret = 0;
613
614 if (!unused_chan_list_done) {
615 /*
616 * Scan all the platform devices to find out the EDMA channels
617 * used and clear them in the unused list, making the rest
618 * available for ARM usage.
619 */
620 ret = bus_for_each_dev(&platform_bus_type, NULL, NULL,
621 prepare_unused_channel_list);
622 if (ret < 0)
623 return ret;
624
625 unused_chan_list_done = true;
626 }
627
628 if (channel >= 0) {
629 ctlr = EDMA_CTLR(channel);
630 channel = EDMA_CHAN_SLOT(channel);
631 }
632
633 if (channel < 0) {
634 for (i = 0; i < arch_num_cc; i++) {
635 channel = 0;
636 for (;;) {
637 channel = find_next_bit(edma_cc[i]->edma_unused,
638 edma_cc[i]->num_channels,
639 channel);
640 if (channel == edma_cc[i]->num_channels)
641 break;
642 if (!test_and_set_bit(channel,
643 edma_cc[i]->edma_inuse)) {
644 done = 1;
645 ctlr = i;
646 break;
647 }
648 channel++;
649 }
650 if (done)
651 break;
652 }
653 if (!done)
654 return -ENOMEM;
655 } else if (channel >= edma_cc[ctlr]->num_channels) {
656 return -EINVAL;
657 } else if (test_and_set_bit(channel, edma_cc[ctlr]->edma_inuse)) {
658 return -EBUSY;
659 }
660
661 /* ensure access through shadow region 0 */
662 edma_or_array2(ctlr, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));
663
664 /* ensure no events are pending */
665 edma_stop(EDMA_CTLR_CHAN(ctlr, channel));
666 memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
667 &dummy_paramset, PARM_SIZE);
668
669 if (callback)
670 setup_dma_interrupt(EDMA_CTLR_CHAN(ctlr, channel),
671 callback, data);
672
673 map_dmach_queue(ctlr, channel, eventq_no);
674
675 return EDMA_CTLR_CHAN(ctlr, channel);
676}
677EXPORT_SYMBOL(edma_alloc_channel);
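
/*
 * Illustrative sketch only (not part of this driver): how a client
 * might allocate a channel. "MY_EVENT" is a hypothetical hardware
 * event number; real drivers get theirs from platform resources.
 */
#if 0	/* example */
static void my_callback(unsigned channel, u16 ch_status, void *data)
{
	if (ch_status == DMA_COMPLETE)
		pr_debug("transfer on channel %u done\n", channel);
}

static int my_request_dma(void *ctx)
{
	int channel = edma_alloc_channel(MY_EVENT, my_callback, ctx,
					 EVENTQ_DEFAULT);

	if (channel < 0)
		return channel;	/* negative errno */

	/* value encodes controller + channel; see EDMA_CTLR_CHAN() */
	return channel;
}
#endif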
678
679
680/**
681 * edma_free_channel - deallocate DMA channel
682 * @channel: dma channel returned from edma_alloc_channel()
683 *
684 * This deallocates the DMA channel and associated parameter RAM slot
685 * allocated by edma_alloc_channel().
686 *
687 * Callers are responsible for ensuring the channel is inactive, and
688 * will not be reactivated by linking, chaining, or software calls to
689 * edma_start().
690 */
691void edma_free_channel(unsigned channel)
692{
693 unsigned ctlr;
694
695 ctlr = EDMA_CTLR(channel);
696 channel = EDMA_CHAN_SLOT(channel);
697
698 if (channel >= edma_cc[ctlr]->num_channels)
699 return;
700
701	setup_dma_interrupt(EDMA_CTLR_CHAN(ctlr, channel), NULL, NULL);
702 /* REVISIT should probably take out of shadow region 0 */
703
704 memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
705 &dummy_paramset, PARM_SIZE);
706 clear_bit(channel, edma_cc[ctlr]->edma_inuse);
707}
708EXPORT_SYMBOL(edma_free_channel);
709
710/**
711 * edma_alloc_slot - allocate DMA parameter RAM
712 * @ctlr: EDMA controller instance
 * @slot: specific slot to allocate; negative for "any unused slot"
713 *
714 * This allocates a parameter RAM slot, initializing it to hold a
715 * dummy transfer. Slots allocated using this routine have not been
716 * mapped to a hardware DMA channel, and will normally be used by
717 * linking to them from a slot associated with a DMA channel.
718 *
719 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
720 * slots may be allocated on behalf of DSP firmware.
721 *
722 * Returns the number of the slot, else negative errno.
723 */
724int edma_alloc_slot(unsigned ctlr, int slot)
725{
726 if (!edma_cc[ctlr])
727 return -EINVAL;
728
729 if (slot >= 0)
730 slot = EDMA_CHAN_SLOT(slot);
731
732 if (slot < 0) {
733 slot = edma_cc[ctlr]->num_channels;
734 for (;;) {
735 slot = find_next_zero_bit(edma_cc[ctlr]->edma_inuse,
736 edma_cc[ctlr]->num_slots, slot);
737 if (slot == edma_cc[ctlr]->num_slots)
738 return -ENOMEM;
739 if (!test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse))
740 break;
741 }
742 } else if (slot < edma_cc[ctlr]->num_channels ||
743 slot >= edma_cc[ctlr]->num_slots) {
744 return -EINVAL;
745 } else if (test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse)) {
746 return -EBUSY;
747 }
748
749 memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
750 &dummy_paramset, PARM_SIZE);
751
752 return EDMA_CTLR_CHAN(ctlr, slot);
753}
754EXPORT_SYMBOL(edma_alloc_slot);
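
/*
 * Illustrative sketch only: grab any unused slot on controller 0 to
 * serve as a link target for a channel allocated earlier (see
 * edma_link() below).
 */
#if 0	/* example */
static int my_get_link_slot(void)
{
	int slot = edma_alloc_slot(0, EDMA_SLOT_ANY);

	if (slot < 0)
		return slot;	/* -EINVAL, -ENOMEM or -EBUSY */
	return slot;		/* encoded with EDMA_CTLR_CHAN() */
}
#endif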
755
756/**
757 * edma_free_slot - deallocate DMA parameter RAM
758 * @slot: parameter RAM slot returned from edma_alloc_slot()
759 *
760 * This deallocates the parameter RAM slot allocated by edma_alloc_slot().
761 * Callers are responsible for ensuring the slot is inactive, and will
762 * not be activated.
763 */
764void edma_free_slot(unsigned slot)
765{
766 unsigned ctlr;
767
768 ctlr = EDMA_CTLR(slot);
769 slot = EDMA_CHAN_SLOT(slot);
770
771 if (slot < edma_cc[ctlr]->num_channels ||
772 slot >= edma_cc[ctlr]->num_slots)
773 return;
774
775 memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
776 &dummy_paramset, PARM_SIZE);
777 clear_bit(slot, edma_cc[ctlr]->edma_inuse);
778}
779EXPORT_SYMBOL(edma_free_slot);
780
781
782/**
783 * edma_alloc_cont_slots - allocate contiguous parameter RAM slots
784 * and return the starting slot number of the requested set of
785 * contiguous parameter RAM slots
786 *
787 * @ctlr: EDMA controller instance
788 * @id: one of EDMA_CONT_PARAMS_ANY, EDMA_CONT_PARAMS_FIXED_EXACT
789 * or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
790 * @count: number of contiguous parameter RAM slots
791 * @slot: the starting parameter RAM slot, required when @id is
792 * EDMA_CONT_PARAMS_FIXED_EXACT or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
793 *
794 * If @id is EDMA_CONT_PARAMS_ANY, the API starts looking for a set of
795 * contiguous parameter RAM slots from the first slot past the DMA
796 * channels: slot 64 on DaVinci SoCs and slot 32 on DA8xx SoCs.
797 *
798 * If @id is EDMA_CONT_PARAMS_FIXED_EXACT, the API looks for a set of
799 * contiguous parameter RAM slots starting exactly at the @slot passed
800 * as an argument.
801 *
802 * If @id is EDMA_CONT_PARAMS_FIXED_NOT_EXACT, the API first looks for
803 * a set of contiguous parameter RAM slots starting from the @slot
804 * passed as an argument; on failure it tries to find a set among the
805 * remaining parameter RAM slots.
806 */
807int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count)
808{
809 /*
810 * The start slot requested should be greater than
811 * the number of channels and lesser than the total number
812 * of slots
813 */
814 if ((id != EDMA_CONT_PARAMS_ANY) &&
815 (slot < edma_cc[ctlr]->num_channels ||
816 slot >= edma_cc[ctlr]->num_slots))
817 return -EINVAL;
818
819 /*
820 * The number of parameter RAM slots requested cannot be less than 1
821 * and cannot be more than the number of slots minus the number of
822 * channels
823 */
824 if (count < 1 || count >
825 (edma_cc[ctlr]->num_slots - edma_cc[ctlr]->num_channels))
826 return -EINVAL;
827
828 switch (id) {
829 case EDMA_CONT_PARAMS_ANY:
830 return reserve_contiguous_slots(ctlr, id, count,
831 edma_cc[ctlr]->num_channels);
832 case EDMA_CONT_PARAMS_FIXED_EXACT:
833 case EDMA_CONT_PARAMS_FIXED_NOT_EXACT:
834 return reserve_contiguous_slots(ctlr, id, count, slot);
835 default:
836 return -EINVAL;
837 }
838
839}
840EXPORT_SYMBOL(edma_alloc_cont_slots);
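
/*
 * Illustrative sketch only: reserve four contiguous slots on
 * controller 0, preferring a fixed start but falling back to any
 * location (the start slot 70 is hypothetical).
 */
#if 0	/* example */
	int first;

	first = edma_alloc_cont_slots(0, EDMA_CONT_PARAMS_FIXED_NOT_EXACT,
				      70, 4);
	if (first < 0)
		first = edma_alloc_cont_slots(0, EDMA_CONT_PARAMS_ANY, 0, 4);
#endif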
841
842/**
843 * edma_free_cont_slots - deallocate DMA parameter RAM slots
844 * @slot: first parameter RAM of a set of parameter RAM slots to be freed
845 * @count: the number of contiguous parameter RAM slots to be freed
846 *
847 * This deallocates the parameter RAM slots allocated by
848 * edma_alloc_cont_slots.
849 * Callers/applications need to keep track of sets of contiguous
850 * parameter RAM slots that have been allocated using the edma_alloc_cont_slots
851 * API.
852 * Callers are responsible for ensuring the slots are inactive, and will
853 * not be activated.
854 */
855int edma_free_cont_slots(unsigned slot, int count)
856{
857 unsigned ctlr, slot_to_free;
858 int i;
859
860 ctlr = EDMA_CTLR(slot);
861 slot = EDMA_CHAN_SLOT(slot);
862
863 if (slot < edma_cc[ctlr]->num_channels ||
864 slot >= edma_cc[ctlr]->num_slots ||
865 count < 1)
866 return -EINVAL;
867
868 for (i = slot; i < slot + count; ++i) {
869 ctlr = EDMA_CTLR(i);
870 slot_to_free = EDMA_CHAN_SLOT(i);
871
872 memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot_to_free),
873 &dummy_paramset, PARM_SIZE);
874 clear_bit(slot_to_free, edma_cc[ctlr]->edma_inuse);
875 }
876
877 return 0;
878}
879EXPORT_SYMBOL(edma_free_cont_slots);
880
881/*-----------------------------------------------------------------------*/
882
883/* Parameter RAM operations (i) -- read/write partial slots */
884
885/**
886 * edma_set_src - set initial DMA source address in parameter RAM slot
887 * @slot: parameter RAM slot being configured
888 * @src_port: physical address of source (memory, controller FIFO, etc)
889 * @mode: INCR, except in very rare cases
890 * @width: ignored unless @mode is FIFO; otherwise specifies the
891 *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
892 *
893 * Note that the source address is modified during the DMA transfer
894 * according to edma_set_src_index().
895 */
896void edma_set_src(unsigned slot, dma_addr_t src_port,
897 enum address_mode mode, enum fifo_width width)
898{
899 unsigned ctlr;
900
901 ctlr = EDMA_CTLR(slot);
902 slot = EDMA_CHAN_SLOT(slot);
903
904 if (slot < edma_cc[ctlr]->num_slots) {
905 unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);
906
907 if (mode) {
908 /* set SAM and program FWID */
909 i = (i & ~(EDMA_FWID)) | (SAM | ((width & 0x7) << 8));
910 } else {
911 /* clear SAM */
912 i &= ~SAM;
913 }
914 edma_parm_write(ctlr, PARM_OPT, slot, i);
915
916 /* set the source port address
917 in source register of param structure */
918 edma_parm_write(ctlr, PARM_SRC, slot, src_port);
919 }
920}
921EXPORT_SYMBOL(edma_set_src);
922
923/**
924 * edma_set_dest - set initial DMA destination address in parameter RAM slot
925 * @slot: parameter RAM slot being configured
926 * @dest_port: physical address of destination (memory, controller FIFO, etc)
927 * @mode: INCR, except in very rare cases
928 * @width: ignored unless @mode is FIFO; otherwise specifies the
929 *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
930 *
931 * Note that the destination address is modified during the DMA transfer
932 * according to edma_set_dest_index().
933 */
934void edma_set_dest(unsigned slot, dma_addr_t dest_port,
935 enum address_mode mode, enum fifo_width width)
936{
937 unsigned ctlr;
938
939 ctlr = EDMA_CTLR(slot);
940 slot = EDMA_CHAN_SLOT(slot);
941
942 if (slot < edma_cc[ctlr]->num_slots) {
943 unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);
944
945 if (mode) {
946 /* set DAM and program FWID */
947 i = (i & ~(EDMA_FWID)) | (DAM | ((width & 0x7) << 8));
948 } else {
949 /* clear DAM */
950 i &= ~DAM;
951 }
952 edma_parm_write(ctlr, PARM_OPT, slot, i);
953 /* set the destination port address
954 in dest register of param structure */
955 edma_parm_write(ctlr, PARM_DST, slot, dest_port);
956 }
957}
958EXPORT_SYMBOL(edma_set_dest);
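
/*
 * Illustrative sketch only: a memory-to-peripheral setup, where the
 * source walks through a DMA-mapped buffer and the destination is a
 * fixed 32-bit peripheral FIFO register (both addresses hypothetical).
 */
#if 0	/* example */
	edma_set_src(slot, buf_dma_addr, INCR, W8BIT);
	edma_set_dest(slot, fifo_phys_addr, FIFO, W32BIT);
#endif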
959
960/**
961 * edma_get_position - returns the current transfer points
962 * @slot: parameter RAM slot being examined
963 * @src: pointer to source port position
964 * @dst: pointer to destination port position
965 *
966 * Returns current source and destination addresses for a particular
967 * parameter RAM slot. Its channel should not be active when this is called.
968 */
969void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst)
970{
971 struct edmacc_param temp;
972 unsigned ctlr;
973
974 ctlr = EDMA_CTLR(slot);
975 slot = EDMA_CHAN_SLOT(slot);
976
977 edma_read_slot(EDMA_CTLR_CHAN(ctlr, slot), &temp);
978 if (src != NULL)
979 *src = temp.src;
980 if (dst != NULL)
981 *dst = temp.dst;
982}
983EXPORT_SYMBOL(edma_get_position);
984
985/**
986 * edma_set_src_index - configure DMA source address indexing
987 * @slot: parameter RAM slot being configured
988 * @src_bidx: byte offset between source arrays in a frame
989 * @src_cidx: byte offset between source frames in a block
990 *
991 * Offsets are specified to support either contiguous or discontiguous
992 * memory transfers, or repeated access to a hardware register, as needed.
993 * When accessing hardware registers, both offsets are normally zero.
994 */
995void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx)
996{
997 unsigned ctlr;
998
999 ctlr = EDMA_CTLR(slot);
1000 slot = EDMA_CHAN_SLOT(slot);
1001
1002 if (slot < edma_cc[ctlr]->num_slots) {
1003 edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
1004 0xffff0000, src_bidx);
1005 edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
1006 0xffff0000, src_cidx);
1007 }
1008}
1009EXPORT_SYMBOL(edma_set_src_index);
1010
1011/**
1012 * edma_set_dest_index - configure DMA destination address indexing
1013 * @slot: parameter RAM slot being configured
1014 * @dest_bidx: byte offset between destination arrays in a frame
1015 * @dest_cidx: byte offset between destination frames in a block
1016 *
1017 * Offsets are specified to support either contiguous or discontiguous
1018 * memory transfers, or repeated access to a hardware register, as needed.
1019 * When accessing hardware registers, both offsets are normally zero.
1020 */
1021void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx)
1022{
1023 unsigned ctlr;
1024
1025 ctlr = EDMA_CTLR(slot);
1026 slot = EDMA_CHAN_SLOT(slot);
1027
1028 if (slot < edma_cc[ctlr]->num_slots) {
1029 edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
1030 0x0000ffff, dest_bidx << 16);
1031 edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
1032 0x0000ffff, dest_cidx << 16);
1033 }
1034}
1035EXPORT_SYMBOL(edma_set_dest_index);
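
/*
 * Illustrative sketch only: indexing for a fully contiguous transfer,
 * where each array of acnt bytes directly follows the previous one and
 * each frame of bcnt arrays follows the previous frame.
 */
#if 0	/* example */
	edma_set_src_index(slot, acnt, acnt * bcnt);
	edma_set_dest_index(slot, acnt, acnt * bcnt);
#endif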
1036
1037/**
1038 * edma_set_transfer_params - configure DMA transfer parameters
1039 * @slot: parameter RAM slot being configured
1040 * @acnt: how many bytes per array (at least one)
1041 * @bcnt: how many arrays per frame (at least one)
1042 * @ccnt: how many frames per block (at least one)
1043 * @bcnt_rld: used only for A-Synchronized transfers; this specifies
1044 * the value to reload into bcnt when it decrements to zero
1045 * @sync_mode: ASYNC or ABSYNC
1046 *
1047 * See the EDMA3 documentation to understand how to configure and link
1048 * transfers using the fields in PaRAM slots. If you are not doing it
1049 * all at once with edma_write_slot(), you will use this routine
1050 * plus two calls each for source and destination, setting the initial
1051 * address and saying how to index that address.
1052 *
1053 * An example of an A-Synchronized transfer is a serial link using a
1054 * single word shift register. In that case, @acnt would be equal to
1055 * that word size; the serial controller issues a DMA synchronization
1056 * event to transfer each word, and memory access by the DMA transfer
1057 * controller will be word-at-a-time.
1058 *
1059 * An example of an AB-Synchronized transfer is a device using a FIFO.
1060 * In that case, @acnt equals the FIFO width and @bcnt equals its depth.
1061 * The controller with the FIFO issues DMA synchronization events when
1062 * the FIFO threshold is reached, and the DMA transfer controller will
1063 * transfer one frame to (or from) the FIFO. It will probably use
1064 * efficient burst modes to access memory.
1065 */
1066void edma_set_transfer_params(unsigned slot,
1067 u16 acnt, u16 bcnt, u16 ccnt,
1068 u16 bcnt_rld, enum sync_dimension sync_mode)
1069{
1070 unsigned ctlr;
1071
1072 ctlr = EDMA_CTLR(slot);
1073 slot = EDMA_CHAN_SLOT(slot);
1074
1075 if (slot < edma_cc[ctlr]->num_slots) {
1076 edma_parm_modify(ctlr, PARM_LINK_BCNTRLD, slot,
1077 0x0000ffff, bcnt_rld << 16);
1078 if (sync_mode == ASYNC)
1079 edma_parm_and(ctlr, PARM_OPT, slot, ~SYNCDIM);
1080 else
1081 edma_parm_or(ctlr, PARM_OPT, slot, SYNCDIM);
1082 /* Set the acount, bcount, ccount registers */
1083 edma_parm_write(ctlr, PARM_A_B_CNT, slot, (bcnt << 16) | acnt);
1084 edma_parm_write(ctlr, PARM_CCNT, slot, ccnt);
1085 }
1086}
1087EXPORT_SYMBOL(edma_set_transfer_params);
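
/*
 * Illustrative sketch only: move 4096 bytes as one AB-synchronized
 * block of 256 arrays x 16 bytes, in a single frame, with no bcnt
 * reload.
 */
#if 0	/* example */
	edma_set_transfer_params(slot, 16, 256, 1, 0, ABSYNC);
#endif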
1088
1089/**
1090 * edma_link - link one parameter RAM slot to another
1091 * @from: parameter RAM slot originating the link
1092 * @to: parameter RAM slot which is the link target
1093 *
1094 * The originating slot should not be part of any active DMA transfer.
1095 */
1096void edma_link(unsigned from, unsigned to)
1097{
1098 unsigned ctlr_from, ctlr_to;
1099
1100 ctlr_from = EDMA_CTLR(from);
1101 from = EDMA_CHAN_SLOT(from);
1102 ctlr_to = EDMA_CTLR(to);
1103 to = EDMA_CHAN_SLOT(to);
1104
1105 if (from >= edma_cc[ctlr_from]->num_slots)
1106 return;
1107 if (to >= edma_cc[ctlr_to]->num_slots)
1108 return;
1109 edma_parm_modify(ctlr_from, PARM_LINK_BCNTRLD, from, 0xffff0000,
1110 PARM_OFFSET(to));
1111}
1112EXPORT_SYMBOL(edma_link);
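
/*
 * Illustrative sketch only: classic ping-pong buffering. The channel's
 * own slot and two extra slots reload each other so the transfer
 * alternates between two buffers (slot variables hypothetical).
 */
#if 0	/* example */
	edma_link(channel_slot, pong_slot);
	edma_link(pong_slot, ping_slot);
	edma_link(ping_slot, pong_slot);
#endif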
1113
1114/**
1115 * edma_unlink - cut link from one parameter RAM slot
1116 * @from: parameter RAM slot originating the link
1117 *
1118 * The originating slot should not be part of any active DMA transfer.
1119 * Its link is set to 0xffff.
1120 */
1121void edma_unlink(unsigned from)
1122{
1123 unsigned ctlr;
1124
1125 ctlr = EDMA_CTLR(from);
1126 from = EDMA_CHAN_SLOT(from);
1127
1128 if (from >= edma_cc[ctlr]->num_slots)
1129 return;
1130 edma_parm_or(ctlr, PARM_LINK_BCNTRLD, from, 0xffff);
1131}
1132EXPORT_SYMBOL(edma_unlink);
1133
1134/*-----------------------------------------------------------------------*/
1135
1136/* Parameter RAM operations (ii) -- read/write whole parameter sets */
1137
1138/**
1139 * edma_write_slot - write parameter RAM data for slot
1140 * @slot: number of parameter RAM slot being modified
1141 * @param: data to be written into parameter RAM slot
1142 *
1143 * Use this to assign all parameters of a transfer at once. This
1144 * allows more efficient setup of transfers than issuing multiple
1145 * calls to set up those parameters in small pieces, and provides
1146 * complete control over all transfer options.
1147 */
1148void edma_write_slot(unsigned slot, const struct edmacc_param *param)
1149{
1150 unsigned ctlr;
1151
1152 ctlr = EDMA_CTLR(slot);
1153 slot = EDMA_CHAN_SLOT(slot);
1154
1155 if (slot >= edma_cc[ctlr]->num_slots)
1156 return;
1157 memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), param,
1158 PARM_SIZE);
1159}
1160EXPORT_SYMBOL(edma_write_slot);
1161
1162/**
1163 * edma_read_slot - read parameter RAM data from slot
1164 * @slot: number of parameter RAM slot being copied
1165 * @param: where to store copy of parameter RAM data
1166 *
1167 * Use this to read data from a parameter RAM slot, perhaps to
1168 * save them as a template for later reuse.
1169 */
1170void edma_read_slot(unsigned slot, struct edmacc_param *param)
1171{
1172 unsigned ctlr;
1173
1174 ctlr = EDMA_CTLR(slot);
1175 slot = EDMA_CHAN_SLOT(slot);
1176
1177 if (slot >= edma_cc[ctlr]->num_slots)
1178 return;
1179 memcpy_fromio(param, edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
1180 PARM_SIZE);
1181}
1182EXPORT_SYMBOL(edma_read_slot);
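
/*
 * Illustrative sketch only: read a slot back as a template, tweak one
 * field, then write the whole parameter set in one go. The OPT bit
 * name TCINTEN is assumed from the EDMA header.
 */
#if 0	/* example */
	struct edmacc_param p;

	edma_read_slot(slot, &p);
	p.opt |= TCINTEN;	/* assumed OPT bit: completion interrupt */
	edma_write_slot(slot, &p);
#endif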
1183
1184/*-----------------------------------------------------------------------*/
1185
1186/* Various EDMA channel control operations */
1187
1188/**
1189 * edma_pause - pause dma on a channel
1190 * @channel: on which edma_start() has been called
1191 *
1192 * This temporarily disables EDMA hardware events on the specified channel,
1193 * preventing them from triggering new transfers on its behalf.
1194 */
1195void edma_pause(unsigned channel)
1196{
1197 unsigned ctlr;
1198
1199 ctlr = EDMA_CTLR(channel);
1200 channel = EDMA_CHAN_SLOT(channel);
1201
1202 if (channel < edma_cc[ctlr]->num_channels) {
1203 unsigned int mask = BIT(channel & 0x1f);
1204
1205 edma_shadow0_write_array(ctlr, SH_EECR, channel >> 5, mask);
1206 }
1207}
1208EXPORT_SYMBOL(edma_pause);
1209
1210/**
1211 * edma_resume - resumes dma on a paused channel
1212 * @channel: on which edma_pause() has been called
1213 *
1214 * This re-enables EDMA hardware events on the specified channel.
1215 */
1216void edma_resume(unsigned channel)
1217{
1218 unsigned ctlr;
1219
1220 ctlr = EDMA_CTLR(channel);
1221 channel = EDMA_CHAN_SLOT(channel);
1222
1223 if (channel < edma_cc[ctlr]->num_channels) {
1224 unsigned int mask = BIT(channel & 0x1f);
1225
1226 edma_shadow0_write_array(ctlr, SH_EESR, channel >> 5, mask);
1227 }
1228}
1229EXPORT_SYMBOL(edma_resume);
1230
1231/**
1232 * edma_start - start dma on a channel
1233 * @channel: channel being activated
1234 *
1235 * Channels with event associations will be triggered by their hardware
1236 * events, and channels without such associations will be triggered by
1237 * software. (At this writing there is no interface for using software
1238 * triggers except with channels that don't support hardware triggers.)
1239 *
1240 * Returns zero on success, else negative errno.
1241 */
1242int edma_start(unsigned channel)
1243{
1244 unsigned ctlr;
1245
1246 ctlr = EDMA_CTLR(channel);
1247 channel = EDMA_CHAN_SLOT(channel);
1248
1249 if (channel < edma_cc[ctlr]->num_channels) {
1250 int j = channel >> 5;
1251 unsigned int mask = BIT(channel & 0x1f);
1252
1253 /* EDMA channels without event association */
1254 if (test_bit(channel, edma_cc[ctlr]->edma_unused)) {
1255 pr_debug("EDMA: ESR%d %08x\n", j,
1256 edma_shadow0_read_array(ctlr, SH_ESR, j));
1257 edma_shadow0_write_array(ctlr, SH_ESR, j, mask);
1258 return 0;
1259 }
1260
1261 /* EDMA channel with event association */
1262 pr_debug("EDMA: ER%d %08x\n", j,
1263 edma_shadow0_read_array(ctlr, SH_ER, j));
1264 /* Clear any pending event or error */
1265 edma_write_array(ctlr, EDMA_ECR, j, mask);
1266 edma_write_array(ctlr, EDMA_EMCR, j, mask);
1267 /* Clear any SER */
1268 edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
1269 edma_shadow0_write_array(ctlr, SH_EESR, j, mask);
1270 pr_debug("EDMA: EER%d %08x\n", j,
1271 edma_shadow0_read_array(ctlr, SH_EER, j));
1272 return 0;
1273 }
1274
1275 return -EINVAL;
1276}
1277EXPORT_SYMBOL(edma_start);
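
/*
 * Illustrative sketch only: a minimal trigger sequence. For a channel
 * with a hardware event, edma_start() arms the event; for an unmapped
 * channel it fires the transfer by software via ESR.
 */
#if 0	/* example */
	int ret = edma_start(channel);

	if (ret == 0) {
		/* ... wait for the completion callback ... */
		edma_stop(channel);
	}
#endif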
1278
1279/**
1280 * edma_stop - stops dma on the channel passed
1281 * @channel: channel being deactivated
1282 *
1283 * Any active transfer on @channel is paused and all pending
1284 * hardware events are cleared. The current transfer may not be
1285 * resumed, and the channel's parameter RAM should be
1286 * reinitialized before being reused.
1287 */
1288void edma_stop(unsigned channel)
1289{
1290 unsigned ctlr;
1291
1292 ctlr = EDMA_CTLR(channel);
1293 channel = EDMA_CHAN_SLOT(channel);
1294
1295 if (channel < edma_cc[ctlr]->num_channels) {
1296 int j = channel >> 5;
1297 unsigned int mask = BIT(channel & 0x1f);
1298
1299 edma_shadow0_write_array(ctlr, SH_EECR, j, mask);
1300 edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
1301 edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
1302 edma_write_array(ctlr, EDMA_EMCR, j, mask);
1303
1304 pr_debug("EDMA: EER%d %08x\n", j,
1305 edma_shadow0_read_array(ctlr, SH_EER, j));
1306
1307 /* REVISIT: consider guarding against inappropriate event
1308 * chaining by overwriting with dummy_paramset.
1309 */
1310 }
1311}
1312EXPORT_SYMBOL(edma_stop);
1313
1314/******************************************************************************
1315 *
1316 * Cleans the parameter RAM entry and brings EDMA back to its initial
1317 * state if the media was removed before EDMA finished. It is useful
1318 * for removable media.
1319 * Arguments:
1320 *	channel - channel number
1321 *
1322 * FIXME this should not be needed ... edma_stop() should suffice.
1323 *
1324 *****************************************************************************/
1326
1327void edma_clean_channel(unsigned channel)
1328{
1329 unsigned ctlr;
1330
1331 ctlr = EDMA_CTLR(channel);
1332 channel = EDMA_CHAN_SLOT(channel);
1333
1334 if (channel < edma_cc[ctlr]->num_channels) {
1335 int j = (channel >> 5);
1336 unsigned int mask = BIT(channel & 0x1f);
1337
1338 pr_debug("EDMA: EMR%d %08x\n", j,
1339 edma_read_array(ctlr, EDMA_EMR, j));
1340 edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
1341 /* Clear the corresponding EMR bits */
1342 edma_write_array(ctlr, EDMA_EMCR, j, mask);
1343 /* Clear any SER */
1344 edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
1345 edma_write(ctlr, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
1346 }
1347}
1348EXPORT_SYMBOL(edma_clean_channel);
1349
1350/*
1351 * edma_clear_event - clear an outstanding event on the DMA channel
1352 * Arguments:
1353 * channel - channel number
1354 */
1355void edma_clear_event(unsigned channel)
1356{
1357 unsigned ctlr;
1358
1359 ctlr = EDMA_CTLR(channel);
1360 channel = EDMA_CHAN_SLOT(channel);
1361
1362 if (channel >= edma_cc[ctlr]->num_channels)
1363 return;
1364 if (channel < 32)
1365 edma_write(ctlr, EDMA_ECR, BIT(channel));
1366 else
1367 edma_write(ctlr, EDMA_ECRH, BIT(channel - 32));
1368}
1369EXPORT_SYMBOL(edma_clear_event);
1370
1371/*-----------------------------------------------------------------------*/
1372
1373static int __init edma_probe(struct platform_device *pdev)
1374{
1375 struct edma_soc_info **info = pdev->dev.platform_data;
1376 const s8 (*queue_priority_mapping)[2];
1377 const s8 (*queue_tc_mapping)[2];
1378 int i, j, off, ln, found = 0;
1379 int status = -1;
1380 const s16 (*rsv_chans)[2];
1381 const s16 (*rsv_slots)[2];
1382 int irq[EDMA_MAX_CC] = {0, 0};
1383 int err_irq[EDMA_MAX_CC] = {0, 0};
1384 struct resource *r[EDMA_MAX_CC] = {NULL};
1385 resource_size_t len[EDMA_MAX_CC];
1386 char res_name[10];
1387 char irq_name[10];
1388
1389 if (!info)
1390 return -ENODEV;
1391
1392 for (j = 0; j < EDMA_MAX_CC; j++) {
1393 sprintf(res_name, "edma_cc%d", j);
1394 r[j] = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1395 res_name);
1396 if (!r[j] || !info[j]) {
1397 if (found)
1398 break;
1399 else
1400 return -ENODEV;
1401 } else {
1402 found = 1;
1403 }
1404
1405 len[j] = resource_size(r[j]);
1406
1407 r[j] = request_mem_region(r[j]->start, len[j],
1408 dev_name(&pdev->dev));
1409 if (!r[j]) {
1410 status = -EBUSY;
1411 goto fail1;
1412 }
1413
1414 edmacc_regs_base[j] = ioremap(r[j]->start, len[j]);
1415 if (!edmacc_regs_base[j]) {
1416 status = -EBUSY;
1417 goto fail1;
1418 }
1419
1420 edma_cc[j] = kzalloc(sizeof(struct edma), GFP_KERNEL);
1421 if (!edma_cc[j]) {
1422 status = -ENOMEM;
1423 goto fail1;
1424 }
1425
1426 edma_cc[j]->num_channels = min_t(unsigned, info[j]->n_channel,
1427 EDMA_MAX_DMACH);
1428 edma_cc[j]->num_slots = min_t(unsigned, info[j]->n_slot,
1429 EDMA_MAX_PARAMENTRY);
1430 edma_cc[j]->num_cc = min_t(unsigned, info[j]->n_cc,
1431 EDMA_MAX_CC);
1432
1433 edma_cc[j]->default_queue = info[j]->default_queue;
1434
1435 dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n",
1436 edmacc_regs_base[j]);
1437
1438 for (i = 0; i < edma_cc[j]->num_slots; i++)
1439 memcpy_toio(edmacc_regs_base[j] + PARM_OFFSET(i),
1440 &dummy_paramset, PARM_SIZE);
1441
1442 /* Mark all channels as unused */
1443 memset(edma_cc[j]->edma_unused, 0xff,
1444 sizeof(edma_cc[j]->edma_unused));
1445
1446 if (info[j]->rsv) {
1447
1448 /* Clear the reserved channels in unused list */
1449 rsv_chans = info[j]->rsv->rsv_chans;
1450 if (rsv_chans) {
1451 for (i = 0; rsv_chans[i][0] != -1; i++) {
1452 off = rsv_chans[i][0];
1453 ln = rsv_chans[i][1];
1454 clear_bits(off, ln,
1455 edma_cc[j]->edma_unused);
1456 }
1457 }
1458
1459 /* Set the reserved slots in inuse list */
1460 rsv_slots = info[j]->rsv->rsv_slots;
1461 if (rsv_slots) {
1462 for (i = 0; rsv_slots[i][0] != -1; i++) {
1463 off = rsv_slots[i][0];
1464 ln = rsv_slots[i][1];
1465 set_bits(off, ln,
1466 edma_cc[j]->edma_inuse);
1467 }
1468 }
1469 }
1470
1471 sprintf(irq_name, "edma%d", j);
1472 irq[j] = platform_get_irq_byname(pdev, irq_name);
1473 edma_cc[j]->irq_res_start = irq[j];
1474 status = request_irq(irq[j], dma_irq_handler, 0, "edma",
1475 &pdev->dev);
1476 if (status < 0) {
1477 dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
1478 irq[j], status);
1479 goto fail;
1480 }
1481
1482 sprintf(irq_name, "edma%d_err", j);
1483 err_irq[j] = platform_get_irq_byname(pdev, irq_name);
1484 edma_cc[j]->irq_res_end = err_irq[j];
1485 status = request_irq(err_irq[j], dma_ccerr_handler, 0,
1486 "edma_error", &pdev->dev);
1487 if (status < 0) {
1488 dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
1489 err_irq[j], status);
1490 goto fail;
1491 }
1492
1493 for (i = 0; i < edma_cc[j]->num_channels; i++)
1494 map_dmach_queue(j, i, info[j]->default_queue);
1495
1496 queue_tc_mapping = info[j]->queue_tc_mapping;
1497 queue_priority_mapping = info[j]->queue_priority_mapping;
1498
1499 /* Event queue to TC mapping */
1500 for (i = 0; queue_tc_mapping[i][0] != -1; i++)
1501 map_queue_tc(j, queue_tc_mapping[i][0],
1502 queue_tc_mapping[i][1]);
1503
1504 /* Event queue priority mapping */
1505 for (i = 0; queue_priority_mapping[i][0] != -1; i++)
1506 assign_priority_to_queue(j,
1507 queue_priority_mapping[i][0],
1508 queue_priority_mapping[i][1]);
1509
1510 /* Map the channel to param entry if channel mapping logic
1511 * exist
1512 */
1513 if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
1514 map_dmach_param(j);
1515
1516 for (i = 0; i < info[j]->n_region; i++) {
1517 edma_write_array2(j, EDMA_DRAE, i, 0, 0x0);
1518 edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
1519 edma_write_array(j, EDMA_QRAE, i, 0x0);
1520 }
1521 arch_num_cc++;
1522 }
1523
1524 return 0;
1525
1526fail:
1527 for (i = 0; i < EDMA_MAX_CC; i++) {
1528 if (err_irq[i])
1529 free_irq(err_irq[i], &pdev->dev);
1530 if (irq[i])
1531 free_irq(irq[i], &pdev->dev);
1532 }
1533fail1:
1534 for (i = 0; i < EDMA_MAX_CC; i++) {
1535 if (r[i])
1536 release_mem_region(r[i]->start, len[i]);
1537 if (edmacc_regs_base[i])
1538 iounmap(edmacc_regs_base[i]);
1539 kfree(edma_cc[i]);
1540 }
1541 return status;
1542}
1543
1544
1545static struct platform_driver edma_driver = {
1546 .driver.name = "edma",
1547};
1548
1549static int __init edma_init(void)
1550{
1551 return platform_driver_probe(&edma_driver, edma_probe);
1552}
1553arch_initcall(edma_init);
1554