Diffstat (limited to 'arch/arm/plat-s3c64xx/dma.c')
-rw-r--r--  arch/arm/plat-s3c64xx/dma.c  722
1 file changed, 722 insertions, 0 deletions
diff --git a/arch/arm/plat-s3c64xx/dma.c b/arch/arm/plat-s3c64xx/dma.c
new file mode 100644
index 000000000000..67aa93dbb69e
--- /dev/null
+++ b/arch/arm/plat-s3c64xx/dma.c
@@ -0,0 +1,722 @@
/* linux/arch/arm/plat-s3c64xx/dma.c
 *
 * Copyright 2009 Openmoko, Inc.
 * Copyright 2009 Simtec Electronics
 *	Ben Dooks <ben@simtec.co.uk>
 *	http://armlinux.simtec.co.uk/
 *
 * S3C64XX DMA core
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/sysdev.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>

#include <mach/dma.h>
#include <mach/map.h>
#include <mach/irqs.h>

#include <plat/dma-plat.h>
#include <plat/regs-sys.h>

#include <asm/hardware/pl080.h>

/* dma channel state information */

struct s3c64xx_dmac {
	struct sys_device	sysdev;
	struct clk		*clk;
	void __iomem		*regs;
	struct s3c2410_dma_chan	*channels;
	enum dma_ch		chanbase;
};

/* pool to provide LLI buffers */
static struct dma_pool *dma_pool;

/* Debug configuration and code */

static unsigned char debug_show_buffs = 0;

static void dbg_showchan(struct s3c2410_dma_chan *chan)
{
	pr_debug("DMA%d: %08x->%08x L %08x C %08x,%08x S %08x\n",
		 chan->number,
		 readl(chan->regs + PL080_CH_SRC_ADDR),
		 readl(chan->regs + PL080_CH_DST_ADDR),
		 readl(chan->regs + PL080_CH_LLI),
		 readl(chan->regs + PL080_CH_CONTROL),
		 readl(chan->regs + PL080S_CH_CONTROL2),
		 readl(chan->regs + PL080S_CH_CONFIG));
}

static void show_lli(struct pl080s_lli *lli)
{
	pr_debug("LLI[%p] %08x->%08x, NL %08x C %08x,%08x\n",
		 lli, lli->src_addr, lli->dst_addr, lli->next_lli,
		 lli->control0, lli->control1);
}

static void dbg_showbuffs(struct s3c2410_dma_chan *chan)
{
	struct s3c64xx_dma_buff *ptr;
	struct s3c64xx_dma_buff *end;

	pr_debug("DMA%d: buffs next %p, curr %p, end %p\n",
		 chan->number, chan->next, chan->curr, chan->end);

	ptr = chan->next;
	end = chan->end;

	if (debug_show_buffs) {
		for (; ptr != NULL; ptr = ptr->next) {
			pr_debug("DMA%d: %08x ",
				 chan->number, ptr->lli_dma);
			show_lli(ptr->lli);
		}
	}
}

/* End of Debug */

static struct s3c2410_dma_chan *s3c64xx_dma_map_channel(unsigned int channel)
{
	struct s3c2410_dma_chan *chan;
	unsigned int start, offs;

	start = 0;

	if (channel >= DMACH_PCM1_TX)
		start = 8;

	for (offs = 0; offs < 8; offs++) {
		chan = &s3c2410_chans[start + offs];
		if (!chan->in_use)
			goto found;
	}

	return NULL;

found:
	s3c_dma_chan_map[channel] = chan;
	return chan;
}

int s3c2410_dma_config(unsigned int channel, int xferunit)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);

	if (chan == NULL)
		return -EINVAL;

	switch (xferunit) {
	case 1:
		chan->hw_width = 0;
		break;
	case 2:
		chan->hw_width = 1;
		break;
	case 4:
		chan->hw_width = 2;
		break;
	default:
		printk(KERN_ERR "%s: illegal width %d\n", __func__, xferunit);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(s3c2410_dma_config);
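
/*
 * Illustrative usage sketch, not part of the original file: the transfer
 * unit is the width in bytes of each bus transfer.  "ch" is assumed to be
 * the channel cookie returned by s3c2410_dma_request() (defined later in
 * this file).
 *
 *	s3c2410_dma_config(ch, 1);	// byte transfers, hw_width = 0
 *	s3c2410_dma_config(ch, 4);	// word transfers, hw_width = 2
 */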

static void s3c64xx_dma_fill_lli(struct s3c2410_dma_chan *chan,
				 struct pl080s_lli *lli,
				 dma_addr_t data, int size)
{
	dma_addr_t src, dst;
	u32 control0, control1;

	switch (chan->source) {
	case S3C2410_DMASRC_HW:
		src = chan->dev_addr;
		dst = data;
		control0 = PL080_CONTROL_SRC_AHB2;
		control0 |= (u32)chan->hw_width << PL080_CONTROL_SWIDTH_SHIFT;
		control0 |= 2 << PL080_CONTROL_DWIDTH_SHIFT;
		control0 |= PL080_CONTROL_DST_INCR;
		break;

	case S3C2410_DMASRC_MEM:
		src = data;
		dst = chan->dev_addr;
		control0 = PL080_CONTROL_DST_AHB2;
		control0 |= (u32)chan->hw_width << PL080_CONTROL_DWIDTH_SHIFT;
		control0 |= 2 << PL080_CONTROL_SWIDTH_SHIFT;
		control0 |= PL080_CONTROL_SRC_INCR;
		break;
	default:
		BUG();
	}

	/* note, we do not currently set up any of the burst controls */

	control1 = size >> chan->hw_width;	/* size in no of xfers */
	control0 |= PL080_CONTROL_PROT_SYS;	/* always in priv. mode */
	control0 |= PL080_CONTROL_TC_IRQ_EN;	/* always fire IRQ */

	lli->src_addr = src;
	lli->dst_addr = dst;
	lli->next_lli = 0;
	lli->control0 = control0;
	lli->control1 = control1;
}
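
/*
 * Worked example, not part of the original file: for a 512 byte buffer on
 * a channel configured for 4 byte units (hw_width == 2), control1 becomes
 * 512 >> 2 == 128, i.e. the hardware is programmed with the number of
 * transfer units rather than the number of bytes.
 */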

static void s3c64xx_lli_to_regs(struct s3c2410_dma_chan *chan,
				struct pl080s_lli *lli)
{
	void __iomem *regs = chan->regs;

	pr_debug("%s: LLI %p => regs\n", __func__, lli);
	show_lli(lli);

	writel(lli->src_addr, regs + PL080_CH_SRC_ADDR);
	writel(lli->dst_addr, regs + PL080_CH_DST_ADDR);
	writel(lli->next_lli, regs + PL080_CH_LLI);
	writel(lli->control0, regs + PL080_CH_CONTROL);
	writel(lli->control1, regs + PL080S_CH_CONTROL2);
}

static int s3c64xx_dma_start(struct s3c2410_dma_chan *chan)
{
	struct s3c64xx_dmac *dmac = chan->dmac;
	u32 config;
	u32 bit = chan->bit;

	dbg_showchan(chan);

	pr_debug("%s: clearing interrupts\n", __func__);

	/* clear interrupts */
	writel(bit, dmac->regs + PL080_TC_CLEAR);
	writel(bit, dmac->regs + PL080_ERR_CLEAR);

	pr_debug("%s: starting channel\n", __func__);

	config = readl(chan->regs + PL080S_CH_CONFIG);
	config |= PL080_CONFIG_ENABLE;

	pr_debug("%s: writing config %08x\n", __func__, config);
	writel(config, chan->regs + PL080S_CH_CONFIG);

	return 0;
}

static int s3c64xx_dma_stop(struct s3c2410_dma_chan *chan)
{
	u32 config;
	int timeout;

	pr_debug("%s: stopping channel\n", __func__);

	dbg_showchan(chan);

	config = readl(chan->regs + PL080S_CH_CONFIG);
	config |= PL080_CONFIG_HALT;
	writel(config, chan->regs + PL080S_CH_CONFIG);

	timeout = 1000;
	do {
		config = readl(chan->regs + PL080S_CH_CONFIG);
		pr_debug("%s: %d - config %08x\n", __func__, timeout, config);
		if (config & PL080_CONFIG_ACTIVE)
			udelay(10);
		else
			break;
	} while (--timeout > 0);

	if (config & PL080_CONFIG_ACTIVE) {
		printk(KERN_ERR "%s: channel still active\n", __func__);
		return -EFAULT;
	}

	config = readl(chan->regs + PL080S_CH_CONFIG);
	config &= ~PL080_CONFIG_ENABLE;
	writel(config, chan->regs + PL080S_CH_CONFIG);

	return 0;
}

static inline void s3c64xx_dma_bufffdone(struct s3c2410_dma_chan *chan,
					 struct s3c64xx_dma_buff *buf,
					 enum s3c2410_dma_buffresult result)
{
	if (chan->callback_fn != NULL)
		(chan->callback_fn)(chan, buf->pw, 0, result);
}

static void s3c64xx_dma_freebuff(struct s3c64xx_dma_buff *buff)
{
	dma_pool_free(dma_pool, buff->lli, buff->lli_dma);
	kfree(buff);
}

static int s3c64xx_dma_flush(struct s3c2410_dma_chan *chan)
{
	struct s3c64xx_dma_buff *buff, *next;
	u32 config;

	dbg_showchan(chan);

	pr_debug("%s: flushing channel\n", __func__);

	config = readl(chan->regs + PL080S_CH_CONFIG);
	config &= ~PL080_CONFIG_ENABLE;
	writel(config, chan->regs + PL080S_CH_CONFIG);

	/* dump all the buffers associated with this channel */

	for (buff = chan->curr; buff != NULL; buff = next) {
		next = buff->next;
		pr_debug("%s: buff %p (next %p)\n", __func__, buff, buff->next);

		s3c64xx_dma_bufffdone(chan, buff, S3C2410_RES_ABORT);
		s3c64xx_dma_freebuff(buff);
	}

	chan->curr = chan->next = chan->end = NULL;

	return 0;
}

int s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);

	WARN_ON(!chan);
	if (!chan)
		return -EINVAL;

	switch (op) {
	case S3C2410_DMAOP_START:
		return s3c64xx_dma_start(chan);

	case S3C2410_DMAOP_STOP:
		return s3c64xx_dma_stop(chan);

	case S3C2410_DMAOP_FLUSH:
		return s3c64xx_dma_flush(chan);

	/* believe PAUSE/RESUME are no-ops */
	case S3C2410_DMAOP_PAUSE:
	case S3C2410_DMAOP_RESUME:
	case S3C2410_DMAOP_STARTED:
	case S3C2410_DMAOP_TIMEOUT:
		return 0;
	}

	return -ENOENT;
}
EXPORT_SYMBOL(s3c2410_dma_ctrl);
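
/*
 * Illustrative sketch, not part of the original file: driving a channel
 * through the op interface once buffers have been queued.  "ch" is the
 * cookie returned by s3c2410_dma_request().
 *
 *	s3c2410_dma_ctrl(ch, S3C2410_DMAOP_START);
 *	// ... wait for the transfer callback(s) ...
 *	s3c2410_dma_ctrl(ch, S3C2410_DMAOP_STOP);
 *	s3c2410_dma_ctrl(ch, S3C2410_DMAOP_FLUSH);
 */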

/* s3c2410_dma_enqueue
 *
 * queue the given buffer for transfer on the channel.
 */

int s3c2410_dma_enqueue(unsigned int channel, void *id,
			dma_addr_t data, int size)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
	struct s3c64xx_dma_buff *next;
	struct s3c64xx_dma_buff *buff;
	struct pl080s_lli *lli;
	int ret;

	WARN_ON(!chan);
	if (!chan)
		return -EINVAL;

	buff = kzalloc(sizeof(struct s3c64xx_dma_buff), GFP_KERNEL);
	if (!buff) {
		printk(KERN_ERR "%s: no memory for buffer\n", __func__);
		return -ENOMEM;
	}

	lli = dma_pool_alloc(dma_pool, GFP_KERNEL, &buff->lli_dma);
	if (!lli) {
		printk(KERN_ERR "%s: no memory for lli\n", __func__);
		ret = -ENOMEM;
		goto err_buff;
	}

	pr_debug("%s: buff %p, dp %08x lli (%p, %08x) %d\n",
		 __func__, buff, data, lli, (u32)buff->lli_dma, size);

	buff->lli = lli;
	buff->pw = id;

	s3c64xx_dma_fill_lli(chan, lli, data, size);

	if ((next = chan->next) != NULL) {
		struct s3c64xx_dma_buff *end = chan->end;
		struct pl080s_lli *endlli = end->lli;

		pr_debug("enqueuing onto channel\n");

		end->next = buff;
		endlli->next_lli = buff->lli_dma;

		if (chan->flags & S3C2410_DMAF_CIRCULAR) {
			struct s3c64xx_dma_buff *curr = chan->curr;
			lli->next_lli = curr->lli_dma;
		}

		if (next == chan->curr) {
			writel(buff->lli_dma, chan->regs + PL080_CH_LLI);
			chan->next = buff;
		}

		show_lli(endlli);
		chan->end = buff;
	} else {
		pr_debug("enqueuing onto empty channel\n");

		chan->curr = buff;
		chan->next = buff;
		chan->end = buff;

		s3c64xx_lli_to_regs(chan, lli);
	}

	show_lli(lli);

	dbg_showchan(chan);
	dbg_showbuffs(chan);
	return 0;

err_buff:
	kfree(buff);
	return ret;
}

EXPORT_SYMBOL(s3c2410_dma_enqueue);
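
/*
 * Illustrative sketch, not part of the original file: queueing a buffer.
 * "dev", "buf", "len" and "my_token" are hypothetical; the buffer must be
 * mapped for DMA first so that a bus address is passed in, and "my_token"
 * is handed back to the client's callback as the "pw" argument.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	ret = s3c2410_dma_enqueue(ch, my_token, dma, len);
 */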


int s3c2410_dma_devconfig(int channel,
			  enum s3c2410_dmasrc source,
			  unsigned long devaddr)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
	u32 peripheral;
	u32 config = 0;

	pr_debug("%s: channel %d, source %d, dev %08lx, chan %p\n",
		 __func__, channel, source, devaddr, chan);

	WARN_ON(!chan);
	if (!chan)
		return -EINVAL;

	peripheral = (chan->peripheral & 0xf);
	chan->source = source;
	chan->dev_addr = devaddr;

	pr_debug("%s: peripheral %d\n", __func__, peripheral);

	switch (source) {
	case S3C2410_DMASRC_HW:
		config = 2 << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		config |= peripheral << PL080_CONFIG_SRC_SEL_SHIFT;
		break;
	case S3C2410_DMASRC_MEM:
		config = 1 << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		config |= peripheral << PL080_CONFIG_DST_SEL_SHIFT;
		break;
	default:
		printk(KERN_ERR "%s: bad source\n", __func__);
		return -EINVAL;
	}

	/* allow TC and ERR interrupts */
	config |= PL080_CONFIG_TC_IRQ_MASK;
	config |= PL080_CONFIG_ERR_IRQ_MASK;

	pr_debug("%s: config %08x\n", __func__, config);

	writel(config, chan->regs + PL080S_CH_CONFIG);

	return 0;
}
EXPORT_SYMBOL(s3c2410_dma_devconfig);
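
/*
 * Illustrative sketch, not part of the original file: a memory-to-device
 * (TX) configuration.  "tx_fifo_phys" is a hypothetical peripheral FIFO
 * address, normally taken from the peripheral's register map.
 *
 *	s3c2410_dma_devconfig(ch, S3C2410_DMASRC_MEM, tx_fifo_phys);
 */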


int s3c2410_dma_getposition(unsigned int channel,
			    dma_addr_t *src, dma_addr_t *dst)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);

	WARN_ON(!chan);
	if (!chan)
		return -EINVAL;

	if (src != NULL)
		*src = readl(chan->regs + PL080_CH_SRC_ADDR);

	if (dst != NULL)
		*dst = readl(chan->regs + PL080_CH_DST_ADDR);

	return 0;
}
EXPORT_SYMBOL(s3c2410_dma_getposition);

/* s3c2410_dma_request
 *
 * get control of a dma channel
*/

int s3c2410_dma_request(unsigned int channel,
			struct s3c2410_dma_client *client,
			void *dev)
{
	struct s3c2410_dma_chan *chan;
	unsigned long flags;

	pr_debug("dma%d: s3c2410_dma_request: client=%s, dev=%p\n",
		 channel, client->name, dev);

	local_irq_save(flags);

	chan = s3c64xx_dma_map_channel(channel);
	if (chan == NULL) {
		local_irq_restore(flags);
		return -EBUSY;
	}

	dbg_showchan(chan);

	chan->client = client;
	chan->in_use = 1;
	chan->peripheral = channel;

	local_irq_restore(flags);

	/* need to set up */

	pr_debug("%s: channel initialised, %p\n", __func__, chan);

	return chan->number | DMACH_LOW_LEVEL;
}

EXPORT_SYMBOL(s3c2410_dma_request);
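
/*
 * Illustrative sketch, not part of the original file: claiming a channel.
 * The client structure here is hypothetical.  On success the return value
 * is the hardware channel number with DMACH_LOW_LEVEL set, and that cookie
 * is what the other s3c2410_dma_*() calls above expect.
 *
 *	static struct s3c2410_dma_client my_client = { .name = "my-dev" };
 *
 *	int ch = s3c2410_dma_request(DMACH_UART0, &my_client, NULL);
 *	if (ch < 0)
 *		return ch;	// -EBUSY when the bank has no free channel
 */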

/* s3c2410_dma_free
 *
 * release the given channel back to the system, will stop and flush
 * any outstanding transfers, and ensure the channel is ready for the
 * next claimant.
 *
 * Note, although a warning is currently printed if the freeing client
 * info is not the same as the registrant's client info, the free is still
 * allowed to go through.
*/

int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *client)
{
	struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
	unsigned long flags;

	if (chan == NULL)
		return -EINVAL;

	local_irq_save(flags);

	if (chan->client != client) {
		printk(KERN_WARNING "dma%d: possible free from different client (channel %p, passed %p)\n",
		       channel, chan->client, client);
	}

	/* sort out stopping and freeing the channel */

	chan->client = NULL;
	chan->in_use = 0;

	if (!(channel & DMACH_LOW_LEVEL))
		s3c_dma_chan_map[channel] = NULL;

	local_irq_restore(flags);

	return 0;
}

EXPORT_SYMBOL(s3c2410_dma_free);
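
/*
 * Illustrative sketch, not part of the original file: releasing a channel
 * with the client that claimed it.  A mismatched client only triggers the
 * warning above; the free still goes ahead.
 *
 *	s3c2410_dma_ctrl(ch, S3C2410_DMAOP_STOP);
 *	s3c2410_dma_free(ch, &my_client);
 */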


static void s3c64xx_dma_tcirq(struct s3c64xx_dmac *dmac, int offs)
{
	struct s3c2410_dma_chan *chan = dmac->channels + offs;

	/* note, we currently do not bother to work out which buffer
	 * or buffers have been completed since the last tc-irq. */

	if (chan->callback_fn)
		(chan->callback_fn)(chan, chan->curr->pw, 0, S3C2410_RES_OK);
}

static void s3c64xx_dma_errirq(struct s3c64xx_dmac *dmac, int offs)
{
	printk(KERN_DEBUG "%s: offs %d\n", __func__, offs);
}

static irqreturn_t s3c64xx_dma_irq(int irq, void *pw)
{
	struct s3c64xx_dmac *dmac = pw;
	u32 tcstat, errstat;
	u32 bit;
	int offs;

	tcstat = readl(dmac->regs + PL080_TC_STATUS);
	errstat = readl(dmac->regs + PL080_ERR_STATUS);

	for (offs = 0, bit = 1; offs < 8; offs++, bit <<= 1) {
		if (tcstat & bit) {
			writel(bit, dmac->regs + PL080_TC_CLEAR);
			s3c64xx_dma_tcirq(dmac, offs);
		}

		if (errstat & bit) {
			s3c64xx_dma_errirq(dmac, offs);
			writel(bit, dmac->regs + PL080_ERR_CLEAR);
		}
	}

	return IRQ_HANDLED;
}

static struct sysdev_class dma_sysclass = {
	.name		= "s3c64xx-dma",
};

static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,
			     int irq, unsigned int base)
{
	struct s3c2410_dma_chan *chptr = &s3c2410_chans[chno];
	struct s3c64xx_dmac *dmac;
	char clkname[16];
	void __iomem *regs;
	void __iomem *regptr;
	int err, ch;

	dmac = kzalloc(sizeof(struct s3c64xx_dmac), GFP_KERNEL);
	if (!dmac) {
		printk(KERN_ERR "%s: failed to alloc mem\n", __func__);
		return -ENOMEM;
	}

	dmac->sysdev.id = chno / 8;
	dmac->sysdev.cls = &dma_sysclass;

	err = sysdev_register(&dmac->sysdev);
	if (err) {
		printk(KERN_ERR "%s: failed to register sysdevice\n", __func__);
		goto err_alloc;
	}

	regs = ioremap(base, 0x200);
	if (!regs) {
		printk(KERN_ERR "%s: failed to ioremap()\n", __func__);
		err = -ENXIO;
		goto err_dev;
	}

	snprintf(clkname, sizeof(clkname), "dma%d", dmac->sysdev.id);

	dmac->clk = clk_get(NULL, clkname);
	if (IS_ERR(dmac->clk)) {
		printk(KERN_ERR "%s: failed to get clock %s\n", __func__, clkname);
		err = PTR_ERR(dmac->clk);
		goto err_map;
	}

	clk_enable(dmac->clk);

	dmac->regs = regs;
	dmac->chanbase = chbase;
	dmac->channels = chptr;

	err = request_irq(irq, s3c64xx_dma_irq, 0, "DMA", dmac);
	if (err < 0) {
		printk(KERN_ERR "%s: failed to get irq\n", __func__);
		goto err_clk;
	}

	regptr = regs + PL080_Cx_BASE(0);

	for (ch = 0; ch < 8; ch++, chno++, chptr++) {
		printk(KERN_INFO "%s: registering DMA %d (%p)\n",
		       __func__, chno, regptr);

		chptr->bit = 1 << ch;
		chptr->number = chno;
		chptr->dmac = dmac;
		chptr->regs = regptr;
		regptr += PL008_Cx_STRIDE;
	}

	/* for the moment, permanently enable the controller */
	writel(PL080_CONFIG_ENABLE, regs + PL080_CONFIG);

	printk(KERN_INFO "PL080: IRQ %d, at %p\n", irq, regs);

	return 0;

err_clk:
	clk_disable(dmac->clk);
	clk_put(dmac->clk);
err_map:
	iounmap(regs);
err_dev:
	sysdev_unregister(&dmac->sysdev);
err_alloc:
	kfree(dmac);
	return err;
}

static int __init s3c64xx_dma_init(void)
{
	int ret;

	printk(KERN_INFO "%s: Registering DMA channels\n", __func__);

	dma_pool = dma_pool_create("DMA-LLI", NULL, 32, 16, 0);
	if (!dma_pool) {
		printk(KERN_ERR "%s: failed to create pool\n", __func__);
		return -ENOMEM;
	}

	ret = sysdev_class_register(&dma_sysclass);
	if (ret) {
		printk(KERN_ERR "%s: failed to create sysclass\n", __func__);
		return -ENOMEM;
	}

	/* Set all DMA configuration to be DMA, not SDMA */
	writel(0xffffff, S3C_SYSREG(0x110));

	/* Register standard DMA controllers */
	s3c64xx_dma_init1(0, DMACH_UART0, IRQ_DMA0, 0x75000000);
	s3c64xx_dma_init1(8, DMACH_PCM1_TX, IRQ_DMA1, 0x75100000);

	return 0;
}

arch_initcall(s3c64xx_dma_init);