author     Tomasz Figa <tomasz.figa@gmail.com>        2013-10-16 15:10:56 -0400
committer  Mark Brown <broonie@linaro.org>            2013-11-24 09:38:25 -0500
commit     15469ed37f8a9c004ac537495f9f7c51790a80c0 (patch)
tree       c0504f959f4a6a3c90f2bf4d2ea15410a7c163c0 /arch/arm/mach-s3c64xx
parent     d37f7617bd677c46c49daa3c023920cb91fe14db (diff)
ARM: s3c64xx: Remove legacy DMA driver
Since support for the generic PL08x DMA engine driver has been added, there
is no need to keep the old legacy driver, so this patch removes it.

Signed-off-by: Tomasz Figa <tomasz.figa@gmail.com>
Signed-off-by: Mark Brown <broonie@linaro.org>
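For context, and not part of the patch itself: with the legacy s3c2410_dma_*
API removed, S3C64xx peripheral drivers are expected to go through the generic
dmaengine framework provided by the amba-pl08x driver (CONFIG_S3C64XX_PL080).
The sketch below is only a minimal illustration of that replacement path under
this assumption; the channel name "tx", the device pointer, FIFO address and
buffer are hypothetical placeholders, not values taken from this commit.

#include <linux/device.h>
#include <linux/dmaengine.h>

/*
 * Minimal sketch only: rough shape of a memory-to-peripheral transfer using
 * the generic dmaengine API that replaces the removed s3c2410_dma_enqueue()
 * path.  "tx", fifo_addr, buf and len are placeholders.
 */
static int example_pl08x_tx(struct device *dev, dma_addr_t fifo_addr,
			    dma_addr_t buf, size_t len)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 1,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, "tx");	/* hypothetical name */
	if (!chan)
		return -ENODEV;

	dmaengine_slave_config(chan, &cfg);

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	dmaengine_submit(desc);		/* queue the descriptor */
	dma_async_issue_pending(chan);	/* start the transfer */
	return 0;
}

Completion would normally be signalled through a callback set on the
descriptor or by polling the returned cookie; that part is omitted here.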
Diffstat (limited to 'arch/arm/mach-s3c64xx')
-rw-r--r--  arch/arm/mach-s3c64xx/Kconfig                 5
-rw-r--r--  arch/arm/mach-s3c64xx/Makefile                1
-rw-r--r--  arch/arm/mach-s3c64xx/dma.c                 762
-rw-r--r--  arch/arm/mach-s3c64xx/include/mach/dma.h    123
4 files changed, 0 insertions, 891 deletions
diff --git a/arch/arm/mach-s3c64xx/Kconfig b/arch/arm/mach-s3c64xx/Kconfig
index d8e0288bf2bf..7094bccbae91 100644
--- a/arch/arm/mach-s3c64xx/Kconfig
+++ b/arch/arm/mach-s3c64xx/Kconfig
@@ -22,11 +22,6 @@ config S3C64XX_PL080
 	select AMBA_PL08X
 	select SAMSUNG_DMADEV
 
-config S3C64XX_DMA
-	bool "S3C64XX DMA using legacy S3C DMA API"
-	select S3C_DMA
-	depends on !S3C64XX_PL080
-
 config S3C64XX_SETUP_SDHCI
 	bool
 	select S3C64XX_SETUP_SDHCI_GPIO
diff --git a/arch/arm/mach-s3c64xx/Makefile b/arch/arm/mach-s3c64xx/Makefile
index e8e9a468cbc9..58069a702a43 100644
--- a/arch/arm/mach-s3c64xx/Makefile
+++ b/arch/arm/mach-s3c64xx/Makefile
@@ -26,7 +26,6 @@ obj-$(CONFIG_CPU_IDLE) += cpuidle.o
 
 # DMA support
 
-obj-$(CONFIG_S3C64XX_DMA)	+= dma.o
 obj-$(CONFIG_S3C64XX_PL080)	+= pl080.o
 
 # Device support
diff --git a/arch/arm/mach-s3c64xx/dma.c b/arch/arm/mach-s3c64xx/dma.c
deleted file mode 100644
index 7e22c2113816..000000000000
--- a/arch/arm/mach-s3c64xx/dma.c
+++ /dev/null
@@ -1,762 +0,0 @@
1/* linux/arch/arm/plat-s3c64xx/dma.c
2 *
3 * Copyright 2009 Openmoko, Inc.
4 * Copyright 2009 Simtec Electronics
5 * Ben Dooks <ben@simtec.co.uk>
6 * http://armlinux.simtec.co.uk/
7 *
8 * S3C64XX DMA core
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13*/
14
15/*
16 * NOTE: Code in this file is not used when booting with Device Tree support.
17 */
18
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/interrupt.h>
22#include <linux/dmapool.h>
23#include <linux/device.h>
24#include <linux/errno.h>
25#include <linux/slab.h>
26#include <linux/delay.h>
27#include <linux/clk.h>
28#include <linux/err.h>
29#include <linux/io.h>
30#include <linux/amba/pl080.h>
31#include <linux/of.h>
32
33#include <mach/dma.h>
34#include <mach/map.h>
35#include <mach/irqs.h>
36
37#include "regs-sys.h"
38
39/* dma channel state information */
40
41struct s3c64xx_dmac {
42 struct device dev;
43 struct clk *clk;
44 void __iomem *regs;
45 struct s3c2410_dma_chan *channels;
46 enum dma_ch chanbase;
47};
48
49/* pool to provide LLI buffers */
50static struct dma_pool *dma_pool;
51
52/* Debug configuration and code */
53
54static unsigned char debug_show_buffs = 0;
55
56static void dbg_showchan(struct s3c2410_dma_chan *chan)
57{
58 pr_debug("DMA%d: %08x->%08x L %08x C %08x,%08x S %08x\n",
59 chan->number,
60 readl(chan->regs + PL080_CH_SRC_ADDR),
61 readl(chan->regs + PL080_CH_DST_ADDR),
62 readl(chan->regs + PL080_CH_LLI),
63 readl(chan->regs + PL080_CH_CONTROL),
64 readl(chan->regs + PL080S_CH_CONTROL2),
65 readl(chan->regs + PL080S_CH_CONFIG));
66}
67
68static void show_lli(struct pl080s_lli *lli)
69{
70 pr_debug("LLI[%p] %08x->%08x, NL %08x C %08x,%08x\n",
71 lli, lli->src_addr, lli->dst_addr, lli->next_lli,
72 lli->control0, lli->control1);
73}
74
75static void dbg_showbuffs(struct s3c2410_dma_chan *chan)
76{
77 struct s3c64xx_dma_buff *ptr;
78 struct s3c64xx_dma_buff *end;
79
80 pr_debug("DMA%d: buffs next %p, curr %p, end %p\n",
81 chan->number, chan->next, chan->curr, chan->end);
82
83 ptr = chan->next;
84 end = chan->end;
85
86 if (debug_show_buffs) {
87 for (; ptr != NULL; ptr = ptr->next) {
88 pr_debug("DMA%d: %08x ",
89 chan->number, ptr->lli_dma);
90 show_lli(ptr->lli);
91 }
92 }
93}
94
95/* End of Debug */
96
97static struct s3c2410_dma_chan *s3c64xx_dma_map_channel(unsigned int channel)
98{
99 struct s3c2410_dma_chan *chan;
100 unsigned int start, offs;
101
102 start = 0;
103
104 if (channel >= DMACH_PCM1_TX)
105 start = 8;
106
107 for (offs = 0; offs < 8; offs++) {
108 chan = &s3c2410_chans[start + offs];
109 if (!chan->in_use)
110 goto found;
111 }
112
113 return NULL;
114
115found:
116 s3c_dma_chan_map[channel] = chan;
117 return chan;
118}
119
120int s3c2410_dma_config(enum dma_ch channel, int xferunit)
121{
122 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
123
124 if (chan == NULL)
125 return -EINVAL;
126
127 switch (xferunit) {
128 case 1:
129 chan->hw_width = 0;
130 break;
131 case 2:
132 chan->hw_width = 1;
133 break;
134 case 4:
135 chan->hw_width = 2;
136 break;
137 default:
138 printk(KERN_ERR "%s: illegal width %d\n", __func__, xferunit);
139 return -EINVAL;
140 }
141
142 return 0;
143}
144EXPORT_SYMBOL(s3c2410_dma_config);
145
146static void s3c64xx_dma_fill_lli(struct s3c2410_dma_chan *chan,
147 struct pl080s_lli *lli,
148 dma_addr_t data, int size)
149{
150 dma_addr_t src, dst;
151 u32 control0, control1;
152
153 switch (chan->source) {
154 case DMA_FROM_DEVICE:
155 src = chan->dev_addr;
156 dst = data;
157 control0 = PL080_CONTROL_SRC_AHB2;
158 control0 |= PL080_CONTROL_DST_INCR;
159 break;
160
161 case DMA_TO_DEVICE:
162 src = data;
163 dst = chan->dev_addr;
164 control0 = PL080_CONTROL_DST_AHB2;
165 control0 |= PL080_CONTROL_SRC_INCR;
166 break;
167 default:
168 BUG();
169 }
170
171 /* note, we do not currently setup any of the burst controls */
172
173 control1 = size >> chan->hw_width; /* size in no of xfers */
174 control0 |= PL080_CONTROL_PROT_SYS; /* always in priv. mode */
175 control0 |= PL080_CONTROL_TC_IRQ_EN; /* always fire IRQ */
176 control0 |= (u32)chan->hw_width << PL080_CONTROL_DWIDTH_SHIFT;
177 control0 |= (u32)chan->hw_width << PL080_CONTROL_SWIDTH_SHIFT;
178
179 lli->src_addr = src;
180 lli->dst_addr = dst;
181 lli->next_lli = 0;
182 lli->control0 = control0;
183 lli->control1 = control1;
184}
185
186static void s3c64xx_lli_to_regs(struct s3c2410_dma_chan *chan,
187 struct pl080s_lli *lli)
188{
189 void __iomem *regs = chan->regs;
190
191 pr_debug("%s: LLI %p => regs\n", __func__, lli);
192 show_lli(lli);
193
194 writel(lli->src_addr, regs + PL080_CH_SRC_ADDR);
195 writel(lli->dst_addr, regs + PL080_CH_DST_ADDR);
196 writel(lli->next_lli, regs + PL080_CH_LLI);
197 writel(lli->control0, regs + PL080_CH_CONTROL);
198 writel(lli->control1, regs + PL080S_CH_CONTROL2);
199}
200
201static int s3c64xx_dma_start(struct s3c2410_dma_chan *chan)
202{
203 struct s3c64xx_dmac *dmac = chan->dmac;
204 u32 config;
205 u32 bit = chan->bit;
206
207 dbg_showchan(chan);
208
209 pr_debug("%s: clearing interrupts\n", __func__);
210
211 /* clear interrupts */
212 writel(bit, dmac->regs + PL080_TC_CLEAR);
213 writel(bit, dmac->regs + PL080_ERR_CLEAR);
214
215 pr_debug("%s: starting channel\n", __func__);
216
217 config = readl(chan->regs + PL080S_CH_CONFIG);
218 config |= PL080_CONFIG_ENABLE;
219 config &= ~PL080_CONFIG_HALT;
220
221 pr_debug("%s: writing config %08x\n", __func__, config);
222 writel(config, chan->regs + PL080S_CH_CONFIG);
223
224 return 0;
225}
226
227static int s3c64xx_dma_stop(struct s3c2410_dma_chan *chan)
228{
229 u32 config;
230 int timeout;
231
232 pr_debug("%s: stopping channel\n", __func__);
233
234 dbg_showchan(chan);
235
236 config = readl(chan->regs + PL080S_CH_CONFIG);
237 config |= PL080_CONFIG_HALT;
238 writel(config, chan->regs + PL080S_CH_CONFIG);
239
240 timeout = 1000;
241 do {
242 config = readl(chan->regs + PL080S_CH_CONFIG);
243 pr_debug("%s: %d - config %08x\n", __func__, timeout, config);
244 if (config & PL080_CONFIG_ACTIVE)
245 udelay(10);
246 else
247 break;
248 } while (--timeout > 0);
249
250 if (config & PL080_CONFIG_ACTIVE) {
251 printk(KERN_ERR "%s: channel still active\n", __func__);
252 return -EFAULT;
253 }
254
255 config = readl(chan->regs + PL080S_CH_CONFIG);
256 config &= ~PL080_CONFIG_ENABLE;
257 writel(config, chan->regs + PL080S_CH_CONFIG);
258
259 return 0;
260}
261
262static inline void s3c64xx_dma_bufffdone(struct s3c2410_dma_chan *chan,
263 struct s3c64xx_dma_buff *buf,
264 enum s3c2410_dma_buffresult result)
265{
266 if (chan->callback_fn != NULL)
267 (chan->callback_fn)(chan, buf->pw, 0, result);
268}
269
270static void s3c64xx_dma_freebuff(struct s3c64xx_dma_buff *buff)
271{
272 dma_pool_free(dma_pool, buff->lli, buff->lli_dma);
273 kfree(buff);
274}
275
276static int s3c64xx_dma_flush(struct s3c2410_dma_chan *chan)
277{
278 struct s3c64xx_dma_buff *buff, *next;
279 u32 config;
280
281 dbg_showchan(chan);
282
283 pr_debug("%s: flushing channel\n", __func__);
284
285 config = readl(chan->regs + PL080S_CH_CONFIG);
286 config &= ~PL080_CONFIG_ENABLE;
287 writel(config, chan->regs + PL080S_CH_CONFIG);
288
289 /* dump all the buffers associated with this channel */
290
291 for (buff = chan->curr; buff != NULL; buff = next) {
292 next = buff->next;
293 pr_debug("%s: buff %p (next %p)\n", __func__, buff, buff->next);
294
295 s3c64xx_dma_bufffdone(chan, buff, S3C2410_RES_ABORT);
296 s3c64xx_dma_freebuff(buff);
297 }
298
299 chan->curr = chan->next = chan->end = NULL;
300
301 return 0;
302}
303
304int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op)
305{
306 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
307
308 WARN_ON(!chan);
309 if (!chan)
310 return -EINVAL;
311
312 switch (op) {
313 case S3C2410_DMAOP_START:
314 return s3c64xx_dma_start(chan);
315
316 case S3C2410_DMAOP_STOP:
317 return s3c64xx_dma_stop(chan);
318
319 case S3C2410_DMAOP_FLUSH:
320 return s3c64xx_dma_flush(chan);
321
322 /* believe PAUSE/RESUME are no-ops */
323 case S3C2410_DMAOP_PAUSE:
324 case S3C2410_DMAOP_RESUME:
325 case S3C2410_DMAOP_STARTED:
326 case S3C2410_DMAOP_TIMEOUT:
327 return 0;
328 }
329
330 return -ENOENT;
331}
332EXPORT_SYMBOL(s3c2410_dma_ctrl);
333
334/* s3c2410_dma_enque
335 *
336 */
337
338int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
339 dma_addr_t data, int size)
340{
341 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
342 struct s3c64xx_dma_buff *next;
343 struct s3c64xx_dma_buff *buff;
344 struct pl080s_lli *lli;
345 unsigned long flags;
346 int ret;
347
348 WARN_ON(!chan);
349 if (!chan)
350 return -EINVAL;
351
352 buff = kzalloc(sizeof(struct s3c64xx_dma_buff), GFP_ATOMIC);
353 if (!buff) {
354 printk(KERN_ERR "%s: no memory for buffer\n", __func__);
355 return -ENOMEM;
356 }
357
358 lli = dma_pool_alloc(dma_pool, GFP_ATOMIC, &buff->lli_dma);
359 if (!lli) {
360 printk(KERN_ERR "%s: no memory for lli\n", __func__);
361 ret = -ENOMEM;
362 goto err_buff;
363 }
364
365 pr_debug("%s: buff %p, dp %08x lli (%p, %08x) %d\n",
366 __func__, buff, data, lli, (u32)buff->lli_dma, size);
367
368 buff->lli = lli;
369 buff->pw = id;
370
371 s3c64xx_dma_fill_lli(chan, lli, data, size);
372
373 local_irq_save(flags);
374
375 if ((next = chan->next) != NULL) {
376 struct s3c64xx_dma_buff *end = chan->end;
377 struct pl080s_lli *endlli = end->lli;
378
379 pr_debug("enquing onto channel\n");
380
381 end->next = buff;
382 endlli->next_lli = buff->lli_dma;
383
384 if (chan->flags & S3C2410_DMAF_CIRCULAR) {
385 struct s3c64xx_dma_buff *curr = chan->curr;
386 lli->next_lli = curr->lli_dma;
387 }
388
389 if (next == chan->curr) {
390 writel(buff->lli_dma, chan->regs + PL080_CH_LLI);
391 chan->next = buff;
392 }
393
394 show_lli(endlli);
395 chan->end = buff;
396 } else {
397 pr_debug("enquing onto empty channel\n");
398
399 chan->curr = buff;
400 chan->next = buff;
401 chan->end = buff;
402
403 s3c64xx_lli_to_regs(chan, lli);
404 }
405
406 local_irq_restore(flags);
407
408 show_lli(lli);
409
410 dbg_showchan(chan);
411 dbg_showbuffs(chan);
412 return 0;
413
414err_buff:
415 kfree(buff);
416 return ret;
417}
418
419EXPORT_SYMBOL(s3c2410_dma_enqueue);
420
421
422int s3c2410_dma_devconfig(enum dma_ch channel,
423 enum dma_data_direction source,
424 unsigned long devaddr)
425{
426 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
427 u32 peripheral;
428 u32 config = 0;
429
430 pr_debug("%s: channel %d, source %d, dev %08lx, chan %p\n",
431 __func__, channel, source, devaddr, chan);
432
433 WARN_ON(!chan);
434 if (!chan)
435 return -EINVAL;
436
437 peripheral = (chan->peripheral & 0xf);
438 chan->source = source;
439 chan->dev_addr = devaddr;
440
441 pr_debug("%s: peripheral %d\n", __func__, peripheral);
442
443 switch (source) {
444 case DMA_FROM_DEVICE:
445 config = 2 << PL080_CONFIG_FLOW_CONTROL_SHIFT;
446 config |= peripheral << PL080_CONFIG_SRC_SEL_SHIFT;
447 break;
448 case DMA_TO_DEVICE:
449 config = 1 << PL080_CONFIG_FLOW_CONTROL_SHIFT;
450 config |= peripheral << PL080_CONFIG_DST_SEL_SHIFT;
451 break;
452 default:
453 printk(KERN_ERR "%s: bad source\n", __func__);
454 return -EINVAL;
455 }
456
457 /* allow TC and ERR interrupts */
458 config |= PL080_CONFIG_TC_IRQ_MASK;
459 config |= PL080_CONFIG_ERR_IRQ_MASK;
460
461 pr_debug("%s: config %08x\n", __func__, config);
462
463 writel(config, chan->regs + PL080S_CH_CONFIG);
464
465 return 0;
466}
467EXPORT_SYMBOL(s3c2410_dma_devconfig);
468
469
470int s3c2410_dma_getposition(enum dma_ch channel,
471 dma_addr_t *src, dma_addr_t *dst)
472{
473 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
474
475 WARN_ON(!chan);
476 if (!chan)
477 return -EINVAL;
478
479 if (src != NULL)
480 *src = readl(chan->regs + PL080_CH_SRC_ADDR);
481
482 if (dst != NULL)
483 *dst = readl(chan->regs + PL080_CH_DST_ADDR);
484
485 return 0;
486}
487EXPORT_SYMBOL(s3c2410_dma_getposition);
488
489/* s3c2410_request_dma
490 *
491 * get control of an dma channel
492*/
493
494int s3c2410_dma_request(enum dma_ch channel,
495 struct s3c2410_dma_client *client,
496 void *dev)
497{
498 struct s3c2410_dma_chan *chan;
499 unsigned long flags;
500
501 pr_debug("dma%d: s3c2410_request_dma: client=%s, dev=%p\n",
502 channel, client->name, dev);
503
504 local_irq_save(flags);
505
506 chan = s3c64xx_dma_map_channel(channel);
507 if (chan == NULL) {
508 local_irq_restore(flags);
509 return -EBUSY;
510 }
511
512 dbg_showchan(chan);
513
514 chan->client = client;
515 chan->in_use = 1;
516 chan->peripheral = channel;
517 chan->flags = 0;
518
519 local_irq_restore(flags);
520
521 /* need to setup */
522
523 pr_debug("%s: channel initialised, %p\n", __func__, chan);
524
525 return chan->number | DMACH_LOW_LEVEL;
526}
527
528EXPORT_SYMBOL(s3c2410_dma_request);
529
530/* s3c2410_dma_free
531 *
532 * release the given channel back to the system, will stop and flush
533 * any outstanding transfers, and ensure the channel is ready for the
534 * next claimant.
535 *
536 * Note, although a warning is currently printed if the freeing client
537 * info is not the same as the registrant's client info, the free is still
538 * allowed to go through.
539*/
540
541int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client)
542{
543 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
544 unsigned long flags;
545
546 if (chan == NULL)
547 return -EINVAL;
548
549 local_irq_save(flags);
550
551 if (chan->client != client) {
552 printk(KERN_WARNING "dma%d: possible free from different client (channel %p, passed %p)\n",
553 channel, chan->client, client);
554 }
555
556 /* sort out stopping and freeing the channel */
557
558
559 chan->client = NULL;
560 chan->in_use = 0;
561
562 if (!(channel & DMACH_LOW_LEVEL))
563 s3c_dma_chan_map[channel] = NULL;
564
565 local_irq_restore(flags);
566
567 return 0;
568}
569
570EXPORT_SYMBOL(s3c2410_dma_free);
571
572static irqreturn_t s3c64xx_dma_irq(int irq, void *pw)
573{
574 struct s3c64xx_dmac *dmac = pw;
575 struct s3c2410_dma_chan *chan;
576 enum s3c2410_dma_buffresult res;
577 u32 tcstat, errstat;
578 u32 bit;
579 int offs;
580
581 tcstat = readl(dmac->regs + PL080_TC_STATUS);
582 errstat = readl(dmac->regs + PL080_ERR_STATUS);
583
584 for (offs = 0, bit = 1; offs < 8; offs++, bit <<= 1) {
585 struct s3c64xx_dma_buff *buff;
586
587 if (!(errstat & bit) && !(tcstat & bit))
588 continue;
589
590 chan = dmac->channels + offs;
591 res = S3C2410_RES_ERR;
592
593 if (tcstat & bit) {
594 writel(bit, dmac->regs + PL080_TC_CLEAR);
595 res = S3C2410_RES_OK;
596 }
597
598 if (errstat & bit)
599 writel(bit, dmac->regs + PL080_ERR_CLEAR);
600
601 /* 'next' points to the buffer that is next to the
602 * currently active buffer.
603 * For CIRCULAR queues, 'next' will be same as 'curr'
604 * when 'end' is the active buffer.
605 */
606 buff = chan->curr;
607 while (buff && buff != chan->next
608 && buff->next != chan->next)
609 buff = buff->next;
610
611 if (!buff)
612 BUG();
613
614 if (buff == chan->next)
615 buff = chan->end;
616
617 s3c64xx_dma_bufffdone(chan, buff, res);
618
619 /* Free the node and update curr, if non-circular queue */
620 if (!(chan->flags & S3C2410_DMAF_CIRCULAR)) {
621 chan->curr = buff->next;
622 s3c64xx_dma_freebuff(buff);
623 }
624
625 /* Update 'next' */
626 buff = chan->next;
627 if (chan->next == chan->end) {
628 chan->next = chan->curr;
629 if (!(chan->flags & S3C2410_DMAF_CIRCULAR))
630 chan->end = NULL;
631 } else {
632 chan->next = buff->next;
633 }
634 }
635
636 return IRQ_HANDLED;
637}
638
639static struct bus_type dma_subsys = {
640 .name = "s3c64xx-dma",
641 .dev_name = "s3c64xx-dma",
642};
643
644static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,
645 int irq, unsigned int base)
646{
647 struct s3c2410_dma_chan *chptr = &s3c2410_chans[chno];
648 struct s3c64xx_dmac *dmac;
649 char clkname[16];
650 void __iomem *regs;
651 void __iomem *regptr;
652 int err, ch;
653
654 dmac = kzalloc(sizeof(struct s3c64xx_dmac), GFP_KERNEL);
655 if (!dmac) {
656 printk(KERN_ERR "%s: failed to alloc mem\n", __func__);
657 return -ENOMEM;
658 }
659
660 dmac->dev.id = chno / 8;
661 dmac->dev.bus = &dma_subsys;
662
663 err = device_register(&dmac->dev);
664 if (err) {
665 printk(KERN_ERR "%s: failed to register device\n", __func__);
666 goto err_alloc;
667 }
668
669 regs = ioremap(base, 0x200);
670 if (!regs) {
671 printk(KERN_ERR "%s: failed to ioremap()\n", __func__);
672 err = -ENXIO;
673 goto err_dev;
674 }
675
676 snprintf(clkname, sizeof(clkname), "dma%d", dmac->dev.id);
677
678 dmac->clk = clk_get(NULL, clkname);
679 if (IS_ERR(dmac->clk)) {
680 printk(KERN_ERR "%s: failed to get clock %s\n", __func__, clkname);
681 err = PTR_ERR(dmac->clk);
682 goto err_map;
683 }
684
685 clk_prepare_enable(dmac->clk);
686
687 dmac->regs = regs;
688 dmac->chanbase = chbase;
689 dmac->channels = chptr;
690
691 err = request_irq(irq, s3c64xx_dma_irq, 0, "DMA", dmac);
692 if (err < 0) {
693 printk(KERN_ERR "%s: failed to get irq\n", __func__);
694 goto err_clk;
695 }
696
697 regptr = regs + PL080_Cx_BASE(0);
698
699 for (ch = 0; ch < 8; ch++, chptr++) {
700 pr_debug("%s: registering DMA %d (%p)\n",
701 __func__, chno + ch, regptr);
702
703 chptr->bit = 1 << ch;
704 chptr->number = chno + ch;
705 chptr->dmac = dmac;
706 chptr->regs = regptr;
707 regptr += PL080_Cx_STRIDE;
708 }
709
710 /* for the moment, permanently enable the controller */
711 writel(PL080_CONFIG_ENABLE, regs + PL080_CONFIG);
712
713 printk(KERN_INFO "PL080: IRQ %d, at %p, channels %d..%d\n",
714 irq, regs, chno, chno+8);
715
716 return 0;
717
718err_clk:
719 clk_disable_unprepare(dmac->clk);
720 clk_put(dmac->clk);
721err_map:
722 iounmap(regs);
723err_dev:
724 device_unregister(&dmac->dev);
725err_alloc:
726 kfree(dmac);
727 return err;
728}
729
730static int __init s3c64xx_dma_init(void)
731{
732 int ret;
733
734 /* This driver is not supported when booting with device tree. */
735 if (of_have_populated_dt())
736 return -ENODEV;
737
738 printk(KERN_INFO "%s: Registering DMA channels\n", __func__);
739
740 dma_pool = dma_pool_create("DMA-LLI", NULL, sizeof(struct pl080s_lli), 16, 0);
741 if (!dma_pool) {
742 printk(KERN_ERR "%s: failed to create pool\n", __func__);
743 return -ENOMEM;
744 }
745
746 ret = subsys_system_register(&dma_subsys, NULL);
747 if (ret) {
748 printk(KERN_ERR "%s: failed to create subsys\n", __func__);
749 return -ENOMEM;
750 }
751
752 /* Set all DMA configuration to be DMA, not SDMA */
753 writel(0xffffff, S3C64XX_SDMA_SEL);
754
755 /* Register standard DMA controllers */
756 s3c64xx_dma_init1(0, DMACH_UART0, IRQ_DMA0, 0x75000000);
757 s3c64xx_dma_init1(8, DMACH_PCM1_TX, IRQ_DMA1, 0x75100000);
758
759 return 0;
760}
761
762arch_initcall(s3c64xx_dma_init);
diff --git a/arch/arm/mach-s3c64xx/include/mach/dma.h b/arch/arm/mach-s3c64xx/include/mach/dma.h
index 26a6bc300589..059b1fc85037 100644
--- a/arch/arm/mach-s3c64xx/include/mach/dma.h
+++ b/arch/arm/mach-s3c64xx/include/mach/dma.h
@@ -11,127 +11,6 @@
 #ifndef __ASM_ARCH_DMA_H
 #define __ASM_ARCH_DMA_H __FILE__
 
-#ifdef CONFIG_S3C64XX_DMA
-
-#define S3C_DMA_CHANNELS (16)
-
-/* see mach-s3c2410/dma.h for notes on dma channel numbers */
-
-/* Note, for the S3C64XX architecture we keep the DMACH_
- * defines in the order they are allocated to [S]DMA0/[S]DMA1
- * so that is easy to do DHACH_ -> DMA controller conversion
- */
-enum dma_ch {
-	/* DMA0/SDMA0 */
-	DMACH_UART0 = 0,
-	DMACH_UART0_SRC2,
-	DMACH_UART1,
-	DMACH_UART1_SRC2,
-	DMACH_UART2,
-	DMACH_UART2_SRC2,
-	DMACH_UART3,
-	DMACH_UART3_SRC2,
-	DMACH_PCM0_TX,
-	DMACH_PCM0_RX,
-	DMACH_I2S0_OUT,
-	DMACH_I2S0_IN,
-	DMACH_SPI0_TX,
-	DMACH_SPI0_RX,
-	DMACH_HSI_I2SV40_TX,
-	DMACH_HSI_I2SV40_RX,
-
-	/* DMA1/SDMA1 */
-	DMACH_PCM1_TX = 16,
-	DMACH_PCM1_RX,
-	DMACH_I2S1_OUT,
-	DMACH_I2S1_IN,
-	DMACH_SPI1_TX,
-	DMACH_SPI1_RX,
-	DMACH_AC97_PCMOUT,
-	DMACH_AC97_PCMIN,
-	DMACH_AC97_MICIN,
-	DMACH_PWM,
-	DMACH_IRDA,
-	DMACH_EXTERNAL,
-	DMACH_RES1,
-	DMACH_RES2,
-	DMACH_SECURITY_RX, /* SDMA1 only */
-	DMACH_SECURITY_TX, /* SDMA1 only */
-	DMACH_MAX /* the end */
-};
-
-static inline bool samsung_dma_has_circular(void)
-{
-	return true;
-}
-
-static inline bool samsung_dma_is_dmadev(void)
-{
-	return false;
-}
-#define S3C2410_DMAF_CIRCULAR (1 << 0)
-
-#include <plat/dma.h>
-
-#define DMACH_LOW_LEVEL (1<<28) /* use this to specifiy hardware ch no */
-
-struct s3c64xx_dma_buff;
-
-/** s3c64xx_dma_buff - S3C64XX DMA buffer descriptor
- * @next: Pointer to next buffer in queue or ring.
- * @pw: Client provided identifier
- * @lli: Pointer to hardware descriptor this buffer is associated with.
- * @lli_dma: Hardare address of the descriptor.
- */
-struct s3c64xx_dma_buff {
-	struct s3c64xx_dma_buff *next;
-
-	void *pw;
-	struct pl080s_lli *lli;
-	dma_addr_t lli_dma;
-};
-
-struct s3c64xx_dmac;
-
-struct s3c2410_dma_chan {
-	unsigned char number; /* number of this dma channel */
-	unsigned char in_use; /* channel allocated */
-	unsigned char bit; /* bit for enable/disable/etc */
-	unsigned char hw_width;
-	unsigned char peripheral;
-
-	unsigned int flags;
-	enum dma_data_direction source;
-
-
-	dma_addr_t dev_addr;
-
-	struct s3c2410_dma_client *client;
-	struct s3c64xx_dmac *dmac; /* pointer to controller */
-
-	void __iomem *regs;
-
-	/* cdriver callbacks */
-	s3c2410_dma_cbfn_t callback_fn; /* buffer done callback */
-	s3c2410_dma_opfn_t op_fn; /* channel op callback */
-
-	/* buffer list and information */
-	struct s3c64xx_dma_buff *curr; /* current dma buffer */
-	struct s3c64xx_dma_buff *next; /* next buffer to load */
-	struct s3c64xx_dma_buff *end; /* end of queue */
-
-	/* note, when channel is running in circular mode, curr is the
-	 * first buffer enqueued, end is the last and curr is where the
-	 * last buffer-done event is set-at. The buffers are not freed
-	 * and the last buffer hardware descriptor points back to the
-	 * first.
-	 */
-};
-
-#include <plat/dma-core.h>
-
-#else
-
 #define S3C64XX_DMA_CHAN(name) ((unsigned long)(name))
 
 /* DMA0/SDMA0 */
@@ -189,6 +68,4 @@ static inline bool samsung_dma_is_dmadev(void)
 #include <linux/amba/pl08x.h>
 #include <plat/dma-ops.h>
 
-#endif
-
 #endif /* __ASM_ARCH_IRQ_H */