author	Sascha Hauer <s.hauer@pengutronix.de>	2008-09-09 04:19:44 -0400
committer	Sascha Hauer <s.hauer@pengutronix.de>	2008-09-09 06:13:56 -0400
commit	58a85f465fb16a918a869eba05addb8f78d9e064 (patch)
tree	00b9bb44f4c13502ebe9387ada8c672946fb15ae
parent	b72f92ff551d9151c146c8d0f1c5c743611df982 (diff)
MX2: Add DMA support for mx2 and (eventually) mx1
This patch adds DMA support for Freescale i.MX27 SoCs. It is derived from the i.MX1 port and should (though currently untested) still work on the i.MX1.

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
-rw-r--r--	arch/arm/plat-mxc/Makefile	2
-rw-r--r--	arch/arm/plat-mxc/dma-mx1-mx2.c	840
-rw-r--r--	arch/arm/plat-mxc/include/mach/dma-mx1-mx2.h	89
3 files changed, 930 insertions(+), 1 deletion(-)
diff --git a/arch/arm/plat-mxc/Makefile b/arch/arm/plat-mxc/Makefile
index 8c0f06e8fd3e..067556f7c91f 100644
--- a/arch/arm/plat-mxc/Makefile
+++ b/arch/arm/plat-mxc/Makefile
@@ -5,4 +5,4 @@
 # Common support
 obj-y := irq.o clock.o gpio.o time.o devices.o
 
-obj-$(CONFIG_ARCH_MX2) += iomux-mx1-mx2.o
+obj-$(CONFIG_ARCH_MX2) += iomux-mx1-mx2.o dma-mx1-mx2.o
diff --git a/arch/arm/plat-mxc/dma-mx1-mx2.c b/arch/arm/plat-mxc/dma-mx1-mx2.c
new file mode 100644
index 000000000000..b296f19fd89a
--- /dev/null
+++ b/arch/arm/plat-mxc/dma-mx1-mx2.c
@@ -0,0 +1,840 @@
1/*
2 * linux/arch/arm/plat-mxc/dma-mx1-mx2.c
3 *
4 * i.MX DMA registration and IRQ dispatching
5 *
6 * Copyright 2006 Pavel Pisa <pisa@cmp.felk.cvut.cz>
7 * Copyright 2008 Juergen Beisert, <kernel@pengutronix.de>
8 * Copyright 2008 Sascha Hauer, <s.hauer@pengutronix.de>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 2
13 * of the License, or (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
22 * MA 02110-1301, USA.
23 */
24
25#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/kernel.h>
28#include <linux/interrupt.h>
29#include <linux/errno.h>
30#include <linux/clk.h>
31#include <linux/scatterlist.h>
32#include <linux/io.h>
33
34#include <asm/system.h>
35#include <asm/irq.h>
36#include <mach/hardware.h>
37#include <asm/dma.h>
38#include <mach/dma-mx1-mx2.h>
39
40#define DMA_DCR 0x00 /* Control Register */
41#define DMA_DISR 0x04 /* Interrupt status Register */
42#define DMA_DIMR 0x08 /* Interrupt mask Register */
43#define DMA_DBTOSR 0x0c /* Burst timeout status Register */
44#define DMA_DRTOSR 0x10 /* Request timeout Register */
45#define DMA_DSESR 0x14 /* Transfer Error Status Register */
46#define DMA_DBOSR 0x18 /* Buffer overflow status Register */
47#define DMA_DBTOCR 0x1c /* Burst timeout control Register */
48#define DMA_WSRA 0x40 /* W-Size Register A */
49#define DMA_XSRA 0x44 /* X-Size Register A */
50#define DMA_YSRA 0x48 /* Y-Size Register A */
51#define DMA_WSRB 0x4c /* W-Size Register B */
52#define DMA_XSRB 0x50 /* X-Size Register B */
53#define DMA_YSRB 0x54 /* Y-Size Register B */
54#define DMA_SAR(x) (0x80 + ((x) << 6)) /* Source Address Registers */
55#define DMA_DAR(x) (0x84 + ((x) << 6)) /* Destination Address Registers */
56#define DMA_CNTR(x) (0x88 + ((x) << 6)) /* Count Registers */
57#define DMA_CCR(x) (0x8c + ((x) << 6)) /* Control Registers */
58#define DMA_RSSR(x) (0x90 + ((x) << 6)) /* Request source select Registers */
59#define DMA_BLR(x) (0x94 + ((x) << 6)) /* Burst length Registers */
60#define DMA_RTOR(x) (0x98 + ((x) << 6)) /* Request timeout Registers */
61#define DMA_BUCR(x) (0x98 + ((x) << 6)) /* Bus Utilization Registers */
62#define DMA_CCNR(x) (0x9C + ((x) << 6)) /* Channel counter Registers */
63
64#define DCR_DRST (1<<1)
65#define DCR_DEN (1<<0)
66#define DBTOCR_EN (1<<15)
67#define DBTOCR_CNT(x) ((x) & 0x7fff)
68#define CNTR_CNT(x) ((x) & 0xffffff)
69#define CCR_ACRPT (1<<14)
70#define CCR_DMOD_LINEAR (0x0 << 12)
71#define CCR_DMOD_2D (0x1 << 12)
72#define CCR_DMOD_FIFO (0x2 << 12)
73#define CCR_DMOD_EOBFIFO (0x3 << 12)
74#define CCR_SMOD_LINEAR (0x0 << 10)
75#define CCR_SMOD_2D (0x1 << 10)
76#define CCR_SMOD_FIFO (0x2 << 10)
77#define CCR_SMOD_EOBFIFO (0x3 << 10)
78#define CCR_MDIR_DEC (1<<9)
79#define CCR_MSEL_B (1<<8)
80#define CCR_DSIZ_32 (0x0 << 6)
81#define CCR_DSIZ_8 (0x1 << 6)
82#define CCR_DSIZ_16 (0x2 << 6)
83#define CCR_SSIZ_32 (0x0 << 4)
84#define CCR_SSIZ_8 (0x1 << 4)
85#define CCR_SSIZ_16 (0x2 << 4)
86#define CCR_REN (1<<3)
87#define CCR_RPT (1<<2)
88#define CCR_FRC (1<<1)
89#define CCR_CEN (1<<0)
90#define RTOR_EN (1<<15)
91#define RTOR_CLK (1<<14)
92#define RTOR_PSC (1<<13)
93
94/*
95 * struct imx_dma_channel - i.MX specific DMA extension
96 * @name: name specified by DMA client
97 * @irq_handler: client callback for end of transfer
98 * @err_handler: client callback for error condition
99 * @data: clients context data for callbacks
100 * @dma_mode: direction of the transfer %DMA_MODE_READ or %DMA_MODE_WRITE
101 * @sg: pointer to the actual read/written chunk for scatter-gather emulation
102 * @resbytes: total residual number of bytes to transfer
103 * (it can be lower than or equal to the sum of the SG mapped chunk sizes)
104 * @sgcount: number of chunks to be read/written
105 *
106 * Structure is used for IMX DMA processing. It would probably be good
107 * to use @struct dma_struct in the future for external interfacing and
108 * to use @struct imx_dma_channel only as an extension to it.
109 */
110
111struct imx_dma_channel {
112 const char *name;
113 void (*irq_handler) (int, void *);
114 void (*err_handler) (int, void *, int errcode);
115 void (*prog_handler) (int, void *, struct scatterlist *);
116 void *data;
117 dmamode_t dma_mode;
118 struct scatterlist *sg;
119 unsigned int resbytes;
120 int dma_num;
121
122 int in_use;
123
124 u32 ccr_from_device;
125 u32 ccr_to_device;
126
127 struct timer_list watchdog;
128
129 int hw_chaining;
130};
131
132static struct imx_dma_channel imx_dma_channels[IMX_DMA_CHANNELS];
133
134static struct clk *dma_clk;
135
136static int imx_dma_hw_chain(struct imx_dma_channel *imxdma)
137{
138 if (cpu_is_mx27())
139 return imxdma->hw_chaining;
140 else
141 return 0;
142}
143
144
145/*
146 * imx_dma_sg_next - prepare next chunk for scatter-gather DMA emulation
147 */
148static inline int imx_dma_sg_next(int channel, struct scatterlist *sg)
149{
150 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
151 unsigned long now;
152
153 if (!imxdma->name) {
154 printk(KERN_CRIT "%s: called for not allocated channel %d\n",
155 __func__, channel);
156 return 0;
157 }
158
159 now = min(imxdma->resbytes, sg->length);
160 imxdma->resbytes -= now;
161
162 if ((imxdma->dma_mode & DMA_MODE_MASK) == DMA_MODE_READ)
163 __raw_writel(sg->dma_address, DMA_BASE + DMA_DAR(channel));
164 else
165 __raw_writel(sg->dma_address, DMA_BASE + DMA_SAR(channel));
166
167 __raw_writel(now, DMA_BASE + DMA_CNTR(channel));
168
169 pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, "
170 "size 0x%08x\n", channel,
171 __raw_readl(DMA_BASE + DMA_DAR(channel)),
172 __raw_readl(DMA_BASE + DMA_SAR(channel)),
173 __raw_readl(DMA_BASE + DMA_CNTR(channel)));
174
175 return now;
176}
177
178/**
179 * imx_dma_setup_single - setup i.MX DMA channel for linear memory to/from
180 * device transfer
181 *
182 * @channel: i.MX DMA channel number
183 * @dma_address: the DMA/physical memory address of the linear data block
184 * to transfer
185 * @dma_length: length of the data block in bytes
186 * @dev_addr: physical device port address
187 * @dmamode: DMA transfer mode, %DMA_MODE_READ from the device to the memory
188 * or %DMA_MODE_WRITE from memory to the device
189 *
190 * Return value: if incorrect parameters are provided -%EINVAL.
191 * Zero indicates success.
192 */
193int
194imx_dma_setup_single(int channel, dma_addr_t dma_address,
195 unsigned int dma_length, unsigned int dev_addr,
196 dmamode_t dmamode)
197{
198 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
199
200 imxdma->sg = NULL;
201 imxdma->dma_mode = dmamode;
202
203 if (!dma_address) {
204 printk(KERN_ERR "imxdma%d: imx_dma_setup_single null address\n",
205 channel);
206 return -EINVAL;
207 }
208
209 if (!dma_length) {
210 printk(KERN_ERR "imxdma%d: imx_dma_setup_single zero length\n",
211 channel);
212 return -EINVAL;
213 }
214
215 if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
216 pr_debug("imxdma%d: %s dma_addressg=0x%08x dma_length=%d "
217 "dev_addr=0x%08x for read\n",
218 channel, __func__, (unsigned int)dma_address,
219 dma_length, dev_addr);
220
221 __raw_writel(dev_addr, DMA_BASE + DMA_SAR(channel));
222 __raw_writel(dma_address, DMA_BASE + DMA_DAR(channel));
223 __raw_writel(imxdma->ccr_from_device,
224 DMA_BASE + DMA_CCR(channel));
225 } else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
226 pr_debug("imxdma%d: %s dma_addressg=0x%08x dma_length=%d "
227 "dev_addr=0x%08x for write\n",
228 channel, __func__, (unsigned int)dma_address,
229 dma_length, dev_addr);
230
231 __raw_writel(dma_address, DMA_BASE + DMA_SAR(channel));
232 __raw_writel(dev_addr, DMA_BASE + DMA_DAR(channel));
233 __raw_writel(imxdma->ccr_to_device,
234 DMA_BASE + DMA_CCR(channel));
235 } else {
236 printk(KERN_ERR "imxdma%d: imx_dma_setup_single bad dmamode\n",
237 channel);
238 return -EINVAL;
239 }
240
241 __raw_writel(dma_length, DMA_BASE + DMA_CNTR(channel));
242
243 return 0;
244}
245EXPORT_SYMBOL(imx_dma_setup_single);
246
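/*
 * Example (editor's sketch, not part of the original patch): a minimal
 * single-buffer transfer to a peripheral FIFO. "chan", "buf_phys", "len"
 * and "MY_FIFO_ADDR" are hypothetical values owned by the client driver;
 * the channel is assumed to have been requested and configured beforehand.
 *
 *	ret = imx_dma_setup_single(chan, buf_phys, len,
 *				   MY_FIFO_ADDR, DMA_MODE_WRITE);
 *	if (!ret)
 *		imx_dma_enable(chan);
 */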
247/**
248 * imx_dma_setup_sg - setup i.MX DMA channel SG list to/from device transfer
249 * @channel: i.MX DMA channel number
250 * @sg: pointer to the scatter-gather list/vector
251 * @sgcount: number of entries in the scatter-gather list
252 * @dma_length: total length of the transfer request in bytes
253 * @dev_addr: physical device port address
254 * @dmamode: DMA transfer mode, %DMA_MODE_READ from the device to the memory
255 * or %DMA_MODE_WRITE from memory to the device
256 *
257 * The function sets up DMA channel state and registers to be ready for
258 * transfer specified by provided parameters. The scatter-gather emulation
259 * is set up according to the parameters.
260 *
261 * The full preparation of the transfer requires setup of more registers
262 * by the caller before imx_dma_enable() can be called.
263 *
264 * %BLR(channel) holds transfer burst length in bytes, 0 means 64 bytes
265 *
266 * %RSSR(channel) has to be set to the DMA request line source %DMA_REQ_xxx
267 *
268 * %CCR(channel) has to specify transfer parameters; the following setting
269 * is typical for linear or simple scatter-gather transfers if %DMA_MODE_READ
270 * is specified
271 *
272 * %CCR_DMOD_LINEAR | %CCR_DSIZ_32 | %CCR_SMOD_FIFO | %CCR_SSIZ_x
273 *
274 * The typical setup for %DMA_MODE_WRITE is specified by the following
275 * combination of options
276 *
277 * %CCR_SMOD_LINEAR | %CCR_SSIZ_32 | %CCR_DMOD_FIFO | %CCR_DSIZ_x
278 *
279 * Be careful here and do not mistakenly mix source and target device
280 * port size constants; they are really different:
281 * %CCR_SSIZ_8, %CCR_SSIZ_16, %CCR_SSIZ_32,
282 * %CCR_DSIZ_8, %CCR_DSIZ_16, %CCR_DSIZ_32
283 *
284 * Return value: if incorrect parameters are provided -%EINVAL.
285 * Zero indicates success.
286 */
287int
288imx_dma_setup_sg(int channel,
289 struct scatterlist *sg, unsigned int sgcount,
290 unsigned int dma_length, unsigned int dev_addr,
291 dmamode_t dmamode)
292{
293 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
294
295 if (imxdma->in_use)
296 return -EBUSY;
297
298 imxdma->sg = sg;
299 imxdma->dma_mode = dmamode;
300 imxdma->resbytes = dma_length;
301
302 if (!sg || !sgcount) {
303 printk(KERN_ERR "imxdma%d: imx_dma_setup_sg epty sg list\n",
304 channel);
305 return -EINVAL;
306 }
307
308 if (!sg->length) {
309 printk(KERN_ERR "imxdma%d: imx_dma_setup_sg zero length\n",
310 channel);
311 return -EINVAL;
312 }
313
314 if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
315 pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
316 "dev_addr=0x%08x for read\n",
317 channel, __func__, sg, sgcount, dma_length, dev_addr);
318
319 __raw_writel(dev_addr, DMA_BASE + DMA_SAR(channel));
320 __raw_writel(imxdma->ccr_from_device,
321 DMA_BASE + DMA_CCR(channel));
322 } else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
323 pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
324 "dev_addr=0x%08x for write\n",
325 channel, __func__, sg, sgcount, dma_length, dev_addr);
326
327 __raw_writel(dev_addr, DMA_BASE + DMA_DAR(channel));
328 __raw_writel(imxdma->ccr_to_device,
329 DMA_BASE + DMA_CCR(channel));
330 } else {
331 printk(KERN_ERR "imxdma%d: imx_dma_setup_sg bad dmamode\n",
332 channel);
333 return -EINVAL;
334 }
335
336 imx_dma_sg_next(channel, sg);
337
338 return 0;
339}
340EXPORT_SYMBOL(imx_dma_setup_sg);
341
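/*
 * Example (editor's illustration under assumed names): preparing a
 * device-to-memory scatter-gather transfer as described above. The request
 * line "DMA_REQ_MYDEV", the FIFO address "MY_FIFO_ADDR" and the mapped
 * scatterlist "sgl"/"sg_ents"/"total_bytes" are placeholders.
 *
 *	imx_dma_config_channel(chan,
 *			IMX_DMA_TYPE_FIFO | IMX_DMA_MEMSIZE_16,
 *			IMX_DMA_TYPE_LINEAR | IMX_DMA_MEMSIZE_32,
 *			DMA_REQ_MYDEV, 0);
 *	imx_dma_config_burstlen(chan, 16);
 *	imx_dma_setup_sg(chan, sgl, sg_ents, total_bytes,
 *			 MY_FIFO_ADDR, DMA_MODE_READ);
 *	imx_dma_enable(chan);
 */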
342int
343imx_dma_config_channel(int channel, unsigned int config_port,
344 unsigned int config_mem, unsigned int dmareq, int hw_chaining)
345{
346 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
347 u32 dreq = 0;
348
349 imxdma->hw_chaining = 0;
350
351 if (hw_chaining) {
352 imxdma->hw_chaining = 1;
353 if (!imx_dma_hw_chain(imxdma))
354 return -EINVAL;
355 }
356
357 if (dmareq)
358 dreq = CCR_REN;
359
360 imxdma->ccr_from_device = config_port | (config_mem << 2) | dreq;
361 imxdma->ccr_to_device = config_mem | (config_port << 2) | dreq;
362
363 __raw_writel(dmareq, DMA_BASE + DMA_RSSR(channel));
364
365 return 0;
366}
367EXPORT_SYMBOL(imx_dma_config_channel);
368
369void imx_dma_config_burstlen(int channel, unsigned int burstlen)
370{
371 __raw_writel(burstlen, DMA_BASE + DMA_BLR(channel));
372}
373EXPORT_SYMBOL(imx_dma_config_burstlen);
374
375/**
376 * imx_dma_setup_handlers - setup i.MX DMA channel end and error notification
377 * handlers
378 * @channel: i.MX DMA channel number
379 * @irq_handler: the pointer to the function called if the transfer
380 * ends successfully
381 * @err_handler: the pointer to the function called if the transfer
382 * ends prematurely because of an error
383 * @data: user specified value to be passed to the handlers
384 */
385int
386imx_dma_setup_handlers(int channel,
387 void (*irq_handler) (int, void *),
388 void (*err_handler) (int, void *, int),
389 void *data)
390{
391 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
392 unsigned long flags;
393
394 if (!imxdma->name) {
395 printk(KERN_CRIT "%s: called for not allocated channel %d\n",
396 __func__, channel);
397 return -ENODEV;
398 }
399
400 local_irq_save(flags);
401 __raw_writel(1 << channel, DMA_BASE + DMA_DISR);
402 imxdma->irq_handler = irq_handler;
403 imxdma->err_handler = err_handler;
404 imxdma->data = data;
405 local_irq_restore(flags);
406 return 0;
407}
408EXPORT_SYMBOL(imx_dma_setup_handlers);
409
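/*
 * Example (hypothetical client code): registering completion and error
 * callbacks before the channel is enabled. "my_dma_done", "my_dma_err"
 * and "host" are placeholder names in the client driver.
 *
 *	static void my_dma_done(int channel, void *data) { ... }
 *	static void my_dma_err(int channel, void *data, int error) { ... }
 *
 *	imx_dma_setup_handlers(chan, my_dma_done, my_dma_err, host);
 */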
410/**
411 * imx_dma_setup_progression_handler - setup i.MX DMA channel progression
412 * handlers
413 * @channel: i.MX DMA channel number
414 * @prog_handler: the pointer to the function called as the transfer progresses
415 */
416int
417imx_dma_setup_progression_handler(int channel,
418 void (*prog_handler) (int, void*, struct scatterlist*))
419{
420 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
421 unsigned long flags;
422
423 if (!imxdma->name) {
424 printk(KERN_CRIT "%s: called for not allocated channel %d\n",
425 __func__, channel);
426 return -ENODEV;
427 }
428
429 local_irq_save(flags);
430 imxdma->prog_handler = prog_handler;
431 local_irq_restore(flags);
432 return 0;
433}
434EXPORT_SYMBOL(imx_dma_setup_progression_handler);
435
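/*
 * Example (hypothetical): a client that wants to refill or track its
 * buffers as the transfer advances can additionally register a
 * progression handler; "my_dma_progress" is a placeholder.
 *
 *	static void my_dma_progress(int channel, void *data,
 *				    struct scatterlist *sg) { ... }
 *
 *	imx_dma_setup_progression_handler(chan, my_dma_progress);
 */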
436/**
437 * imx_dma_enable - function to start i.MX DMA channel operation
438 * @channel: i.MX DMA channel number
439 *
440 * The channel has to be allocated by the driver through imx_dma_request()
441 * or imx_dma_request_by_prio().
442 * The transfer parameters have to be set in the channel registers through
443 * a call to imx_dma_setup_single() or imx_dma_setup_sg(), and the registers
444 * %BLR(channel), %RSSR(channel) and %CCR(channel) have to be set by the
445 * channel user prior to this function call.
446 */
447void imx_dma_enable(int channel)
448{
449 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
450 unsigned long flags;
451
452 pr_debug("imxdma%d: imx_dma_enable\n", channel);
453
454 if (!imxdma->name) {
455 printk(KERN_CRIT "%s: called for not allocated channel %d\n",
456 __func__, channel);
457 return;
458 }
459
460 if (imxdma->in_use)
461 return;
462
463 local_irq_save(flags);
464
465 __raw_writel(1 << channel, DMA_BASE + DMA_DISR);
466 __raw_writel(__raw_readl(DMA_BASE + DMA_DIMR) & ~(1 << channel),
467 DMA_BASE + DMA_DIMR);
468 __raw_writel(__raw_readl(DMA_BASE + DMA_CCR(channel)) | CCR_CEN |
469 CCR_ACRPT,
470 DMA_BASE + DMA_CCR(channel));
471
472#ifdef CONFIG_ARCH_MX2
473 if (imxdma->sg && imx_dma_hw_chain(imxdma)) {
474 imxdma->sg = sg_next(imxdma->sg);
475 if (imxdma->sg) {
476 u32 tmp;
477 imx_dma_sg_next(channel, imxdma->sg);
478 tmp = __raw_readl(DMA_BASE + DMA_CCR(channel));
479 __raw_writel(tmp | CCR_RPT | CCR_ACRPT,
480 DMA_BASE + DMA_CCR(channel));
481 }
482 }
483#endif
484 imxdma->in_use = 1;
485
486 local_irq_restore(flags);
487}
488EXPORT_SYMBOL(imx_dma_enable);
489
490/**
491 * imx_dma_disable - stop, finish i.MX DMA channel operation
492 * @channel: i.MX DMA channel number
493 */
494void imx_dma_disable(int channel)
495{
496 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
497 unsigned long flags;
498
499 pr_debug("imxdma%d: imx_dma_disable\n", channel);
500
501 if (imx_dma_hw_chain(imxdma))
502 del_timer(&imxdma->watchdog);
503
504 local_irq_save(flags);
505 __raw_writel(__raw_readl(DMA_BASE + DMA_DIMR) | (1 << channel),
506 DMA_BASE + DMA_DIMR);
507 __raw_writel(__raw_readl(DMA_BASE + DMA_CCR(channel)) & ~CCR_CEN,
508 DMA_BASE + DMA_CCR(channel));
509 __raw_writel(1 << channel, DMA_BASE + DMA_DISR);
510 imxdma->in_use = 0;
511 local_irq_restore(flags);
512}
513EXPORT_SYMBOL(imx_dma_disable);
514
515static void imx_dma_watchdog(unsigned long chno)
516{
517 struct imx_dma_channel *imxdma = &imx_dma_channels[chno];
518
519 __raw_writel(0, DMA_BASE + DMA_CCR(chno));
520 imxdma->in_use = 0;
521 imxdma->sg = NULL;
522
523 if (imxdma->err_handler)
524 imxdma->err_handler(chno, imxdma->data, IMX_DMA_ERR_TIMEOUT);
525}
526
527static irqreturn_t dma_err_handler(int irq, void *dev_id)
528{
529 int i, disr;
530 struct imx_dma_channel *imxdma;
531 unsigned int err_mask;
532 int errcode;
533
534 disr = __raw_readl(DMA_BASE + DMA_DISR);
535
536 err_mask = __raw_readl(DMA_BASE + DMA_DBTOSR) |
537 __raw_readl(DMA_BASE + DMA_DRTOSR) |
538 __raw_readl(DMA_BASE + DMA_DSESR) |
539 __raw_readl(DMA_BASE + DMA_DBOSR);
540
541 if (!err_mask)
542 return IRQ_HANDLED;
543
544 __raw_writel(disr & err_mask, DMA_BASE + DMA_DISR);
545
546 for (i = 0; i < IMX_DMA_CHANNELS; i++) {
547 if (!(err_mask & (1 << i)))
548 continue;
549 imxdma = &imx_dma_channels[i];
550 errcode = 0;
551
552 if (__raw_readl(DMA_BASE + DMA_DBTOSR) & (1 << i)) {
553 __raw_writel(1 << i, DMA_BASE + DMA_DBTOSR);
554 errcode |= IMX_DMA_ERR_BURST;
555 }
556 if (__raw_readl(DMA_BASE + DMA_DRTOSR) & (1 << i)) {
557 __raw_writel(1 << i, DMA_BASE + DMA_DRTOSR);
558 errcode |= IMX_DMA_ERR_REQUEST;
559 }
560 if (__raw_readl(DMA_BASE + DMA_DSESR) & (1 << i)) {
561 __raw_writel(1 << i, DMA_BASE + DMA_DSESR);
562 errcode |= IMX_DMA_ERR_TRANSFER;
563 }
564 if (__raw_readl(DMA_BASE + DMA_DBOSR) & (1 << i)) {
565 __raw_writel(1 << i, DMA_BASE + DMA_DBOSR);
566 errcode |= IMX_DMA_ERR_BUFFER;
567 }
568 if (imxdma->name && imxdma->err_handler) {
569 imxdma->err_handler(i, imxdma->data, errcode);
570 continue;
571 }
572
573 imx_dma_channels[i].sg = NULL;
574
575 printk(KERN_WARNING
576 "DMA timeout on channel %d (%s) -%s%s%s%s\n",
577 i, imxdma->name,
578 errcode & IMX_DMA_ERR_BURST ? " burst" : "",
579 errcode & IMX_DMA_ERR_REQUEST ? " request" : "",
580 errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
581 errcode & IMX_DMA_ERR_BUFFER ? " buffer" : "");
582 }
583 return IRQ_HANDLED;
584}
585
586static void dma_irq_handle_channel(int chno)
587{
588 struct imx_dma_channel *imxdma = &imx_dma_channels[chno];
589
590 if (!imxdma->name) {
591 /*
592 * IRQ for an unregistered DMA channel:
593 * let's clear the interrupts and disable it.
594 */
595 printk(KERN_WARNING
596 "spurious IRQ for DMA channel %d\n", chno);
597 return;
598 }
599
600 if (imxdma->sg) {
601 u32 tmp;
602 struct scatterlist *current_sg = imxdma->sg;
603 imxdma->sg = sg_next(imxdma->sg);
604
605 if (imxdma->sg) {
606 imx_dma_sg_next(chno, imxdma->sg);
607
608 tmp = __raw_readl(DMA_BASE + DMA_CCR(chno));
609
610 if (imx_dma_hw_chain(imxdma)) {
611 /* FIXME: The timeout should probably be
612 * configurable
613 */
614 mod_timer(&imxdma->watchdog,
615 jiffies + msecs_to_jiffies(500));
616
617 tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
618 __raw_writel(tmp, DMA_BASE +
619 DMA_CCR(chno));
620 } else {
621 __raw_writel(tmp & ~CCR_CEN, DMA_BASE +
622 DMA_CCR(chno));
623 tmp |= CCR_CEN;
624 }
625
626 __raw_writel(tmp, DMA_BASE + DMA_CCR(chno));
627
628 if (imxdma->prog_handler)
629 imxdma->prog_handler(chno, imxdma->data,
630 current_sg);
631
632 return;
633 }
634
635 if (imx_dma_hw_chain(imxdma)) {
636 del_timer(&imxdma->watchdog);
637 return;
638 }
639 }
640
641 __raw_writel(0, DMA_BASE + DMA_CCR(chno));
642 imxdma->in_use = 0;
643 if (imxdma->irq_handler)
644 imxdma->irq_handler(chno, imxdma->data);
645}
646
647static irqreturn_t dma_irq_handler(int irq, void *dev_id)
648{
649 int i, disr;
650
651#ifdef CONFIG_ARCH_MX2
652 dma_err_handler(irq, dev_id);
653#endif
654
655 disr = __raw_readl(DMA_BASE + DMA_DISR);
656
657 pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n",
658 disr);
659
660 __raw_writel(disr, DMA_BASE + DMA_DISR);
661 for (i = 0; i < IMX_DMA_CHANNELS; i++) {
662 if (disr & (1 << i))
663 dma_irq_handle_channel(i);
664 }
665
666 return IRQ_HANDLED;
667}
668
669/**
670 * imx_dma_request - request/allocate specified channel number
671 * @channel: i.MX DMA channel number
672 * @name: the driver/caller own non-%NULL identification
673 */
674int imx_dma_request(int channel, const char *name)
675{
676 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
677 unsigned long flags;
678 int ret;
679
680 /* basic sanity checks */
681 if (!name)
682 return -EINVAL;
683
684 if (channel >= IMX_DMA_CHANNELS) {
685 printk(KERN_CRIT "%s: called for non-existed channel %d\n",
686 __func__, channel);
687 return -EINVAL;
688 }
689
690 local_irq_save(flags);
691 if (imxdma->name) {
692 local_irq_restore(flags);
693 return -EBUSY;
694 }
695
696#ifdef CONFIG_ARCH_MX2
697 ret = request_irq(MXC_INT_DMACH0 + channel, dma_irq_handler, 0, "DMA",
698 NULL);
699 if (ret) {
700 printk(KERN_CRIT "Can't register IRQ %d for DMA channel %d\n",
701 MXC_INT_DMACH0 + channel, channel);
702 return ret;
703 }
704 init_timer(&imxdma->watchdog);
705 imxdma->watchdog.function = &imx_dma_watchdog;
706 imxdma->watchdog.data = channel;
707#endif
708
709 imxdma->name = name;
710 imxdma->irq_handler = NULL;
711 imxdma->err_handler = NULL;
712 imxdma->data = NULL;
713 imxdma->sg = NULL;
714
715 local_irq_restore(flags);
716 return 0;
717}
718EXPORT_SYMBOL(imx_dma_request);
719
720/**
721 * imx_dma_free - release previously acquired channel
722 * @channel: i.MX DMA channel number
723 */
724void imx_dma_free(int channel)
725{
726 unsigned long flags;
727 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
728
729 if (!imxdma->name) {
730 printk(KERN_CRIT
731 "%s: trying to free free channel %d\n",
732 __func__, channel);
733 return;
734 }
735
736 local_irq_save(flags);
737 /* Disable interrupts */
738 __raw_writel(__raw_readl(DMA_BASE + DMA_DIMR) | (1 << channel),
739 DMA_BASE + DMA_DIMR);
740 __raw_writel(__raw_readl(DMA_BASE + DMA_CCR(channel)) & ~CCR_CEN,
741 DMA_BASE + DMA_CCR(channel));
742 imxdma->name = NULL;
743
744#ifdef CONFIG_ARCH_MX2
745 free_irq(MXC_INT_DMACH0 + channel, NULL);
746#endif
747
748 local_irq_restore(flags);
749}
750EXPORT_SYMBOL(imx_dma_free);
751
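/*
 * Example (hypothetical): stopping an ongoing transfer and releasing the
 * channel, e.g. from the client driver's remove() path:
 *
 *	imx_dma_disable(chan);
 *	imx_dma_free(chan);
 */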
752/**
753 * imx_dma_request_by_prio - find and request one of the free channels best
754 * suiting the requested priority
755 * @name: the driver/caller own non-%NULL identification
756 * @prio: the requested priority group: %DMA_PRIO_HIGH, %DMA_PRIO_MEDIUM
757 * or %DMA_PRIO_LOW
758 *
759 * This function tries to find a free channel in the specified priority group.
760 * If the requested priority cannot be satisfied, it looks for a free channel
761 * in the higher and then even the lower priority groups.
762 *
763 * Return value: If there is no free channel to allocate, -%ENODEV is returned.
764 * On successful allocation the channel number is returned.
765 */
766int imx_dma_request_by_prio(const char *name, enum imx_dma_prio prio)
767{
768 int i;
769 int best;
770
771 switch (prio) {
772 case (DMA_PRIO_HIGH):
773 best = 8;
774 break;
775 case (DMA_PRIO_MEDIUM):
776 best = 4;
777 break;
778 case (DMA_PRIO_LOW):
779 default:
780 best = 0;
781 break;
782 }
783
784 for (i = best; i < IMX_DMA_CHANNELS; i++)
785 if (!imx_dma_request(i, name))
786 return i;
787
788 for (i = best - 1; i >= 0; i--)
789 if (!imx_dma_request(i, name))
790 return i;
791
792 printk(KERN_ERR "%s: no free DMA channel found\n", __func__);
793
794 return -ENODEV;
795}
796EXPORT_SYMBOL(imx_dma_request_by_prio);
797
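/*
 * Example (editor's sketch): requesting whichever free channel best matches
 * the desired priority; "my driver" is a placeholder identification.
 *
 *	int chan = imx_dma_request_by_prio("my driver", DMA_PRIO_HIGH);
 *	if (chan < 0)
 *		return chan;	/* -ENODEV: no free channel */
 */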
798static int __init imx_dma_init(void)
799{
800 int ret = 0;
801 int i;
802
803 dma_clk = clk_get(NULL, "dma_clk");
804 clk_enable(dma_clk);
805
806 /* reset DMA module */
807 __raw_writel(DCR_DRST, DMA_BASE + DMA_DCR);
808
809#ifdef CONFIG_ARCH_MX1
810 ret = request_irq(DMA_INT, dma_irq_handler, 0, "DMA", NULL);
811 if (ret) {
812 printk(KERN_CRIT "Wow! Can't register IRQ for DMA\n");
813 return ret;
814 }
815
816 ret = request_irq(DMA_ERR, dma_err_handler, 0, "DMA", NULL);
817 if (ret) {
818 printk(KERN_CRIT "Wow! Can't register ERRIRQ for DMA\n");
819 free_irq(DMA_INT, NULL);
820 return ret;
821 }
822#endif
823 /* enable DMA module */
824 __raw_writel(DCR_DEN, DMA_BASE + DMA_DCR);
825
826 /* clear all interrupts */
827 __raw_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_BASE + DMA_DISR);
828
829 /* disable interrupts */
830 __raw_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_BASE + DMA_DIMR);
831
832 for (i = 0; i < IMX_DMA_CHANNELS; i++) {
833 imx_dma_channels[i].sg = NULL;
834 imx_dma_channels[i].dma_num = i;
835 }
836
837 return ret;
838}
839
840arch_initcall(imx_dma_init);
diff --git a/arch/arm/plat-mxc/include/mach/dma-mx1-mx2.h b/arch/arm/plat-mxc/include/mach/dma-mx1-mx2.h
new file mode 100644
index 000000000000..e85fd946116c
--- /dev/null
+++ b/arch/arm/plat-mxc/include/mach/dma-mx1-mx2.h
@@ -0,0 +1,89 @@
1/*
2 * linux/arch/arm/plat-mxc/include/mach/dma-mx1-mx2.h
3 *
4 * i.MX DMA registration and IRQ dispatching
5 *
6 * Copyright 2006 Pavel Pisa <pisa@cmp.felk.cvut.cz>
7 * Copyright 2008 Juergen Beisert, <kernel@pengutronix.de>
8 * Copyright 2008 Sascha Hauer, <s.hauer@pengutronix.de>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 2
13 * of the License, or (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
22 * MA 02110-1301, USA.
23 */
24
25#include <asm/dma.h>
26
27#ifndef __ASM_ARCH_MXC_DMA_H
28#define __ASM_ARCH_MXC_DMA_H
29
30#define IMX_DMA_CHANNELS 16
31
32#define DMA_BASE IO_ADDRESS(DMA_BASE_ADDR)
33
34#define IMX_DMA_MEMSIZE_32 (0 << 4)
35#define IMX_DMA_MEMSIZE_8 (1 << 4)
36#define IMX_DMA_MEMSIZE_16 (2 << 4)
37#define IMX_DMA_TYPE_LINEAR (0 << 10)
38#define IMX_DMA_TYPE_2D (1 << 10)
39#define IMX_DMA_TYPE_FIFO (2 << 10)
40
41#define IMX_DMA_ERR_BURST (1 << 0)
42#define IMX_DMA_ERR_REQUEST (1 << 1)
43#define IMX_DMA_ERR_TRANSFER (1 << 2)
44#define IMX_DMA_ERR_BUFFER (1 << 3)
45#define IMX_DMA_ERR_TIMEOUT (1 << 4)
46
47int
48imx_dma_config_channel(int channel, unsigned int config_port,
49 unsigned int config_mem, unsigned int dmareq, int hw_chaining);
50
51void
52imx_dma_config_burstlen(int channel, unsigned int burstlen);
53
54int
55imx_dma_setup_single(int channel, dma_addr_t dma_address,
56 unsigned int dma_length, unsigned int dev_addr,
57 dmamode_t dmamode);
58
59int
60imx_dma_setup_sg(int channel, struct scatterlist *sg,
61 unsigned int sgcount, unsigned int dma_length,
62 unsigned int dev_addr, dmamode_t dmamode);
63
64int
65imx_dma_setup_handlers(int channel,
66 void (*irq_handler) (int, void *),
67 void (*err_handler) (int, void *, int), void *data);
68
69int
70imx_dma_setup_progression_handler(int channel,
71 void (*prog_handler) (int, void*, struct scatterlist*));
72
73void imx_dma_enable(int channel);
74
75void imx_dma_disable(int channel);
76
77int imx_dma_request(int channel, const char *name);
78
79void imx_dma_free(int channel);
80
81enum imx_dma_prio {
82 DMA_PRIO_HIGH = 0,
83 DMA_PRIO_MEDIUM = 1,
84 DMA_PRIO_LOW = 2
85};
86
87int imx_dma_request_by_prio(const char *name, enum imx_dma_prio prio);
88
89#endif /* __ASM_ARCH_MXC_DMA_H */