author     Uwe Kleine-König <u.kleine-koenig@pengutronix.de>   2010-06-14 11:56:52 -0400
committer  Uwe Kleine-König <u.kleine-koenig@pengutronix.de>   2010-06-30 02:59:58 -0400
commit     9f72ffedc8409b9c9cbe17a9f66c2982baa4ff52 (patch)
tree       f996492cf0eebe5bb6f164776db25865c7ab8991 /arch/arm/mach-imx
parent     e780d2392dd37fcc231d97400c1cdd8d261ed556 (diff)
ARM: imx: new Kconfig symbol and feature test macro for DMA on mx1 and mx2
This should be used instead of hard coding the corresponding platforms.
The feature test macro is needed to support different SOCs in a single
kernel image.

While at it rename dma-mx1-mx2 to dma-v1 as mx25 doesn't use it and so
the mx2 part is wrong and move the header to arch/arm/mach-imx.

Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
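For illustration only, not part of this patch: with the feature test macro a
driver can check at run time whether the v1 DMA engine is present instead of
hard coding the SoC list, which is what allows one kernel image to cover
several SoCs. The foo_dma_client_init() function below is a made-up sketch:

	#include <linux/init.h>
	#include <linux/errno.h>
	#include <mach/hardware.h>
	#include <mach/dma-v1.h>

	static int __init foo_dma_client_init(void)
	{
		/* bail out on SoCs without the v1 DMA engine instead of
		 * hard coding cpu_is_mx1() || cpu_is_mx21() || cpu_is_mx27() */
		if (!imx_has_dma_v1())
			return -ENODEV;

		/* ... request channels and set up transfers ... */
		return 0;
	}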
Diffstat (limited to 'arch/arm/mach-imx')
-rw-r--r--   arch/arm/mach-imx/Kconfig                        |   6
-rw-r--r--   arch/arm/mach-imx/Makefile                       |   2
-rw-r--r--   arch/arm/mach-imx/dma-v1.c                       | 863
-rw-r--r--   arch/arm/mach-imx/include/mach/dma-mx1-mx2.h     |  10
-rw-r--r--   arch/arm/mach-imx/include/mach/dma-v1.h          | 107
5 files changed, 988 insertions, 0 deletions
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 5edead235bd1..73c70af35fc8 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -1,7 +1,11 @@
+config IMX_HAVE_DMA_V1
+	bool
+
 if ARCH_MX1
 
 config SOC_IMX1
 	select CPU_ARM920T
+	select IMX_HAVE_DMA_V1
 	select IMX_HAVE_IOMUX_V1
 	bool
 
@@ -27,12 +31,14 @@ if ARCH_MX2
 config SOC_IMX21
 	select CPU_ARM926T
 	select ARCH_MXC_AUDMUX_V1
+	select IMX_HAVE_DMA_V1
 	select IMX_HAVE_IOMUX_V1
 	bool
 
 config SOC_IMX27
 	select CPU_ARM926T
 	select ARCH_MXC_AUDMUX_V1
+	select IMX_HAVE_DMA_V1
 	select IMX_HAVE_IOMUX_V1
 	bool
 
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index e56a1191c9df..86b53e6bc94e 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -6,6 +6,8 @@
 
 obj-y := devices.o
 
+obj-$(CONFIG_IMX_HAVE_DMA_V1) += dma-v1.o
+
 obj-$(CONFIG_ARCH_MX1) += clock-imx1.o mm-imx1.o
 obj-$(CONFIG_MACH_MX21) += clock-imx21.o mm-imx21.o
 
diff --git a/arch/arm/mach-imx/dma-v1.c b/arch/arm/mach-imx/dma-v1.c
new file mode 100644
index 000000000000..fd1d9197d06e
--- /dev/null
+++ b/arch/arm/mach-imx/dma-v1.c
@@ -0,0 +1,863 @@
1/*
2 * linux/arch/arm/plat-mxc/dma-v1.c
3 *
4 * i.MX DMA registration and IRQ dispatching
5 *
6 * Copyright 2006 Pavel Pisa <pisa@cmp.felk.cvut.cz>
7 * Copyright 2008 Juergen Beisert, <kernel@pengutronix.de>
8 * Copyright 2008 Sascha Hauer, <s.hauer@pengutronix.de>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 2
13 * of the License, or (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
22 * MA 02110-1301, USA.
23 */
24
25#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/kernel.h>
28#include <linux/interrupt.h>
29#include <linux/errno.h>
30#include <linux/clk.h>
31#include <linux/scatterlist.h>
32#include <linux/io.h>
33
34#include <asm/system.h>
35#include <asm/irq.h>
36#include <mach/hardware.h>
37#include <mach/dma-v1.h>
38
39#define DMA_DCR 0x00 /* Control Register */
40#define DMA_DISR 0x04 /* Interrupt status Register */
41#define DMA_DIMR 0x08 /* Interrupt mask Register */
42#define DMA_DBTOSR 0x0c /* Burst timeout status Register */
43#define DMA_DRTOSR 0x10 /* Request timeout Register */
44#define DMA_DSESR 0x14 /* Transfer Error Status Register */
45#define DMA_DBOSR 0x18 /* Buffer overflow status Register */
46#define DMA_DBTOCR 0x1c /* Burst timeout control Register */
47#define DMA_WSRA 0x40 /* W-Size Register A */
48#define DMA_XSRA 0x44 /* X-Size Register A */
49#define DMA_YSRA 0x48 /* Y-Size Register A */
50#define DMA_WSRB 0x4c /* W-Size Register B */
51#define DMA_XSRB 0x50 /* X-Size Register B */
52#define DMA_YSRB 0x54 /* Y-Size Register B */
53#define DMA_SAR(x) (0x80 + ((x) << 6)) /* Source Address Registers */
54#define DMA_DAR(x) (0x84 + ((x) << 6)) /* Destination Address Registers */
55#define DMA_CNTR(x) (0x88 + ((x) << 6)) /* Count Registers */
56#define DMA_CCR(x) (0x8c + ((x) << 6)) /* Control Registers */
57#define DMA_RSSR(x) (0x90 + ((x) << 6)) /* Request source select Registers */
58#define DMA_BLR(x) (0x94 + ((x) << 6)) /* Burst length Registers */
59#define DMA_RTOR(x) (0x98 + ((x) << 6)) /* Request timeout Registers */
60#define DMA_BUCR(x) (0x98 + ((x) << 6)) /* Bus Utilization Registers */
61#define DMA_CCNR(x) (0x9C + ((x) << 6)) /* Channel counter Registers */
62
63#define DCR_DRST (1<<1)
64#define DCR_DEN (1<<0)
65#define DBTOCR_EN (1<<15)
66#define DBTOCR_CNT(x) ((x) & 0x7fff)
67#define CNTR_CNT(x) ((x) & 0xffffff)
68#define CCR_ACRPT (1<<14)
69#define CCR_DMOD_LINEAR (0x0 << 12)
70#define CCR_DMOD_2D (0x1 << 12)
71#define CCR_DMOD_FIFO (0x2 << 12)
72#define CCR_DMOD_EOBFIFO (0x3 << 12)
73#define CCR_SMOD_LINEAR (0x0 << 10)
74#define CCR_SMOD_2D (0x1 << 10)
75#define CCR_SMOD_FIFO (0x2 << 10)
76#define CCR_SMOD_EOBFIFO (0x3 << 10)
77#define CCR_MDIR_DEC (1<<9)
78#define CCR_MSEL_B (1<<8)
79#define CCR_DSIZ_32 (0x0 << 6)
80#define CCR_DSIZ_8 (0x1 << 6)
81#define CCR_DSIZ_16 (0x2 << 6)
82#define CCR_SSIZ_32 (0x0 << 4)
83#define CCR_SSIZ_8 (0x1 << 4)
84#define CCR_SSIZ_16 (0x2 << 4)
85#define CCR_REN (1<<3)
86#define CCR_RPT (1<<2)
87#define CCR_FRC (1<<1)
88#define CCR_CEN (1<<0)
89#define RTOR_EN (1<<15)
90#define RTOR_CLK (1<<14)
91#define RTOR_PSC (1<<13)
92
93/*
94 * struct imx_dma_channel - i.MX specific DMA extension
95 * @name: name specified by DMA client
96 * @irq_handler: client callback for end of transfer
97 * @err_handler: client callback for error condition
98 * @data: clients context data for callbacks
99 * @dma_mode: direction of the transfer %DMA_MODE_READ or %DMA_MODE_WRITE
100 * @sg: pointer to the actual read/written chunk for scatter-gather emulation
101 * @resbytes: total residual number of bytes to transfer
102 * (it can be lower or same as sum of SG mapped chunk sizes)
103 * @sgcount: number of chunks to be read/written
104 *
105 * Structure is used for IMX DMA processing. It would probably be good
106 * to use @struct dma_struct in the future for external interfacing and use
107 * @struct imx_dma_channel only as an extension to it.
108 */
109
110struct imx_dma_channel {
111 const char *name;
112 void (*irq_handler) (int, void *);
113 void (*err_handler) (int, void *, int errcode);
114 void (*prog_handler) (int, void *, struct scatterlist *);
115 void *data;
116 unsigned int dma_mode;
117 struct scatterlist *sg;
118 unsigned int resbytes;
119 int dma_num;
120
121 int in_use;
122
123 u32 ccr_from_device;
124 u32 ccr_to_device;
125
126 struct timer_list watchdog;
127
128 int hw_chaining;
129};
130
131static void __iomem *imx_dmav1_baseaddr;
132
133static void imx_dmav1_writel(unsigned val, unsigned offset)
134{
135 __raw_writel(val, imx_dmav1_baseaddr + offset);
136}
137
138static unsigned imx_dmav1_readl(unsigned offset)
139{
140 return __raw_readl(imx_dmav1_baseaddr + offset);
141}
142
143static struct imx_dma_channel imx_dma_channels[IMX_DMA_CHANNELS];
144
145static struct clk *dma_clk;
146
147static int imx_dma_hw_chain(struct imx_dma_channel *imxdma)
148{
149 if (cpu_is_mx27())
150 return imxdma->hw_chaining;
151 else
152 return 0;
153}
154
155/*
156 * imx_dma_sg_next - prepare next chunk for scatter-gather DMA emulation
157 */
158static inline int imx_dma_sg_next(int channel, struct scatterlist *sg)
159{
160 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
161 unsigned long now;
162
163 if (!imxdma->name) {
164 printk(KERN_CRIT "%s: called for not allocated channel %d\n",
165 __func__, channel);
166 return 0;
167 }
168
169 now = min(imxdma->resbytes, sg->length);
170 if (imxdma->resbytes != IMX_DMA_LENGTH_LOOP)
171 imxdma->resbytes -= now;
172
173 if ((imxdma->dma_mode & DMA_MODE_MASK) == DMA_MODE_READ)
174 imx_dmav1_writel(sg->dma_address, DMA_DAR(channel));
175 else
176 imx_dmav1_writel(sg->dma_address, DMA_SAR(channel));
177
178 imx_dmav1_writel(now, DMA_CNTR(channel));
179
180 pr_debug("imxdma%d: next sg chunk dst 0x%08x, src 0x%08x, "
181 "size 0x%08x\n", channel,
182 imx_dmav1_readl(DMA_DAR(channel)),
183 imx_dmav1_readl(DMA_SAR(channel)),
184 imx_dmav1_readl(DMA_CNTR(channel)));
185
186 return now;
187}
188
189/**
190 * imx_dma_setup_single - setup i.MX DMA channel for linear memory to/from
191 * device transfer
192 *
193 * @channel: i.MX DMA channel number
194 * @dma_address: the DMA/physical memory address of the linear data block
195 * to transfer
196 * @dma_length: length of the data block in bytes
197 * @dev_addr: physical device port address
198 * @dmamode: DMA transfer mode, %DMA_MODE_READ from the device to the memory
199 * or %DMA_MODE_WRITE from memory to the device
200 *
201 * Return value: if incorrect parameters are provided -%EINVAL.
202 * Zero indicates success.
203 */
204int
205imx_dma_setup_single(int channel, dma_addr_t dma_address,
206 unsigned int dma_length, unsigned int dev_addr,
207 unsigned int dmamode)
208{
209 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
210
211 imxdma->sg = NULL;
212 imxdma->dma_mode = dmamode;
213
214 if (!dma_address) {
215 printk(KERN_ERR "imxdma%d: imx_dma_setup_single null address\n",
216 channel);
217 return -EINVAL;
218 }
219
220 if (!dma_length) {
221 printk(KERN_ERR "imxdma%d: imx_dma_setup_single zero length\n",
222 channel);
223 return -EINVAL;
224 }
225
226 if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
227 pr_debug("imxdma%d: %s dma_address=0x%08x dma_length=%d "
228 "dev_addr=0x%08x for read\n",
229 channel, __func__, (unsigned int)dma_address,
230 dma_length, dev_addr);
231
232 imx_dmav1_writel(dev_addr, DMA_SAR(channel));
233 imx_dmav1_writel(dma_address, DMA_DAR(channel));
234 imx_dmav1_writel(imxdma->ccr_from_device, DMA_CCR(channel));
235 } else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
236 pr_debug("imxdma%d: %s dma_address=0x%08x dma_length=%d "
237 "dev_addr=0x%08x for write\n",
238 channel, __func__, (unsigned int)dma_address,
239 dma_length, dev_addr);
240
241 imx_dmav1_writel(dma_address, DMA_SAR(channel));
242 imx_dmav1_writel(dev_addr, DMA_DAR(channel));
243 imx_dmav1_writel(imxdma->ccr_to_device,
244 DMA_CCR(channel));
245 } else {
246 printk(KERN_ERR "imxdma%d: imx_dma_setup_single bad dmamode\n",
247 channel);
248 return -EINVAL;
249 }
250
251 imx_dmav1_writel(dma_length, DMA_CNTR(channel));
252
253 return 0;
254}
255EXPORT_SYMBOL(imx_dma_setup_single);
256
257/**
258 * imx_dma_setup_sg - setup i.MX DMA channel SG list to/from device transfer
259 * @channel: i.MX DMA channel number
260 * @sg: pointer to the scatter-gather list/vector
261 * @sgcount: number of entries in the scatter-gather list/vector
262 * @dma_length: total length of the transfer request in bytes
263 * @dev_addr: physical device port address
264 * @dmamode: DMA transfer mode, %DMA_MODE_READ from the device to the memory
265 * or %DMA_MODE_WRITE from memory to the device
266 *
267 * The function sets up DMA channel state and registers to be ready for
268 * transfer specified by provided parameters. The scatter-gather emulation
269 * is set up according to the parameters.
270 *
271 * The full preparation of the transfer requires setup of more registers
272 * by the caller before imx_dma_enable() can be called.
273 *
274 * %BLR(channel) holds transfer burst length in bytes, 0 means 64 bytes
275 *
276 * %RSSR(channel) has to be set to the DMA request line source %DMA_REQ_xxx
277 *
278 * %CCR(channel) has to specify the transfer parameters; the following
279 * settings are typical for linear or simple scatter-gather transfers if
280 * %DMA_MODE_READ is specified
281 *
282 * %CCR_DMOD_LINEAR | %CCR_DSIZ_32 | %CCR_SMOD_FIFO | %CCR_SSIZ_x
283 *
284 * The typical setup for %DMA_MODE_WRITE is the following combination of
285 * options
286 *
287 * %CCR_SMOD_LINEAR | %CCR_SSIZ_32 | %CCR_DMOD_FIFO | %CCR_DSIZ_x
288 *
289 * Be careful here and do not mix up the source and destination device
290 * port size constants, they are really different:
291 * %CCR_SSIZ_8, %CCR_SSIZ_16, %CCR_SSIZ_32,
292 * %CCR_DSIZ_8, %CCR_DSIZ_16, %CCR_DSIZ_32
293 *
294 * Return value: if incorrect parameters are provided -%EINVAL.
295 * Zero indicates success.
296 */
297int
298imx_dma_setup_sg(int channel,
299 struct scatterlist *sg, unsigned int sgcount,
300 unsigned int dma_length, unsigned int dev_addr,
301 unsigned int dmamode)
302{
303 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
304
305 if (imxdma->in_use)
306 return -EBUSY;
307
308 imxdma->sg = sg;
309 imxdma->dma_mode = dmamode;
310 imxdma->resbytes = dma_length;
311
312 if (!sg || !sgcount) {
313 printk(KERN_ERR "imxdma%d: imx_dma_setup_sg empty sg list\n",
314 channel);
315 return -EINVAL;
316 }
317
318 if (!sg->length) {
319 printk(KERN_ERR "imxdma%d: imx_dma_setup_sg zero length\n",
320 channel);
321 return -EINVAL;
322 }
323
324 if ((dmamode & DMA_MODE_MASK) == DMA_MODE_READ) {
325 pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
326 "dev_addr=0x%08x for read\n",
327 channel, __func__, sg, sgcount, dma_length, dev_addr);
328
329 imx_dmav1_writel(dev_addr, DMA_SAR(channel));
330 imx_dmav1_writel(imxdma->ccr_from_device, DMA_CCR(channel));
331 } else if ((dmamode & DMA_MODE_MASK) == DMA_MODE_WRITE) {
332 pr_debug("imxdma%d: %s sg=%p sgcount=%d total length=%d "
333 "dev_addr=0x%08x for write\n",
334 channel, __func__, sg, sgcount, dma_length, dev_addr);
335
336 imx_dmav1_writel(dev_addr, DMA_DAR(channel));
337 imx_dmav1_writel(imxdma->ccr_to_device, DMA_CCR(channel));
338 } else {
339 printk(KERN_ERR "imxdma%d: imx_dma_setup_sg bad dmamode\n",
340 channel);
341 return -EINVAL;
342 }
343
344 imx_dma_sg_next(channel, sg);
345
346 return 0;
347}
348EXPORT_SYMBOL(imx_dma_setup_sg);
349
350int
351imx_dma_config_channel(int channel, unsigned int config_port,
352 unsigned int config_mem, unsigned int dmareq, int hw_chaining)
353{
354 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
355 u32 dreq = 0;
356
357 imxdma->hw_chaining = 0;
358
359 if (hw_chaining) {
360 imxdma->hw_chaining = 1;
361 if (!imx_dma_hw_chain(imxdma))
362 return -EINVAL;
363 }
364
365 if (dmareq)
366 dreq = CCR_REN;
367
368 imxdma->ccr_from_device = config_port | (config_mem << 2) | dreq;
369 imxdma->ccr_to_device = config_mem | (config_port << 2) | dreq;
370
371 imx_dmav1_writel(dmareq, DMA_RSSR(channel));
372
373 return 0;
374}
375EXPORT_SYMBOL(imx_dma_config_channel);
376
377void imx_dma_config_burstlen(int channel, unsigned int burstlen)
378{
379 imx_dmav1_writel(burstlen, DMA_BLR(channel));
380}
381EXPORT_SYMBOL(imx_dma_config_burstlen);
382
383/**
384 * imx_dma_setup_handlers - setup i.MX DMA channel end and error notification
385 * handlers
386 * @channel: i.MX DMA channel number
387 * @irq_handler: the pointer to the function called if the transfer
388 * ends successfully
389 * @err_handler: the pointer to the function called if the transfer
390 * ends prematurely due to an error
391 * @data: user specified value to be passed to the handlers
392 */
393int
394imx_dma_setup_handlers(int channel,
395 void (*irq_handler) (int, void *),
396 void (*err_handler) (int, void *, int),
397 void *data)
398{
399 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
400 unsigned long flags;
401
402 if (!imxdma->name) {
403 printk(KERN_CRIT "%s: called for not allocated channel %d\n",
404 __func__, channel);
405 return -ENODEV;
406 }
407
408 local_irq_save(flags);
409 imx_dmav1_writel(1 << channel, DMA_DISR);
410 imxdma->irq_handler = irq_handler;
411 imxdma->err_handler = err_handler;
412 imxdma->data = data;
413 local_irq_restore(flags);
414 return 0;
415}
416EXPORT_SYMBOL(imx_dma_setup_handlers);
417
418/**
419 * imx_dma_setup_progression_handler - setup i.MX DMA channel progression
420 * handlers
421 * @channel: i.MX DMA channel number
422 * @prog_handler: the pointer to the function called if the transfer progresses
423 */
424int
425imx_dma_setup_progression_handler(int channel,
426 void (*prog_handler) (int, void*, struct scatterlist*))
427{
428 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
429 unsigned long flags;
430
431 if (!imxdma->name) {
432 printk(KERN_CRIT "%s: called for not allocated channel %d\n",
433 __func__, channel);
434 return -ENODEV;
435 }
436
437 local_irq_save(flags);
438 imxdma->prog_handler = prog_handler;
439 local_irq_restore(flags);
440 return 0;
441}
442EXPORT_SYMBOL(imx_dma_setup_progression_handler);
443
444/**
445 * imx_dma_enable - function to start i.MX DMA channel operation
446 * @channel: i.MX DMA channel number
447 *
448 * The channel has to be allocated by the driver through the imx_dma_request()
449 * or imx_dma_request_by_prio() function.
450 * The transfer parameters have to be set in the channel registers through a
451 * call of the imx_dma_setup_single() or imx_dma_setup_sg() function,
452 * and the registers %BLR(channel), %RSSR(channel) and %CCR(channel) have to
453 * be set by the channel user prior to this function call.
454 */
455void imx_dma_enable(int channel)
456{
457 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
458 unsigned long flags;
459
460 pr_debug("imxdma%d: imx_dma_enable\n", channel);
461
462 if (!imxdma->name) {
463 printk(KERN_CRIT "%s: called for not allocated channel %d\n",
464 __func__, channel);
465 return;
466 }
467
468 if (imxdma->in_use)
469 return;
470
471 local_irq_save(flags);
472
473 imx_dmav1_writel(1 << channel, DMA_DISR);
474 imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) & ~(1 << channel), DMA_DIMR);
475 imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) | CCR_CEN |
476 CCR_ACRPT, DMA_CCR(channel));
477
478#ifdef CONFIG_ARCH_MX2
479 if ((cpu_is_mx21() || cpu_is_mx27()) &&
480 imxdma->sg && imx_dma_hw_chain(imxdma)) {
481 imxdma->sg = sg_next(imxdma->sg);
482 if (imxdma->sg) {
483 u32 tmp;
484 imx_dma_sg_next(channel, imxdma->sg);
485 tmp = imx_dmav1_readl(DMA_CCR(channel));
486 imx_dmav1_writel(tmp | CCR_RPT | CCR_ACRPT,
487 DMA_CCR(channel));
488 }
489 }
490#endif
491 imxdma->in_use = 1;
492
493 local_irq_restore(flags);
494}
495EXPORT_SYMBOL(imx_dma_enable);
496
497/**
498 * imx_dma_disable - stop, finish i.MX DMA channel operation
499 * @channel: i.MX DMA channel number
500 */
501void imx_dma_disable(int channel)
502{
503 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
504 unsigned long flags;
505
506 pr_debug("imxdma%d: imx_dma_disable\n", channel);
507
508 if (imx_dma_hw_chain(imxdma))
509 del_timer(&imxdma->watchdog);
510
511 local_irq_save(flags);
512 imx_dmav1_writel(imx_dmav1_readl(DMA_DIMR) | (1 << channel), DMA_DIMR);
513 imx_dmav1_writel(imx_dmav1_readl(DMA_CCR(channel)) & ~CCR_CEN,
514 DMA_CCR(channel));
515 imx_dmav1_writel(1 << channel, DMA_DISR);
516 imxdma->in_use = 0;
517 local_irq_restore(flags);
518}
519EXPORT_SYMBOL(imx_dma_disable);
520
521#ifdef CONFIG_ARCH_MX2
522static void imx_dma_watchdog(unsigned long chno)
523{
524 struct imx_dma_channel *imxdma = &imx_dma_channels[chno];
525
526 imx_dmav1_writel(0, DMA_CCR(chno));
527 imxdma->in_use = 0;
528 imxdma->sg = NULL;
529
530 if (imxdma->err_handler)
531 imxdma->err_handler(chno, imxdma->data, IMX_DMA_ERR_TIMEOUT);
532}
533#endif
534
535static irqreturn_t dma_err_handler(int irq, void *dev_id)
536{
537 int i, disr;
538 struct imx_dma_channel *imxdma;
539 unsigned int err_mask;
540 int errcode;
541
542 disr = imx_dmav1_readl(DMA_DISR);
543
544 err_mask = imx_dmav1_readl(DMA_DBTOSR) |
545 imx_dmav1_readl(DMA_DRTOSR) |
546 imx_dmav1_readl(DMA_DSESR) |
547 imx_dmav1_readl(DMA_DBOSR);
548
549 if (!err_mask)
550 return IRQ_HANDLED;
551
552 imx_dmav1_writel(disr & err_mask, DMA_DISR);
553
554 for (i = 0; i < IMX_DMA_CHANNELS; i++) {
555 if (!(err_mask & (1 << i)))
556 continue;
557 imxdma = &imx_dma_channels[i];
558 errcode = 0;
559
560 if (imx_dmav1_readl(DMA_DBTOSR) & (1 << i)) {
561 imx_dmav1_writel(1 << i, DMA_DBTOSR);
562 errcode |= IMX_DMA_ERR_BURST;
563 }
564 if (imx_dmav1_readl(DMA_DRTOSR) & (1 << i)) {
565 imx_dmav1_writel(1 << i, DMA_DRTOSR);
566 errcode |= IMX_DMA_ERR_REQUEST;
567 }
568 if (imx_dmav1_readl(DMA_DSESR) & (1 << i)) {
569 imx_dmav1_writel(1 << i, DMA_DSESR);
570 errcode |= IMX_DMA_ERR_TRANSFER;
571 }
572 if (imx_dmav1_readl(DMA_DBOSR) & (1 << i)) {
573 imx_dmav1_writel(1 << i, DMA_DBOSR);
574 errcode |= IMX_DMA_ERR_BUFFER;
575 }
576 if (imxdma->name && imxdma->err_handler) {
577 imxdma->err_handler(i, imxdma->data, errcode);
578 continue;
579 }
580
581 imx_dma_channels[i].sg = NULL;
582
583 printk(KERN_WARNING
584 "DMA timeout on channel %d (%s) -%s%s%s%s\n",
585 i, imxdma->name,
586 errcode & IMX_DMA_ERR_BURST ? " burst" : "",
587 errcode & IMX_DMA_ERR_REQUEST ? " request" : "",
588 errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
589 errcode & IMX_DMA_ERR_BUFFER ? " buffer" : "");
590 }
591 return IRQ_HANDLED;
592}
593
594static void dma_irq_handle_channel(int chno)
595{
596 struct imx_dma_channel *imxdma = &imx_dma_channels[chno];
597
598 if (!imxdma->name) {
599 /*
600 * IRQ for an unregistered DMA channel:
601 * let's clear the interrupts and disable it.
602 */
603 printk(KERN_WARNING
604 "spurious IRQ for DMA channel %d\n", chno);
605 return;
606 }
607
608 if (imxdma->sg) {
609 u32 tmp;
610 struct scatterlist *current_sg = imxdma->sg;
611 imxdma->sg = sg_next(imxdma->sg);
612
613 if (imxdma->sg) {
614 imx_dma_sg_next(chno, imxdma->sg);
615
616 tmp = imx_dmav1_readl(DMA_CCR(chno));
617
618 if (imx_dma_hw_chain(imxdma)) {
619 /* FIXME: The timeout should probably be
620 * configurable
621 */
622 mod_timer(&imxdma->watchdog,
623 jiffies + msecs_to_jiffies(500));
624
625 tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
626 imx_dmav1_writel(tmp, DMA_CCR(chno));
627 } else {
628 imx_dmav1_writel(tmp & ~CCR_CEN, DMA_CCR(chno));
629 tmp |= CCR_CEN;
630 }
631
632 imx_dmav1_writel(tmp, DMA_CCR(chno));
633
634 if (imxdma->prog_handler)
635 imxdma->prog_handler(chno, imxdma->data,
636 current_sg);
637
638 return;
639 }
640
641 if (imx_dma_hw_chain(imxdma)) {
642 del_timer(&imxdma->watchdog);
643 return;
644 }
645 }
646
647 imx_dmav1_writel(0, DMA_CCR(chno));
648 imxdma->in_use = 0;
649 if (imxdma->irq_handler)
650 imxdma->irq_handler(chno, imxdma->data);
651}
652
653static irqreturn_t dma_irq_handler(int irq, void *dev_id)
654{
655 int i, disr;
656
657#ifdef CONFIG_ARCH_MX2
658 if (cpu_is_mx21() || cpu_is_mx27())
659 dma_err_handler(irq, dev_id);
660#endif
661
662 disr = imx_dmav1_readl(DMA_DISR);
663
664 pr_debug("imxdma: dma_irq_handler called, disr=0x%08x\n",
665 disr);
666
667 imx_dmav1_writel(disr, DMA_DISR);
668 for (i = 0; i < IMX_DMA_CHANNELS; i++) {
669 if (disr & (1 << i))
670 dma_irq_handle_channel(i);
671 }
672
673 return IRQ_HANDLED;
674}
675
676/**
677 * imx_dma_request - request/allocate specified channel number
678 * @channel: i.MX DMA channel number
679 * @name: the driver/caller own non-%NULL identification
680 */
681int imx_dma_request(int channel, const char *name)
682{
683 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
684 unsigned long flags;
685 int ret = 0;
686
687 /* basic sanity checks */
688 if (!name)
689 return -EINVAL;
690
691 if (channel >= IMX_DMA_CHANNELS) {
692 printk(KERN_CRIT "%s: called for non-existent channel %d\n",
693 __func__, channel);
694 return -EINVAL;
695 }
696
697 local_irq_save(flags);
698 if (imxdma->name) {
699 local_irq_restore(flags);
700 return -EBUSY;
701 }
702 memset(imxdma, 0, sizeof(*imxdma)); /* zero the whole structure, not just the pointer */
703 imxdma->name = name;
704 local_irq_restore(flags); /* request_irq() can block */
705
706#ifdef CONFIG_ARCH_MX2
707 if (cpu_is_mx21() || cpu_is_mx27()) {
708 ret = request_irq(MX2x_INT_DMACH0 + channel,
709 dma_irq_handler, 0, "DMA", NULL);
710 if (ret) {
711 imxdma->name = NULL;
712 pr_crit("Can't register IRQ %d for DMA channel %d\n",
713 MX2x_INT_DMACH0 + channel, channel);
714 return ret;
715 }
716 init_timer(&imxdma->watchdog);
717 imxdma->watchdog.function = &imx_dma_watchdog;
718 imxdma->watchdog.data = channel;
719 }
720#endif
721
722 return ret;
723}
724EXPORT_SYMBOL(imx_dma_request);
725
726/**
727 * imx_dma_free - release previously acquired channel
728 * @channel: i.MX DMA channel number
729 */
730void imx_dma_free(int channel)
731{
732 unsigned long flags;
733 struct imx_dma_channel *imxdma = &imx_dma_channels[channel];
734
735 if (!imxdma->name) {
736 printk(KERN_CRIT
737 "%s: trying to free free channel %d\n",
738 __func__, channel);
739 return;
740 }
741
742 local_irq_save(flags);
743 /* Disable interrupts */
744 imx_dma_disable(channel);
745 imxdma->name = NULL;
746
747#ifdef CONFIG_ARCH_MX2
748 if (cpu_is_mx21() || cpu_is_mx27())
749 free_irq(MX2x_INT_DMACH0 + channel, NULL);
750#endif
751
752 local_irq_restore(flags);
753}
754EXPORT_SYMBOL(imx_dma_free);
755
756/**
757 * imx_dma_request_by_prio - find and request a free channel best suiting
758 * the requested priority
759 * @name: the driver/caller own non-%NULL identification
760 * @prio: the requested channel priority group
761 *
762 * This function tries to find a free channel in the specified priority
763 * group. If the requested priority cannot be achieved, it tries to look
764 * for a free channel in the higher and then in the lower priority
765 * groups.
766 *
767 * Return value: If there is no free channel to allocate, -%ENODEV is returned.
768 * On successful allocation the channel number is returned.
769 */
770int imx_dma_request_by_prio(const char *name, enum imx_dma_prio prio)
771{
772 int i;
773 int best;
774
775 switch (prio) {
776 case (DMA_PRIO_HIGH):
777 best = 8;
778 break;
779 case (DMA_PRIO_MEDIUM):
780 best = 4;
781 break;
782 case (DMA_PRIO_LOW):
783 default:
784 best = 0;
785 break;
786 }
787
788 for (i = best; i < IMX_DMA_CHANNELS; i++)
789 if (!imx_dma_request(i, name))
790 return i;
791
792 for (i = best - 1; i >= 0; i--)
793 if (!imx_dma_request(i, name))
794 return i;
795
796 printk(KERN_ERR "%s: no free DMA channel found\n", __func__);
797
798 return -ENODEV;
799}
800EXPORT_SYMBOL(imx_dma_request_by_prio);
801
802static int __init imx_dma_init(void)
803{
804 int ret = 0;
805 int i;
806
807#ifdef CONFIG_ARCH_MX1
808 if (cpu_is_mx1())
809 imx_dmav1_baseaddr = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR);
810 else
811#endif
812#ifdef CONFIG_MACH_MX21
813 if (cpu_is_mx21())
814 imx_dmav1_baseaddr = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR);
815 else
816#endif
817#ifdef CONFIG_MACH_MX27
818 if (cpu_is_mx27())
819 imx_dmav1_baseaddr = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR);
820 else
821#endif
822 BUG();
823
824 dma_clk = clk_get(NULL, "dma");
825 clk_enable(dma_clk);
826
827 /* reset DMA module */
828 imx_dmav1_writel(DCR_DRST, DMA_DCR);
829
830#ifdef CONFIG_ARCH_MX1
831 if (cpu_is_mx1()) {
832 ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", NULL);
833 if (ret) {
834 pr_crit("Wow! Can't register IRQ for DMA\n");
835 return ret;
836 }
837
838 ret = request_irq(MX1_DMA_ERR, dma_err_handler, 0, "DMA", NULL);
839 if (ret) {
840 pr_crit("Wow! Can't register ERRIRQ for DMA\n");
841 free_irq(MX1_DMA_INT, NULL);
842 return ret;
843 }
844 }
845#endif
846 /* enable DMA module */
847 imx_dmav1_writel(DCR_DEN, DMA_DCR);
848
849 /* clear all interrupts */
850 imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);
851
852 /* disable interrupts */
853 imx_dmav1_writel((1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);
854
855 for (i = 0; i < IMX_DMA_CHANNELS; i++) {
856 imx_dma_channels[i].sg = NULL;
857 imx_dma_channels[i].dma_num = i;
858 }
859
860 return ret;
861}
862
863arch_initcall(imx_dma_init);
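As a rough, non-authoritative sketch of how a client driver would use the API
exported above for a simple peripheral-to-memory transfer: the foo_* names are
invented for the example, and FOO_DMA_REQ / FOO_RX_FIFO_ADDR stand in for a
real DMA request line number and device FIFO address.

	#include <linux/dma-mapping.h>
	#include <mach/dma-v1.h>

	#define FOO_DMA_REQ		12		/* placeholder request line */
	#define FOO_RX_FIFO_ADDR	0x10012000	/* placeholder FIFO address */

	static void foo_done(int channel, void *data)
	{
		/* end-of-transfer callback */
	}

	static void foo_error(int channel, void *data, int errcode)
	{
		/* errcode is a mask of IMX_DMA_ERR_* bits */
	}

	static int foo_start_rx(dma_addr_t buf, unsigned int len)
	{
		int channel = imx_dma_request_by_prio("foo-rx", DMA_PRIO_HIGH);

		if (channel < 0)
			return channel;

		/* device side is a 32 bit FIFO, memory side is linear 32 bit */
		imx_dma_config_channel(channel,
				IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_FIFO,
				IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
				FOO_DMA_REQ, 0);
		imx_dma_config_burstlen(channel, 16);
		imx_dma_setup_handlers(channel, foo_done, foo_error, NULL);
		imx_dma_setup_single(channel, buf, len, FOO_RX_FIFO_ADDR,
				DMA_MODE_READ);
		imx_dma_enable(channel);

		return channel;
	}

When the transfer finishes, foo_done() runs from the DMA interrupt;
imx_dma_disable() and imx_dma_free() (not shown) stop and release the channel.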
diff --git a/arch/arm/mach-imx/include/mach/dma-mx1-mx2.h b/arch/arm/mach-imx/include/mach/dma-mx1-mx2.h
new file mode 100644
index 000000000000..df5f522da6b3
--- /dev/null
+++ b/arch/arm/mach-imx/include/mach/dma-mx1-mx2.h
@@ -0,0 +1,10 @@
1#ifndef __MACH_DMA_MX1_MX2_H__
2#define __MACH_DMA_MX1_MX2_H__
3/*
4 * Don't use this header in new code, it will go away when all users are
5 * converted to mach/dma-v1.h
6 */
7
8#include <mach/dma-v1.h>
9
10#endif /* ifndef __MACH_DMA_MX1_MX2_H__ */
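The wrapper above exists only so current users keep building while they are
migrated; converting a user is then just an include swap, sketched here for a
hypothetical driver (not part of this patch):

	/* before (deprecated compatibility header): */
	#include <mach/dma-mx1-mx2.h>
	/* after: */
	#include <mach/dma-v1.h>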
diff --git a/arch/arm/mach-imx/include/mach/dma-v1.h b/arch/arm/mach-imx/include/mach/dma-v1.h
new file mode 100644
index 000000000000..287431cc13e5
--- /dev/null
+++ b/arch/arm/mach-imx/include/mach/dma-v1.h
@@ -0,0 +1,107 @@
1/*
2 * linux/arch/arm/mach-imx/include/mach/dma-v1.h
3 *
4 * i.MX DMA registration and IRQ dispatching
5 *
6 * Copyright 2006 Pavel Pisa <pisa@cmp.felk.cvut.cz>
7 * Copyright 2008 Juergen Beisert, <kernel@pengutronix.de>
8 * Copyright 2008 Sascha Hauer, <s.hauer@pengutronix.de>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 2
13 * of the License, or (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
22 * MA 02110-1301, USA.
23 */
24
25#ifndef __MACH_DMA_V1_H__
26#define __MACH_DMA_V1_H__
27
28#define imx_has_dma_v1() (cpu_is_mx1() || cpu_is_mx21() || cpu_is_mx27())
29
30#define IMX_DMA_CHANNELS 16
31
32#define DMA_MODE_READ 0
33#define DMA_MODE_WRITE 1
34#define DMA_MODE_MASK 1
35
36#define MX1_DMA_REG(offset) MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR + (offset))
37
38/* DMA Interrupt Mask Register */
39#define MX1_DMA_DIMR MX1_DMA_REG(0x08)
40
41/* Channel Control Register */
42#define MX1_DMA_CCR(x) MX1_DMA_REG(0x8c + ((x) << 6))
43
44#define IMX_DMA_MEMSIZE_32 (0 << 4)
45#define IMX_DMA_MEMSIZE_8 (1 << 4)
46#define IMX_DMA_MEMSIZE_16 (2 << 4)
47#define IMX_DMA_TYPE_LINEAR (0 << 10)
48#define IMX_DMA_TYPE_2D (1 << 10)
49#define IMX_DMA_TYPE_FIFO (2 << 10)
50
51#define IMX_DMA_ERR_BURST (1 << 0)
52#define IMX_DMA_ERR_REQUEST (1 << 1)
53#define IMX_DMA_ERR_TRANSFER (1 << 2)
54#define IMX_DMA_ERR_BUFFER (1 << 3)
55#define IMX_DMA_ERR_TIMEOUT (1 << 4)
56
57int
58imx_dma_config_channel(int channel, unsigned int config_port,
59 unsigned int config_mem, unsigned int dmareq, int hw_chaining);
60
61void
62imx_dma_config_burstlen(int channel, unsigned int burstlen);
63
64int
65imx_dma_setup_single(int channel, dma_addr_t dma_address,
66 unsigned int dma_length, unsigned int dev_addr,
67 unsigned int dmamode);
68
69
70/*
71 * Use this flag as the dma_length argument to imx_dma_setup_sg()
72 * to create an endless running dma loop. The end of the scatterlist
73 * must be linked to the beginning for this to work.
74 */
75#define IMX_DMA_LENGTH_LOOP ((unsigned int)-1)
76
77int
78imx_dma_setup_sg(int channel, struct scatterlist *sg,
79 unsigned int sgcount, unsigned int dma_length,
80 unsigned int dev_addr, unsigned int dmamode);
81
82int
83imx_dma_setup_handlers(int channel,
84 void (*irq_handler) (int, void *),
85 void (*err_handler) (int, void *, int), void *data);
86
87int
88imx_dma_setup_progression_handler(int channel,
89 void (*prog_handler) (int, void*, struct scatterlist*));
90
91void imx_dma_enable(int channel);
92
93void imx_dma_disable(int channel);
94
95int imx_dma_request(int channel, const char *name);
96
97void imx_dma_free(int channel);
98
99enum imx_dma_prio {
100 DMA_PRIO_HIGH = 0,
101 DMA_PRIO_MEDIUM = 1,
102 DMA_PRIO_LOW = 2
103};
104
105int imx_dma_request_by_prio(const char *name, enum imx_dma_prio prio);
106
107#endif /* __MACH_DMA_V1_H__ */
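A hedged sketch of the endless-loop mode the comment above describes: the
scatterlist is closed into a ring by an extra link entry whose page_link
points back at the first entry with the chain bit set (mirroring how existing
audio users of this API build their rings), and IMX_DMA_LENGTH_LOOP is passed
as dma_length. FOO_PERIODS, FOO_RX_FIFO_ADDR and the foo_* names are
placeholders, and the channel is assumed to have been requested and configured
as in the earlier sketch.

	#include <linux/scatterlist.h>
	#include <linux/dma-mapping.h>
	#include <mach/dma-v1.h>

	#define FOO_PERIODS		4
	#define FOO_RX_FIFO_ADDR	0x10012000	/* placeholder FIFO address */

	/* FOO_PERIODS data chunks plus one entry that closes the ring */
	static struct scatterlist foo_sg[FOO_PERIODS + 1];

	static void foo_progress(int channel, void *data, struct scatterlist *sg)
	{
		/* called per completed chunk; sg is the chunk just finished */
	}

	static int foo_start_cyclic(int channel, dma_addr_t buf, unsigned int period)
	{
		int i;

		sg_init_table(foo_sg, FOO_PERIODS + 1);
		for (i = 0; i < FOO_PERIODS; i++) {
			foo_sg[i].dma_address = buf + i * period;
			foo_sg[i].length = period;
		}
		/* chain the last entry back to the first (chain bit 0x01 set,
		 * end bit 0x02 cleared) so the transfer loops forever */
		foo_sg[FOO_PERIODS].offset = 0;
		foo_sg[FOO_PERIODS].length = 0;
		foo_sg[FOO_PERIODS].page_link =
			((unsigned long)foo_sg | 0x01) & ~0x02;

		imx_dma_setup_progression_handler(channel, foo_progress);
		imx_dma_setup_sg(channel, foo_sg, FOO_PERIODS + 1,
				IMX_DMA_LENGTH_LOOP, FOO_RX_FIFO_ADDR,
				DMA_MODE_READ);
		imx_dma_enable(channel);

		return 0;
	}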